mirror of
https://github.com/kaspanet/kaspad.git
synced 2026-02-24 04:13:16 +00:00
Compare commits
12 Commits
v0.2.0-dev
...
v0.3.1-dev
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c88869778d | ||
|
|
3fd647b291 | ||
|
|
2f255952b7 | ||
|
|
956b6f7d95 | ||
|
|
c1a039de3f | ||
|
|
f8b18e09d6 | ||
|
|
b20a7a679b | ||
|
|
36d866375e | ||
|
|
024edc30a3 | ||
|
|
6aa5e0b5a8 | ||
|
|
1a38550fdd | ||
|
|
3e7ebb5a84 |
@@ -6,7 +6,7 @@ package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -16,7 +16,17 @@ func (dag *BlockDAG) addNodeToIndexWithInvalidAncestor(block *util.Block) error
|
||||
newNode, _ := dag.newBlockNode(blockHeader, newBlockSet())
|
||||
newNode.status = statusInvalidAncestor
|
||||
dag.index.AddNode(newNode)
|
||||
return dag.index.flushToDB()
|
||||
|
||||
dbTx, err := dbaccess.NewTx()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer dbTx.RollbackUnlessClosed()
|
||||
err = dag.index.flushToDB(dbTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return dbTx.Commit()
|
||||
}
|
||||
|
||||
// maybeAcceptBlock potentially accepts a block into the block DAG. It
|
||||
@@ -62,13 +72,26 @@ func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) er
|
||||
// expensive connection logic. It also has some other nice properties
|
||||
// such as making blocks that never become part of the DAG or
|
||||
// blocks that fail to connect available for further analysis.
|
||||
err = dag.db.Update(func(dbTx database.Tx) error {
|
||||
err := dbStoreBlock(dbTx, block)
|
||||
dbTx, err := dbaccess.NewTx()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer dbTx.RollbackUnlessClosed()
|
||||
blockExists, err := dbaccess.HasBlock(dbTx, block.Hash())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !blockExists {
|
||||
err := storeBlock(dbTx, block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return dag.index.flushToDBWithTx(dbTx)
|
||||
})
|
||||
}
|
||||
err = dag.index.flushToDB(dbTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dbTx.Commit()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
|
||||
func TestMaybeAcceptBlockErrors(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("TestMaybeAcceptBlockErrors", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestMaybeAcceptBlockErrors", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
// TestBlockHeap tests pushing, popping, and determining the length of the heap.
|
||||
func TestBlockHeap(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("TestBlockHeap", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestBlockHeap", true, Config{
|
||||
DAGParams: &dagconfig.MainnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -1,136 +0,0 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
// idByHashIndexBucketName is the name of the db bucket used to house
|
||||
// the block hash -> block id index.
|
||||
idByHashIndexBucketName = []byte("idbyhashidx")
|
||||
|
||||
// hashByIDIndexBucketName is the name of the db bucket used to house
|
||||
// the block id -> block hash index.
|
||||
hashByIDIndexBucketName = []byte("hashbyididx")
|
||||
|
||||
currentBlockIDKey = []byte("currentblockid")
|
||||
)
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// This is a mapping between block hashes and unique IDs. The ID
|
||||
// is simply a sequentially incremented uint64 that is used instead of block hash
|
||||
// for the indexers. This is useful because it is only 8 bytes versus 32 bytes
|
||||
// hashes and thus saves a ton of space when a block is referenced in an index.
|
||||
// It consists of three buckets: the first bucket maps the hash of each
|
||||
// block to the unique ID and the second maps that ID back to the block hash.
|
||||
// The third bucket contains the last received block ID, and is used
|
||||
// when starting the node to check that the enabled indexes are up to date
|
||||
// with the latest received block, and if not, initiate recovery process.
|
||||
//
|
||||
// The serialized format for keys and values in the block hash to ID bucket is:
|
||||
// <hash> = <ID>
|
||||
//
|
||||
// Field Type Size
|
||||
// hash daghash.Hash 32 bytes
|
||||
// ID uint64 8 bytes
|
||||
// -----
|
||||
// Total: 40 bytes
|
||||
//
|
||||
// The serialized format for keys and values in the ID to block hash bucket is:
|
||||
// <ID> = <hash>
|
||||
//
|
||||
// Field Type Size
|
||||
// ID uint64 8 bytes
|
||||
// hash daghash.Hash 32 bytes
|
||||
// -----
|
||||
// Total: 40 bytes
|
||||
//
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
const blockIDSize = 8 // 8 bytes for block ID
|
||||
|
||||
// DBFetchBlockIDByHash uses an existing database transaction to retrieve the
|
||||
// block id for the provided hash from the index.
|
||||
func DBFetchBlockIDByHash(dbTx database.Tx, hash *daghash.Hash) (uint64, error) {
|
||||
hashIndex := dbTx.Metadata().Bucket(idByHashIndexBucketName)
|
||||
serializedID := hashIndex.Get(hash[:])
|
||||
if serializedID == nil {
|
||||
return 0, errors.Errorf("no entry in the block ID index for block with hash %s", hash)
|
||||
}
|
||||
|
||||
return DeserializeBlockID(serializedID), nil
|
||||
}
|
||||
|
||||
// DBFetchBlockHashBySerializedID uses an existing database transaction to
|
||||
// retrieve the hash for the provided serialized block id from the index.
|
||||
func DBFetchBlockHashBySerializedID(dbTx database.Tx, serializedID []byte) (*daghash.Hash, error) {
|
||||
idIndex := dbTx.Metadata().Bucket(hashByIDIndexBucketName)
|
||||
hashBytes := idIndex.Get(serializedID)
|
||||
if hashBytes == nil {
|
||||
return nil, errors.Errorf("no entry in the block ID index for block with id %d", byteOrder.Uint64(serializedID))
|
||||
}
|
||||
|
||||
var hash daghash.Hash
|
||||
copy(hash[:], hashBytes)
|
||||
return &hash, nil
|
||||
}
|
||||
|
||||
// dbPutBlockIDIndexEntry uses an existing database transaction to update or add
|
||||
// the index entries for the hash to id and id to hash mappings for the provided
|
||||
// values.
|
||||
func dbPutBlockIDIndexEntry(dbTx database.Tx, hash *daghash.Hash, serializedID []byte) error {
|
||||
// Add the block hash to ID mapping to the index.
|
||||
meta := dbTx.Metadata()
|
||||
hashIndex := meta.Bucket(idByHashIndexBucketName)
|
||||
if err := hashIndex.Put(hash[:], serializedID[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Add the block ID to hash mapping to the index.
|
||||
idIndex := meta.Bucket(hashByIDIndexBucketName)
|
||||
return idIndex.Put(serializedID[:], hash[:])
|
||||
}
|
||||
|
||||
// DBFetchCurrentBlockID returns the last known block ID.
|
||||
func DBFetchCurrentBlockID(dbTx database.Tx) uint64 {
|
||||
serializedID := dbTx.Metadata().Get(currentBlockIDKey)
|
||||
if serializedID == nil {
|
||||
return 0
|
||||
}
|
||||
return DeserializeBlockID(serializedID)
|
||||
}
|
||||
|
||||
// DeserializeBlockID returns a deserialized block id
|
||||
func DeserializeBlockID(serializedID []byte) uint64 {
|
||||
return byteOrder.Uint64(serializedID)
|
||||
}
|
||||
|
||||
// SerializeBlockID returns a serialized block id
|
||||
func SerializeBlockID(blockID uint64) []byte {
|
||||
serializedBlockID := make([]byte, blockIDSize)
|
||||
byteOrder.PutUint64(serializedBlockID, blockID)
|
||||
return serializedBlockID
|
||||
}
|
||||
|
||||
// DBFetchBlockHashByID uses an existing database transaction to retrieve the
|
||||
// hash for the provided block id from the index.
|
||||
func DBFetchBlockHashByID(dbTx database.Tx, id uint64) (*daghash.Hash, error) {
|
||||
return DBFetchBlockHashBySerializedID(dbTx, SerializeBlockID(id))
|
||||
}
|
||||
|
||||
func createBlockID(dbTx database.Tx, blockHash *daghash.Hash) (uint64, error) {
|
||||
currentBlockID := DBFetchCurrentBlockID(dbTx)
|
||||
newBlockID := currentBlockID + 1
|
||||
serializedNewBlockID := SerializeBlockID(newBlockID)
|
||||
err := dbTx.Metadata().Put(currentBlockIDKey, serializedNewBlockID)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
err = dbPutBlockIDIndexEntry(dbTx, blockHash, serializedNewBlockID)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return newBlockID, nil
|
||||
}
|
||||
@@ -5,10 +5,10 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"sync"
|
||||
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
)
|
||||
|
||||
@@ -18,7 +18,6 @@ type blockIndex struct {
|
||||
// The following fields are set when the instance is created and can't
|
||||
// be changed afterwards, so there is no need to protect them with a
|
||||
// separate mutex.
|
||||
db database.DB
|
||||
dagParams *dagconfig.Params
|
||||
|
||||
sync.RWMutex
|
||||
@@ -29,9 +28,8 @@ type blockIndex struct {
|
||||
// newBlockIndex returns a new empty instance of a block index. The index will
|
||||
// be dynamically populated as block nodes are loaded from the database and
|
||||
// manually added.
|
||||
func newBlockIndex(db database.DB, dagParams *dagconfig.Params) *blockIndex {
|
||||
func newBlockIndex(dagParams *dagconfig.Params) *blockIndex {
|
||||
return &blockIndex{
|
||||
db: db,
|
||||
dagParams: dagParams,
|
||||
index: make(map[daghash.Hash]*blockNode),
|
||||
dirty: make(map[*blockNode]struct{}),
|
||||
@@ -111,17 +109,8 @@ func (bi *blockIndex) UnsetStatusFlags(node *blockNode, flags blockStatus) {
|
||||
bi.dirty[node] = struct{}{}
|
||||
}
|
||||
|
||||
// flushToDB writes all dirty block nodes to the database. If all writes
|
||||
// succeed, this clears the dirty set.
|
||||
func (bi *blockIndex) flushToDB() error {
|
||||
return bi.db.Update(func(dbTx database.Tx) error {
|
||||
return bi.flushToDBWithTx(dbTx)
|
||||
})
|
||||
}
|
||||
|
||||
// flushToDBWithTx writes all dirty block nodes to the database. If all
|
||||
// writes succeed, this clears the dirty set.
|
||||
func (bi *blockIndex) flushToDBWithTx(dbTx database.Tx) error {
|
||||
// flushToDB writes all dirty block nodes to the database.
|
||||
func (bi *blockIndex) flushToDB(dbContext *dbaccess.TxContext) error {
|
||||
bi.Lock()
|
||||
defer bi.Unlock()
|
||||
if len(bi.dirty) == 0 {
|
||||
@@ -129,7 +118,12 @@ func (bi *blockIndex) flushToDBWithTx(dbTx database.Tx) error {
|
||||
}
|
||||
|
||||
for node := range bi.dirty {
|
||||
err := dbStoreBlockNode(dbTx, node)
|
||||
serializedBlockNode, err := serializeBlockNode(node)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key := blockIndexKey(node.hash, node.blueScore)
|
||||
err = dbaccess.StoreIndexBlock(dbContext, key, serializedBlockNode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
func TestAncestorErrors(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
params := dagconfig.SimnetParams
|
||||
dag, teardownFunc, err := DAGSetup("TestAncestorErrors", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestAncestorErrors", true, Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
// This test is to ensure the size BlueAnticoneSizesSize is serialized to the size of KType.
|
||||
// We verify that by serializing and deserializing the block while making sure that we stay within the expected range.
|
||||
func TestBlueAnticoneSizesSize(t *testing.T) {
|
||||
dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizesSize", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizesSize", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
func TestBlueBlockWindow(t *testing.T) {
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 1
|
||||
dag, teardownFunc, err := DAGSetup("TestBlueBlockWindow", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestBlueBlockWindow", true, Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -4,12 +4,12 @@ import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/kaspanet/kaspad/util/subnetworkid"
|
||||
"github.com/pkg/errors"
|
||||
"io"
|
||||
"math"
|
||||
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/util/txsort"
|
||||
@@ -73,55 +73,24 @@ func (cfr *compactFeeIterator) next() (uint64, error) {
|
||||
}
|
||||
|
||||
// The following functions relate to storing and retrieving fee data from the database
|
||||
var feeBucket = []byte("fees")
|
||||
|
||||
// getBluesFeeData returns the compactFeeData for all nodes's blues,
|
||||
// used to calculate the fees this blockNode needs to pay
|
||||
func (node *blockNode) getBluesFeeData(dag *BlockDAG) (map[daghash.Hash]compactFeeData, error) {
|
||||
bluesFeeData := make(map[daghash.Hash]compactFeeData)
|
||||
|
||||
err := dag.db.View(func(dbTx database.Tx) error {
|
||||
for _, blueBlock := range node.blues {
|
||||
feeData, err := dbFetchFeeData(dbTx, blueBlock.hash)
|
||||
if err != nil {
|
||||
return errors.Errorf("Error getting fee data for block %s: %s", blueBlock.hash, err)
|
||||
}
|
||||
|
||||
bluesFeeData[*blueBlock.hash] = feeData
|
||||
for _, blueBlock := range node.blues {
|
||||
feeData, err := dbaccess.FetchFeeData(dbaccess.NoTx(), blueBlock.hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
bluesFeeData[*blueBlock.hash] = feeData
|
||||
}
|
||||
|
||||
return bluesFeeData, nil
|
||||
}
|
||||
|
||||
func dbStoreFeeData(dbTx database.Tx, blockHash *daghash.Hash, feeData compactFeeData) error {
|
||||
feeBucket, err := dbTx.Metadata().CreateBucketIfNotExists(feeBucket)
|
||||
if err != nil {
|
||||
return errors.Errorf("Error creating or retrieving fee bucket: %s", err)
|
||||
}
|
||||
|
||||
return feeBucket.Put(blockHash.CloneBytes(), feeData)
|
||||
}
|
||||
|
||||
func dbFetchFeeData(dbTx database.Tx, blockHash *daghash.Hash) (compactFeeData, error) {
|
||||
feeBucket := dbTx.Metadata().Bucket(feeBucket)
|
||||
if feeBucket == nil {
|
||||
return nil, errors.New("Fee bucket does not exist")
|
||||
}
|
||||
|
||||
feeData := feeBucket.Get(blockHash.CloneBytes())
|
||||
if feeData == nil {
|
||||
return nil, errors.Errorf("No fee data found for block %s", blockHash)
|
||||
}
|
||||
|
||||
return feeData, nil
|
||||
}
|
||||
|
||||
// The following functions deal with building and validating the coinbase transaction
|
||||
|
||||
func (node *blockNode) validateCoinbaseTransaction(dag *BlockDAG, block *util.Block, txsAcceptanceData MultiBlockTxsAcceptanceData) error {
|
||||
|
||||
@@ -17,7 +17,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
_ "github.com/kaspanet/kaspad/database/ffldb"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
@@ -95,7 +94,7 @@ func (dag *BlockDAG) TestSetCoinbaseMaturity(maturity uint64) {
|
||||
// it is not usable with all functions and the tests must take care when making
|
||||
// use of it.
|
||||
func newTestDAG(params *dagconfig.Params) *BlockDAG {
|
||||
index := newBlockIndex(nil, params)
|
||||
index := newBlockIndex(params)
|
||||
targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second)
|
||||
dag := &BlockDAG{
|
||||
dagParams: params,
|
||||
|
||||
251
blockdag/dag.go
251
blockdag/dag.go
@@ -6,6 +6,7 @@ package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"math"
|
||||
"sort"
|
||||
"sync"
|
||||
@@ -17,7 +18,6 @@ import (
|
||||
|
||||
"github.com/kaspanet/go-secp256k1"
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/txscript"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
@@ -60,7 +60,6 @@ type BlockDAG struct {
|
||||
// The following fields are set when the instance is created and can't
|
||||
// be changed afterwards, so there is no need to protect them with a
|
||||
// separate mutex.
|
||||
db database.DB
|
||||
dagParams *dagconfig.Params
|
||||
timeSource TimeSource
|
||||
sigCache *txscript.SigCache
|
||||
@@ -151,7 +150,6 @@ type BlockDAG struct {
|
||||
|
||||
lastFinalityPoint *blockNode
|
||||
|
||||
SubnetworkStore *SubnetworkStore
|
||||
utxoDiffStore *utxoDiffStore
|
||||
reachabilityStore *reachabilityStore
|
||||
multisetStore *multisetStore
|
||||
@@ -488,7 +486,17 @@ func (dag *BlockDAG) addBlock(node *blockNode,
|
||||
if err != nil {
|
||||
if errors.As(err, &RuleError{}) {
|
||||
dag.index.SetStatusFlags(node, statusValidateFailed)
|
||||
err := dag.index.flushToDB()
|
||||
|
||||
dbTx, err := dbaccess.NewTx()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer dbTx.RollbackUnlessClosed()
|
||||
err = dag.index.flushToDB(dbTx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = dbTx.Commit()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -577,7 +585,7 @@ func (dag *BlockDAG) connectBlock(node *blockNode,
|
||||
}
|
||||
|
||||
// Apply all changes to the DAG.
|
||||
virtualUTXODiff, virtualTxsAcceptanceData, chainUpdates, err := dag.applyDAGChanges(node, newBlockUTXO, newBlockMultiSet, selectedParentAnticone)
|
||||
virtualUTXODiff, chainUpdates, err := dag.applyDAGChanges(node, newBlockUTXO, newBlockMultiSet, selectedParentAnticone)
|
||||
if err != nil {
|
||||
// Since all validation logic has already ran, if applyDAGChanges errors out,
|
||||
// this means we have a problem in the internal structure of the DAG - a problem which is
|
||||
@@ -586,7 +594,7 @@ func (dag *BlockDAG) connectBlock(node *blockNode,
|
||||
panic(err)
|
||||
}
|
||||
|
||||
err = dag.saveChangesFromBlock(block, virtualUTXODiff, txsAcceptanceData, virtualTxsAcceptanceData, newBlockFeeData)
|
||||
err = dag.saveChangesFromBlock(block, virtualUTXODiff, txsAcceptanceData, newBlockFeeData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -715,81 +723,86 @@ func addTxToMultiset(ms *secp256k1.MultiSet, tx *wire.MsgTx, pastUTXO UTXOSet, b
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) saveChangesFromBlock(block *util.Block, virtualUTXODiff *UTXODiff,
|
||||
txsAcceptanceData MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData MultiBlockTxsAcceptanceData,
|
||||
feeData compactFeeData) error {
|
||||
txsAcceptanceData MultiBlockTxsAcceptanceData, feeData compactFeeData) error {
|
||||
|
||||
// Atomically insert info into the database.
|
||||
err := dag.db.Update(func(dbTx database.Tx) error {
|
||||
err := dag.index.flushToDBWithTx(dbTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dag.utxoDiffStore.flushToDB(dbTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dag.reachabilityStore.flushToDB(dbTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dag.multisetStore.flushToDB(dbTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update best block state.
|
||||
state := &dagState{
|
||||
TipHashes: dag.TipHashes(),
|
||||
LastFinalityPoint: dag.lastFinalityPoint.hash,
|
||||
}
|
||||
err = dbPutDAGState(dbTx, state)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update the UTXO set using the diffSet that was melded into the
|
||||
// full UTXO set.
|
||||
err = dbPutUTXODiff(dbTx, virtualUTXODiff)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Scan all accepted transactions and register any subnetwork registry
|
||||
// transaction. If any subnetwork registry transaction is not well-formed,
|
||||
// fail the entire block.
|
||||
err = registerSubnetworks(dbTx, block.Transactions())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
blockID, err := createBlockID(dbTx, block.Hash())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Allow the index manager to call each of the currently active
|
||||
// optional indexes with the block being connected so they can
|
||||
// update themselves accordingly.
|
||||
if dag.indexManager != nil {
|
||||
err := dag.indexManager.ConnectBlock(dbTx, block, blockID, dag, txsAcceptanceData, virtualTxsAcceptanceData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Apply the fee data into the database
|
||||
return dbStoreFeeData(dbTx, block.Hash(), feeData)
|
||||
})
|
||||
dbTx, err := dbaccess.NewTx()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer dbTx.RollbackUnlessClosed()
|
||||
|
||||
err = dag.index.flushToDB(dbTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dag.utxoDiffStore.flushToDB(dbTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dag.reachabilityStore.flushToDB(dbTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dag.multisetStore.flushToDB(dbTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update DAG state.
|
||||
state := &dagState{
|
||||
TipHashes: dag.TipHashes(),
|
||||
LastFinalityPoint: dag.lastFinalityPoint.hash,
|
||||
LocalSubnetworkID: dag.subnetworkID,
|
||||
}
|
||||
err = saveDAGState(dbTx, state)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update the UTXO set using the diffSet that was melded into the
|
||||
// full UTXO set.
|
||||
err = updateUTXOSet(dbTx, virtualUTXODiff)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Scan all accepted transactions and register any subnetwork registry
|
||||
// transaction. If any subnetwork registry transaction is not well-formed,
|
||||
// fail the entire block.
|
||||
err = registerSubnetworks(dbTx, block.Transactions())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Allow the index manager to call each of the currently active
|
||||
// optional indexes with the block being connected so they can
|
||||
// update themselves accordingly.
|
||||
if dag.indexManager != nil {
|
||||
err := dag.indexManager.ConnectBlock(dbTx, block.Hash(), txsAcceptanceData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Apply the fee data into the database
|
||||
err = dbaccess.StoreFeeData(dbTx, block.Hash(), feeData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dbTx.Commit()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dag.index.clearDirtyEntries()
|
||||
dag.utxoDiffStore.clearDirtyEntries()
|
||||
dag.reachabilityStore.clearDirtyEntries()
|
||||
dag.multisetStore.clearNewEntries()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -813,7 +826,7 @@ func (dag *BlockDAG) validateGasLimit(block *util.Block) error {
|
||||
if !msgTx.SubnetworkID.IsEqual(currentSubnetworkID) {
|
||||
currentSubnetworkID = &msgTx.SubnetworkID
|
||||
currentGasUsage = 0
|
||||
currentSubnetworkGasLimit, err = dag.SubnetworkStore.GasLimit(currentSubnetworkID)
|
||||
currentSubnetworkGasLimit, err = GasLimit(currentSubnetworkID)
|
||||
if err != nil {
|
||||
return errors.Errorf("Error getting gas limit for subnetworkID '%s': %s", currentSubnetworkID, err)
|
||||
}
|
||||
@@ -911,9 +924,7 @@ func (dag *BlockDAG) finalizeNodesBelowFinalityPoint(deleteDiffData bool) {
|
||||
}
|
||||
}
|
||||
if deleteDiffData {
|
||||
err := dag.db.Update(func(dbTx database.Tx) error {
|
||||
return dag.utxoDiffStore.removeBlocksDiffData(dbTx, blockHashesToDelete)
|
||||
})
|
||||
err := dag.utxoDiffStore.removeBlocksDiffData(dbaccess.NoTx(), blockHashesToDelete)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Error removing diff data from utxoDiffStore: %s", err))
|
||||
}
|
||||
@@ -996,34 +1007,33 @@ func (dag *BlockDAG) TxsAcceptedByBlockHash(blockHash *daghash.Hash) (MultiBlock
|
||||
//
|
||||
// This function MUST be called with the DAG state lock held (for writes).
|
||||
func (dag *BlockDAG) applyDAGChanges(node *blockNode, newBlockUTXO UTXOSet, newBlockMultiset *secp256k1.MultiSet, selectedParentAnticone []*blockNode) (
|
||||
virtualUTXODiff *UTXODiff, virtualTxsAcceptanceData MultiBlockTxsAcceptanceData,
|
||||
chainUpdates *chainUpdates, err error) {
|
||||
virtualUTXODiff *UTXODiff, chainUpdates *chainUpdates, err error) {
|
||||
|
||||
// Add the block to the reachability structures
|
||||
err = dag.updateReachability(node, selectedParentAnticone)
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.Wrap(err, "failed updating reachability")
|
||||
return nil, nil, errors.Wrap(err, "failed updating reachability")
|
||||
}
|
||||
|
||||
dag.multisetStore.setMultiset(node, newBlockMultiset)
|
||||
|
||||
if err = node.updateParents(dag, newBlockUTXO); err != nil {
|
||||
return nil, nil, nil, errors.Wrapf(err, "failed updating parents of %s", node)
|
||||
return nil, nil, errors.Wrapf(err, "failed updating parents of %s", node)
|
||||
}
|
||||
|
||||
// Update the virtual block's parents (the DAG tips) to include the new block.
|
||||
chainUpdates = dag.virtual.AddTip(node)
|
||||
|
||||
// Build a UTXO set for the new virtual block
|
||||
newVirtualUTXO, _, virtualTxsAcceptanceData, err := dag.pastUTXO(&dag.virtual.blockNode)
|
||||
newVirtualUTXO, _, _, err := dag.pastUTXO(&dag.virtual.blockNode)
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.Wrap(err, "could not restore past UTXO for virtual")
|
||||
return nil, nil, errors.Wrap(err, "could not restore past UTXO for virtual")
|
||||
}
|
||||
|
||||
// Apply new utxoDiffs to all the tips
|
||||
err = updateTipsUTXO(dag, newVirtualUTXO)
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.Wrap(err, "failed updating the tips' UTXO")
|
||||
return nil, nil, errors.Wrap(err, "failed updating the tips' UTXO")
|
||||
}
|
||||
|
||||
// It is now safe to meld the UTXO set to base.
|
||||
@@ -1031,7 +1041,7 @@ func (dag *BlockDAG) applyDAGChanges(node *blockNode, newBlockUTXO UTXOSet, newB
|
||||
virtualUTXODiff = diffSet.UTXODiff
|
||||
err = dag.meldVirtualUTXO(diffSet)
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.Wrap(err, "failed melding the virtual UTXO")
|
||||
return nil, nil, errors.Wrap(err, "failed melding the virtual UTXO")
|
||||
}
|
||||
|
||||
dag.index.SetStatusFlags(node, statusValid)
|
||||
@@ -1039,7 +1049,7 @@ func (dag *BlockDAG) applyDAGChanges(node *blockNode, newBlockUTXO UTXOSet, newB
|
||||
// And now we can update the finality point of the DAG (if required)
|
||||
dag.updateFinalityPoint()
|
||||
|
||||
return virtualUTXODiff, virtualTxsAcceptanceData, chainUpdates, nil
|
||||
return virtualUTXODiff, chainUpdates, nil
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) meldVirtualUTXO(newVirtualUTXODiffSet *DiffUTXOSet) error {
|
||||
@@ -1151,21 +1161,17 @@ func genesisPastUTXO(virtual *virtualBlock) UTXOSet {
|
||||
return genesisPastUTXO
|
||||
}
|
||||
|
||||
func (node *blockNode) fetchBlueBlocks(db database.DB) ([]*util.Block, error) {
|
||||
func (node *blockNode) fetchBlueBlocks() ([]*util.Block, error) {
|
||||
blueBlocks := make([]*util.Block, len(node.blues))
|
||||
err := db.View(func(dbTx database.Tx) error {
|
||||
for i, blueBlockNode := range node.blues {
|
||||
blueBlock, err := dbFetchBlockByNode(dbTx, blueBlockNode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
blueBlocks[i] = blueBlock
|
||||
for i, blueBlockNode := range node.blues {
|
||||
blueBlock, err := fetchBlockByHash(dbaccess.NoTx(), blueBlockNode.hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
return blueBlocks, err
|
||||
blueBlocks[i] = blueBlock
|
||||
}
|
||||
return blueBlocks, nil
|
||||
}
|
||||
|
||||
// applyBlueBlocks adds all transactions in the blue blocks to the selectedParent's UTXO set
|
||||
@@ -1272,7 +1278,7 @@ func (dag *BlockDAG) pastUTXO(node *blockNode) (
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
blueBlocks, err := node.fetchBlueBlocks(dag.db)
|
||||
blueBlocks, err := node.fetchBlueBlocks()
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
@@ -1935,6 +1941,21 @@ func (dag *BlockDAG) SubnetworkID() *subnetworkid.SubnetworkID {
|
||||
return dag.subnetworkID
|
||||
}
|
||||
|
||||
// ForEachHash runs the given fn on every hash that's currently known to
|
||||
// the DAG.
|
||||
//
|
||||
// This function is NOT safe for concurrent access. It is meant to be
|
||||
// used either on initialization or when the dag lock is held for reads.
|
||||
func (dag *BlockDAG) ForEachHash(fn func(hash daghash.Hash) error) error {
|
||||
for hash := range dag.index.index {
|
||||
err := fn(hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) addDelayedBlock(block *util.Block, delay time.Duration) error {
|
||||
processTime := dag.Now().Add(delay)
|
||||
log.Debugf("Adding block to delayed blocks queue (block hash: %s, process time: %s)", block.Hash().String(), processTime)
|
||||
@@ -1988,25 +2009,16 @@ func (dag *BlockDAG) peekDelayedBlock() *delayedBlock {
|
||||
// connected to the DAG for the purpose of supporting optional indexes.
|
||||
type IndexManager interface {
|
||||
// Init is invoked during DAG initialize in order to allow the index
|
||||
// manager to initialize itself and any indexes it is managing. The
|
||||
// channel parameter specifies a channel the caller can close to signal
|
||||
// that the process should be interrupted. It can be nil if that
|
||||
// behavior is not desired.
|
||||
Init(database.DB, *BlockDAG, <-chan struct{}) error
|
||||
// manager to initialize itself and any indexes it is managing.
|
||||
Init(*BlockDAG) error
|
||||
|
||||
// ConnectBlock is invoked when a new block has been connected to the
|
||||
// DAG.
|
||||
ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *BlockDAG, acceptedTxsData MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData MultiBlockTxsAcceptanceData) error
|
||||
ConnectBlock(dbContext *dbaccess.TxContext, blockHash *daghash.Hash, acceptedTxsData MultiBlockTxsAcceptanceData) error
|
||||
}
|
||||
|
||||
// Config is a descriptor which specifies the blockDAG instance configuration.
|
||||
type Config struct {
|
||||
// DB defines the database which houses the blocks and will be used to
|
||||
// store all metadata created by this package such as the utxo set.
|
||||
//
|
||||
// This field is required.
|
||||
DB database.DB
|
||||
|
||||
// Interrupt specifies a channel the caller can close to signal that
|
||||
// long running operations, such as catching up indexes or performing
|
||||
// database migrations, should be interrupted.
|
||||
@@ -2050,9 +2062,6 @@ type Config struct {
|
||||
// New returns a BlockDAG instance using the provided configuration details.
|
||||
func New(config *Config) (*BlockDAG, error) {
|
||||
// Enforce required config fields.
|
||||
if config.DB == nil {
|
||||
return nil, AssertError("BlockDAG.New database is nil")
|
||||
}
|
||||
if config.DAGParams == nil {
|
||||
return nil, AssertError("BlockDAG.New DAG parameters nil")
|
||||
}
|
||||
@@ -2063,9 +2072,8 @@ func New(config *Config) (*BlockDAG, error) {
|
||||
params := config.DAGParams
|
||||
targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second)
|
||||
|
||||
index := newBlockIndex(config.DB, params)
|
||||
index := newBlockIndex(params)
|
||||
dag := &BlockDAG{
|
||||
db: config.DB,
|
||||
dagParams: params,
|
||||
timeSource: config.TimeSource,
|
||||
sigCache: config.SigCache,
|
||||
@@ -2082,7 +2090,6 @@ func New(config *Config) (*BlockDAG, error) {
|
||||
warningCaches: newThresholdCaches(vbNumBits),
|
||||
deploymentCaches: newThresholdCaches(dagconfig.DefinedDeployments),
|
||||
blockCount: 0,
|
||||
SubnetworkStore: newSubnetworkStore(config.DB),
|
||||
subnetworkID: config.SubnetworkID,
|
||||
}
|
||||
|
||||
@@ -2098,19 +2105,11 @@ func New(config *Config) (*BlockDAG, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
err := dag.removeDAGState()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Couldn't remove the DAG State: %s", err))
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Initialize and catch up all of the currently active optional indexes
|
||||
// as needed.
|
||||
if config.IndexManager != nil {
|
||||
err = config.IndexManager.Init(dag.db, dag, config.Interrupt)
|
||||
err = config.IndexManager.Init(dag)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/pkg/errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -13,7 +14,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/txscript"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
@@ -40,7 +40,7 @@ func TestBlockCount(t *testing.T) {
|
||||
}
|
||||
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("TestBlockCount", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestBlockCount", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -93,7 +93,7 @@ func TestIsKnownBlock(t *testing.T) {
|
||||
}
|
||||
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("haveblock", Config{
|
||||
dag, teardownFunc, err := DAGSetup("haveblock", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -550,17 +550,16 @@ func TestNew(t *testing.T) {
|
||||
|
||||
dbPath := filepath.Join(tempDir, "TestNew")
|
||||
_ = os.RemoveAll(dbPath)
|
||||
db, err := database.Create(testDbType, dbPath, blockDataNet)
|
||||
err := dbaccess.Open(dbPath)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating db: %s", err)
|
||||
}
|
||||
defer func() {
|
||||
db.Close()
|
||||
dbaccess.Close()
|
||||
os.RemoveAll(dbPath)
|
||||
}()
|
||||
config := &Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
DB: db,
|
||||
TimeSource: NewTimeSource(),
|
||||
SigCache: txscript.NewSigCache(1000),
|
||||
}
|
||||
@@ -590,19 +589,18 @@ func TestAcceptingInInit(t *testing.T) {
|
||||
// Create a test database
|
||||
dbPath := filepath.Join(tempDir, "TestAcceptingInInit")
|
||||
_ = os.RemoveAll(dbPath)
|
||||
db, err := database.Create(testDbType, dbPath, blockDataNet)
|
||||
err := dbaccess.Open(dbPath)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating db: %s", err)
|
||||
}
|
||||
defer func() {
|
||||
db.Close()
|
||||
dbaccess.Close()
|
||||
os.RemoveAll(dbPath)
|
||||
}()
|
||||
|
||||
// Create a DAG to add the test block into
|
||||
config := &Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
DB: db,
|
||||
TimeSource: NewTimeSource(),
|
||||
SigCache: txscript.NewSigCache(1000),
|
||||
}
|
||||
@@ -625,16 +623,30 @@ func TestAcceptingInInit(t *testing.T) {
|
||||
testNode.status = statusDataStored
|
||||
|
||||
// Manually add the test block to the database
|
||||
err = db.Update(func(dbTx database.Tx) error {
|
||||
err := dbStoreBlock(dbTx, testBlock)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return dbStoreBlockNode(dbTx, testNode)
|
||||
})
|
||||
dbTx, err := dbaccess.NewTx()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to open database "+
|
||||
"transaction: %s", err)
|
||||
}
|
||||
defer dbTx.RollbackUnlessClosed()
|
||||
err = storeBlock(dbTx, testBlock)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to store block: %s", err)
|
||||
}
|
||||
dbTestNode, err := serializeBlockNode(testNode)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to serialize blockNode: %s", err)
|
||||
}
|
||||
key := blockIndexKey(testNode.hash, testNode.blueScore)
|
||||
err = dbaccess.StoreIndexBlock(dbTx, key, dbTestNode)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to update block index: %s", err)
|
||||
}
|
||||
err = dbTx.Commit()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to commit database "+
|
||||
"transaction: %s", err)
|
||||
}
|
||||
|
||||
// Create a new DAG. We expect this DAG to process the
|
||||
// test node
|
||||
@@ -654,7 +666,7 @@ func TestConfirmations(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 1
|
||||
dag, teardownFunc, err := DAGSetup("TestConfirmations", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestConfirmations", true, Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -757,7 +769,7 @@ func TestAcceptingBlock(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 3
|
||||
dag, teardownFunc, err := DAGSetup("TestAcceptingBlock", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestAcceptingBlock", true, Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -887,7 +899,7 @@ func TestFinalizeNodesBelowFinalityPoint(t *testing.T) {
|
||||
func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 1
|
||||
dag, teardownFunc, err := DAGSetup("testFinalizeNodesBelowFinalityPoint", Config{
|
||||
dag, teardownFunc, err := DAGSetup("testFinalizeNodesBelowFinalityPoint", true, Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -899,13 +911,20 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
|
||||
blockTime := dag.genesis.Header().Timestamp
|
||||
|
||||
flushUTXODiffStore := func() {
|
||||
err := dag.db.Update(func(dbTx database.Tx) error {
|
||||
return dag.utxoDiffStore.flushToDB(dbTx)
|
||||
})
|
||||
dbTx, err := dbaccess.NewTx()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to open database transaction: %s", err)
|
||||
}
|
||||
defer dbTx.RollbackUnlessClosed()
|
||||
err = dag.utxoDiffStore.flushToDB(dbTx)
|
||||
if err != nil {
|
||||
t.Fatalf("Error flushing utxoDiffStore data to DB: %s", err)
|
||||
}
|
||||
dag.utxoDiffStore.clearDirtyEntries()
|
||||
err = dbTx.Commit()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to commit database transaction: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
addNode := func(parent *blockNode) *blockNode {
|
||||
@@ -946,12 +965,22 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
|
||||
} else if !deleteDiffData && !ok {
|
||||
t.Errorf("The diff data of node with blue score %d shouldn't have been unloaded if deleteDiffData is %T", node.blueScore, deleteDiffData)
|
||||
}
|
||||
if diffData, err := dag.utxoDiffStore.diffDataFromDB(node.hash); err != nil {
|
||||
|
||||
_, err := dag.utxoDiffStore.diffDataFromDB(node.hash)
|
||||
exists := !dbaccess.IsNotFoundError(err)
|
||||
if exists && err != nil {
|
||||
t.Errorf("diffDataFromDB: %s", err)
|
||||
} else if deleteDiffData && diffData != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if deleteDiffData && exists {
|
||||
t.Errorf("The diff data of node with blue score %d should have been deleted from the database if deleteDiffData is %T", node.blueScore, deleteDiffData)
|
||||
} else if !deleteDiffData && diffData == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if !deleteDiffData && !exists {
|
||||
t.Errorf("The diff data of node with blue score %d shouldn't have been deleted from the database if deleteDiffData is %T", node.blueScore, deleteDiffData)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
@@ -972,7 +1001,7 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
|
||||
|
||||
func TestDAGIndexFailedStatus(t *testing.T) {
|
||||
params := dagconfig.SimnetParams
|
||||
dag, teardownFunc, err := DAGSetup("TestDAGIndexFailedStatus", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestDAGIndexFailedStatus", true, Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -9,68 +9,22 @@ import (
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/util/buffers"
|
||||
"github.com/pkg/errors"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/binaryserializer"
|
||||
"github.com/kaspanet/kaspad/util/buffers"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/util/subnetworkid"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
)
|
||||
|
||||
const (
|
||||
// blockHdrSize is the size of a block header. This is simply the
|
||||
// constant from wire and is only provided here for convenience since
|
||||
// wire.MaxBlockHeaderPayload is quite long.
|
||||
blockHdrSize = wire.MaxBlockHeaderPayload
|
||||
|
||||
// latestUTXOSetBucketVersion is the current version of the UTXO set
|
||||
// bucket that is used to track all unspent outputs.
|
||||
latestUTXOSetBucketVersion = 1
|
||||
)
|
||||
|
||||
var (
|
||||
// blockIndexBucketName is the name of the database bucket used to house the
|
||||
// block headers and contextual information.
|
||||
blockIndexBucketName = []byte("blockheaderidx")
|
||||
|
||||
// dagStateKeyName is the name of the db key used to store the DAG
|
||||
// tip hashes.
|
||||
dagStateKeyName = []byte("dagstate")
|
||||
|
||||
// utxoSetVersionKeyName is the name of the db key used to store the
|
||||
// version of the utxo set currently in the database.
|
||||
utxoSetVersionKeyName = []byte("utxosetversion")
|
||||
|
||||
// utxoSetBucketName is the name of the database bucket used to house the
|
||||
// unspent transaction output set.
|
||||
utxoSetBucketName = []byte("utxoset")
|
||||
|
||||
// utxoDiffsBucketName is the name of the database bucket used to house the
|
||||
// diffs and diff children of blocks.
|
||||
utxoDiffsBucketName = []byte("utxodiffs")
|
||||
|
||||
// reachabilityDataBucketName is the name of the database bucket used to house the
|
||||
// reachability tree nodes and future covering sets of blocks.
|
||||
reachabilityDataBucketName = []byte("reachability")
|
||||
|
||||
// multisetBucketName is the name of the database bucket used to house the
|
||||
// ECMH multisets of blocks.
|
||||
multisetBucketName = []byte("multiset")
|
||||
|
||||
// subnetworksBucketName is the name of the database bucket used to store the
|
||||
// subnetwork registry.
|
||||
subnetworksBucketName = []byte("subnetworks")
|
||||
|
||||
// localSubnetworkKeyName is the name of the db key used to store the
|
||||
// node's local subnetwork ID.
|
||||
localSubnetworkKeyName = []byte("localsubnetworkidkey")
|
||||
|
||||
// byteOrder is the preferred byte order used for serializing numeric
|
||||
// fields for storage in the database.
|
||||
byteOrder = binary.LittleEndian
|
||||
@@ -92,15 +46,6 @@ func isNotInDAGErr(err error) bool {
|
||||
return errors.As(err, ¬InDAGErr)
|
||||
}
|
||||
|
||||
// dbPutVersion uses an existing database transaction to update the provided
|
||||
// key in the metadata bucket to the given version. It is primarily used to
|
||||
// track versions on entities such as buckets.
|
||||
func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error {
|
||||
var serialized [4]byte
|
||||
byteOrder.PutUint32(serialized[:], version)
|
||||
return dbTx.Metadata().Put(key, serialized[:])
|
||||
}
|
||||
|
||||
// outpointKeyPool defines a concurrent safe free list of byte buffers used to
|
||||
// provide temporary buffers for outpoint database keys.
|
||||
var outpointKeyPool = sync.Pool{
|
||||
@@ -143,13 +88,10 @@ func deserializeOutpoint(r io.Reader) (*wire.Outpoint, error) {
|
||||
return outpoint, nil
|
||||
}
|
||||
|
||||
// dbPutUTXODiff uses an existing database transaction to update the UTXO set
|
||||
// in the database based on the provided UTXO view contents and state. In
|
||||
// particular, only the entries that have been marked as modified are written
|
||||
// to the database.
|
||||
func dbPutUTXODiff(dbTx database.Tx, diff *UTXODiff) error {
|
||||
utxoBucket := dbTx.Metadata().Bucket(utxoSetBucketName)
|
||||
for outpoint := range diff.toRemove {
|
||||
// updateUTXOSet updates the UTXO set in the database based on the provided
|
||||
// UTXO diff.
|
||||
func updateUTXOSet(dbContext dbaccess.Context, virtualUTXODiff *UTXODiff) error {
|
||||
for outpoint := range virtualUTXODiff.toRemove {
|
||||
w := outpointKeyPool.Get().(*bytes.Buffer)
|
||||
w.Reset()
|
||||
err := serializeOutpoint(w, &outpoint)
|
||||
@@ -158,7 +100,7 @@ func dbPutUTXODiff(dbTx database.Tx, diff *UTXODiff) error {
|
||||
}
|
||||
|
||||
key := w.Bytes()
|
||||
err = utxoBucket.Delete(key)
|
||||
err = dbaccess.RemoveFromUTXOSet(dbContext, key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -167,9 +109,9 @@ func dbPutUTXODiff(dbTx database.Tx, diff *UTXODiff) error {
|
||||
|
||||
// We are preallocating for P2PKH entries because they are the most common ones.
|
||||
// If we have entries with a compressed script bigger than P2PKH's, the buffer will grow.
|
||||
bytesToPreallocate := (p2pkhUTXOEntrySerializeSize + outpointSerializeSize) * len(diff.toAdd)
|
||||
bytesToPreallocate := (p2pkhUTXOEntrySerializeSize + outpointSerializeSize) * len(virtualUTXODiff.toAdd)
|
||||
buff := bytes.NewBuffer(make([]byte, bytesToPreallocate))
|
||||
for outpoint, entry := range diff.toAdd {
|
||||
for outpoint, entry := range virtualUTXODiff.toAdd {
|
||||
// Serialize and store the UTXO entry.
|
||||
sBuff := buffers.NewSubBuffer(buff)
|
||||
err := serializeUTXOEntry(sBuff, entry)
|
||||
@@ -185,11 +127,7 @@ func dbPutUTXODiff(dbTx database.Tx, diff *UTXODiff) error {
|
||||
}
|
||||
|
||||
key := sBuff.Bytes()
|
||||
err = utxoBucket.Put(key, serializedEntry)
|
||||
// NOTE: The key is intentionally not recycled here since the
|
||||
// database interface contract prohibits modifications. It will
|
||||
// be garbage collected normally when the database is done with
|
||||
// it.
|
||||
err = dbaccess.AddToUTXOSet(dbContext, key, serializedEntry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -201,6 +139,7 @@ func dbPutUTXODiff(dbTx database.Tx, diff *UTXODiff) error {
|
||||
type dagState struct {
|
||||
TipHashes []*daghash.Hash
|
||||
LastFinalityPoint *daghash.Hash
|
||||
LocalSubnetworkID *subnetworkid.SubnetworkID
|
||||
}
|
||||
|
||||
// serializeDAGState returns the serialization of the DAG state.
|
||||
@@ -216,366 +155,220 @@ func deserializeDAGState(serializedData []byte) (*dagState, error) {
|
||||
var state *dagState
|
||||
err := json.Unmarshal(serializedData, &state)
|
||||
if err != nil {
|
||||
return nil, database.Error{
|
||||
ErrorCode: database.ErrCorruption,
|
||||
Description: "corrupt DAG state",
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return state, nil
|
||||
}
|
||||
|
||||
// dbPutDAGState uses an existing database transaction to store the latest
|
||||
// saveDAGState uses an existing database context to store the latest
|
||||
// tip hashes of the DAG.
|
||||
func dbPutDAGState(dbTx database.Tx, state *dagState) error {
|
||||
serializedData, err := serializeDAGState(state)
|
||||
|
||||
func saveDAGState(dbContext dbaccess.Context, state *dagState) error {
|
||||
serializedDAGState, err := serializeDAGState(state)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return dbTx.Metadata().Put(dagStateKeyName, serializedData)
|
||||
return dbaccess.StoreDAGState(dbContext, serializedDAGState)
|
||||
}
|
||||
|
||||
// createDAGState initializes both the database and the DAG state to the
|
||||
// genesis block. This includes creating the necessary buckets, so it
|
||||
// must only be called on an uninitialized database.
|
||||
func (dag *BlockDAG) createDAGState() error {
|
||||
// Create the initial the database DAG state including creating the
|
||||
// necessary index buckets and inserting the genesis block.
|
||||
err := dag.db.Update(func(dbTx database.Tx) error {
|
||||
meta := dbTx.Metadata()
|
||||
|
||||
// Create the bucket that houses the block index data.
|
||||
_, err := meta.CreateBucket(blockIndexBucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create the buckets that house the utxo set, the utxo diffs, and their
|
||||
// version.
|
||||
_, err = meta.CreateBucket(utxoSetBucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = meta.CreateBucket(utxoDiffsBucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = meta.CreateBucket(reachabilityDataBucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = meta.CreateBucket(multisetBucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dbPutVersion(dbTx, utxoSetVersionKeyName,
|
||||
latestUTXOSetBucketVersion)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create the bucket that houses the registered subnetworks.
|
||||
_, err = meta.CreateBucket(subnetworksBucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := dbPutLocalSubnetworkID(dbTx, dag.subnetworkID); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := meta.CreateBucketIfNotExists(idByHashIndexBucketName); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := meta.CreateBucketIfNotExists(hashByIDIndexBucketName); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
// createDAGState initializes the DAG state to the
|
||||
// genesis block and the node's local subnetwork id.
|
||||
func (dag *BlockDAG) createDAGState(localSubnetworkID *subnetworkid.SubnetworkID) error {
|
||||
return saveDAGState(dbaccess.NoTx(), &dagState{
|
||||
TipHashes: []*daghash.Hash{dag.dagParams.GenesisHash},
|
||||
LastFinalityPoint: dag.dagParams.GenesisHash,
|
||||
LocalSubnetworkID: localSubnetworkID,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) removeDAGState() error {
|
||||
err := dag.db.Update(func(dbTx database.Tx) error {
|
||||
meta := dbTx.Metadata()
|
||||
|
||||
err := meta.DeleteBucket(blockIndexBucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = meta.DeleteBucket(utxoSetBucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = meta.DeleteBucket(utxoDiffsBucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = meta.DeleteBucket(reachabilityDataBucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = meta.DeleteBucket(multisetBucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dbTx.Metadata().Delete(utxoSetVersionKeyName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = meta.DeleteBucket(subnetworksBucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dbTx.Metadata().Delete(localSubnetworkKeyName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func dbPutLocalSubnetworkID(dbTx database.Tx, subnetworkID *subnetworkid.SubnetworkID) error {
|
||||
if subnetworkID == nil {
|
||||
return dbTx.Metadata().Put(localSubnetworkKeyName, []byte{})
|
||||
}
|
||||
return dbTx.Metadata().Put(localSubnetworkKeyName, subnetworkID[:])
|
||||
}
|
||||
|
||||
// initDAGState attempts to load and initialize the DAG state from the
|
||||
// database. When the db does not yet contain any DAG state, both it and the
|
||||
// DAG state are initialized to the genesis block.
|
||||
func (dag *BlockDAG) initDAGState() error {
|
||||
// Determine the state of the DAG database. We may need to initialize
|
||||
// everything from scratch or upgrade certain buckets.
|
||||
var initialized bool
|
||||
err := dag.db.View(func(dbTx database.Tx) error {
|
||||
initialized = dbTx.Metadata().Get(dagStateKeyName) != nil
|
||||
if initialized {
|
||||
var localSubnetworkID *subnetworkid.SubnetworkID
|
||||
localSubnetworkIDBytes := dbTx.Metadata().Get(localSubnetworkKeyName)
|
||||
if len(localSubnetworkIDBytes) != 0 {
|
||||
localSubnetworkID = &subnetworkid.SubnetworkID{}
|
||||
localSubnetworkID.SetBytes(localSubnetworkIDBytes)
|
||||
}
|
||||
if !localSubnetworkID.IsEqual(dag.subnetworkID) {
|
||||
return errors.Errorf("Cannot start kaspad with subnetwork ID %s because"+
|
||||
" its database is already built with subnetwork ID %s. If you"+
|
||||
" want to switch to a new database, please reset the"+
|
||||
" database by starting kaspad with --reset-db flag", dag.subnetworkID, localSubnetworkID)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
// Fetch the stored DAG state from the database. If it doesn't exist,
|
||||
// it means that kaspad is running for the first time.
|
||||
serializedDAGState, err := dbaccess.FetchDAGState(dbaccess.NoTx())
|
||||
if dbaccess.IsNotFoundError(err) {
|
||||
// Initialize the database and the DAG state to the genesis block.
|
||||
return dag.createDAGState(dag.subnetworkID)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !initialized {
|
||||
// At this point the database has not already been initialized, so
|
||||
// initialize both it and the DAG state to the genesis block.
|
||||
return dag.createDAGState()
|
||||
dagState, err := deserializeDAGState(serializedDAGState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !dagState.LocalSubnetworkID.IsEqual(dag.subnetworkID) {
|
||||
return errors.Errorf("Cannot start kaspad with subnetwork ID %s because"+
|
||||
" its database is already built with subnetwork ID %s. If you"+
|
||||
" want to switch to a new database, please reset the"+
|
||||
" database by starting kaspad with --reset-db flag", dag.subnetworkID, dagState.LocalSubnetworkID)
|
||||
}
|
||||
|
||||
// Attempt to load the DAG state from the database.
|
||||
return dag.db.View(func(dbTx database.Tx) error {
|
||||
// Fetch the stored DAG tipHashes from the database metadata.
|
||||
// When it doesn't exist, it means the database hasn't been
|
||||
// initialized for use with the DAG yet, so break out now to allow
|
||||
// that to happen under a writable database transaction.
|
||||
serializedData := dbTx.Metadata().Get(dagStateKeyName)
|
||||
log.Tracef("Serialized DAG tip hashes: %x", serializedData)
|
||||
state, err := deserializeDAGState(serializedData)
|
||||
log.Debugf("Loading block index...")
|
||||
var unprocessedBlockNodes []*blockNode
|
||||
blockIndexCursor, err := dbaccess.BlockIndexCursor(dbaccess.NoTx())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer blockIndexCursor.Close()
|
||||
for blockIndexCursor.Next() {
|
||||
serializedDBNode, err := blockIndexCursor.Value()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
node, err := dag.deserializeBlockNode(serializedDBNode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Load all of the headers from the data for the known DAG
|
||||
// and construct the block index accordingly. Since the
|
||||
// number of nodes are already known, perform a single alloc
|
||||
// for them versus a whole bunch of little ones to reduce
|
||||
// pressure on the GC.
|
||||
log.Infof("Loading block index...")
|
||||
// Check to see if this node had been stored in the the block DB
|
||||
// but not yet accepted. If so, add it to a slice to be processed later.
|
||||
if node.status == statusDataStored {
|
||||
unprocessedBlockNodes = append(unprocessedBlockNodes, node)
|
||||
continue
|
||||
}
|
||||
|
||||
blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
|
||||
|
||||
var unprocessedBlockNodes []*blockNode
|
||||
cursor := blockIndexBucket.Cursor()
|
||||
for ok := cursor.First(); ok; ok = cursor.Next() {
|
||||
node, err := dag.deserializeBlockNode(cursor.Value())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check to see if this node had been stored in the the block DB
|
||||
// but not yet accepted. If so, add it to a slice to be processed later.
|
||||
if node.status == statusDataStored {
|
||||
unprocessedBlockNodes = append(unprocessedBlockNodes, node)
|
||||
continue
|
||||
}
|
||||
|
||||
// If the node is known to be invalid add it as-is to the block
|
||||
// index and continue.
|
||||
if node.status.KnownInvalid() {
|
||||
dag.index.addNode(node)
|
||||
continue
|
||||
}
|
||||
|
||||
if dag.blockCount == 0 {
|
||||
if !node.hash.IsEqual(dag.dagParams.GenesisHash) {
|
||||
return AssertError(fmt.Sprintf("initDAGState: Expected "+
|
||||
"first entry in block index to be genesis block, "+
|
||||
"found %s", node.hash))
|
||||
}
|
||||
} else {
|
||||
if len(node.parents) == 0 {
|
||||
return AssertError(fmt.Sprintf("initDAGState: Could "+
|
||||
"not find any parent for block %s", node.hash))
|
||||
}
|
||||
}
|
||||
|
||||
// Add the node to its parents children, connect it,
|
||||
// and add it to the block index.
|
||||
node.updateParentsChildren()
|
||||
// If the node is known to be invalid add it as-is to the block
|
||||
// index and continue.
|
||||
if node.status.KnownInvalid() {
|
||||
dag.index.addNode(node)
|
||||
|
||||
dag.blockCount++
|
||||
continue
|
||||
}
|
||||
|
||||
// Load all of the known UTXO entries and construct the full
|
||||
// UTXO set accordingly. Since the number of entries is already
|
||||
// known, perform a single alloc for them versus a whole bunch
|
||||
// of little ones to reduce pressure on the GC.
|
||||
log.Infof("Loading UTXO set...")
|
||||
|
||||
utxoEntryBucket := dbTx.Metadata().Bucket(utxoSetBucketName)
|
||||
|
||||
// Determine how many UTXO entries will be loaded into the index so we can
|
||||
// allocate the right amount.
|
||||
var utxoEntryCount int32
|
||||
cursor = utxoEntryBucket.Cursor()
|
||||
for ok := cursor.First(); ok; ok = cursor.Next() {
|
||||
utxoEntryCount++
|
||||
}
|
||||
|
||||
fullUTXOCollection := make(utxoCollection, utxoEntryCount)
|
||||
for ok := cursor.First(); ok; ok = cursor.Next() {
|
||||
// Deserialize the outpoint
|
||||
outpoint, err := deserializeOutpoint(bytes.NewReader(cursor.Key()))
|
||||
if err != nil {
|
||||
return err
|
||||
if dag.blockCount == 0 {
|
||||
if !node.hash.IsEqual(dag.dagParams.GenesisHash) {
|
||||
return AssertError(fmt.Sprintf("initDAGState: Expected "+
|
||||
"first entry in block index to be genesis block, "+
|
||||
"found %s", node.hash))
|
||||
}
|
||||
|
||||
// Deserialize the utxo entry
|
||||
entry, err := deserializeUTXOEntry(bytes.NewReader(cursor.Value()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fullUTXOCollection[*outpoint] = entry
|
||||
}
|
||||
|
||||
// Initialize the reachability store
|
||||
log.Infof("Loading reachability data...")
|
||||
err = dag.reachabilityStore.init(dbTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Initialize the multiset store
|
||||
log.Infof("Loading multiset data...")
|
||||
err = dag.multisetStore.init(dbTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Apply the loaded utxoCollection to the virtual block.
|
||||
dag.virtual.utxoSet, err = newFullUTXOSetFromUTXOCollection(fullUTXOCollection)
|
||||
if err != nil {
|
||||
return AssertError(fmt.Sprintf("Error loading UTXOSet: %s", err))
|
||||
}
|
||||
|
||||
// Apply the stored tips to the virtual block.
|
||||
tips := newBlockSet()
|
||||
for _, tipHash := range state.TipHashes {
|
||||
tip := dag.index.LookupNode(tipHash)
|
||||
if tip == nil {
|
||||
return AssertError(fmt.Sprintf("initDAGState: cannot find "+
|
||||
"DAG tip %s in block index", state.TipHashes))
|
||||
}
|
||||
tips.add(tip)
|
||||
}
|
||||
dag.virtual.SetTips(tips)
|
||||
|
||||
// Set the last finality point
|
||||
dag.lastFinalityPoint = dag.index.LookupNode(state.LastFinalityPoint)
|
||||
dag.finalizeNodesBelowFinalityPoint(false)
|
||||
|
||||
// Go over any unprocessed blockNodes and process them now.
|
||||
for _, node := range unprocessedBlockNodes {
|
||||
// Check to see if the block exists in the block DB. If it
|
||||
// doesn't, the database has certainly been corrupted.
|
||||
blockExists, err := dbTx.HasBlock(node.hash)
|
||||
if err != nil {
|
||||
return AssertError(fmt.Sprintf("initDAGState: HasBlock "+
|
||||
"for block %s failed: %s", node.hash, err))
|
||||
}
|
||||
if !blockExists {
|
||||
} else {
|
||||
if len(node.parents) == 0 {
|
||||
return AssertError(fmt.Sprintf("initDAGState: block %s "+
|
||||
"exists in block index but not in block db", node.hash))
|
||||
}
|
||||
|
||||
// Attempt to accept the block.
|
||||
block, err := dbFetchBlockByNode(dbTx, node)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
isOrphan, isDelayed, err := dag.ProcessBlock(block, BFWasStored)
|
||||
if err != nil {
|
||||
log.Warnf("Block %s, which was not previously processed, "+
|
||||
"failed to be accepted to the DAG: %s", node.hash, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// If the block is an orphan or is delayed then it couldn't have
|
||||
// possibly been written to the block index in the first place.
|
||||
if isOrphan {
|
||||
return AssertError(fmt.Sprintf("Block %s, which was not "+
|
||||
"previously processed, turned out to be an orphan, which is "+
|
||||
"impossible.", node.hash))
|
||||
}
|
||||
if isDelayed {
|
||||
return AssertError(fmt.Sprintf("Block %s, which was not "+
|
||||
"previously processed, turned out to be delayed, which is "+
|
||||
"impossible.", node.hash))
|
||||
"has no parents but it's not the genesis block", node.hash))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
// Add the node to its parents children, connect it,
|
||||
// and add it to the block index.
|
||||
node.updateParentsChildren()
|
||||
dag.index.addNode(node)
|
||||
|
||||
dag.blockCount++
|
||||
}
|
||||
|
||||
log.Debugf("Loading UTXO set...")
|
||||
fullUTXOCollection := make(utxoCollection)
|
||||
cursor, err := dbaccess.UTXOSetCursor(dbaccess.NoTx())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cursor.Close()
|
||||
|
||||
for cursor.Next() {
|
||||
// Deserialize the outpoint
|
||||
key, err := cursor.Key()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
outpoint, err := deserializeOutpoint(bytes.NewReader(key))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Deserialize the utxo entry
|
||||
value, err := cursor.Value()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
entry, err := deserializeUTXOEntry(bytes.NewReader(value))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fullUTXOCollection[*outpoint] = entry
|
||||
}
|
||||
|
||||
log.Debugf("Loading reachability data...")
|
||||
err = dag.reachabilityStore.init(dbaccess.NoTx())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debugf("Loading multiset data...")
|
||||
err = dag.multisetStore.init(dbaccess.NoTx())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debugf("Applying the loaded utxoCollection to the virtual block...")
|
||||
dag.virtual.utxoSet, err = newFullUTXOSetFromUTXOCollection(fullUTXOCollection)
|
||||
if err != nil {
|
||||
return AssertError(fmt.Sprintf("Error loading UTXOSet: %s", err))
|
||||
}
|
||||
|
||||
log.Debugf("Applying the stored tips to the virtual block...")
|
||||
tips := newBlockSet()
|
||||
for _, tipHash := range dagState.TipHashes {
|
||||
tip := dag.index.LookupNode(tipHash)
|
||||
if tip == nil {
|
||||
return AssertError(fmt.Sprintf("initDAGState: cannot find "+
|
||||
"DAG tip %s in block index", dagState.TipHashes))
|
||||
}
|
||||
tips.add(tip)
|
||||
}
|
||||
dag.virtual.SetTips(tips)
|
||||
|
||||
log.Debugf("Setting the last finality point...")
|
||||
dag.lastFinalityPoint = dag.index.LookupNode(dagState.LastFinalityPoint)
|
||||
dag.finalizeNodesBelowFinalityPoint(false)
|
||||
|
||||
log.Debugf("Processing unprocessed blockNodes...")
|
||||
for _, node := range unprocessedBlockNodes {
|
||||
// Check to see if the block exists in the block DB. If it
|
||||
// doesn't, the database has certainly been corrupted.
|
||||
blockExists, err := dbaccess.HasBlock(dbaccess.NoTx(), node.hash)
|
||||
if err != nil {
|
||||
return AssertError(fmt.Sprintf("initDAGState: HasBlock "+
|
||||
"for block %s failed: %s", node.hash, err))
|
||||
}
|
||||
if !blockExists {
|
||||
return AssertError(fmt.Sprintf("initDAGState: block %s "+
|
||||
"exists in block index but not in block db", node.hash))
|
||||
}
|
||||
|
||||
// Attempt to accept the block.
|
||||
block, err := fetchBlockByHash(dbaccess.NoTx(), node.hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
isOrphan, isDelayed, err := dag.ProcessBlock(block, BFWasStored)
|
||||
if err != nil {
|
||||
log.Warnf("Block %s, which was not previously processed, "+
|
||||
"failed to be accepted to the DAG: %s", node.hash, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// If the block is an orphan or is delayed then it couldn't have
|
||||
// possibly been written to the block index in the first place.
|
||||
if isOrphan {
|
||||
return AssertError(fmt.Sprintf("Block %s, which was not "+
|
||||
"previously processed, turned out to be an orphan, which is "+
|
||||
"impossible.", node.hash))
|
||||
}
|
||||
if isDelayed {
|
||||
return AssertError(fmt.Sprintf("Block %s, which was not "+
|
||||
"previously processed, turned out to be delayed, which is "+
|
||||
"impossible.", node.hash))
|
||||
}
|
||||
}
|
||||
|
||||
log.Infof("DAG state initialized.")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// deserializeBlockNode parses a value in the block index bucket and returns a block node.
|
||||
@@ -671,26 +464,26 @@ func (dag *BlockDAG) deserializeBlockNode(blockRow []byte) (*blockNode, error) {
|
||||
return node, nil
|
||||
}
|
||||
|
||||
// dbFetchBlockByNode uses an existing database transaction to retrieve the
|
||||
// raw block for the provided node, deserialize it, and return a util.Block
|
||||
// of it.
|
||||
func dbFetchBlockByNode(dbTx database.Tx, node *blockNode) (*util.Block, error) {
|
||||
// Load the raw block bytes from the database.
|
||||
blockBytes, err := dbTx.FetchBlock(node.hash)
|
||||
// fetchBlockByHash retrieves the raw block for the provided hash,
|
||||
// deserializes it, and returns a util.Block of it.
|
||||
func fetchBlockByHash(dbContext dbaccess.Context, hash *daghash.Hash) (*util.Block, error) {
|
||||
blockBytes, err := dbaccess.FetchBlock(dbContext, hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return util.NewBlockFromBytes(blockBytes)
|
||||
}
|
||||
|
||||
// Create the encapsulated block.
|
||||
block, err := util.NewBlockFromBytes(blockBytes)
|
||||
func storeBlock(dbContext *dbaccess.TxContext, block *util.Block) error {
|
||||
blockBytes, err := block.Bytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
return block, nil
|
||||
return dbaccess.StoreBlock(dbContext, block.Hash(), blockBytes)
|
||||
}
|
||||
|
||||
func serializeBlockNode(node *blockNode) ([]byte, error) {
|
||||
w := bytes.NewBuffer(make([]byte, 0, blockHdrSize+1))
|
||||
w := bytes.NewBuffer(make([]byte, 0, wire.MaxBlockHeaderPayload+1))
|
||||
header := node.Header()
|
||||
err := header.Serialize(w)
|
||||
if err != nil {
|
||||
@@ -747,37 +540,11 @@ func serializeBlockNode(node *blockNode) ([]byte, error) {
|
||||
return w.Bytes(), nil
|
||||
}
|
||||
|
||||
// dbStoreBlockNode stores the block node data into the block
|
||||
// index bucket. This overwrites the current entry if there exists one.
|
||||
func dbStoreBlockNode(dbTx database.Tx, node *blockNode) error {
|
||||
serializedNode, err := serializeBlockNode(node)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Write block header data to block index bucket.
|
||||
blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
|
||||
key := BlockIndexKey(node.hash, node.blueScore)
|
||||
return blockIndexBucket.Put(key, serializedNode)
|
||||
}
|
||||
|
||||
// dbStoreBlock stores the provided block in the database if it is not already
|
||||
// there. The full block data is written to ffldb.
|
||||
func dbStoreBlock(dbTx database.Tx, block *util.Block) error {
|
||||
hasBlock, err := dbTx.HasBlock(block.Hash())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if hasBlock {
|
||||
return nil
|
||||
}
|
||||
return dbTx.StoreBlock(block)
|
||||
}
|
||||
|
||||
// BlockIndexKey generates the binary key for an entry in the block index
|
||||
// blockIndexKey generates the binary key for an entry in the block index
|
||||
// bucket. The key is composed of the block blue score encoded as a big-endian
|
||||
// 64-bit unsigned int followed by the 32 byte block hash.
|
||||
// The blue score component is important for iteration order.
|
||||
func BlockIndexKey(blockHash *daghash.Hash, blueScore uint64) []byte {
|
||||
func blockIndexKey(blockHash *daghash.Hash, blueScore uint64) []byte {
|
||||
indexKey := make([]byte, daghash.HashSize+8)
|
||||
binary.BigEndian.PutUint64(indexKey[0:8], blueScore)
|
||||
copy(indexKey[8:daghash.HashSize+8], blockHash[:])
|
||||
@@ -799,13 +566,10 @@ func (dag *BlockDAG) BlockByHash(hash *daghash.Hash) (*util.Block, error) {
|
||||
return nil, errNotInDAG(str)
|
||||
}
|
||||
|
||||
// Load the block from the database and return it.
|
||||
var block *util.Block
|
||||
err := dag.db.View(func(dbTx database.Tx) error {
|
||||
var err error
|
||||
block, err = dbFetchBlockByNode(dbTx, node)
|
||||
return err
|
||||
})
|
||||
block, err := fetchBlockByHash(dbaccess.NoTx(), node.hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return block, err
|
||||
}
|
||||
|
||||
@@ -830,27 +594,27 @@ func (dag *BlockDAG) BlockHashesFrom(lowHash *daghash.Hash, limit int) ([]*dagha
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = dag.index.db.View(func(dbTx database.Tx) error {
|
||||
blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
|
||||
lowKey := BlockIndexKey(lowHash, blueScore)
|
||||
|
||||
cursor := blockIndexBucket.Cursor()
|
||||
cursor.Seek(lowKey)
|
||||
for ok := cursor.Next(); ok; ok = cursor.Next() {
|
||||
key := cursor.Key()
|
||||
blockHash, err := blockHashFromBlockIndexKey(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
blockHashes = append(blockHashes, blockHash)
|
||||
if len(blockHashes) == limit {
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
key := blockIndexKey(lowHash, blueScore)
|
||||
cursor, err := dbaccess.BlockIndexCursorFrom(dbaccess.NoTx(), key)
|
||||
if dbaccess.IsNotFoundError(err) {
|
||||
return nil, errors.Wrapf(err, "block %s not in block index", lowHash)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer cursor.Close()
|
||||
|
||||
for cursor.Next() && len(blockHashes) < limit {
|
||||
key, err := cursor.Key()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blockHash, err := blockHashFromBlockIndexKey(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blockHashes = append(blockHashes, blockHash)
|
||||
}
|
||||
|
||||
return blockHashes, nil
|
||||
}
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
)
|
||||
|
||||
@@ -189,7 +188,7 @@ func TestDAGStateSerialization(t *testing.T) {
|
||||
TipHashes: []*daghash.Hash{newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f")},
|
||||
LastFinalityPoint: newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"),
|
||||
},
|
||||
serialized: []byte("{\"TipHashes\":[[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]}"),
|
||||
serialized: []byte("{\"TipHashes\":[[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0],\"LocalSubnetworkID\":null}"),
|
||||
},
|
||||
{
|
||||
name: "block 1",
|
||||
@@ -197,7 +196,7 @@ func TestDAGStateSerialization(t *testing.T) {
|
||||
TipHashes: []*daghash.Hash{newHashFromStr("00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048")},
|
||||
LastFinalityPoint: newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"),
|
||||
},
|
||||
serialized: []byte("{\"TipHashes\":[[72,96,235,24,191,27,22,32,227,126,148,144,252,138,66,117,20,65,111,215,81,89,171,134,104,142,154,131,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]}"),
|
||||
serialized: []byte("{\"TipHashes\":[[72,96,235,24,191,27,22,32,227,126,148,144,252,138,66,117,20,65,111,215,81,89,171,134,104,142,154,131,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0],\"LocalSubnetworkID\":null}"),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -234,51 +233,6 @@ func TestDAGStateSerialization(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestDAGStateDeserializeErrors performs negative tests against
|
||||
// deserializing the DAG state to ensure error paths work as expected.
|
||||
func TestDAGStateDeserializeErrors(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
serialized []byte
|
||||
errType error
|
||||
}{
|
||||
{
|
||||
name: "nothing serialized",
|
||||
serialized: hexToBytes(""),
|
||||
errType: database.Error{ErrorCode: database.ErrCorruption},
|
||||
},
|
||||
{
|
||||
name: "corrupted data",
|
||||
serialized: []byte("[[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,7"),
|
||||
errType: database.Error{ErrorCode: database.ErrCorruption},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
// Ensure the expected error type and code is returned.
|
||||
_, err := deserializeDAGState(test.serialized)
|
||||
if reflect.TypeOf(err) != reflect.TypeOf(test.errType) {
|
||||
t.Errorf("deserializeDAGState (%s): expected "+
|
||||
"error type does not match - got %T, want %T",
|
||||
test.name, err, test.errType)
|
||||
continue
|
||||
}
|
||||
var dbErr database.Error
|
||||
if ok := errors.As(err, &dbErr); ok {
|
||||
tderr := test.errType.(database.Error)
|
||||
if dbErr.ErrorCode != tderr.ErrorCode {
|
||||
t.Errorf("deserializeDAGState (%s): "+
|
||||
"wrong error code got: %v, want: %v",
|
||||
test.name, dbErr.ErrorCode,
|
||||
tderr.ErrorCode)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// newHashFromStr converts the passed big-endian hex string into a
|
||||
// daghash.Hash. It only differs from the one available in daghash in that
|
||||
// it panics in case of an error since it will only (and must only) be
|
||||
|
||||
@@ -82,7 +82,7 @@ func TestDifficulty(t *testing.T) {
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 1
|
||||
params.DifficultyAdjustmentWindowSize = 264
|
||||
dag, teardownFunc, err := DAGSetup("TestDifficulty", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestDifficulty", true, Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"fmt"
|
||||
"github.com/pkg/errors"
|
||||
"math"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/util/subnetworkid"
|
||||
@@ -40,7 +41,7 @@ func TestFinality(t *testing.T) {
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 1
|
||||
params.FinalityInterval = 100
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestFinality", blockdag.Config{
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestFinality", true, blockdag.Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -185,7 +186,7 @@ func TestSubnetworkRegistry(t *testing.T) {
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 1
|
||||
params.BlockCoinbaseMaturity = 0
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", blockdag.Config{
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", true, blockdag.Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -198,7 +199,7 @@ func TestSubnetworkRegistry(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("could not register network: %s", err)
|
||||
}
|
||||
limit, err := dag.SubnetworkStore.GasLimit(subnetworkID)
|
||||
limit, err := blockdag.GasLimit(subnetworkID)
|
||||
if err != nil {
|
||||
t.Fatalf("could not retrieve gas limit: %s", err)
|
||||
}
|
||||
@@ -211,7 +212,7 @@ func TestChainedTransactions(t *testing.T) {
|
||||
params := dagconfig.SimnetParams
|
||||
params.BlockCoinbaseMaturity = 0
|
||||
// Create a new database and dag instance to run tests against.
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestChainedTransactions", blockdag.Config{
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestChainedTransactions", true, blockdag.Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -339,7 +340,7 @@ func TestOrderInDiffFromAcceptanceData(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = math.MaxUint8
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestOrderInDiffFromAcceptanceData", blockdag.Config{
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestOrderInDiffFromAcceptanceData", true, blockdag.Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -409,7 +410,7 @@ func TestGasLimit(t *testing.T) {
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 1
|
||||
params.BlockCoinbaseMaturity = 0
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", blockdag.Config{
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", true, blockdag.Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -556,7 +557,7 @@ func TestGasLimit(t *testing.T) {
|
||||
isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(nonExistentSubnetworkBlock), blockdag.BFNoPoWCheck)
|
||||
expectedErrStr := fmt.Sprintf("Error getting gas limit for subnetworkID '%s': subnetwork '%s' not found",
|
||||
nonExistentSubnetwork, nonExistentSubnetwork)
|
||||
if err.Error() != expectedErrStr {
|
||||
if strings.Contains(err.Error(), expectedErrStr) {
|
||||
t.Fatalf("ProcessBlock expected error \"%v\" but got \"%v\"", expectedErrStr, err)
|
||||
}
|
||||
if isDelayed {
|
||||
|
||||
@@ -3,7 +3,7 @@ package blockdag
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"reflect"
|
||||
@@ -176,7 +176,7 @@ func TestGHOSTDAG(t *testing.T) {
|
||||
func() {
|
||||
resetExtraNonceForTest()
|
||||
dagParams.K = test.k
|
||||
dag, teardownFunc, err := DAGSetup(fmt.Sprintf("TestGHOSTDAG%d", i), Config{
|
||||
dag, teardownFunc, err := DAGSetup(fmt.Sprintf("TestGHOSTDAG%d", i), true, Config{
|
||||
DAGParams: &dagParams,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -282,7 +282,7 @@ func checkReds(expectedReds []string, reds map[string]bool) bool {
|
||||
|
||||
func TestBlueAnticoneSizeErrors(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizeErrors", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizeErrors", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -323,7 +323,7 @@ func TestBlueAnticoneSizeErrors(t *testing.T) {
|
||||
|
||||
func TestGHOSTDAGErrors(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("TestGHOSTDAGErrors", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestGHOSTDAGErrors", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -340,19 +340,21 @@ func TestGHOSTDAGErrors(t *testing.T) {
|
||||
|
||||
// Clear the reachability store
|
||||
dag.reachabilityStore.loaded = map[daghash.Hash]*reachabilityData{}
|
||||
err = dag.db.Update(func(dbTx database.Tx) error {
|
||||
bucket := dbTx.Metadata().Bucket(reachabilityDataBucketName)
|
||||
cursor := bucket.Cursor()
|
||||
for ok := cursor.First(); ok; ok = cursor.Next() {
|
||||
err := bucket.Delete(cursor.Key())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
dbTx, err := dbaccess.NewTx()
|
||||
if err != nil {
|
||||
t.Fatalf("TestGHOSTDAGErrors: db.Update failed: %s", err)
|
||||
t.Fatalf("NewTx: %s", err)
|
||||
}
|
||||
defer dbTx.RollbackUnlessClosed()
|
||||
|
||||
err = dbaccess.ClearReachabilityData(dbTx)
|
||||
if err != nil {
|
||||
t.Fatalf("ClearReachabilityData: %s", err)
|
||||
}
|
||||
|
||||
err = dbTx.Commit()
|
||||
if err != nil {
|
||||
t.Fatalf("Commit: %s", err)
|
||||
}
|
||||
|
||||
// Try to rerun GHOSTDAG on the last block. GHOSTDAG uses
|
||||
|
||||
@@ -4,29 +4,16 @@ import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
"github.com/kaspanet/kaspad/blockdag"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
// acceptanceIndexName is the human-readable name for the index.
|
||||
acceptanceIndexName = "acceptance index"
|
||||
)
|
||||
|
||||
var (
|
||||
// acceptanceIndexKey is the key of the acceptance index and the db bucket used
|
||||
// to house it.
|
||||
acceptanceIndexKey = []byte("acceptanceidx")
|
||||
)
|
||||
|
||||
// AcceptanceIndex implements a txAcceptanceData by block hash index. That is to say,
|
||||
// it stores a mapping between a block's hash and the set of transactions that the
|
||||
// block accepts among its blue blocks.
|
||||
type AcceptanceIndex struct {
|
||||
db database.DB
|
||||
dag *blockdag.BlockDAG
|
||||
}
|
||||
|
||||
@@ -43,122 +30,82 @@ func NewAcceptanceIndex() *AcceptanceIndex {
|
||||
return &AcceptanceIndex{}
|
||||
}
|
||||
|
||||
// DropAcceptanceIndex drops the acceptance index from the provided database if it
|
||||
// exists.
|
||||
func DropAcceptanceIndex(db database.DB, interrupt <-chan struct{}) error {
|
||||
return dropIndex(db, acceptanceIndexKey, acceptanceIndexName, interrupt)
|
||||
}
|
||||
// DropAcceptanceIndex drops the acceptance index.
|
||||
func DropAcceptanceIndex() error {
|
||||
dbTx, err := dbaccess.NewTx()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer dbTx.RollbackUnlessClosed()
|
||||
|
||||
// Key returns the database key to use for the index as a byte slice.
|
||||
//
|
||||
// This is part of the Indexer interface.
|
||||
func (idx *AcceptanceIndex) Key() []byte {
|
||||
return acceptanceIndexKey
|
||||
}
|
||||
err = dbaccess.DropAcceptanceIndex(dbTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Name returns the human-readable name of the index.
|
||||
//
|
||||
// This is part of the Indexer interface.
|
||||
func (idx *AcceptanceIndex) Name() string {
|
||||
return acceptanceIndexName
|
||||
}
|
||||
|
||||
// Create is invoked when the indexer manager determines the index needs
|
||||
// to be created for the first time. It creates the bucket for the
|
||||
// acceptance index.
|
||||
//
|
||||
// This is part of the Indexer interface.
|
||||
func (idx *AcceptanceIndex) Create(dbTx database.Tx) error {
|
||||
_, err := dbTx.Metadata().CreateBucket(acceptanceIndexKey)
|
||||
return err
|
||||
return dbTx.Commit()
|
||||
}
|
||||
|
||||
// Init initializes the hash-based acceptance index.
|
||||
//
|
||||
// This is part of the Indexer interface.
|
||||
func (idx *AcceptanceIndex) Init(db database.DB, dag *blockdag.BlockDAG) error {
|
||||
idx.db = db
|
||||
func (idx *AcceptanceIndex) Init(dag *blockdag.BlockDAG) error {
|
||||
idx.dag = dag
|
||||
return nil
|
||||
return idx.recover()
|
||||
}
|
||||
|
||||
// recover attempts to insert any data that's missing from the
|
||||
// acceptance index.
|
||||
//
|
||||
// This is part of the Indexer interface.
|
||||
func (idx *AcceptanceIndex) recover() error {
|
||||
dbTx, err := dbaccess.NewTx()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer dbTx.RollbackUnlessClosed()
|
||||
|
||||
err = idx.dag.ForEachHash(func(hash daghash.Hash) error {
|
||||
exists, err := dbaccess.HasAcceptanceData(dbTx, &hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if exists {
|
||||
return nil
|
||||
}
|
||||
txAcceptanceData, err := idx.dag.TxsAcceptedByBlockHash(&hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return idx.ConnectBlock(dbTx, &hash, txAcceptanceData)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return dbTx.Commit()
|
||||
}
|
||||
|
||||
// ConnectBlock is invoked by the index manager when a new block has been
|
||||
// connected to the DAG.
|
||||
//
|
||||
// This is part of the Indexer interface.
|
||||
func (idx *AcceptanceIndex) ConnectBlock(dbTx database.Tx, _ *util.Block, blockID uint64, _ *blockdag.BlockDAG,
|
||||
txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData, _ blockdag.MultiBlockTxsAcceptanceData) error {
|
||||
return dbPutTxsAcceptanceData(dbTx, blockID, txsAcceptanceData)
|
||||
}
|
||||
|
||||
// TxsAcceptanceData returns the acceptance data of all the transactions that
|
||||
// were accepted by the block with hash blockHash.
|
||||
func (idx *AcceptanceIndex) TxsAcceptanceData(blockHash *daghash.Hash) (blockdag.MultiBlockTxsAcceptanceData, error) {
|
||||
var txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData
|
||||
err := idx.db.View(func(dbTx database.Tx) error {
|
||||
var err error
|
||||
txsAcceptanceData, err = dbFetchTxsAcceptanceDataByHash(dbTx, blockHash)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return txsAcceptanceData, nil
|
||||
}
|
||||
|
||||
// Recover is invoked when the indexer wasn't turned on for several blocks
|
||||
// and the indexer needs to close the gaps.
|
||||
//
|
||||
// This is part of the Indexer interface.
|
||||
func (idx *AcceptanceIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
|
||||
for blockID := currentBlockID + 1; blockID <= lastKnownBlockID; blockID++ {
|
||||
hash, err := blockdag.DBFetchBlockHashByID(dbTx, currentBlockID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
txAcceptanceData, err := idx.dag.TxsAcceptedByBlockHash(hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = idx.ConnectBlock(dbTx, nil, blockID, nil, txAcceptanceData, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func dbPutTxsAcceptanceData(dbTx database.Tx, blockID uint64,
|
||||
func (idx *AcceptanceIndex) ConnectBlock(dbContext *dbaccess.TxContext, blockHash *daghash.Hash,
|
||||
txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
|
||||
serializedTxsAcceptanceData, err := serializeMultiBlockTxsAcceptanceData(txsAcceptanceData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bucket := dbTx.Metadata().Bucket(acceptanceIndexKey)
|
||||
return bucket.Put(blockdag.SerializeBlockID(blockID), serializedTxsAcceptanceData)
|
||||
return dbaccess.StoreAcceptanceData(dbContext, blockHash, serializedTxsAcceptanceData)
|
||||
}
|
||||
|
||||
func dbFetchTxsAcceptanceDataByHash(dbTx database.Tx,
|
||||
hash *daghash.Hash) (blockdag.MultiBlockTxsAcceptanceData, error) {
|
||||
|
||||
blockID, err := blockdag.DBFetchBlockIDByHash(dbTx, hash)
|
||||
// TxsAcceptanceData returns the acceptance data of all the transactions that
|
||||
// were accepted by the block with hash blockHash.
|
||||
func (idx *AcceptanceIndex) TxsAcceptanceData(blockHash *daghash.Hash) (blockdag.MultiBlockTxsAcceptanceData, error) {
|
||||
serializedTxsAcceptanceData, err := dbaccess.FetchAcceptanceData(dbaccess.NoTx(), blockHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return dbFetchTxsAcceptanceDataByID(dbTx, blockID)
|
||||
}
|
||||
|
||||
func dbFetchTxsAcceptanceDataByID(dbTx database.Tx,
|
||||
blockID uint64) (blockdag.MultiBlockTxsAcceptanceData, error) {
|
||||
serializedBlockID := blockdag.SerializeBlockID(blockID)
|
||||
bucket := dbTx.Metadata().Bucket(acceptanceIndexKey)
|
||||
serializedTxsAcceptanceData := bucket.Get(serializedBlockID)
|
||||
if serializedTxsAcceptanceData == nil {
|
||||
return nil, errors.Errorf("no entry in the accpetance index for block id %d", blockID)
|
||||
}
|
||||
|
||||
return deserializeMultiBlockTxsAcceptanceData(serializedTxsAcceptanceData)
|
||||
}
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@ package indexers
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/blockdag"
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
@@ -96,7 +96,7 @@ func TestAcceptanceIndexRecover(t *testing.T) {
|
||||
}
|
||||
defer os.RemoveAll(db1Path)
|
||||
|
||||
db1, err := database.Create("ffldb", db1Path, params.Net)
|
||||
err = dbaccess.Open(db1Path)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating db: %s", err)
|
||||
}
|
||||
@@ -104,10 +104,9 @@ func TestAcceptanceIndexRecover(t *testing.T) {
|
||||
db1Config := blockdag.Config{
|
||||
IndexManager: db1IndexManager,
|
||||
DAGParams: params,
|
||||
DB: db1,
|
||||
}
|
||||
|
||||
db1DAG, teardown, err := blockdag.DAGSetup("", db1Config)
|
||||
db1DAG, teardown, err := blockdag.DAGSetup("", false, db1Config)
|
||||
if err != nil {
|
||||
t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
|
||||
}
|
||||
@@ -130,11 +129,6 @@ func TestAcceptanceIndexRecover(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
err = db1.FlushCache()
|
||||
if err != nil {
|
||||
t.Fatalf("Error flushing database to disk: %s", err)
|
||||
}
|
||||
|
||||
db2Path, err := ioutil.TempDir("", "TestAcceptanceIndexRecover2")
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating temporary directory: %s", err)
|
||||
@@ -166,17 +160,20 @@ func TestAcceptanceIndexRecover(t *testing.T) {
|
||||
t.Fatalf("Error fetching acceptance data: %s", err)
|
||||
}
|
||||
|
||||
db2, err := database.Open("ffldb", db2Path, params.Net)
|
||||
err = dbaccess.Close()
|
||||
if err != nil {
|
||||
t.Fatalf("Error opening database: %s", err)
|
||||
t.Fatalf("Error closing the database: %s", err)
|
||||
}
|
||||
err = dbaccess.Open(db2Path)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating db: %s", err)
|
||||
}
|
||||
|
||||
db2Config := blockdag.Config{
|
||||
DAGParams: params,
|
||||
DB: db2,
|
||||
}
|
||||
|
||||
db2DAG, teardown, err := blockdag.DAGSetup("", db2Config)
|
||||
db2DAG, teardown, err := blockdag.DAGSetup("", false, db2Config)
|
||||
if err != nil {
|
||||
t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
|
||||
}
|
||||
@@ -199,10 +196,6 @@ func TestAcceptanceIndexRecover(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
err = db2.FlushCache()
|
||||
if err != nil {
|
||||
t.Fatalf("Error flushing database to disk: %s", err)
|
||||
}
|
||||
db3Path, err := ioutil.TempDir("", "TestAcceptanceIndexRecover3")
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating temporary directory: %s", err)
|
||||
@@ -213,9 +206,13 @@ func TestAcceptanceIndexRecover(t *testing.T) {
|
||||
t.Fatalf("copyDirectory: %s", err)
|
||||
}
|
||||
|
||||
db3, err := database.Open("ffldb", db3Path, params.Net)
|
||||
err = dbaccess.Close()
|
||||
if err != nil {
|
||||
t.Fatalf("Error opening database: %s", err)
|
||||
t.Fatalf("Error closing the database: %s", err)
|
||||
}
|
||||
err = dbaccess.Open(db3Path)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating db: %s", err)
|
||||
}
|
||||
|
||||
db3AcceptanceIndex := NewAcceptanceIndex()
|
||||
@@ -223,10 +220,9 @@ func TestAcceptanceIndexRecover(t *testing.T) {
|
||||
db3Config := blockdag.Config{
|
||||
IndexManager: db3IndexManager,
|
||||
DAGParams: params,
|
||||
DB: db3,
|
||||
}
|
||||
|
||||
_, teardown, err = blockdag.DAGSetup("", db3Config)
|
||||
_, teardown, err = blockdag.DAGSetup("", false, db3Config)
|
||||
if err != nil {
|
||||
t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
|
||||
}
|
||||
|
||||
@@ -1,112 +0,0 @@
|
||||
// Copyright (c) 2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package indexers implements optional block DAG indexes.
|
||||
*/
|
||||
package indexers
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"github.com/kaspanet/kaspad/blockdag"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
// byteOrder is the preferred byte order used for serializing numeric
|
||||
// fields for storage in the database.
|
||||
byteOrder = binary.LittleEndian
|
||||
|
||||
// errInterruptRequested indicates that an operation was cancelled due
|
||||
// to a user-requested interrupt.
|
||||
errInterruptRequested = errors.New("interrupt requested")
|
||||
)
|
||||
|
||||
// NeedsInputser provides a generic interface for an indexer to specify the it
|
||||
// requires the ability to look up inputs for a transaction.
|
||||
type NeedsInputser interface {
|
||||
NeedsInputs() bool
|
||||
}
|
||||
|
||||
// Indexer provides a generic interface for an indexer that is managed by an
|
||||
// index manager such as the Manager type provided by this package.
|
||||
type Indexer interface {
|
||||
// Key returns the key of the index as a byte slice.
|
||||
Key() []byte
|
||||
|
||||
// Name returns the human-readable name of the index.
|
||||
Name() string
|
||||
|
||||
// Create is invoked when the indexer manager determines the index needs
|
||||
// to be created for the first time.
|
||||
Create(dbTx database.Tx) error
|
||||
|
||||
// Init is invoked when the index manager is first initializing the
|
||||
// index. This differs from the Create method in that it is called on
|
||||
// every load, including the case the index was just created.
|
||||
Init(db database.DB, dag *blockdag.BlockDAG) error
|
||||
|
||||
// ConnectBlock is invoked when the index manager is notified that a new
|
||||
// block has been connected to the DAG.
|
||||
ConnectBlock(dbTx database.Tx,
|
||||
block *util.Block,
|
||||
blockID uint64,
|
||||
dag *blockdag.BlockDAG,
|
||||
acceptedTxsData blockdag.MultiBlockTxsAcceptanceData,
|
||||
virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error
|
||||
|
||||
// Recover is invoked when the indexer wasn't turned on for several blocks
|
||||
// and the indexer needs to close the gaps.
|
||||
Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error
|
||||
}
|
||||
|
||||
// AssertError identifies an error that indicates an internal code consistency
|
||||
// issue and should be treated as a critical and unrecoverable error.
|
||||
type AssertError string
|
||||
|
||||
// Error returns the assertion error as a huma-readable string and satisfies
|
||||
// the error interface.
|
||||
func (e AssertError) Error() string {
|
||||
return "assertion failed: " + string(e)
|
||||
}
|
||||
|
||||
// errDeserialize signifies that a problem was encountered when deserializing
|
||||
// data.
|
||||
type errDeserialize string
|
||||
|
||||
// Error implements the error interface.
|
||||
func (e errDeserialize) Error() string {
|
||||
return string(e)
|
||||
}
|
||||
|
||||
// isDeserializeErr returns whether or not the passed error is an errDeserialize
|
||||
// error.
|
||||
func isDeserializeErr(err error) bool {
|
||||
var deserializeErr errDeserialize
|
||||
return errors.As(err, &deserializeErr)
|
||||
}
|
||||
|
||||
// internalBucket is an abstraction over a database bucket. It is used to make
|
||||
// the code easier to test since it allows mock objects in the tests to only
|
||||
// implement these functions instead of everything a database.Bucket supports.
|
||||
type internalBucket interface {
|
||||
Get(key []byte) []byte
|
||||
Put(key []byte, value []byte) error
|
||||
Delete(key []byte) error
|
||||
}
|
||||
|
||||
// interruptRequested returns true when the provided channel has been closed.
|
||||
// This simplifies early shutdown slightly since the caller can just use an if
|
||||
// statement instead of a select.
|
||||
func interruptRequested(interrupted <-chan struct{}) bool {
|
||||
select {
|
||||
case <-interrupted:
|
||||
return true
|
||||
default:
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
28
blockdag/indexers/indexer.go
Normal file
28
blockdag/indexers/indexer.go
Normal file
@@ -0,0 +1,28 @@
|
||||
// Copyright (c) 2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package indexers implements optional block DAG indexes.
|
||||
*/
|
||||
package indexers
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/blockdag"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
)
|
||||
|
||||
// Indexer provides a generic interface for an indexer that is managed by an
|
||||
// index manager such as the Manager type provided by this package.
|
||||
type Indexer interface {
|
||||
// Init is invoked when the index manager is first initializing the
|
||||
// index.
|
||||
Init(dag *blockdag.BlockDAG) error
|
||||
|
||||
// ConnectBlock is invoked when the index manager is notified that a new
|
||||
// block has been connected to the DAG.
|
||||
ConnectBlock(dbContext *dbaccess.TxContext,
|
||||
blockHash *daghash.Hash,
|
||||
acceptedTxsData blockdag.MultiBlockTxsAcceptanceData) error
|
||||
}
|
||||
@@ -6,190 +6,30 @@ package indexers
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/blockdag"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
)
|
||||
|
||||
var (
|
||||
// indexTipsBucketName is the name of the db bucket used to house the
|
||||
// current tip of each index.
|
||||
indexTipsBucketName = []byte("idxtips")
|
||||
|
||||
indexCurrentBlockIDBucketName = []byte("idxcurrentblockid")
|
||||
)
|
||||
|
||||
// Manager defines an index manager that manages multiple optional indexes and
|
||||
// implements the blockdag.IndexManager interface so it can be seamlessly
|
||||
// plugged into normal DAG processing.
|
||||
type Manager struct {
|
||||
db database.DB
|
||||
enabledIndexes []Indexer
|
||||
}
|
||||
|
||||
// Ensure the Manager type implements the blockdag.IndexManager interface.
|
||||
var _ blockdag.IndexManager = (*Manager)(nil)
|
||||
|
||||
// indexDropKey returns the key for an index which indicates it is in the
|
||||
// process of being dropped.
|
||||
func indexDropKey(idxKey []byte) []byte {
|
||||
dropKey := make([]byte, len(idxKey)+1)
|
||||
dropKey[0] = 'd'
|
||||
copy(dropKey[1:], idxKey)
|
||||
return dropKey
|
||||
}
|
||||
|
||||
// maybeFinishDrops determines if each of the enabled indexes are in the middle
|
||||
// of being dropped and finishes dropping them when the are. This is necessary
|
||||
// because dropping and index has to be done in several atomic steps rather than
|
||||
// one big atomic step due to the massive number of entries.
|
||||
func (m *Manager) maybeFinishDrops(interrupt <-chan struct{}) error {
|
||||
indexNeedsDrop := make([]bool, len(m.enabledIndexes))
|
||||
err := m.db.View(func(dbTx database.Tx) error {
|
||||
// None of the indexes needs to be dropped if the index tips
|
||||
// bucket hasn't been created yet.
|
||||
indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
|
||||
if indexesBucket == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Mark the indexer as requiring a drop if one is already in
|
||||
// progress.
|
||||
for i, indexer := range m.enabledIndexes {
|
||||
dropKey := indexDropKey(indexer.Key())
|
||||
if indexesBucket.Get(dropKey) != nil {
|
||||
indexNeedsDrop[i] = true
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if interruptRequested(interrupt) {
|
||||
return errInterruptRequested
|
||||
}
|
||||
|
||||
// Finish dropping any of the enabled indexes that are already in the
|
||||
// middle of being dropped.
|
||||
for i, indexer := range m.enabledIndexes {
|
||||
if !indexNeedsDrop[i] {
|
||||
continue
|
||||
}
|
||||
|
||||
log.Infof("Resuming %s drop", indexer.Name())
|
||||
err := dropIndex(m.db, indexer.Key(), indexer.Name(), interrupt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// maybeCreateIndexes determines if each of the enabled indexes have already
|
||||
// been created and creates them if not.
|
||||
func (m *Manager) maybeCreateIndexes(dbTx database.Tx) error {
|
||||
indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
|
||||
for _, indexer := range m.enabledIndexes {
|
||||
// Nothing to do if the index tip already exists.
|
||||
idxKey := indexer.Key()
|
||||
if indexesBucket.Get(idxKey) != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// The tip for the index does not exist, so create it and
|
||||
// invoke the create callback for the index so it can perform
|
||||
// any one-time initialization it requires.
|
||||
if err := indexer.Create(dbTx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO (Mike): this is temporary solution to prevent node from not starting
|
||||
// because it thinks indexers are not initialized.
|
||||
// Indexers, however, do not work properly, and a general solution to their work operation is required
|
||||
indexesBucket.Put(idxKey, []byte{0})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Init initializes the enabled indexes. This is called during DAG
|
||||
// initialization and primarily consists of catching up all indexes to the
|
||||
// current tips. This is necessary since each index can be disabled
|
||||
// and re-enabled at any time and attempting to catch-up indexes at the same
|
||||
// time new blocks are being downloaded would lead to an overall longer time to
|
||||
// catch up due to the I/O contention.
|
||||
//
|
||||
// Init initializes the enabled indexes.
|
||||
// This is part of the blockdag.IndexManager interface.
|
||||
func (m *Manager) Init(db database.DB, blockDAG *blockdag.BlockDAG, interrupt <-chan struct{}) error {
|
||||
// Nothing to do when no indexes are enabled.
|
||||
if len(m.enabledIndexes) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if interruptRequested(interrupt) {
|
||||
return errInterruptRequested
|
||||
}
|
||||
|
||||
m.db = db
|
||||
|
||||
// Finish and drops that were previously interrupted.
|
||||
if err := m.maybeFinishDrops(interrupt); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create the initial state for the indexes as needed.
|
||||
err := m.db.Update(func(dbTx database.Tx) error {
|
||||
// Create the bucket for the current tips as needed.
|
||||
meta := dbTx.Metadata()
|
||||
_, err := meta.CreateBucketIfNotExists(indexTipsBucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := meta.CreateBucketIfNotExists(indexCurrentBlockIDBucketName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return m.maybeCreateIndexes(dbTx)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Initialize each of the enabled indexes.
|
||||
func (m *Manager) Init(dag *blockdag.BlockDAG) error {
|
||||
for _, indexer := range m.enabledIndexes {
|
||||
if err := indexer.Init(db, blockDAG); err != nil {
|
||||
if err := indexer.Init(dag); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return m.recoverIfNeeded()
|
||||
}
|
||||
|
||||
// recoverIfNeeded checks if the node worked for some time
|
||||
// without one of the current enabled indexes, and if it's
|
||||
// the case, recovers the missing blocks from the index.
|
||||
func (m *Manager) recoverIfNeeded() error {
|
||||
return m.db.Update(func(dbTx database.Tx) error {
|
||||
lastKnownBlockID := blockdag.DBFetchCurrentBlockID(dbTx)
|
||||
for _, indexer := range m.enabledIndexes {
|
||||
serializedCurrentIdxBlockID := dbTx.Metadata().Bucket(indexCurrentBlockIDBucketName).Get(indexer.Key())
|
||||
currentIdxBlockID := uint64(0)
|
||||
if serializedCurrentIdxBlockID != nil {
|
||||
currentIdxBlockID = blockdag.DeserializeBlockID(serializedCurrentIdxBlockID)
|
||||
}
|
||||
if lastKnownBlockID > currentIdxBlockID {
|
||||
err := indexer.Recover(dbTx, currentIdxBlockID, lastKnownBlockID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
// ConnectBlock must be invoked when a block is added to the DAG. It
|
||||
@@ -197,32 +37,13 @@ func (m *Manager) recoverIfNeeded() error {
|
||||
// checks, and invokes each indexer.
|
||||
//
|
||||
// This is part of the blockdag.IndexManager interface.
|
||||
func (m *Manager) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
|
||||
txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
|
||||
func (m *Manager) ConnectBlock(dbContext *dbaccess.TxContext, blockHash *daghash.Hash, txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
|
||||
|
||||
// Call each of the currently active optional indexes with the block
|
||||
// being connected so they can update accordingly.
|
||||
for _, index := range m.enabledIndexes {
|
||||
// Notify the indexer with the connected block so it can index it.
|
||||
if err := index.ConnectBlock(dbTx, block, blockID, dag, txsAcceptanceData, virtualTxsAcceptanceData); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Add the new block ID index entry for the block being connected and
|
||||
// update the current internal block ID accordingly.
|
||||
err := m.updateIndexersWithCurrentBlockID(dbTx, block.Hash(), blockID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Manager) updateIndexersWithCurrentBlockID(dbTx database.Tx, blockHash *daghash.Hash, blockID uint64) error {
|
||||
serializedBlockID := blockdag.SerializeBlockID(blockID)
|
||||
for _, index := range m.enabledIndexes {
|
||||
err := dbTx.Metadata().Bucket(indexCurrentBlockIDBucketName).Put(index.Key(), serializedBlockID)
|
||||
if err != nil {
|
||||
if err := index.ConnectBlock(dbContext, blockHash, txsAcceptanceData); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -238,155 +59,3 @@ func NewManager(enabledIndexes []Indexer) *Manager {
|
||||
enabledIndexes: enabledIndexes,
|
||||
}
|
||||
}
|
||||
|
||||
// dropIndex drops the passed index from the database. Since indexes can be
|
||||
// massive, it deletes the index in multiple database transactions in order to
|
||||
// keep memory usage to reasonable levels. It also marks the drop in progress
|
||||
// so the drop can be resumed if it is stopped before it is done before the
|
||||
// index can be used again.
|
||||
func dropIndex(db database.DB, idxKey []byte, idxName string, interrupt <-chan struct{}) error {
|
||||
// Nothing to do if the index doesn't already exist.
|
||||
var needsDelete bool
|
||||
err := db.View(func(dbTx database.Tx) error {
|
||||
indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
|
||||
if indexesBucket != nil && indexesBucket.Get(idxKey) != nil {
|
||||
needsDelete = true
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !needsDelete {
|
||||
log.Infof("Not dropping %s because it does not exist", idxName)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Mark that the index is in the process of being dropped so that it
|
||||
// can be resumed on the next start if interrupted before the process is
|
||||
// complete.
|
||||
log.Infof("Dropping all %s entries. This might take a while...",
|
||||
idxName)
|
||||
err = db.Update(func(dbTx database.Tx) error {
|
||||
indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
|
||||
return indexesBucket.Put(indexDropKey(idxKey), idxKey)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Since the indexes can be so large, attempting to simply delete
|
||||
// the bucket in a single database transaction would result in massive
|
||||
// memory usage and likely crash many systems due to ulimits. In order
|
||||
// to avoid this, use a cursor to delete a maximum number of entries out
|
||||
// of the bucket at a time. Recurse buckets depth-first to delete any
|
||||
// sub-buckets.
|
||||
const maxDeletions = 2000000
|
||||
var totalDeleted uint64
|
||||
|
||||
// Recurse through all buckets in the index, cataloging each for
|
||||
// later deletion.
|
||||
var subBuckets [][][]byte
|
||||
var subBucketClosure func(database.Tx, []byte, [][]byte) error
|
||||
subBucketClosure = func(dbTx database.Tx,
|
||||
subBucket []byte, tlBucket [][]byte) error {
|
||||
// Get full bucket name and append to subBuckets for later
|
||||
// deletion.
|
||||
var bucketName [][]byte
|
||||
if (tlBucket == nil) || (len(tlBucket) == 0) {
|
||||
bucketName = append(bucketName, subBucket)
|
||||
} else {
|
||||
bucketName = append(tlBucket, subBucket)
|
||||
}
|
||||
subBuckets = append(subBuckets, bucketName)
|
||||
// Recurse sub-buckets to append to subBuckets slice.
|
||||
bucket := dbTx.Metadata()
|
||||
for _, subBucketName := range bucketName {
|
||||
bucket = bucket.Bucket(subBucketName)
|
||||
}
|
||||
return bucket.ForEachBucket(func(k []byte) error {
|
||||
return subBucketClosure(dbTx, k, bucketName)
|
||||
})
|
||||
}
|
||||
|
||||
// Call subBucketClosure with top-level bucket.
|
||||
err = db.View(func(dbTx database.Tx) error {
|
||||
return subBucketClosure(dbTx, idxKey, nil)
|
||||
})
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Iterate through each sub-bucket in reverse, deepest-first, deleting
|
||||
// all keys inside them and then dropping the buckets themselves.
|
||||
for i := range subBuckets {
|
||||
bucketName := subBuckets[len(subBuckets)-1-i]
|
||||
// Delete maxDeletions key/value pairs at a time.
|
||||
for numDeleted := maxDeletions; numDeleted == maxDeletions; {
|
||||
numDeleted = 0
|
||||
err := db.Update(func(dbTx database.Tx) error {
|
||||
subBucket := dbTx.Metadata()
|
||||
for _, subBucketName := range bucketName {
|
||||
subBucket = subBucket.Bucket(subBucketName)
|
||||
}
|
||||
cursor := subBucket.Cursor()
|
||||
for ok := cursor.First(); ok; ok = cursor.Next() &&
|
||||
numDeleted < maxDeletions {
|
||||
|
||||
if err := cursor.Delete(); err != nil {
|
||||
return err
|
||||
}
|
||||
numDeleted++
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if numDeleted > 0 {
|
||||
totalDeleted += uint64(numDeleted)
|
||||
log.Infof("Deleted %d keys (%d total) from %s",
|
||||
numDeleted, totalDeleted, idxName)
|
||||
}
|
||||
}
|
||||
|
||||
if interruptRequested(interrupt) {
|
||||
return errInterruptRequested
|
||||
}
|
||||
|
||||
// Drop the bucket itself.
|
||||
err = db.Update(func(dbTx database.Tx) error {
|
||||
bucket := dbTx.Metadata()
|
||||
for j := 0; j < len(bucketName)-1; j++ {
|
||||
bucket = bucket.Bucket(bucketName[j])
|
||||
}
|
||||
return bucket.DeleteBucket(bucketName[len(bucketName)-1])
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Remove the index tip, index bucket, and in-progress drop flag now
|
||||
// that all index entries have been removed.
|
||||
err = db.Update(func(dbTx database.Tx) error {
|
||||
meta := dbTx.Metadata()
|
||||
indexesBucket := meta.Bucket(indexTipsBucketName)
|
||||
if err := indexesBucket.Delete(idxKey); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := meta.Bucket(indexCurrentBlockIDBucketName).Delete(idxKey); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return indexesBucket.Delete(indexDropKey(idxKey))
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Infof("Dropped %s", idxName)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -3,7 +3,7 @@ package blockdag
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/kaspanet/go-secp256k1"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/util/locks"
|
||||
"github.com/pkg/errors"
|
||||
@@ -51,7 +51,7 @@ func (store *multisetStore) multisetByBlockHash(hash *daghash.Hash) (*secp256k1.
|
||||
}
|
||||
|
||||
// flushToDB writes all new multiset data to the database.
|
||||
func (store *multisetStore) flushToDB(dbTx database.Tx) error {
|
||||
func (store *multisetStore) flushToDB(dbContext *dbaccess.TxContext) error {
|
||||
if len(store.new) == 0 {
|
||||
return nil
|
||||
}
|
||||
@@ -71,7 +71,7 @@ func (store *multisetStore) flushToDB(dbTx database.Tx) error {
|
||||
return err
|
||||
}
|
||||
|
||||
err = store.dbStoreMultiset(dbTx, &hash, w.Bytes())
|
||||
err = store.storeMultiset(dbContext, &hash, w.Bytes())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -83,16 +83,30 @@ func (store *multisetStore) clearNewEntries() {
|
||||
store.new = make(map[daghash.Hash]struct{})
|
||||
}
|
||||
|
||||
func (store *multisetStore) init(dbTx database.Tx) error {
|
||||
bucket := dbTx.Metadata().Bucket(multisetBucketName)
|
||||
cursor := bucket.Cursor()
|
||||
func (store *multisetStore) init(dbContext dbaccess.Context) error {
|
||||
cursor, err := dbaccess.MultisetCursor(dbContext)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cursor.Close()
|
||||
|
||||
for ok := cursor.First(); ok; ok = cursor.Next() {
|
||||
hash, err := daghash.NewHash(cursor.Key())
|
||||
key, err := cursor.Key()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ms, err := deserializeMultiset(bytes.NewReader(cursor.Value()))
|
||||
hash, err := daghash.NewHash(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
serializedMS, err := cursor.Value()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ms, err := deserializeMultiset(bytes.NewReader(serializedMS))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -102,11 +116,16 @@ func (store *multisetStore) init(dbTx database.Tx) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// dbStoreMultiset stores the multiset data to the database.
|
||||
func (store *multisetStore) dbStoreMultiset(dbTx database.Tx, blockHash *daghash.Hash, serializedMS []byte) error {
|
||||
bucket := dbTx.Metadata().Bucket(multisetBucketName)
|
||||
if bucket.Get(blockHash[:]) != nil {
|
||||
// storeMultiset stores the multiset data to the database.
|
||||
func (store *multisetStore) storeMultiset(dbContext dbaccess.Context, blockHash *daghash.Hash, serializedMS []byte) error {
|
||||
exists, err := dbaccess.HasMultiset(dbContext, blockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if exists {
|
||||
return errors.Errorf("Can't override an existing multiset database entry for block %s", blockHash)
|
||||
}
|
||||
return bucket.Put(blockHash[:], serializedMS)
|
||||
|
||||
return dbaccess.StoreMultiset(dbContext, blockHash, serializedMS)
|
||||
}
|
||||
|
||||
@@ -19,7 +19,7 @@ func TestNotifications(t *testing.T) {
|
||||
}
|
||||
|
||||
// Create a new database and dag instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("notifications", Config{
|
||||
dag, teardownFunc, err := DAGSetup("notifications", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
)
|
||||
|
||||
func TestProcessOrphans(t *testing.T) {
|
||||
dag, teardownFunc, err := DAGSetup("TestProcessOrphans", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestProcessOrphans", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -76,13 +76,18 @@ func TestProcessDelayedBlocks(t *testing.T) {
|
||||
// We use dag1 so we can build the test blocks with the proper
|
||||
// block header (UTXO commitment, acceptedIDMerkleroot, etc), and
|
||||
// then we use dag2 for the actual test.
|
||||
dag1, teardownFunc, err := DAGSetup("TestProcessDelayedBlocks1", Config{
|
||||
dag1, teardownFunc, err := DAGSetup("TestProcessDelayedBlocks1", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to setup DAG instance: %v", err)
|
||||
}
|
||||
defer teardownFunc()
|
||||
isDAG1Open := true
|
||||
defer func() {
|
||||
if isDAG1Open {
|
||||
teardownFunc()
|
||||
}
|
||||
}()
|
||||
|
||||
initialTime := dag1.dagParams.GenesisBlock.Header.Timestamp
|
||||
// Here we use a fake time source that returns a timestamp
|
||||
@@ -116,11 +121,14 @@ func TestProcessDelayedBlocks(t *testing.T) {
|
||||
t.Fatalf("error in PrepareBlockForTest: %s", err)
|
||||
}
|
||||
|
||||
teardownFunc()
|
||||
isDAG1Open = false
|
||||
|
||||
// Here the actual test begins. We add a delayed block and
|
||||
// its child and check that they are not added to the DAG,
|
||||
// and check that they're added only if we add a new block
|
||||
// after the delayed block timestamp is valid.
|
||||
dag2, teardownFunc2, err := DAGSetup("TestProcessDelayedBlocks2", Config{
|
||||
dag2, teardownFunc2, err := DAGSetup("TestProcessDelayedBlocks2", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -311,7 +311,7 @@ func (rtn *reachabilityTreeNode) countSubtrees(subTreeSizeMap map[*reachabilityT
|
||||
if len(current.children) == 0 {
|
||||
// We reached a leaf
|
||||
subTreeSizeMap[current] = 1
|
||||
} else if calculatedChildrenCount[current] <= uint64(len(current.children)) {
|
||||
} else if _, ok := subTreeSizeMap[current]; !ok {
|
||||
// We haven't yet calculated the subtree size of
|
||||
// the current node. Add all its children to the
|
||||
// queue
|
||||
|
||||
@@ -609,6 +609,46 @@ func TestReindexIntervalErrors(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReindexInterval(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
b.StopTimer()
|
||||
root := newReachabilityTreeNode(&blockNode{})
|
||||
|
||||
const subTreeSize = 70000
|
||||
// We set the interval of the root to subTreeSize*2 because
|
||||
// its first child gets half of the interval, so a reindex
|
||||
// from the root should happen after adding subTreeSize
|
||||
// nodes.
|
||||
root.setInterval(newReachabilityInterval(0, subTreeSize*2))
|
||||
|
||||
currentTreeNode := root
|
||||
for i := 0; i < subTreeSize; i++ {
|
||||
childTreeNode := newReachabilityTreeNode(&blockNode{})
|
||||
_, err := currentTreeNode.addChild(childTreeNode)
|
||||
if err != nil {
|
||||
b.Fatalf("addChild: %s", err)
|
||||
}
|
||||
|
||||
currentTreeNode = childTreeNode
|
||||
}
|
||||
|
||||
remainingIntervalBefore := *root.remainingInterval
|
||||
// After we added subTreeSize nodes, adding the next
|
||||
// node should lead to a reindex from root.
|
||||
fullReindexTriggeringNode := newReachabilityTreeNode(&blockNode{})
|
||||
b.StartTimer()
|
||||
_, err := currentTreeNode.addChild(fullReindexTriggeringNode)
|
||||
b.StopTimer()
|
||||
if err != nil {
|
||||
b.Fatalf("addChild: %s", err)
|
||||
}
|
||||
|
||||
if *root.remainingInterval == remainingIntervalBefore {
|
||||
b.Fatal("Expected a reindex from root, but it didn't happen")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFutureCoveringBlockSetString(t *testing.T) {
|
||||
treeNodeA := newReachabilityTreeNode(&blockNode{})
|
||||
treeNodeA.setInterval(newReachabilityInterval(123, 456))
|
||||
|
||||
@@ -3,6 +3,7 @@ package blockdag
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
"github.com/pkg/errors"
|
||||
@@ -82,7 +83,7 @@ func (store *reachabilityStore) reachabilityDataByHash(hash *daghash.Hash) (*rea
|
||||
}
|
||||
|
||||
// flushToDB writes all dirty reachability data to the database.
|
||||
func (store *reachabilityStore) flushToDB(dbTx database.Tx) error {
|
||||
func (store *reachabilityStore) flushToDB(dbContext *dbaccess.TxContext) error {
|
||||
if len(store.dirty) == 0 {
|
||||
return nil
|
||||
}
|
||||
@@ -90,7 +91,7 @@ func (store *reachabilityStore) flushToDB(dbTx database.Tx) error {
|
||||
for hash := range store.dirty {
|
||||
hash := hash // Copy hash to a new variable to avoid passing the same pointer
|
||||
reachabilityData := store.loaded[hash]
|
||||
err := store.dbStoreReachabilityData(dbTx, &hash, reachabilityData)
|
||||
err := store.storeReachabilityData(dbContext, &hash, reachabilityData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -102,22 +103,25 @@ func (store *reachabilityStore) clearDirtyEntries() {
|
||||
store.dirty = make(map[daghash.Hash]struct{})
|
||||
}
|
||||
|
||||
func (store *reachabilityStore) init(dbTx database.Tx) error {
|
||||
bucket := dbTx.Metadata().Bucket(reachabilityDataBucketName)
|
||||
|
||||
func (store *reachabilityStore) init(dbContext dbaccess.Context) error {
|
||||
// TODO: (Stas) This is a quick and dirty hack.
|
||||
// We iterate over the entire bucket twice:
|
||||
// * First, populate the loaded set with all entries
|
||||
// * Second, connect the parent/children pointers in each entry
|
||||
// with other nodes, which are now guaranteed to exist
|
||||
cursor := bucket.Cursor()
|
||||
cursor, err := dbaccess.ReachabilityDataCursor(dbContext)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cursor.Close()
|
||||
|
||||
for ok := cursor.First(); ok; ok = cursor.Next() {
|
||||
err := store.initReachabilityData(cursor)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
cursor = bucket.Cursor()
|
||||
|
||||
for ok := cursor.First(); ok; ok = cursor.Next() {
|
||||
err := store.loadReachabilityDataFromCursor(cursor)
|
||||
if err != nil {
|
||||
@@ -128,7 +132,12 @@ func (store *reachabilityStore) init(dbTx database.Tx) error {
|
||||
}
|
||||
|
||||
func (store *reachabilityStore) initReachabilityData(cursor database.Cursor) error {
|
||||
hash, err := daghash.NewHash(cursor.Key())
|
||||
key, err := cursor.Key()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hash, err := daghash.NewHash(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -141,7 +150,12 @@ func (store *reachabilityStore) initReachabilityData(cursor database.Cursor) err
|
||||
}
|
||||
|
||||
func (store *reachabilityStore) loadReachabilityDataFromCursor(cursor database.Cursor) error {
|
||||
hash, err := daghash.NewHash(cursor.Key())
|
||||
key, err := cursor.Key()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hash, err := daghash.NewHash(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -151,7 +165,12 @@ func (store *reachabilityStore) loadReachabilityDataFromCursor(cursor database.C
|
||||
return errors.Errorf("cannot find reachability data for block hash: %s", hash)
|
||||
}
|
||||
|
||||
err = store.deserializeReachabilityData(cursor.Value(), reachabilityData)
|
||||
serializedReachabilityData, err := cursor.Value()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = store.deserializeReachabilityData(serializedReachabilityData, reachabilityData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -162,15 +181,15 @@ func (store *reachabilityStore) loadReachabilityDataFromCursor(cursor database.C
|
||||
return nil
|
||||
}
|
||||
|
||||
// dbStoreReachabilityData stores the reachability data to the database.
|
||||
// storeReachabilityData stores the reachability data to the database.
|
||||
// This overwrites the current entry if there exists one.
|
||||
func (store *reachabilityStore) dbStoreReachabilityData(dbTx database.Tx, hash *daghash.Hash, reachabilityData *reachabilityData) error {
|
||||
func (store *reachabilityStore) storeReachabilityData(dbContext dbaccess.Context, hash *daghash.Hash, reachabilityData *reachabilityData) error {
|
||||
serializedReachabilyData, err := store.serializeReachabilityData(reachabilityData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return dbTx.Metadata().Bucket(reachabilityDataBucketName).Put(hash[:], serializedReachabilyData)
|
||||
return dbaccess.StoreReachabilityData(dbContext, hash, serializedReachabilyData)
|
||||
}
|
||||
|
||||
func (store *reachabilityStore) serializeReachabilityData(reachabilityData *reachabilityData) ([]byte, error) {
|
||||
|
||||
@@ -4,31 +4,20 @@ import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/util/subnetworkid"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
)
|
||||
|
||||
// SubnetworkStore stores the subnetworks data
|
||||
type SubnetworkStore struct {
|
||||
db database.DB
|
||||
}
|
||||
|
||||
func newSubnetworkStore(db database.DB) *SubnetworkStore {
|
||||
return &SubnetworkStore{
|
||||
db: db,
|
||||
}
|
||||
}
|
||||
|
||||
// registerSubnetworks scans a list of transactions, singles out
|
||||
// subnetwork registry transactions, validates them, and registers a new
|
||||
// subnetwork based on it.
|
||||
// This function returns an error if one or more transactions are invalid
|
||||
func registerSubnetworks(dbTx database.Tx, txs []*util.Tx) error {
|
||||
func registerSubnetworks(dbContext dbaccess.Context, txs []*util.Tx) error {
|
||||
subnetworkRegistryTxs := make([]*wire.MsgTx, 0)
|
||||
for _, tx := range txs {
|
||||
msgTx := tx.MsgTx()
|
||||
@@ -50,13 +39,13 @@ func registerSubnetworks(dbTx database.Tx, txs []*util.Tx) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sNet, err := dbGetSubnetwork(dbTx, subnetworkID)
|
||||
exists, err := dbaccess.HasSubnetwork(dbContext, subnetworkID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if sNet == nil {
|
||||
if !exists {
|
||||
createdSubnetwork := newSubnetwork(registryTx)
|
||||
err := dbRegisterSubnetwork(dbTx, subnetworkID, createdSubnetwork)
|
||||
err := registerSubnetwork(dbContext, subnetworkID, createdSubnetwork)
|
||||
if err != nil {
|
||||
return errors.Errorf("failed registering subnetwork"+
|
||||
"for tx '%s': %s", registryTx.TxHash(), err)
|
||||
@@ -85,66 +74,39 @@ func TxToSubnetworkID(tx *wire.MsgTx) (*subnetworkid.SubnetworkID, error) {
|
||||
return subnetworkid.New(util.Hash160(txHash[:]))
|
||||
}
|
||||
|
||||
// subnetwork returns a registered subnetwork. If the subnetwork does not exist
|
||||
// this method returns an error.
|
||||
func (s *SubnetworkStore) subnetwork(subnetworkID *subnetworkid.SubnetworkID) (*subnetwork, error) {
|
||||
var sNet *subnetwork
|
||||
var err error
|
||||
dbErr := s.db.View(func(dbTx database.Tx) error {
|
||||
sNet, err = dbGetSubnetwork(dbTx, subnetworkID)
|
||||
return nil
|
||||
})
|
||||
if dbErr != nil {
|
||||
return nil, errors.Errorf("could not retrieve subnetwork '%d': %s", subnetworkID, dbErr)
|
||||
}
|
||||
// fetchSubnetwork returns a registered subnetwork.
|
||||
func fetchSubnetwork(subnetworkID *subnetworkid.SubnetworkID) (*subnetwork, error) {
|
||||
serializedSubnetwork, err := dbaccess.FetchSubnetworkData(dbaccess.NoTx(), subnetworkID)
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("could not retrieve subnetwork '%d': %s", subnetworkID, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return sNet, nil
|
||||
subnet, err := deserializeSubnetwork(serializedSubnetwork)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return subnet, nil
|
||||
}
|
||||
|
||||
// GasLimit returns the gas limit of a registered subnetwork. If the subnetwork does not
|
||||
// exist this method returns an error.
|
||||
func (s *SubnetworkStore) GasLimit(subnetworkID *subnetworkid.SubnetworkID) (uint64, error) {
|
||||
sNet, err := s.subnetwork(subnetworkID)
|
||||
func GasLimit(subnetworkID *subnetworkid.SubnetworkID) (uint64, error) {
|
||||
sNet, err := fetchSubnetwork(subnetworkID)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if sNet == nil {
|
||||
return 0, errors.Errorf("subnetwork '%s' not found", subnetworkID)
|
||||
}
|
||||
|
||||
return sNet.gasLimit, nil
|
||||
}
|
||||
|
||||
// dbRegisterSubnetwork stores mappings from ID of the subnetwork to the subnetwork data.
|
||||
func dbRegisterSubnetwork(dbTx database.Tx, subnetworkID *subnetworkid.SubnetworkID, network *subnetwork) error {
|
||||
// Serialize the subnetwork
|
||||
func registerSubnetwork(dbContext dbaccess.Context, subnetworkID *subnetworkid.SubnetworkID, network *subnetwork) error {
|
||||
serializedSubnetwork, err := serializeSubnetwork(network)
|
||||
if err != nil {
|
||||
return errors.Errorf("failed to serialize sub-netowrk '%s': %s", subnetworkID, err)
|
||||
}
|
||||
|
||||
// Store the subnetwork
|
||||
subnetworksBucket := dbTx.Metadata().Bucket(subnetworksBucketName)
|
||||
err = subnetworksBucket.Put(subnetworkID[:], serializedSubnetwork)
|
||||
if err != nil {
|
||||
return errors.Errorf("failed to write sub-netowrk '%s': %s", subnetworkID, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// dbGetSubnetwork returns the subnetwork associated with subnetworkID or nil if the subnetwork was not found.
|
||||
func dbGetSubnetwork(dbTx database.Tx, subnetworkID *subnetworkid.SubnetworkID) (*subnetwork, error) {
|
||||
bucket := dbTx.Metadata().Bucket(subnetworksBucketName)
|
||||
serializedSubnetwork := bucket.Get(subnetworkID[:])
|
||||
if serializedSubnetwork == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return deserializeSubnetwork(serializedSubnetwork)
|
||||
return dbaccess.StoreSubnetwork(dbContext, subnetworkID, serializedSubnetwork)
|
||||
}
|
||||
|
||||
type subnetwork struct {
|
||||
|
||||
@@ -5,9 +5,11 @@ package blockdag
|
||||
import (
|
||||
"compress/bzip2"
|
||||
"encoding/binary"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/pkg/errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
@@ -16,34 +18,11 @@ import (
|
||||
|
||||
"github.com/kaspanet/kaspad/util/subnetworkid"
|
||||
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
_ "github.com/kaspanet/kaspad/database/ffldb" // blank import ffldb so that its init() function runs before tests
|
||||
"github.com/kaspanet/kaspad/txscript"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
)
|
||||
|
||||
const (
|
||||
// testDbType is the database backend type to use for the tests.
|
||||
testDbType = "ffldb"
|
||||
|
||||
// blockDataNet is the expected network in the test block data.
|
||||
blockDataNet = wire.Mainnet
|
||||
)
|
||||
|
||||
// isSupportedDbType returns whether or not the passed database type is
|
||||
// currently supported.
|
||||
func isSupportedDbType(dbType string) bool {
|
||||
supportedDrivers := database.SupportedDrivers()
|
||||
for _, driver := range supportedDrivers {
|
||||
if dbType == driver {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// FileExists returns whether or not the named file or directory exists.
|
||||
func FileExists(name string) bool {
|
||||
if _, err := os.Stat(name); err != nil {
|
||||
@@ -57,11 +36,10 @@ func FileExists(name string) bool {
|
||||
// DAGSetup is used to create a new db and DAG instance with the genesis
|
||||
// block already inserted. In addition to the new DAG instance, it returns
|
||||
// a teardown function the caller should invoke when done testing to clean up.
|
||||
func DAGSetup(dbName string, config Config) (*BlockDAG, func(), error) {
|
||||
if !isSupportedDbType(testDbType) {
|
||||
return nil, nil, errors.Errorf("unsupported db type %s", testDbType)
|
||||
}
|
||||
|
||||
// The openDB parameter instructs DAGSetup whether or not to also open the
|
||||
// database. Setting it to false is useful in tests that handle database
|
||||
// opening/closing by themselves.
|
||||
func DAGSetup(dbName string, openDb bool, config Config) (*BlockDAG, func(), error) {
|
||||
var teardown func()
|
||||
|
||||
// To make sure that the teardown function is not called before any goroutines finished to run -
|
||||
@@ -76,13 +54,16 @@ func DAGSetup(dbName string, config Config) (*BlockDAG, func(), error) {
|
||||
})
|
||||
}
|
||||
|
||||
if config.DB == nil {
|
||||
tmpDir := os.TempDir()
|
||||
if openDb {
|
||||
var err error
|
||||
tmpDir, err := ioutil.TempDir("", "DAGSetup")
|
||||
if err != nil {
|
||||
return nil, nil, errors.Errorf("error creating temp dir: %s", err)
|
||||
}
|
||||
|
||||
dbPath := filepath.Join(tmpDir, dbName)
|
||||
_ = os.RemoveAll(dbPath)
|
||||
var err error
|
||||
config.DB, err = database.Create(testDbType, dbPath, blockDataNet)
|
||||
err = dbaccess.Open(dbPath)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Errorf("error creating db: %s", err)
|
||||
}
|
||||
@@ -92,14 +73,13 @@ func DAGSetup(dbName string, config Config) (*BlockDAG, func(), error) {
|
||||
teardown = func() {
|
||||
spawnWaitGroup.Wait()
|
||||
spawn = realSpawn
|
||||
config.DB.Close()
|
||||
dbaccess.Close()
|
||||
os.RemoveAll(dbPath)
|
||||
}
|
||||
} else {
|
||||
teardown = func() {
|
||||
spawnWaitGroup.Wait()
|
||||
spawn = realSpawn
|
||||
config.DB.Close()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,14 +0,0 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestIsSupportedDbType(t *testing.T) {
|
||||
if !isSupportedDbType("ffldb") {
|
||||
t.Errorf("ffldb should be a supported DB driver")
|
||||
}
|
||||
if isSupportedDbType("madeUpDb") {
|
||||
t.Errorf("madeUpDb should not be a supported DB driver")
|
||||
}
|
||||
}
|
||||
@@ -2,10 +2,9 @@ package blockdag
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/util/locks"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type blockUTXODiffData struct {
|
||||
@@ -33,12 +32,11 @@ func (diffStore *utxoDiffStore) setBlockDiff(node *blockNode, diff *UTXODiff) er
|
||||
diffStore.mtx.HighPriorityWriteLock()
|
||||
defer diffStore.mtx.HighPriorityWriteUnlock()
|
||||
// load the diff data from DB to diffStore.loaded
|
||||
_, exists, err := diffStore.diffDataByHash(node.hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !exists {
|
||||
_, err := diffStore.diffDataByHash(node.hash)
|
||||
if dbaccess.IsNotFoundError(err) {
|
||||
diffStore.loaded[*node.hash] = &blockUTXODiffData{}
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
diffStore.loaded[*node.hash].diff = diff
|
||||
@@ -50,22 +48,19 @@ func (diffStore *utxoDiffStore) setBlockDiffChild(node *blockNode, diffChild *bl
|
||||
diffStore.mtx.HighPriorityWriteLock()
|
||||
defer diffStore.mtx.HighPriorityWriteUnlock()
|
||||
// load the diff data from DB to diffStore.loaded
|
||||
_, exists, err := diffStore.diffDataByHash(node.hash)
|
||||
_, err := diffStore.diffDataByHash(node.hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !exists {
|
||||
return diffNotFoundError(node)
|
||||
}
|
||||
|
||||
diffStore.loaded[*node.hash].diffChild = diffChild
|
||||
diffStore.setBlockAsDirty(node.hash)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (diffStore *utxoDiffStore) removeBlocksDiffData(dbTx database.Tx, blockHashes []*daghash.Hash) error {
|
||||
func (diffStore *utxoDiffStore) removeBlocksDiffData(dbContext dbaccess.Context, blockHashes []*daghash.Hash) error {
|
||||
for _, hash := range blockHashes {
|
||||
err := diffStore.removeBlockDiffData(dbTx, hash)
|
||||
err := diffStore.removeBlockDiffData(dbContext, hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -73,11 +68,11 @@ func (diffStore *utxoDiffStore) removeBlocksDiffData(dbTx database.Tx, blockHash
|
||||
return nil
|
||||
}
|
||||
|
||||
func (diffStore *utxoDiffStore) removeBlockDiffData(dbTx database.Tx, blockHash *daghash.Hash) error {
|
||||
func (diffStore *utxoDiffStore) removeBlockDiffData(dbContext dbaccess.Context, blockHash *daghash.Hash) error {
|
||||
diffStore.mtx.LowPriorityWriteLock()
|
||||
defer diffStore.mtx.LowPriorityWriteUnlock()
|
||||
delete(diffStore.loaded, *blockHash)
|
||||
err := dbRemoveDiffData(dbTx, blockHash)
|
||||
err := dbaccess.RemoveDiffData(dbContext, blockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -88,72 +83,49 @@ func (diffStore *utxoDiffStore) setBlockAsDirty(blockHash *daghash.Hash) {
|
||||
diffStore.dirty[*blockHash] = struct{}{}
|
||||
}
|
||||
|
||||
func (diffStore *utxoDiffStore) diffDataByHash(hash *daghash.Hash) (*blockUTXODiffData, bool, error) {
|
||||
func (diffStore *utxoDiffStore) diffDataByHash(hash *daghash.Hash) (*blockUTXODiffData, error) {
|
||||
if diffData, ok := diffStore.loaded[*hash]; ok {
|
||||
return diffData, true, nil
|
||||
return diffData, nil
|
||||
}
|
||||
diffData, err := diffStore.diffDataFromDB(hash)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
return nil, err
|
||||
}
|
||||
exists := diffData != nil
|
||||
if exists {
|
||||
diffStore.loaded[*hash] = diffData
|
||||
}
|
||||
return diffData, exists, nil
|
||||
}
|
||||
|
||||
func diffNotFoundError(node *blockNode) error {
|
||||
return errors.Errorf("Couldn't find diff data for block %s", node.hash)
|
||||
diffStore.loaded[*hash] = diffData
|
||||
return diffData, nil
|
||||
}
|
||||
|
||||
func (diffStore *utxoDiffStore) diffByNode(node *blockNode) (*UTXODiff, error) {
|
||||
diffStore.mtx.HighPriorityReadLock()
|
||||
defer diffStore.mtx.HighPriorityReadUnlock()
|
||||
diffData, exists, err := diffStore.diffDataByHash(node.hash)
|
||||
diffData, err := diffStore.diffDataByHash(node.hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !exists {
|
||||
return nil, diffNotFoundError(node)
|
||||
}
|
||||
return diffData.diff, nil
|
||||
}
|
||||
|
||||
func (diffStore *utxoDiffStore) diffChildByNode(node *blockNode) (*blockNode, error) {
|
||||
diffStore.mtx.HighPriorityReadLock()
|
||||
defer diffStore.mtx.HighPriorityReadUnlock()
|
||||
diffData, exists, err := diffStore.diffDataByHash(node.hash)
|
||||
diffData, err := diffStore.diffDataByHash(node.hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !exists {
|
||||
return nil, diffNotFoundError(node)
|
||||
}
|
||||
return diffData.diffChild, nil
|
||||
}
|
||||
|
||||
func (diffStore *utxoDiffStore) diffDataFromDB(hash *daghash.Hash) (*blockUTXODiffData, error) {
|
||||
var diffData *blockUTXODiffData
|
||||
err := diffStore.dag.db.View(func(dbTx database.Tx) error {
|
||||
bucket := dbTx.Metadata().Bucket(utxoDiffsBucketName)
|
||||
serializedBlockDiffData := bucket.Get(hash[:])
|
||||
if serializedBlockDiffData != nil {
|
||||
var err error
|
||||
diffData, err = diffStore.deserializeBlockUTXODiffData(serializedBlockDiffData)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
serializedBlockDiffData, err := dbaccess.FetchUTXODiffData(dbaccess.NoTx(), hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return diffData, nil
|
||||
|
||||
return diffStore.deserializeBlockUTXODiffData(serializedBlockDiffData)
|
||||
}
|
||||
|
||||
// flushToDB writes all dirty diff data to the database. If all writes
|
||||
// succeed, this clears the dirty set.
|
||||
func (diffStore *utxoDiffStore) flushToDB(dbTx database.Tx) error {
|
||||
// flushToDB writes all dirty diff data to the database.
|
||||
func (diffStore *utxoDiffStore) flushToDB(dbContext *dbaccess.TxContext) error {
|
||||
diffStore.mtx.HighPriorityWriteLock()
|
||||
defer diffStore.mtx.HighPriorityWriteUnlock()
|
||||
if len(diffStore.dirty) == 0 {
|
||||
@@ -167,7 +139,7 @@ func (diffStore *utxoDiffStore) flushToDB(dbTx database.Tx) error {
|
||||
hash := hash // Copy hash to a new variable to avoid passing the same pointer
|
||||
buffer.Reset()
|
||||
diffData := diffStore.loaded[hash]
|
||||
err := dbStoreDiffData(dbTx, buffer, &hash, diffData)
|
||||
err := storeDiffData(dbContext, buffer, &hash, diffData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -179,28 +151,18 @@ func (diffStore *utxoDiffStore) clearDirtyEntries() {
|
||||
diffStore.dirty = make(map[daghash.Hash]struct{})
|
||||
}
|
||||
|
||||
// dbStoreDiffData stores the UTXO diff data to the database.
|
||||
// storeDiffData stores the UTXO diff data to the database.
|
||||
// This overwrites the current entry if there exists one.
|
||||
func dbStoreDiffData(dbTx database.Tx, writeBuffer *bytes.Buffer, hash *daghash.Hash, diffData *blockUTXODiffData) error {
|
||||
// To avoid a ton of allocs, use the given writeBuffer
|
||||
func storeDiffData(dbContext dbaccess.Context, w *bytes.Buffer, hash *daghash.Hash, diffData *blockUTXODiffData) error {
|
||||
// To avoid a ton of allocs, use the io.Writer
|
||||
// instead of allocating one. We expect the buffer to
|
||||
// already be initalized and, in most cases, to already
|
||||
// be large enough to accommodate the serialized data
|
||||
// without growing.
|
||||
err := serializeBlockUTXODiffData(writeBuffer, diffData)
|
||||
err := serializeBlockUTXODiffData(w, diffData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Bucket.Put doesn't copy on its own, so we manually
|
||||
// copy here. We do so because we expect the buffer
|
||||
// to be reused once we're done with it.
|
||||
serializedDiffData := make([]byte, writeBuffer.Len())
|
||||
copy(serializedDiffData, writeBuffer.Bytes())
|
||||
|
||||
return dbTx.Metadata().Bucket(utxoDiffsBucketName).Put(hash[:], serializedDiffData)
|
||||
}
|
||||
|
||||
func dbRemoveDiffData(dbTx database.Tx, hash *daghash.Hash) error {
|
||||
return dbTx.Metadata().Bucket(utxoDiffsBucketName).Delete(hash[:])
|
||||
return dbaccess.StoreUTXODiffData(dbContext, hash, w.Bytes())
|
||||
}
|
||||
|
||||
@@ -1,9 +1,8 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/dbaccess"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
"reflect"
|
||||
@@ -12,7 +11,7 @@ import (
|
||||
|
||||
func TestUTXODiffStore(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("TestUTXODiffStore", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestUTXODiffStore", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -31,9 +30,12 @@ func TestUTXODiffStore(t *testing.T) {
|
||||
// Check that an error is returned when asking for non existing node
|
||||
nonExistingNode := createNode()
|
||||
_, err = dag.utxoDiffStore.diffByNode(nonExistingNode)
|
||||
expectedErrString := fmt.Sprintf("Couldn't find diff data for block %s", nonExistingNode.hash)
|
||||
if err == nil || err.Error() != expectedErrString {
|
||||
t.Errorf("diffByNode: expected error %s but got %s", expectedErrString, err)
|
||||
if !dbaccess.IsNotFoundError(err) {
|
||||
if err != nil {
|
||||
t.Errorf("diffByNode: %s", err)
|
||||
} else {
|
||||
t.Errorf("diffByNode: unexpectedly found diff data")
|
||||
}
|
||||
}
|
||||
|
||||
// Add node's diff data to the utxoDiffStore and check if it's checked correctly.
|
||||
@@ -63,12 +65,19 @@ func TestUTXODiffStore(t *testing.T) {
|
||||
|
||||
// Flush changes to db, delete them from the dag.utxoDiffStore.loaded
|
||||
// map, and check if the diff data is re-fetched from the database.
|
||||
err = dag.db.Update(func(dbTx database.Tx) error {
|
||||
return dag.utxoDiffStore.flushToDB(dbTx)
|
||||
})
|
||||
dbTx, err := dbaccess.NewTx()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to open database transaction: %s", err)
|
||||
}
|
||||
defer dbTx.RollbackUnlessClosed()
|
||||
err = dag.utxoDiffStore.flushToDB(dbTx)
|
||||
if err != nil {
|
||||
t.Fatalf("Error flushing utxoDiffStore data to DB: %s", err)
|
||||
}
|
||||
err = dbTx.Commit()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to commit database transaction: %s", err)
|
||||
}
|
||||
delete(dag.utxoDiffStore.loaded, *node.hash)
|
||||
|
||||
if storeDiff, err := dag.utxoDiffStore.diffByNode(node); err != nil {
|
||||
|
||||
@@ -69,7 +69,7 @@ func TestSequenceLocksActive(t *testing.T) {
|
||||
// ensure it fails.
|
||||
func TestCheckConnectBlockTemplate(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("checkconnectblocktemplate", Config{
|
||||
dag, teardownFunc, err := DAGSetup("checkconnectblocktemplate", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -161,7 +161,7 @@ func TestCheckConnectBlockTemplate(t *testing.T) {
|
||||
// as expected.
|
||||
func TestCheckBlockSanity(t *testing.T) {
|
||||
// Create a new database and dag instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("TestCheckBlockSanity", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestCheckBlockSanity", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -561,7 +561,7 @@ func TestPastMedianTime(t *testing.T) {
|
||||
|
||||
func TestValidateParents(t *testing.T) {
|
||||
// Create a new database and dag instance to run tests against.
|
||||
dag, teardownFunc, err := DAGSetup("TestCheckBlockSanity", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestCheckBlockSanity", true, Config{
|
||||
DAGParams: &dagconfig.SimnetParams,
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -35,7 +35,7 @@ func TestVirtualBlock(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 1
|
||||
dag, teardownFunc, err := DAGSetup("TestVirtualBlock", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestVirtualBlock", true, Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -134,7 +134,7 @@ func TestSelectedPath(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 1
|
||||
dag, teardownFunc, err := DAGSetup("TestSelectedPath", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestSelectedPath", true, Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -222,7 +222,7 @@ func TestChainUpdates(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 1
|
||||
dag, teardownFunc, err := DAGSetup("TestChainUpdates", Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestChainUpdates", true, Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -5,12 +5,9 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/limits"
|
||||
"github.com/kaspanet/kaspad/logs"
|
||||
"github.com/kaspanet/kaspad/util/panics"
|
||||
@@ -27,39 +24,6 @@ var (
|
||||
spawn func(func())
|
||||
)
|
||||
|
||||
// loadBlockDB opens the block database and returns a handle to it.
|
||||
func loadBlockDB() (database.DB, error) {
|
||||
// The database name is based on the database type.
|
||||
dbName := blockDBNamePrefix + "_" + cfg.DBType
|
||||
dbPath := filepath.Join(cfg.DataDir, dbName)
|
||||
|
||||
log.Infof("Loading block database from '%s'", dbPath)
|
||||
db, err := database.Open(cfg.DBType, dbPath, ActiveConfig().NetParams().Net)
|
||||
if err != nil {
|
||||
// Return the error if it's not because the database doesn't
|
||||
// exist.
|
||||
var dbErr database.Error
|
||||
if ok := errors.As(err, &dbErr); !ok || dbErr.ErrorCode !=
|
||||
database.ErrDbDoesNotExist {
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create the db if it does not exist.
|
||||
err = os.MkdirAll(cfg.DataDir, 0700)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
db, err = database.Create(cfg.DBType, dbPath, ActiveConfig().NetParams().Net)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
log.Info("Block database loaded")
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// realMain is the real main function for the utility. It is necessary to work
|
||||
// around the fact that deferred functions do not run when os.Exit() is called.
|
||||
func realMain() error {
|
||||
@@ -76,14 +40,6 @@ func realMain() error {
|
||||
log = backendLogger.Logger("MAIN")
|
||||
spawn = panics.GoroutineWrapperFunc(log)
|
||||
|
||||
// Load the block database.
|
||||
db, err := loadBlockDB()
|
||||
if err != nil {
|
||||
log.Errorf("Failed to load database: %s", err)
|
||||
return err
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
fi, err := os.Open(cfg.InFile)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to open file %s: %s", cfg.InFile, err)
|
||||
@@ -94,7 +50,7 @@ func realMain() error {
|
||||
// Create a block importer for the database and input file and start it.
|
||||
// The done channel returned from start will contain an error if
|
||||
// anything went wrong.
|
||||
importer, err := newBlockImporter(db, fi)
|
||||
importer, err := newBlockImporter(fi)
|
||||
if err != nil {
|
||||
log.Errorf("Failed create block importer: %s", err)
|
||||
return err
|
||||
|
||||
@@ -6,20 +6,15 @@ package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
flags "github.com/jessevdk/go-flags"
|
||||
"github.com/kaspanet/kaspad/config"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/pkg/errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
flags "github.com/jessevdk/go-flags"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
_ "github.com/kaspanet/kaspad/database/ffldb"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultDBType = "ffldb"
|
||||
defaultDataFile = "bootstrap.dat"
|
||||
defaultProgress = 10
|
||||
)
|
||||
@@ -27,7 +22,6 @@ const (
|
||||
var (
|
||||
kaspadHomeDir = util.AppDataDir("kaspad", false)
|
||||
defaultDataDir = filepath.Join(kaspadHomeDir, "data")
|
||||
knownDbTypes = database.SupportedDrivers()
|
||||
activeConfig *ConfigFlags
|
||||
)
|
||||
|
||||
@@ -41,7 +35,6 @@ func ActiveConfig() *ConfigFlags {
|
||||
// See loadConfig for details on the configuration load process.
|
||||
type ConfigFlags struct {
|
||||
DataDir string `short:"b" long:"datadir" description:"Location of the kaspad data directory"`
|
||||
DBType string `long:"dbtype" description:"Database backend to use for the Block DAG"`
|
||||
InFile string `short:"i" long:"infile" description:"File containing the block(s)"`
|
||||
Progress int `short:"p" long:"progress" description:"Show a progress message each time this number of seconds have passed -- Use 0 to disable progress announcements"`
|
||||
AcceptanceIndex bool `long:"acceptanceindex" description:"Maintain a full hash-based acceptance index which makes the getChainFromBlock RPC available"`
|
||||
@@ -58,23 +51,11 @@ func fileExists(name string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// validDbType returns whether or not dbType is a supported database type.
|
||||
func validDbType(dbType string) bool {
|
||||
for _, knownType := range knownDbTypes {
|
||||
if dbType == knownType {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// loadConfig initializes and parses the config using command line options.
|
||||
func loadConfig() (*ConfigFlags, []string, error) {
|
||||
// Default config.
|
||||
activeConfig = &ConfigFlags{
|
||||
DataDir: defaultDataDir,
|
||||
DBType: defaultDBType,
|
||||
InFile: defaultDataFile,
|
||||
Progress: defaultProgress,
|
||||
}
|
||||
@@ -95,16 +76,6 @@ func loadConfig() (*ConfigFlags, []string, error) {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Validate database type.
|
||||
if !validDbType(activeConfig.DBType) {
|
||||
str := "%s: The specified database type [%s] is invalid -- " +
|
||||
"supported types %s"
|
||||
err := errors.Errorf(str, "loadConfig", activeConfig.DBType, strings.Join(knownDbTypes, ", "))
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
parser.WriteHelp(os.Stderr)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Append the network type to the data directory so it is "namespaced"
|
||||
// per network. In addition to the block database, there are other
|
||||
// pieces of data that are saved to disk such as address manager state.
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/blockdag"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
)
|
||||
@@ -28,7 +27,6 @@ type importResults struct {
|
||||
// blockImporter houses information about an ongoing import from a block data
|
||||
// file to the block database.
|
||||
type blockImporter struct {
|
||||
db database.DB
|
||||
dag *blockdag.BlockDAG
|
||||
r io.ReadSeeker
|
||||
processQueue chan []byte
|
||||
@@ -287,7 +285,7 @@ func (bi *blockImporter) Import() chan *importResults {
|
||||
|
||||
// newBlockImporter returns a new importer for the provided file reader seeker
|
||||
// and database.
|
||||
func newBlockImporter(db database.DB, r io.ReadSeeker) (*blockImporter, error) {
|
||||
func newBlockImporter(r io.ReadSeeker) (*blockImporter, error) {
|
||||
// Create the acceptance index if needed.
|
||||
var indexes []indexers.Indexer
|
||||
if cfg.AcceptanceIndex {
|
||||
@@ -302,7 +300,6 @@ func newBlockImporter(db database.DB, r io.ReadSeeker) (*blockImporter, error) {
|
||||
}
|
||||
|
||||
dag, err := blockdag.New(&blockdag.Config{
|
||||
DB: db,
|
||||
DAGParams: ActiveConfig().NetParams(),
|
||||
TimeSource: blockdag.NewTimeSource(),
|
||||
IndexManager: indexManager,
|
||||
@@ -312,7 +309,6 @@ func newBlockImporter(db database.DB, r io.ReadSeeker) (*blockImporter, error) {
|
||||
}
|
||||
|
||||
return &blockImporter{
|
||||
db: db,
|
||||
r: r,
|
||||
processQueue: make(chan []byte, 2),
|
||||
doneChan: make(chan bool),
|
||||
|
||||
@@ -2,8 +2,6 @@ package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/kaspanet/kaspad/version"
|
||||
@@ -14,6 +12,7 @@ import (
|
||||
|
||||
"github.com/kaspanet/kaspad/signal"
|
||||
"github.com/kaspanet/kaspad/util/panics"
|
||||
"github.com/kaspanet/kaspad/util/profiling"
|
||||
)
|
||||
|
||||
func main() {
|
||||
@@ -35,13 +34,7 @@ func main() {
|
||||
|
||||
// Enable http profiling server if requested.
|
||||
if cfg.Profile != "" {
|
||||
spawn(func() {
|
||||
listenAddr := net.JoinHostPort("", cfg.Profile)
|
||||
log.Infof("Profile server listening on %s", listenAddr)
|
||||
profileRedirect := http.RedirectHandler("/debug/pprof", http.StatusSeeOther)
|
||||
http.Handle("/", profileRedirect)
|
||||
log.Errorf("%s", http.ListenAndServe(listenAddr, nil))
|
||||
})
|
||||
profiling.Start(cfg.Profile, log)
|
||||
}
|
||||
|
||||
client, err := connectToServer(cfg)
|
||||
|
||||
@@ -22,7 +22,6 @@ import (
|
||||
|
||||
"github.com/btcsuite/go-socks/socks"
|
||||
"github.com/jessevdk/go-flags"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/logger"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/network"
|
||||
@@ -46,7 +45,6 @@ const (
|
||||
defaultMaxRPCClients = 10
|
||||
defaultMaxRPCWebsockets = 25
|
||||
defaultMaxRPCConcurrentReqs = 20
|
||||
defaultDbType = "ffldb"
|
||||
defaultBlockMaxMass = 10000000
|
||||
blockMaxMassMin = 1000
|
||||
blockMaxMassMax = 10000000
|
||||
@@ -65,7 +63,6 @@ var (
|
||||
|
||||
defaultConfigFile = filepath.Join(DefaultHomeDir, defaultConfigFilename)
|
||||
defaultDataDir = filepath.Join(DefaultHomeDir, defaultDataDirname)
|
||||
knownDbTypes = database.SupportedDrivers()
|
||||
defaultRPCKeyFile = filepath.Join(DefaultHomeDir, "rpc.key")
|
||||
defaultRPCCertFile = filepath.Join(DefaultHomeDir, "rpc.cert")
|
||||
defaultLogDir = filepath.Join(DefaultHomeDir, defaultLogDirname)
|
||||
@@ -168,17 +165,6 @@ func cleanAndExpandPath(path string) string {
|
||||
return filepath.Clean(os.ExpandEnv(path))
|
||||
}
|
||||
|
||||
// validDbType returns whether or not dbType is a supported database type.
|
||||
func validDbType(dbType string) bool {
|
||||
for _, knownType := range knownDbTypes {
|
||||
if dbType == knownType {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// newConfigParser returns a new command line flags parser.
|
||||
func newConfigParser(cfgFlags *Flags, so *serviceOptions, options flags.Options) *flags.Parser {
|
||||
parser := flags.NewParser(cfgFlags, options)
|
||||
@@ -235,7 +221,6 @@ func loadConfig() (*Config, []string, error) {
|
||||
RPCMaxConcurrentReqs: defaultMaxRPCConcurrentReqs,
|
||||
DataDir: defaultDataDir,
|
||||
LogDir: defaultLogDir,
|
||||
DbType: defaultDbType,
|
||||
RPCKey: defaultRPCKeyFile,
|
||||
RPCCert: defaultRPCCertFile,
|
||||
BlockMaxMass: defaultBlockMaxMass,
|
||||
@@ -424,16 +409,6 @@ func loadConfig() (*Config, []string, error) {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Validate database type.
|
||||
if !validDbType(activeConfig.DbType) {
|
||||
str := "%s: The specified database type [%s] is invalid -- " +
|
||||
"supported types %s"
|
||||
err := errors.Errorf(str, funcName, activeConfig.DbType, knownDbTypes)
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
fmt.Fprintln(os.Stderr, usageMessage)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Validate profile port number
|
||||
if activeConfig.Profile != "" {
|
||||
profilePort, err := strconv.Atoi(activeConfig.Profile)
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/hdkeychain"
|
||||
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
@@ -177,13 +176,6 @@ type Params struct {
|
||||
|
||||
// Address encoding magics
|
||||
PrivateKeyID byte // First byte of a WIF private key
|
||||
|
||||
// BIP32 hierarchical deterministic extended key magics
|
||||
HDKeyIDPair hdkeychain.HDKeyIDPair
|
||||
|
||||
// BIP44 coin type used in the hierarchical deterministic path for
|
||||
// address generation.
|
||||
HDCoinType uint32
|
||||
}
|
||||
|
||||
// NormalizeRPCServerAddress returns addr with the current network default
|
||||
@@ -238,13 +230,6 @@ var MainnetParams = Params{
|
||||
|
||||
// Address encoding magics
|
||||
PrivateKeyID: 0x80, // starts with 5 (uncompressed) or K (compressed)
|
||||
|
||||
// BIP32 hierarchical deterministic extended key magics
|
||||
HDKeyIDPair: hdkeychain.HDKeyPairMainnet,
|
||||
|
||||
// BIP44 coin type used in the hierarchical deterministic path for
|
||||
// address generation.
|
||||
HDCoinType: 0,
|
||||
}
|
||||
|
||||
// RegressionNetParams defines the network parameters for the regression test
|
||||
@@ -295,13 +280,6 @@ var RegressionNetParams = Params{
|
||||
|
||||
// Address encoding magics
|
||||
PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed)
|
||||
|
||||
// BIP32 hierarchical deterministic extended key magics
|
||||
HDKeyIDPair: hdkeychain.HDKeyPairRegressionNet,
|
||||
|
||||
// BIP44 coin type used in the hierarchical deterministic path for
|
||||
// address generation.
|
||||
HDCoinType: 1,
|
||||
}
|
||||
|
||||
// TestnetParams defines the network parameters for the test Kaspa network.
|
||||
@@ -350,13 +328,6 @@ var TestnetParams = Params{
|
||||
|
||||
// Address encoding magics
|
||||
PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed)
|
||||
|
||||
// BIP32 hierarchical deterministic extended key magics
|
||||
HDKeyIDPair: hdkeychain.HDKeyPairTestnet,
|
||||
|
||||
// BIP44 coin type used in the hierarchical deterministic path for
|
||||
// address generation.
|
||||
HDCoinType: 1,
|
||||
}
|
||||
|
||||
// SimnetParams defines the network parameters for the simulation test Kaspa
|
||||
@@ -409,13 +380,6 @@ var SimnetParams = Params{
|
||||
PrivateKeyID: 0x64, // starts with 4 (uncompressed) or F (compressed)
|
||||
// Human-readable part for Bech32 encoded addresses
|
||||
Prefix: util.Bech32PrefixKaspaSim,
|
||||
|
||||
// BIP32 hierarchical deterministic extended key magics
|
||||
HDKeyIDPair: hdkeychain.HDKeyPairSimnet,
|
||||
|
||||
// BIP44 coin type used in the hierarchical deterministic path for
|
||||
// address generation.
|
||||
HDCoinType: 115, // ASCII for s
|
||||
}
|
||||
|
||||
// DevnetParams defines the network parameters for the development Kaspa network.
|
||||
@@ -464,13 +428,6 @@ var DevnetParams = Params{
|
||||
|
||||
// Address encoding magics
|
||||
PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed)
|
||||
|
||||
// BIP32 hierarchical deterministic extended key magics
|
||||
HDKeyIDPair: hdkeychain.HDKeyPairDevnet,
|
||||
|
||||
// BIP44 coin type used in the hierarchical deterministic path for
|
||||
// address generation.
|
||||
HDCoinType: 1,
|
||||
}
|
||||
|
||||
var (
|
||||
|
||||
@@ -1,9 +1,6 @@
|
||||
package dagconfig_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/kaspanet/kaspad/util/hdkeychain"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
. "github.com/kaspanet/kaspad/dagconfig"
|
||||
@@ -15,10 +12,6 @@ import (
|
||||
var mockNetParams = Params{
|
||||
Name: "mocknet",
|
||||
Net: 1<<32 - 1,
|
||||
HDKeyIDPair: hdkeychain.HDKeyIDPair{
|
||||
PrivateKeyID: [4]byte{0x01, 0x02, 0x03, 0x04},
|
||||
PublicKeyID: [4]byte{0x05, 0x06, 0x07, 0x08},
|
||||
},
|
||||
}
|
||||
|
||||
func TestRegister(t *testing.T) {
|
||||
@@ -27,16 +20,10 @@ func TestRegister(t *testing.T) {
|
||||
params *Params
|
||||
err error
|
||||
}
|
||||
type hdTest struct {
|
||||
priv []byte
|
||||
want []byte
|
||||
err error
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
register []registerTest
|
||||
hdMagics []hdTest
|
||||
}{
|
||||
{
|
||||
name: "default networks",
|
||||
@@ -62,40 +49,6 @@ func TestRegister(t *testing.T) {
|
||||
err: ErrDuplicateNet,
|
||||
},
|
||||
},
|
||||
hdMagics: []hdTest{
|
||||
{
|
||||
priv: MainnetParams.HDKeyIDPair.PrivateKeyID[:],
|
||||
want: MainnetParams.HDKeyIDPair.PublicKeyID[:],
|
||||
err: nil,
|
||||
},
|
||||
{
|
||||
priv: TestnetParams.HDKeyIDPair.PrivateKeyID[:],
|
||||
want: TestnetParams.HDKeyIDPair.PublicKeyID[:],
|
||||
err: nil,
|
||||
},
|
||||
{
|
||||
priv: RegressionNetParams.HDKeyIDPair.PrivateKeyID[:],
|
||||
want: RegressionNetParams.HDKeyIDPair.PublicKeyID[:],
|
||||
err: nil,
|
||||
},
|
||||
{
|
||||
priv: SimnetParams.HDKeyIDPair.PrivateKeyID[:],
|
||||
want: SimnetParams.HDKeyIDPair.PublicKeyID[:],
|
||||
err: nil,
|
||||
},
|
||||
{
|
||||
priv: mockNetParams.HDKeyIDPair.PrivateKeyID[:],
|
||||
err: hdkeychain.ErrUnknownHDKeyID,
|
||||
},
|
||||
{
|
||||
priv: []byte{0xff, 0xff, 0xff, 0xff},
|
||||
err: hdkeychain.ErrUnknownHDKeyID,
|
||||
},
|
||||
{
|
||||
priv: []byte{0xff},
|
||||
err: hdkeychain.ErrUnknownHDKeyID,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "register mocknet",
|
||||
@@ -106,13 +59,6 @@ func TestRegister(t *testing.T) {
|
||||
err: nil,
|
||||
},
|
||||
},
|
||||
hdMagics: []hdTest{
|
||||
{
|
||||
priv: mockNetParams.HDKeyIDPair.PrivateKeyID[:],
|
||||
want: mockNetParams.HDKeyIDPair.PublicKeyID[:],
|
||||
err: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "more duplicates",
|
||||
@@ -143,41 +89,6 @@ func TestRegister(t *testing.T) {
|
||||
err: ErrDuplicateNet,
|
||||
},
|
||||
},
|
||||
hdMagics: []hdTest{
|
||||
{
|
||||
priv: MainnetParams.HDKeyIDPair.PrivateKeyID[:],
|
||||
want: MainnetParams.HDKeyIDPair.PublicKeyID[:],
|
||||
err: nil,
|
||||
},
|
||||
{
|
||||
priv: TestnetParams.HDKeyIDPair.PrivateKeyID[:],
|
||||
want: TestnetParams.HDKeyIDPair.PublicKeyID[:],
|
||||
err: nil,
|
||||
},
|
||||
{
|
||||
priv: RegressionNetParams.HDKeyIDPair.PrivateKeyID[:],
|
||||
want: RegressionNetParams.HDKeyIDPair.PublicKeyID[:],
|
||||
err: nil,
|
||||
},
|
||||
{
|
||||
priv: SimnetParams.HDKeyIDPair.PrivateKeyID[:],
|
||||
want: SimnetParams.HDKeyIDPair.PublicKeyID[:],
|
||||
err: nil,
|
||||
},
|
||||
{
|
||||
priv: mockNetParams.HDKeyIDPair.PrivateKeyID[:],
|
||||
want: mockNetParams.HDKeyIDPair.PublicKeyID[:],
|
||||
err: nil,
|
||||
},
|
||||
{
|
||||
priv: []byte{0xff, 0xff, 0xff, 0xff},
|
||||
err: hdkeychain.ErrUnknownHDKeyID,
|
||||
},
|
||||
{
|
||||
priv: []byte{0xff},
|
||||
err: hdkeychain.ErrUnknownHDKeyID,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -185,25 +96,10 @@ func TestRegister(t *testing.T) {
|
||||
for _, regtest := range test.register {
|
||||
err := Register(regtest.params)
|
||||
|
||||
// HDKeyIDPairs must be registered separately
|
||||
hdkeychain.RegisterHDKeyIDPair(regtest.params.HDKeyIDPair)
|
||||
|
||||
if err != regtest.err {
|
||||
t.Errorf("%s:%s: Registered network with unexpected error: got %v expected %v",
|
||||
test.name, regtest.name, err, regtest.err)
|
||||
}
|
||||
}
|
||||
for i, magTest := range test.hdMagics {
|
||||
pubKey, err := hdkeychain.HDPrivateKeyToPublicKeyID(magTest.priv[:])
|
||||
if !reflect.DeepEqual(err, magTest.err) {
|
||||
t.Errorf("%s: HD magic %d mismatched error: got %v expected %v ",
|
||||
test.name, i, err, magTest.err)
|
||||
continue
|
||||
}
|
||||
if magTest.err == nil && !bytes.Equal(pubKey, magTest.want[:]) {
|
||||
t.Errorf("%s: HD magic %d private and public mismatch: got %v expected %v ",
|
||||
test.name, i, pubKey, magTest.want[:])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,29 +4,35 @@ database
|
||||
[](https://choosealicense.com/licenses/isc/)
|
||||
[](http://godoc.org/github.com/kaspanet/kaspad/database)
|
||||
|
||||
Package database provides a block and metadata storage database.
|
||||
Package database provides a database for kaspad.
|
||||
|
||||
Please note that this package is intended to enable kaspad to support different
|
||||
database backends and is not something that a client can directly access as only
|
||||
one entity can have the database open at a time (for most database backends),
|
||||
and that entity will be kaspad.
|
||||
Overview
|
||||
--------
|
||||
This package provides a database layer to store and retrieve data in a simple
|
||||
and efficient manner.
|
||||
|
||||
When a client wants programmatic access to the data provided by kaspad, they'll
|
||||
likely want to use the [rpcclient](https://github.com/kaspanet/kaspad/tree/master/rpcclient)
|
||||
package which makes use of the [JSON-RPC API](https://github.com/kaspanet/kaspad/tree/master/docs/json_rpc_api.md).
|
||||
The current backend is ffldb, which makes use of leveldb, flat files, and strict
|
||||
checksums in key areas to ensure data integrity.
|
||||
|
||||
The default backend, ffldb, has a strong focus on speed, efficiency, and
|
||||
robustness. It makes use of leveldb for the metadata, flat files for block
|
||||
storage, and strict checksums in key areas to ensure data integrity.
|
||||
Implementors of additional backends are required to implement the following interfaces:
|
||||
|
||||
## Feature Overview
|
||||
DataAccessor
|
||||
------------
|
||||
This defines the common interface by which data gets accessed in a generic kaspad
|
||||
database. Both the Database and the Transaction interfaces (see below) implement it.
|
||||
|
||||
- Key/value metadata store
|
||||
- Kaspa block storage
|
||||
- Efficient retrieval of block headers and regions (transactions, scripts, etc)
|
||||
- Read-only and read-write transactions with both manual and managed modes
|
||||
- Nested buckets
|
||||
- Iteration support including cursors with seek capability
|
||||
- Supports registration of backend databases
|
||||
- Comprehensive test coverage
|
||||
Database
|
||||
--------
|
||||
This defines the interface of a database that can begin transactions and close itself.
|
||||
|
||||
Transaction
|
||||
-----------
|
||||
This defines the interface of a generic kaspad database transaction.
|
||||
|
||||
Note: Transactions provide data consistency over the state of the database as it was
|
||||
when the transaction started. There is NO guarantee that if one puts data into the
|
||||
transaction then it will be available to get within the same transaction.
|
||||
|
||||
Cursor
|
||||
------
|
||||
This iterates over database entries given some bucket.
|
||||
|
||||
51
database/bucket.go
Normal file
51
database/bucket.go
Normal file
@@ -0,0 +1,51 @@
|
||||
package database
|
||||
|
||||
import "bytes"
|
||||
|
||||
var separator = []byte("/")
|
||||
|
||||
// Bucket is a helper type meant to combine buckets,
|
||||
// sub-buckets, and keys into a single full key-value
|
||||
// database key.
|
||||
type Bucket struct {
|
||||
path [][]byte
|
||||
}
|
||||
|
||||
// MakeBucket creates a new Bucket using the given path
|
||||
// of buckets.
|
||||
func MakeBucket(path ...[]byte) *Bucket {
|
||||
return &Bucket{path: path}
|
||||
}
|
||||
|
||||
// Bucket returns the sub-bucket of the current bucket
|
||||
// defined by bucketBytes.
|
||||
func (b *Bucket) Bucket(bucketBytes []byte) *Bucket {
|
||||
newPath := make([][]byte, len(b.path)+1)
|
||||
copy(newPath, b.path)
|
||||
copy(newPath[len(b.path):], [][]byte{bucketBytes})
|
||||
|
||||
return MakeBucket(newPath...)
|
||||
}
|
||||
|
||||
// Key returns the key inside of the current bucket.
|
||||
func (b *Bucket) Key(key []byte) []byte {
|
||||
bucketPath := b.Path()
|
||||
|
||||
fullKeyLength := len(bucketPath) + len(key)
|
||||
fullKey := make([]byte, fullKeyLength)
|
||||
copy(fullKey, bucketPath)
|
||||
copy(fullKey[len(bucketPath):], key)
|
||||
|
||||
return fullKey
|
||||
}
|
||||
|
||||
// Path returns the full path of the current bucket.
|
||||
func (b *Bucket) Path() []byte {
|
||||
bucketPath := bytes.Join(b.path, separator)
|
||||
|
||||
bucketPathWithFinalSeparator := make([]byte, len(bucketPath)+len(separator))
|
||||
copy(bucketPathWithFinalSeparator, bucketPath)
|
||||
copy(bucketPathWithFinalSeparator[len(bucketPath):], separator)
|
||||
|
||||
return bucketPathWithFinalSeparator
|
||||
}
|
||||
69
database/bucket_test.go
Normal file
69
database/bucket_test.go
Normal file
@@ -0,0 +1,69 @@
|
||||
package database
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestBucketPath(t *testing.T) {
|
||||
tests := []struct {
|
||||
bucketByteSlices [][]byte
|
||||
expectedPath []byte
|
||||
}{
|
||||
{
|
||||
bucketByteSlices: [][]byte{[]byte("hello")},
|
||||
expectedPath: []byte("hello/"),
|
||||
},
|
||||
{
|
||||
bucketByteSlices: [][]byte{[]byte("hello"), []byte("world")},
|
||||
expectedPath: []byte("hello/world/"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
// Build a result using the MakeBucket function alone
|
||||
resultKey := MakeBucket(test.bucketByteSlices...).Path()
|
||||
if !reflect.DeepEqual(resultKey, test.expectedPath) {
|
||||
t.Errorf("TestBucketPath: got wrong path using MakeBucket. "+
|
||||
"Want: %s, got: %s", string(test.expectedPath), string(resultKey))
|
||||
}
|
||||
|
||||
// Build a result using sub-Bucket calls
|
||||
bucket := MakeBucket()
|
||||
for _, bucketBytes := range test.bucketByteSlices {
|
||||
bucket = bucket.Bucket(bucketBytes)
|
||||
}
|
||||
resultKey = bucket.Path()
|
||||
if !reflect.DeepEqual(resultKey, test.expectedPath) {
|
||||
t.Errorf("TestBucketPath: got wrong path using sub-Bucket "+
|
||||
"calls. Want: %s, got: %s", string(test.expectedPath), string(resultKey))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBucketKey(t *testing.T) {
|
||||
tests := []struct {
|
||||
bucketByteSlices [][]byte
|
||||
key []byte
|
||||
expectedKey []byte
|
||||
}{
|
||||
{
|
||||
bucketByteSlices: [][]byte{[]byte("hello")},
|
||||
key: []byte("test"),
|
||||
expectedKey: []byte("hello/test"),
|
||||
},
|
||||
{
|
||||
bucketByteSlices: [][]byte{[]byte("hello"), []byte("world")},
|
||||
key: []byte("test"),
|
||||
expectedKey: []byte("hello/world/test"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
resultKey := MakeBucket(test.bucketByteSlices...).Key(test.key)
|
||||
if !reflect.DeepEqual(resultKey, test.expectedKey) {
|
||||
t.Errorf("TestBucketKey: got wrong key. Want: %s, got: %s",
|
||||
string(test.expectedKey), string(resultKey))
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,62 +0,0 @@
|
||||
// Copyright (c) 2015-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"github.com/pkg/errors"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
)
|
||||
|
||||
// fetchBlockCmd defines the configuration options for the fetchblock command.
|
||||
type fetchBlockCmd struct{}
|
||||
|
||||
var (
|
||||
// fetchBlockCfg defines the configuration options for the command.
|
||||
fetchBlockCfg = fetchBlockCmd{}
|
||||
)
|
||||
|
||||
// Execute is the main entry point for the command. It's invoked by the parser.
|
||||
func (cmd *fetchBlockCmd) Execute(args []string) error {
|
||||
// Setup the global config options and ensure they are valid.
|
||||
if err := setupGlobalConfig(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(args) < 1 {
|
||||
return errors.New("required block hash parameter not specified")
|
||||
}
|
||||
blockHash, err := daghash.NewHashFromStr(args[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Load the block database.
|
||||
db, err := loadBlockDB()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
return db.View(func(dbTx database.Tx) error {
|
||||
log.Infof("Fetching block %s", blockHash)
|
||||
startTime := time.Now()
|
||||
blockBytes, err := dbTx.FetchBlock(blockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Infof("Loaded block in %s", time.Since(startTime))
|
||||
log.Infof("Block Hex: %s", hex.EncodeToString(blockBytes))
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Usage overrides the usage display for the command.
|
||||
func (cmd *fetchBlockCmd) Usage() string {
|
||||
return "<block-hash>"
|
||||
}
|
||||
@@ -1,90 +0,0 @@
|
||||
// Copyright (c) 2015-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"github.com/pkg/errors"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
)
|
||||
|
||||
// blockRegionCmd defines the configuration options for the fetchblockregion
|
||||
// command.
|
||||
type blockRegionCmd struct{}
|
||||
|
||||
var (
|
||||
// blockRegionCfg defines the configuration options for the command.
|
||||
blockRegionCfg = blockRegionCmd{}
|
||||
)
|
||||
|
||||
// Execute is the main entry point for the command. It's invoked by the parser.
|
||||
func (cmd *blockRegionCmd) Execute(args []string) error {
|
||||
// Setup the global config options and ensure they are valid.
|
||||
if err := setupGlobalConfig(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Ensure expected arguments.
|
||||
if len(args) < 1 {
|
||||
return errors.New("required block hash parameter not specified")
|
||||
}
|
||||
if len(args) < 2 {
|
||||
return errors.New("required start offset parameter not " +
|
||||
"specified")
|
||||
}
|
||||
if len(args) < 3 {
|
||||
return errors.New("required region length parameter not " +
|
||||
"specified")
|
||||
}
|
||||
|
||||
// Parse arguments.
|
||||
blockHash, err := daghash.NewHashFromStr(args[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
startOffset, err := strconv.ParseUint(args[1], 10, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
regionLen, err := strconv.ParseUint(args[2], 10, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Load the block database.
|
||||
db, err := loadBlockDB()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
return db.View(func(dbTx database.Tx) error {
|
||||
log.Infof("Fetching block region %s<%d:%d>", blockHash,
|
||||
startOffset, startOffset+regionLen-1)
|
||||
region := database.BlockRegion{
|
||||
Hash: blockHash,
|
||||
Offset: uint32(startOffset),
|
||||
Len: uint32(regionLen),
|
||||
}
|
||||
startTime := time.Now()
|
||||
regionBytes, err := dbTx.FetchBlockRegion(®ion)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Infof("Loaded block region in %s", time.Since(startTime))
|
||||
log.Infof("Double Hash: %s", daghash.DoubleHashH(regionBytes))
|
||||
log.Infof("Region Hex: %s", hex.EncodeToString(regionBytes))
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Usage overrides the usage display for the command.
|
||||
func (cmd *blockRegionCmd) Usage() string {
|
||||
return "<block-hash> <start-offset> <length-of-region>"
|
||||
}
|
||||
@@ -1,111 +0,0 @@
|
||||
// Copyright (c) 2015-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
_ "github.com/kaspanet/kaspad/database/ffldb"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
)
|
||||
|
||||
var (
|
||||
kaspadHomeDir = util.AppDataDir("kaspad", false)
|
||||
knownDbTypes = database.SupportedDrivers()
|
||||
activeNetParams = &dagconfig.MainnetParams
|
||||
|
||||
// Default global config.
|
||||
cfg = &config{
|
||||
DataDir: filepath.Join(kaspadHomeDir, "data"),
|
||||
DbType: "ffldb",
|
||||
}
|
||||
)
|
||||
|
||||
// config defines the global configuration options.
|
||||
type config struct {
|
||||
DataDir string `short:"b" long:"datadir" description:"Location of the kaspad data directory"`
|
||||
DbType string `long:"dbtype" description:"Database backend to use for the Block DAG"`
|
||||
Testnet bool `long:"testnet" description:"Use the test network"`
|
||||
RegressionTest bool `long:"regtest" description:"Use the regression test network"`
|
||||
Simnet bool `long:"simnet" description:"Use the simulation test network"`
|
||||
Devnet bool `long:"devnet" description:"Use the development test network"`
|
||||
}
|
||||
|
||||
// fileExists reports whether the named file or directory exists.
|
||||
func fileExists(name string) bool {
|
||||
if _, err := os.Stat(name); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// validDbType returns whether or not dbType is a supported database type.
|
||||
func validDbType(dbType string) bool {
|
||||
for _, knownType := range knownDbTypes {
|
||||
if dbType == knownType {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// setupGlobalConfig examine the global configuration options for any conditions
|
||||
// which are invalid as well as performs any addition setup necessary after the
|
||||
// initial parse.
|
||||
func setupGlobalConfig() error {
|
||||
// Multiple networks can't be selected simultaneously.
|
||||
// Count number of network flags passed; assign active network params
|
||||
// while we're at it
|
||||
numNets := 0
|
||||
if cfg.Testnet {
|
||||
numNets++
|
||||
activeNetParams = &dagconfig.TestnetParams
|
||||
}
|
||||
if cfg.RegressionTest {
|
||||
numNets++
|
||||
activeNetParams = &dagconfig.RegressionNetParams
|
||||
}
|
||||
if cfg.Simnet {
|
||||
numNets++
|
||||
activeNetParams = &dagconfig.SimnetParams
|
||||
}
|
||||
if cfg.Devnet {
|
||||
numNets++
|
||||
activeNetParams = &dagconfig.DevnetParams
|
||||
}
|
||||
if numNets > 1 {
|
||||
return errors.New("The testnet, regtest, simnet and devnet params " +
|
||||
"can't be used together -- choose one of the four")
|
||||
}
|
||||
|
||||
if numNets == 0 {
|
||||
return errors.New("Mainnet has not launched yet, use --testnet to run in testnet mode")
|
||||
}
|
||||
|
||||
// Validate database type.
|
||||
if !validDbType(cfg.DbType) {
|
||||
str := "The specified database type [%s] is invalid -- " +
|
||||
"supported types: %s"
|
||||
return errors.Errorf(str, cfg.DbType, strings.Join(knownDbTypes, ", "))
|
||||
}
|
||||
|
||||
// Append the network type to the data directory so it is "namespaced"
|
||||
// per network. In addition to the block database, there are other
|
||||
// pieces of data that are saved to disk such as address manager state.
|
||||
// All data is specific to a network, so namespacing the data directory
|
||||
// means each individual piece of serialized data does not have to
|
||||
// worry about changing names per network and such.
|
||||
cfg.DataDir = filepath.Join(cfg.DataDir, activeNetParams.Name)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,113 +0,0 @@
|
||||
// Copyright (c) 2015-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/util/panics"
|
||||
"github.com/pkg/errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/jessevdk/go-flags"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/logger"
|
||||
"github.com/kaspanet/kaspad/logs"
|
||||
)
|
||||
|
||||
const (
|
||||
// blockDbNamePrefix is the prefix for the kaspad block database.
|
||||
blockDbNamePrefix = "blocks"
|
||||
)
|
||||
|
||||
var (
|
||||
log *logs.Logger
|
||||
spawn func(func())
|
||||
shutdownChannel = make(chan error)
|
||||
)
|
||||
|
||||
// loadBlockDB opens the block database and returns a handle to it.
|
||||
func loadBlockDB() (database.DB, error) {
|
||||
// The database name is based on the database type.
|
||||
dbName := blockDbNamePrefix + "_" + cfg.DbType
|
||||
dbPath := filepath.Join(cfg.DataDir, dbName)
|
||||
|
||||
log.Infof("Loading block database from '%s'", dbPath)
|
||||
db, err := database.Open(cfg.DbType, dbPath, activeNetParams.Net)
|
||||
if err != nil {
|
||||
// Return the error if it's not because the database doesn't
|
||||
// exist.
|
||||
var dbErr database.Error
|
||||
if ok := errors.As(err, &dbErr); !ok || dbErr.ErrorCode !=
|
||||
database.ErrDbDoesNotExist {
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create the db if it does not exist.
|
||||
err = os.MkdirAll(cfg.DataDir, 0700)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
db, err = database.Create(cfg.DbType, dbPath, activeNetParams.Net)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
log.Info("Block database loaded")
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// realMain is the real main function for the utility. It is necessary to work
|
||||
// around the fact that deferred functions do not run when os.Exit() is called.
|
||||
func realMain() error {
|
||||
// Setup logging.
|
||||
backendLogger := logs.NewBackend()
|
||||
defer os.Stdout.Sync()
|
||||
log = backendLogger.Logger("MAIN")
|
||||
spawn = panics.GoroutineWrapperFunc(log)
|
||||
dbLog, _ := logger.Get(logger.SubsystemTags.KSDB)
|
||||
dbLog.SetLevel(logs.LevelDebug)
|
||||
|
||||
// Setup the parser options and commands.
|
||||
appName := filepath.Base(os.Args[0])
|
||||
appName = strings.TrimSuffix(appName, filepath.Ext(appName))
|
||||
parserFlags := flags.Options(flags.HelpFlag | flags.PassDoubleDash)
|
||||
parser := flags.NewNamedParser(appName, parserFlags)
|
||||
parser.AddGroup("Global Options", "", cfg)
|
||||
parser.AddCommand("fetchblock",
|
||||
"Fetch the specific block hash from the database", "",
|
||||
&fetchBlockCfg)
|
||||
parser.AddCommand("fetchblockregion",
|
||||
"Fetch the specified block region from the database", "",
|
||||
&blockRegionCfg)
|
||||
|
||||
// Parse command line and invoke the Execute function for the specified
|
||||
// command.
|
||||
if _, err := parser.Parse(); err != nil {
|
||||
var flagsErr *flags.Error
|
||||
if ok := errors.As(err, &flagsErr); ok && flagsErr.Type == flags.ErrHelp {
|
||||
parser.WriteHelp(os.Stderr)
|
||||
} else {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
// Use all processor cores.
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
|
||||
// Work around defer not working after os.Exit()
|
||||
if err := realMain(); err != nil {
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
@@ -1,82 +0,0 @@
|
||||
// Copyright (c) 2013-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/signal"
|
||||
)
|
||||
|
||||
// interruptChannel is used to receive SIGINT (Ctrl+C) signals.
|
||||
var interruptChannel chan os.Signal
|
||||
|
||||
// addHandlerChannel is used to add an interrupt handler to the list of handlers
|
||||
// to be invoked on SIGINT (Ctrl+C) signals.
|
||||
var addHandlerChannel = make(chan func())
|
||||
|
||||
// mainInterruptHandler listens for SIGINT (Ctrl+C) signals on the
|
||||
// interruptChannel and invokes the registered interruptCallbacks accordingly.
|
||||
// It also listens for callback registration. It must be run as a goroutine.
|
||||
func mainInterruptHandler() {
|
||||
// interruptCallbacks is a list of callbacks to invoke when a
|
||||
// SIGINT (Ctrl+C) is received.
|
||||
var interruptCallbacks []func()
|
||||
|
||||
// isShutdown is a flag which is used to indicate whether or not
|
||||
// the shutdown signal has already been received and hence any future
|
||||
// attempts to add a new interrupt handler should invoke them
|
||||
// immediately.
|
||||
var isShutdown bool
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-interruptChannel:
|
||||
// Ignore more than one shutdown signal.
|
||||
if isShutdown {
|
||||
log.Infof("Received SIGINT (Ctrl+C). " +
|
||||
"Already shutting down...")
|
||||
continue
|
||||
}
|
||||
|
||||
isShutdown = true
|
||||
log.Infof("Received SIGINT (Ctrl+C). Shutting down...")
|
||||
|
||||
// Run handlers in LIFO order.
|
||||
for i := range interruptCallbacks {
|
||||
idx := len(interruptCallbacks) - 1 - i
|
||||
callback := interruptCallbacks[idx]
|
||||
callback()
|
||||
}
|
||||
|
||||
// Signal the main goroutine to shutdown.
|
||||
spawn(func() {
|
||||
shutdownChannel <- nil
|
||||
})
|
||||
|
||||
case handler := <-addHandlerChannel:
|
||||
// The shutdown signal has already been received, so
|
||||
// just invoke and new handlers immediately.
|
||||
if isShutdown {
|
||||
handler()
|
||||
}
|
||||
|
||||
interruptCallbacks = append(interruptCallbacks, handler)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// addInterruptHandler adds a handler to call when a SIGINT (Ctrl+C) is
|
||||
// received.
|
||||
func addInterruptHandler(handler func()) {
|
||||
// Create the channel and start the main interrupt handler which invokes
|
||||
// all other callbacks and exits if not already done.
|
||||
if interruptChannel == nil {
|
||||
interruptChannel = make(chan os.Signal, 1)
|
||||
signal.Notify(interruptChannel, os.Interrupt)
|
||||
spawn(mainInterruptHandler)
|
||||
}
|
||||
|
||||
addHandlerChannel <- handler
|
||||
}
|
||||
31
database/cursor.go
Normal file
31
database/cursor.go
Normal file
@@ -0,0 +1,31 @@
|
||||
package database
|
||||
|
||||
// Cursor iterates over database entries given some bucket.
|
||||
type Cursor interface {
|
||||
// Next moves the iterator to the next key/value pair. It returns whether the
|
||||
// iterator is exhausted. Returns false if the cursor is closed.
|
||||
Next() bool
|
||||
|
||||
// First moves the iterator to the first key/value pair. It returns false if
|
||||
// such a pair does not exist or if the cursor is closed.
|
||||
First() bool
|
||||
|
||||
// Seek moves the iterator to the first key/value pair whose key is greater
|
||||
// than or equal to the given key. It returns ErrNotFound if such pair does not
|
||||
// exist.
|
||||
Seek(key []byte) error
|
||||
|
||||
// Key returns the key of the current key/value pair, or ErrNotFound if done.
|
||||
// Note that the key is trimmed to not include the prefix the cursor was opened
|
||||
// with. The caller should not modify the contents of the returned slice, and
|
||||
// its contents may change on the next call to Next.
|
||||
Key() ([]byte, error)
|
||||
|
||||
// Value returns the value of the current key/value pair, or ErrNotFound if done.
|
||||
// The caller should not modify the contents of the returned slice, and its
|
||||
// contents may change on the next call to Next.
|
||||
Value() ([]byte, error)
|
||||
|
||||
// Close releases associated resources.
|
||||
Close() error
|
||||
}
|
||||
36
database/dataaccessor.go
Normal file
36
database/dataaccessor.go
Normal file
@@ -0,0 +1,36 @@
|
||||
package database
|
||||
|
||||
// DataAccessor defines the common interface by which data gets
|
||||
// accessed in a generic kaspad database.
|
||||
type DataAccessor interface {
|
||||
// Put sets the value for the given key. It overwrites
|
||||
// any previous value for that key.
|
||||
Put(key []byte, value []byte) error
|
||||
|
||||
// Get gets the value for the given key. It returns
|
||||
// ErrNotFound if the given key does not exist.
|
||||
Get(key []byte) ([]byte, error)
|
||||
|
||||
// Has returns true if the database does contains the
|
||||
// given key.
|
||||
Has(key []byte) (bool, error)
|
||||
|
||||
// Delete deletes the value for the given key. Will not
|
||||
// return an error if the key doesn't exist.
|
||||
Delete(key []byte) error
|
||||
|
||||
// AppendToStore appends the given data to the store
|
||||
// defined by storeName. This function returns a serialized
|
||||
// location handle that's meant to be stored and later used
|
||||
// when querying the data that has just now been inserted.
|
||||
AppendToStore(storeName string, data []byte) ([]byte, error)
|
||||
|
||||
// RetrieveFromStore retrieves data from the store defined by
|
||||
// storeName using the given serialized location handle. It
|
||||
// returns ErrNotFound if the location does not exist. See
|
||||
// AppendToStore for further details.
|
||||
RetrieveFromStore(storeName string, location []byte) ([]byte, error)
|
||||
|
||||
// Cursor begins a new cursor over the given bucket.
|
||||
Cursor(bucket []byte) (Cursor, error)
|
||||
}
|
||||
19
database/database.go
Normal file
19
database/database.go
Normal file
@@ -0,0 +1,19 @@
|
||||
package database
|
||||
|
||||
// Database defines the interface of a database that can begin
|
||||
// transactions and close itself.
|
||||
//
|
||||
// Important: This is not part of the DataAccessor interface
|
||||
// because the Transaction interface includes it. Were we to
|
||||
// merge Database with DataAccessor, implementors of the
|
||||
// Transaction interface would be forced to implement methods
|
||||
// such as Begin and Close, which is undesirable.
|
||||
type Database interface {
|
||||
DataAccessor
|
||||
|
||||
// Begin begins a new database transaction.
|
||||
Begin() (Transaction, error)
|
||||
|
||||
// Close closes the database.
|
||||
Close() error
|
||||
}
|
||||
@@ -1,85 +1,34 @@
|
||||
/*
|
||||
Package database provides a block and metadata storage database.
|
||||
Package database provides a database for kaspad.
|
||||
|
||||
Overview
|
||||
|
||||
This package provides a database layer to store and retrieve this data in a
|
||||
simple and efficient manner.
|
||||
This package provides a database layer to store and retrieve data in a simple
|
||||
and efficient manner.
|
||||
|
||||
The default backend, ffldb, has a strong focus on speed, efficiency, and
|
||||
robustness. It makes use leveldb for the metadata, flat files for block
|
||||
storage, and strict checksums in key areas to ensure data integrity.
|
||||
The current backend is ffldb, which makes use of leveldb, flat files, and strict
|
||||
checksums in key areas to ensure data integrity.
|
||||
|
||||
A quick overview of the features database provides are as follows:
|
||||
Implementors of additional backends are required to implement the following interfaces:
|
||||
|
||||
- Key/value metadata store
|
||||
- Kaspa block storage
|
||||
- Efficient retrieval of block headers and regions (transactions, scripts, etc)
|
||||
- Read-only and read-write transactions with both manual and managed modes
|
||||
- Nested buckets
|
||||
- Supports registration of backend databases
|
||||
- Comprehensive test coverage
|
||||
DataAccessor
|
||||
|
||||
This defines the common interface by which data gets accessed in a generic kaspad
|
||||
database. Both the Database and the Transaction interfaces (see below) implement it.
|
||||
|
||||
Database
|
||||
|
||||
The main entry point is the DB interface. It exposes functionality for
|
||||
transactional-based access and storage of metadata and block data. It is
|
||||
obtained via the Create and Open functions which take a database type string
|
||||
that identifies the specific database driver (backend) to use as well as
|
||||
arguments specific to the specified driver.
|
||||
This defines the interface of a database that can begin transactions and close itself.
|
||||
|
||||
The interface provides facilities for obtaining transactions (the Tx interface)
|
||||
that are the basis of all database reads and writes. Unlike some database
|
||||
interfaces that support reading and writing without transactions, this interface
|
||||
requires transactions even when only reading or writing a single key.
|
||||
Transaction
|
||||
|
||||
The Begin function provides an unmanaged transaction while the View and Update
|
||||
functions provide a managed transaction. These are described in more detail
|
||||
below.
|
||||
This defines the interface of a generic kaspad database transaction.
|
||||
Note: transactions provide data consistency over the state of the database as it was
|
||||
when the transaction started. There is NO guarantee that if one puts data into the
|
||||
transaction then it will be available to get within the same transaction.
|
||||
|
||||
Transactions
|
||||
Cursor
|
||||
|
||||
The Tx interface provides facilities for rolling back or committing changes that
|
||||
took place while the transaction was active. It also provides the root metadata
|
||||
bucket under which all keys, values, and nested buckets are stored. A
|
||||
transaction can either be read-only or read-write and managed or unmanaged.
|
||||
|
||||
Managed versus Unmanaged Transactions
|
||||
|
||||
A managed transaction is one where the caller provides a function to execute
|
||||
within the context of the transaction and the commit or rollback is handled
|
||||
automatically depending on whether or not the provided function returns an
|
||||
error. Attempting to manually call Rollback or Commit on the managed
|
||||
transaction will result in a panic.
|
||||
|
||||
An unmanaged transaction, on the other hand, requires the caller to manually
|
||||
call Commit or Rollback when they are finished with it. Leaving transactions
|
||||
open for long periods of time can have several adverse effects, so it is
|
||||
recommended that managed transactions are used instead.
|
||||
|
||||
Buckets
|
||||
|
||||
The Bucket interface provides the ability to manipulate key/value pairs and
|
||||
nested buckets as well as iterate through them.
|
||||
|
||||
The Get, Put, and Delete functions work with key/value pairs, while the Bucket,
|
||||
CreateBucket, CreateBucketIfNotExists, and DeleteBucket functions work with
|
||||
buckets. The ForEach function allows the caller to provide a function to be
|
||||
called with each key/value pair and nested bucket in the current bucket.
|
||||
|
||||
Metadata Bucket
|
||||
|
||||
As discussed above, all of the functions which are used to manipulate key/value
|
||||
pairs and nested buckets exist on the Bucket interface. The root metadata
|
||||
bucket is the upper-most bucket in which data is stored and is created at the
|
||||
same time as the database. Use the Metadata function on the Tx interface
|
||||
to retrieve it.
|
||||
|
||||
Nested Buckets
|
||||
|
||||
The CreateBucket and CreateBucketIfNotExists functions on the Bucket interface
|
||||
provide the ability to create an arbitrary number of nested buckets. It is
|
||||
a good idea to avoid a lot of buckets with little data in them as it could lead
|
||||
to poor page utilization depending on the specific driver in use.
|
||||
This iterates over database entries given some bucket.
|
||||
*/
|
||||
package database
|
||||
|
||||
@@ -1,84 +0,0 @@
|
||||
// Copyright (c) 2015-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Driver defines a structure for backend drivers to use when they registered
|
||||
// themselves as a backend which implements the DB interface.
|
||||
type Driver struct {
|
||||
// DbType is the identifier used to uniquely identify a specific
|
||||
// database driver. There can be only one driver with the same name.
|
||||
DbType string
|
||||
|
||||
// Create is the function that will be invoked with all user-specified
|
||||
// arguments to create the database. This function must return
|
||||
// ErrDbExists if the database already exists.
|
||||
Create func(args ...interface{}) (DB, error)
|
||||
|
||||
// Open is the function that will be invoked with all user-specified
|
||||
// arguments to open the database. This function must return
|
||||
// ErrDbDoesNotExist if the database has not already been created.
|
||||
Open func(args ...interface{}) (DB, error)
|
||||
}
|
||||
|
||||
// driverList holds all of the registered database backends.
|
||||
var drivers = make(map[string]*Driver)
|
||||
|
||||
// RegisterDriver adds a backend database driver to available interfaces.
|
||||
// ErrDbTypeRegistered will be returned if the database type for the driver has
|
||||
// already been registered.
|
||||
func RegisterDriver(driver Driver) error {
|
||||
if _, exists := drivers[driver.DbType]; exists {
|
||||
str := fmt.Sprintf("driver %q is already registered",
|
||||
driver.DbType)
|
||||
return makeError(ErrDbTypeRegistered, str, nil)
|
||||
}
|
||||
|
||||
drivers[driver.DbType] = &driver
|
||||
return nil
|
||||
}
|
||||
|
||||
// SupportedDrivers returns a slice of strings that represent the database
|
||||
// drivers that have been registered and are therefore supported.
|
||||
func SupportedDrivers() []string {
|
||||
supportedDBs := make([]string, 0, len(drivers))
|
||||
for _, drv := range drivers {
|
||||
supportedDBs = append(supportedDBs, drv.DbType)
|
||||
}
|
||||
return supportedDBs
|
||||
}
|
||||
|
||||
// Create initializes and opens a database for the specified type. The
|
||||
// arguments are specific to the database type driver. See the documentation
|
||||
// for the database driver for further details.
|
||||
//
|
||||
// ErrDbUnknownType will be returned if the the database type is not registered.
|
||||
func Create(dbType string, args ...interface{}) (DB, error) {
|
||||
drv, exists := drivers[dbType]
|
||||
if !exists {
|
||||
str := fmt.Sprintf("driver %q is not registered", dbType)
|
||||
return nil, makeError(ErrDbUnknownType, str, nil)
|
||||
}
|
||||
|
||||
return drv.Create(args...)
|
||||
}
|
||||
|
||||
// Open opens an existing database for the specified type. The arguments are
|
||||
// specific to the database type driver. See the documentation for the database
|
||||
// driver for further details.
|
||||
//
|
||||
// ErrDbUnknownType will be returned if the the database type is not registered.
|
||||
func Open(dbType string, args ...interface{}) (DB, error) {
|
||||
drv, exists := drivers[dbType]
|
||||
if !exists {
|
||||
str := fmt.Sprintf("driver %q is not registered", dbType)
|
||||
return nil, makeError(ErrDbUnknownType, str, nil)
|
||||
}
|
||||
|
||||
return drv.Open(args...)
|
||||
}
|
||||
@@ -1,128 +0,0 @@
|
||||
// Copyright (c) 2015-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package database_test
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
_ "github.com/kaspanet/kaspad/database/ffldb"
|
||||
)
|
||||
|
||||
// checkDbError ensures the passed error is a database.Error with an error code
|
||||
// that matches the passed error code.
|
||||
func checkDbError(t *testing.T, testName string, gotErr error, wantErrCode database.ErrorCode) bool {
|
||||
dbErr, ok := gotErr.(database.Error)
|
||||
if !ok {
|
||||
t.Errorf("%s: unexpected error type - got %T, want %T",
|
||||
testName, gotErr, database.Error{})
|
||||
return false
|
||||
}
|
||||
if dbErr.ErrorCode != wantErrCode {
|
||||
t.Errorf("%s: unexpected error code - got %s (%s), want %s",
|
||||
testName, dbErr.ErrorCode, dbErr.Description,
|
||||
wantErrCode)
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// TestAddDuplicateDriver ensures that adding a duplicate driver does not
|
||||
// overwrite an existing one.
|
||||
func TestAddDuplicateDriver(t *testing.T) {
|
||||
supportedDrivers := database.SupportedDrivers()
|
||||
if len(supportedDrivers) == 0 {
|
||||
t.Errorf("no backends to test")
|
||||
return
|
||||
}
|
||||
dbType := supportedDrivers[0]
|
||||
|
||||
// bogusCreateDB is a function which acts as a bogus create and open
|
||||
// driver function and intentionally returns a failure that can be
|
||||
// detected if the interface allows a duplicate driver to overwrite an
|
||||
// existing one.
|
||||
bogusCreateDB := func(args ...interface{}) (database.DB, error) {
|
||||
return nil, errors.Errorf("duplicate driver allowed for database "+
|
||||
"type [%v]", dbType)
|
||||
}
|
||||
|
||||
// Create a driver that tries to replace an existing one. Set its
|
||||
// create and open functions to a function that causes a test failure if
|
||||
// they are invoked.
|
||||
driver := database.Driver{
|
||||
DbType: dbType,
|
||||
Create: bogusCreateDB,
|
||||
Open: bogusCreateDB,
|
||||
}
|
||||
testName := "duplicate driver registration"
|
||||
err := database.RegisterDriver(driver)
|
||||
if !checkDbError(t, testName, err, database.ErrDbTypeRegistered) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// TestCreateOpenFail ensures that errors which occur while opening or closing
|
||||
// a database are handled properly.
|
||||
func TestCreateOpenFail(t *testing.T) {
|
||||
// bogusCreateDB is a function which acts as a bogus create and open
|
||||
// driver function that intentionally returns a failure which can be
|
||||
// detected.
|
||||
dbType := "createopenfail"
|
||||
openError := errors.Errorf("failed to create or open database for "+
|
||||
"database type [%v]", dbType)
|
||||
bogusCreateDB := func(args ...interface{}) (database.DB, error) {
|
||||
return nil, openError
|
||||
}
|
||||
|
||||
// Create and add driver that intentionally fails when created or opened
|
||||
// to ensure errors on database open and create are handled properly.
|
||||
driver := database.Driver{
|
||||
DbType: dbType,
|
||||
Create: bogusCreateDB,
|
||||
Open: bogusCreateDB,
|
||||
}
|
||||
database.RegisterDriver(driver)
|
||||
|
||||
// Ensure creating a database with the new type fails with the expected
|
||||
// error.
|
||||
_, err := database.Create(dbType)
|
||||
if err != openError {
|
||||
t.Errorf("expected error not received - got: %v, want %v", err,
|
||||
openError)
|
||||
return
|
||||
}
|
||||
|
||||
// Ensure opening a database with the new type fails with the expected
|
||||
// error.
|
||||
_, err = database.Open(dbType)
|
||||
if err != openError {
|
||||
t.Errorf("expected error not received - got: %v, want %v", err,
|
||||
openError)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// TestCreateOpenUnsupported ensures that attempting to create or open an
|
||||
// unsupported database type is handled properly.
|
||||
func TestCreateOpenUnsupported(t *testing.T) {
|
||||
// Ensure creating a database with an unsupported type fails with the
|
||||
// expected error.
|
||||
testName := "create with unsupported database type"
|
||||
dbType := "unsupported"
|
||||
_, err := database.Create(dbType)
|
||||
if !checkDbError(t, testName, err, database.ErrDbUnknownType) {
|
||||
return
|
||||
}
|
||||
|
||||
// Ensure opening a database with the an unsupported type fails with the
|
||||
// expected error.
|
||||
testName = "open with unsupported database type"
|
||||
_, err = database.Open(dbType)
|
||||
if !checkDbError(t, testName, err, database.ErrDbUnknownType) {
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -1,211 +0,0 @@
|
||||
// Copyright (c) 2015-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ErrorCode identifies a kind of error.
|
||||
type ErrorCode int
|
||||
|
||||
// These constants are used to identify a specific database Error.
|
||||
const (
|
||||
// **************************************
|
||||
// Errors related to driver registration.
|
||||
// **************************************
|
||||
|
||||
// ErrDbTypeRegistered indicates two different database drivers
|
||||
// attempt to register with the name database type.
|
||||
ErrDbTypeRegistered ErrorCode = iota
|
||||
|
||||
// *************************************
|
||||
// Errors related to database functions.
|
||||
// *************************************
|
||||
|
||||
// ErrDbUnknownType indicates there is no driver registered for
|
||||
// the specified database type.
|
||||
ErrDbUnknownType
|
||||
|
||||
// ErrDbDoesNotExist indicates open is called for a database that
|
||||
// does not exist.
|
||||
ErrDbDoesNotExist
|
||||
|
||||
// ErrDbExists indicates create is called for a database that
|
||||
// already exists.
|
||||
ErrDbExists
|
||||
|
||||
// ErrDbNotOpen indicates a database instance is accessed before
|
||||
// it is opened or after it is closed.
|
||||
ErrDbNotOpen
|
||||
|
||||
// ErrDbAlreadyOpen indicates open was called on a database that
|
||||
// is already open.
|
||||
ErrDbAlreadyOpen
|
||||
|
||||
// ErrInvalid indicates the specified database is not valid.
|
||||
ErrInvalid
|
||||
|
||||
// ErrCorruption indicates a checksum failure occurred which invariably
|
||||
// means the database is corrupt.
|
||||
ErrCorruption
|
||||
|
||||
// ****************************************
|
||||
// Errors related to database transactions.
|
||||
// ****************************************
|
||||
|
||||
// ErrTxClosed indicates an attempt was made to commit or rollback a
|
||||
// transaction that has already had one of those operations performed.
|
||||
ErrTxClosed
|
||||
|
||||
// ErrTxNotWritable indicates an operation that requires write access to
|
||||
// the database was attempted against a read-only transaction.
|
||||
ErrTxNotWritable
|
||||
|
||||
// **************************************
|
||||
// Errors related to metadata operations.
|
||||
// **************************************
|
||||
|
||||
// ErrBucketNotFound indicates an attempt to access a bucket that has
|
||||
// not been created yet.
|
||||
ErrBucketNotFound
|
||||
|
||||
// ErrBucketExists indicates an attempt to create a bucket that already
|
||||
// exists.
|
||||
ErrBucketExists
|
||||
|
||||
// ErrBucketNameRequired indicates an attempt to create a bucket with a
|
||||
// blank name.
|
||||
ErrBucketNameRequired
|
||||
|
||||
// ErrKeyRequired indicates at attempt to insert a zero-length key.
|
||||
ErrKeyRequired
|
||||
|
||||
// ErrKeyTooLarge indicates an attmempt to insert a key that is larger
|
||||
// than the max allowed key size. The max key size depends on the
|
||||
// specific backend driver being used. As a general rule, key sizes
|
||||
// should be relatively, so this should rarely be an issue.
|
||||
ErrKeyTooLarge
|
||||
|
||||
// ErrValueTooLarge indicates an attmpt to insert a value that is larger
|
||||
// than max allowed value size. The max key size depends on the
|
||||
// specific backend driver being used.
|
||||
ErrValueTooLarge
|
||||
|
||||
// ErrIncompatibleValue indicates the value in question is invalid for
|
||||
// the specific requested operation. For example, trying create or
|
||||
// delete a bucket with an existing non-bucket key, attempting to create
|
||||
// or delete a non-bucket key with an existing bucket key, or trying to
|
||||
// delete a value via a cursor when it points to a nested bucket.
|
||||
ErrIncompatibleValue
|
||||
|
||||
// ***************************************
|
||||
// Errors related to block I/O operations.
|
||||
// ***************************************
|
||||
|
||||
// ErrBlockNotFound indicates a block with the provided hash does not
|
||||
// exist in the database.
|
||||
ErrBlockNotFound
|
||||
|
||||
// ErrBlockExists indicates a block with the provided hash already
|
||||
// exists in the database.
|
||||
ErrBlockExists
|
||||
|
||||
// ErrBlockRegionInvalid indicates a region that exceeds the bounds of
|
||||
// the specified block was requested. When the hash provided by the
|
||||
// region does not correspond to an existing block, the error will be
|
||||
// ErrBlockNotFound instead.
|
||||
ErrBlockRegionInvalid
|
||||
|
||||
// ***********************************
|
||||
// Support for driver-specific errors.
|
||||
// ***********************************
|
||||
|
||||
// ErrDriverSpecific indicates the Err field is a driver-specific error.
|
||||
// This provides a mechanism for drivers to plug-in their own custom
|
||||
// errors for any situations which aren't already covered by the error
|
||||
// codes provided by this package.
|
||||
ErrDriverSpecific
|
||||
|
||||
// numErrorCodes is the maximum error code number used in tests.
|
||||
numErrorCodes
|
||||
)
|
||||
|
||||
// Map of ErrorCode values back to their constant names for pretty printing.
|
||||
var errorCodeStrings = map[ErrorCode]string{
|
||||
ErrDbTypeRegistered: "ErrDbTypeRegistered",
|
||||
ErrDbUnknownType: "ErrDbUnknownType",
|
||||
ErrDbDoesNotExist: "ErrDbDoesNotExist",
|
||||
ErrDbExists: "ErrDbExists",
|
||||
ErrDbNotOpen: "ErrDbNotOpen",
|
||||
ErrDbAlreadyOpen: "ErrDbAlreadyOpen",
|
||||
ErrInvalid: "ErrInvalid",
|
||||
ErrCorruption: "ErrCorruption",
|
||||
ErrTxClosed: "ErrTxClosed",
|
||||
ErrTxNotWritable: "ErrTxNotWritable",
|
||||
ErrBucketNotFound: "ErrBucketNotFound",
|
||||
ErrBucketExists: "ErrBucketExists",
|
||||
ErrBucketNameRequired: "ErrBucketNameRequired",
|
||||
ErrKeyRequired: "ErrKeyRequired",
|
||||
ErrKeyTooLarge: "ErrKeyTooLarge",
|
||||
ErrValueTooLarge: "ErrValueTooLarge",
|
||||
ErrIncompatibleValue: "ErrIncompatibleValue",
|
||||
ErrBlockNotFound: "ErrBlockNotFound",
|
||||
ErrBlockExists: "ErrBlockExists",
|
||||
ErrBlockRegionInvalid: "ErrBlockRegionInvalid",
|
||||
ErrDriverSpecific: "ErrDriverSpecific",
|
||||
}
|
||||
|
||||
// String returns the ErrorCode as a human-readable name.
|
||||
func (e ErrorCode) String() string {
|
||||
if s := errorCodeStrings[e]; s != "" {
|
||||
return s
|
||||
}
|
||||
return fmt.Sprintf("Unknown ErrorCode (%d)", int(e))
|
||||
}
|
||||
|
||||
// Error provides a single type for errors that can happen during database
|
||||
// operation. It is used to indicate several types of failures including errors
|
||||
// with caller requests such as specifying invalid block regions or attempting
|
||||
// to access data against closed database transactions, driver errors, errors
|
||||
// retrieving data, and errors communicating with database servers.
|
||||
//
|
||||
// The caller can use type assertions to determine if an error is an Error and
|
||||
// access the ErrorCode field to ascertain the specific reason for the failure.
|
||||
//
|
||||
// The ErrDriverSpecific error code will also have the Err field set with the
|
||||
// underlying error. Depending on the backend driver, the Err field might be
|
||||
// set to the underlying error for other error codes as well.
|
||||
type Error struct {
|
||||
ErrorCode ErrorCode // Describes the kind of error
|
||||
Description string // Human readable description of the issue
|
||||
Err error // Underlying error
|
||||
}
|
||||
|
||||
// Error satisfies the error interface and prints human-readable errors.
|
||||
func (e Error) Error() string {
|
||||
if e.Err != nil {
|
||||
return e.Description + ": " + e.Err.Error()
|
||||
}
|
||||
return e.Description
|
||||
}
|
||||
|
||||
// makeError creates an Error given a set of arguments. The error code must
|
||||
// be one of the error codes provided by this package.
|
||||
func makeError(c ErrorCode, desc string, err error) Error {
|
||||
return Error{ErrorCode: c, Description: desc, Err: err}
|
||||
}
|
||||
|
||||
// IsErrorCode returns whether or not the provided error is a script error with
|
||||
// the provided error code.
|
||||
func IsErrorCode(err error, c ErrorCode) bool {
|
||||
var errError Error
|
||||
if ok := errors.As(err, &errError); ok {
|
||||
return errError.ErrorCode == c
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
@@ -1,118 +0,0 @@
|
||||
// Copyright (c) 2015-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestErrorCodeStringer tests the stringized output for the ErrorCode type.
|
||||
func TestErrorCodeStringer(t *testing.T) {
|
||||
tests := []struct {
|
||||
in ErrorCode
|
||||
want string
|
||||
}{
|
||||
{ErrDbTypeRegistered, "ErrDbTypeRegistered"},
|
||||
{ErrDbUnknownType, "ErrDbUnknownType"},
|
||||
{ErrDbDoesNotExist, "ErrDbDoesNotExist"},
|
||||
{ErrDbExists, "ErrDbExists"},
|
||||
{ErrDbNotOpen, "ErrDbNotOpen"},
|
||||
{ErrDbAlreadyOpen, "ErrDbAlreadyOpen"},
|
||||
{ErrInvalid, "ErrInvalid"},
|
||||
{ErrCorruption, "ErrCorruption"},
|
||||
{ErrTxClosed, "ErrTxClosed"},
|
||||
{ErrTxNotWritable, "ErrTxNotWritable"},
|
||||
{ErrBucketNotFound, "ErrBucketNotFound"},
|
||||
{ErrBucketExists, "ErrBucketExists"},
|
||||
{ErrBucketNameRequired, "ErrBucketNameRequired"},
|
||||
{ErrKeyRequired, "ErrKeyRequired"},
|
||||
{ErrKeyTooLarge, "ErrKeyTooLarge"},
|
||||
{ErrValueTooLarge, "ErrValueTooLarge"},
|
||||
{ErrIncompatibleValue, "ErrIncompatibleValue"},
|
||||
{ErrBlockNotFound, "ErrBlockNotFound"},
|
||||
{ErrBlockExists, "ErrBlockExists"},
|
||||
{ErrBlockRegionInvalid, "ErrBlockRegionInvalid"},
|
||||
{ErrDriverSpecific, "ErrDriverSpecific"},
|
||||
|
||||
{0xffff, "Unknown ErrorCode (65535)"},
|
||||
}
|
||||
|
||||
// Detect additional error codes that don't have the stringer added.
|
||||
if len(tests)-1 != int(TstNumErrorCodes) {
|
||||
t.Errorf("It appears an error code was added without adding " +
|
||||
"an associated stringer test")
|
||||
}
|
||||
|
||||
t.Logf("Running %d tests", len(tests))
|
||||
for i, test := range tests {
|
||||
result := test.in.String()
|
||||
if result != test.want {
|
||||
t.Errorf("String #%d\ngot: %s\nwant: %s", i, result,
|
||||
test.want)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestError tests the error output for the Error type.
|
||||
func TestError(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
in Error
|
||||
want string
|
||||
}{
|
||||
{
|
||||
Error{Description: "some error"},
|
||||
"some error",
|
||||
},
|
||||
{
|
||||
Error{Description: "human-readable error"},
|
||||
"human-readable error",
|
||||
},
|
||||
{
|
||||
Error{
|
||||
ErrorCode: ErrDriverSpecific,
|
||||
Description: "some error",
|
||||
Err: errors.New("driver-specific error"),
|
||||
},
|
||||
"some error: driver-specific error",
|
||||
},
|
||||
}
|
||||
|
||||
t.Logf("Running %d tests", len(tests))
|
||||
for i, test := range tests {
|
||||
result := test.in.Error()
|
||||
if result != test.want {
|
||||
t.Errorf("Error #%d\n got: %s want: %s", i, result,
|
||||
test.want)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsErrorCode(t *testing.T) {
|
||||
dummyError := errors.New("")
|
||||
|
||||
tests := []struct {
|
||||
err error
|
||||
code ErrorCode
|
||||
expectedResult bool
|
||||
}{
|
||||
{makeError(ErrBucketExists, "", dummyError), ErrBucketExists, true},
|
||||
{makeError(ErrBucketExists, "", dummyError), ErrBlockExists, false},
|
||||
{dummyError, ErrBlockExists, false},
|
||||
{nil, ErrBlockExists, false},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
actualResult := IsErrorCode(test.err, test.code)
|
||||
if test.expectedResult != actualResult {
|
||||
t.Errorf("TestIsErrorCode: %d: Expected: %t, but got: %t",
|
||||
i, test.expectedResult, actualResult)
|
||||
}
|
||||
}
|
||||
}
|
||||
12
database/errors.go
Normal file
12
database/errors.go
Normal file
@@ -0,0 +1,12 @@
|
||||
package database
|
||||
|
||||
import "errors"
|
||||
|
||||
// ErrNotFound denotes that the requested item was not
|
||||
// found in the database.
|
||||
var ErrNotFound = errors.New("not found")
|
||||
|
||||
// IsNotFoundError checks whether an error is an ErrNotFound.
|
||||
func IsNotFoundError(err error) bool {
|
||||
return errors.Is(err, ErrNotFound)
|
||||
}
|
||||
@@ -1,180 +0,0 @@
|
||||
// Copyright (c) 2015-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package database_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/pkg/errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
_ "github.com/kaspanet/kaspad/database/ffldb"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
)
|
||||
|
||||
// This example demonstrates creating a new database.
|
||||
func ExampleCreate() {
|
||||
// This example assumes the ffldb driver is imported.
|
||||
//
|
||||
// import (
|
||||
// "github.com/kaspanet/kaspad/database"
|
||||
// _ "github.com/kaspanet/kaspad/database/ffldb"
|
||||
// )
|
||||
|
||||
// Create a database and schedule it to be closed and removed on exit.
|
||||
// Typically you wouldn't want to remove the database right away like
|
||||
// this, nor put it in the temp directory, but it's done here to ensure
|
||||
// the example cleans up after itself.
|
||||
dbPath := filepath.Join(os.TempDir(), "examplecreate")
|
||||
db, err := database.Create("ffldb", dbPath, wire.Mainnet)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
defer os.RemoveAll(dbPath)
|
||||
defer db.Close()
|
||||
|
||||
// Output:
|
||||
}
|
||||
|
||||
// This example demonstrates creating a new database and using a managed
|
||||
// read-write transaction to store and retrieve metadata.
|
||||
func Example_basicUsage() {
|
||||
// This example assumes the ffldb driver is imported.
|
||||
//
|
||||
// import (
|
||||
// "github.com/kaspanet/kaspad/database"
|
||||
// _ "github.com/kaspanet/kaspad/database/ffldb"
|
||||
// )
|
||||
|
||||
// Create a database and schedule it to be closed and removed on exit.
|
||||
// Typically you wouldn't want to remove the database right away like
|
||||
// this, nor put it in the temp directory, but it's done here to ensure
|
||||
// the example cleans up after itself.
|
||||
dbPath := filepath.Join(os.TempDir(), "exampleusage")
|
||||
// ensure that DB does not exist before test starts
|
||||
os.RemoveAll(dbPath)
|
||||
db, err := database.Create("ffldb", dbPath, wire.Mainnet)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
defer os.RemoveAll(dbPath)
|
||||
defer db.Close()
|
||||
|
||||
// Use the Update function of the database to perform a managed
|
||||
// read-write transaction. The transaction will automatically be rolled
|
||||
// back if the supplied inner function returns a non-nil error.
|
||||
err = db.Update(func(dbTx database.Tx) error {
|
||||
// Store a key/value pair directly in the metadata bucket.
|
||||
// Typically a nested bucket would be used for a given feature,
|
||||
// but this example is using the metadata bucket directly for
|
||||
// simplicity.
|
||||
key := []byte("mykey")
|
||||
value := []byte("myvalue")
|
||||
if err := dbTx.Metadata().Put(key, value); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Read the key back and ensure it matches.
|
||||
if !bytes.Equal(dbTx.Metadata().Get(key), value) {
|
||||
return errors.Errorf("unexpected value for key '%s'", key)
|
||||
}
|
||||
|
||||
// Create a new nested bucket under the metadata bucket.
|
||||
nestedBucketKey := []byte("mybucket")
|
||||
nestedBucket, err := dbTx.Metadata().CreateBucket(nestedBucketKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// The key from above that was set in the metadata bucket does
|
||||
// not exist in this new nested bucket.
|
||||
if nestedBucket.Get(key) != nil {
|
||||
return errors.Errorf("key '%s' is not expected nil", key)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Output:
|
||||
}
|
||||
|
||||
// This example demonstrates creating a new database, using a managed read-write
|
||||
// transaction to store a block, and using a managed read-only transaction to
|
||||
// fetch the block.
|
||||
func Example_blockStorageAndRetrieval() {
|
||||
// This example assumes the ffldb driver is imported.
|
||||
//
|
||||
// import (
|
||||
// "github.com/kaspanet/kaspad/database"
|
||||
// _ "github.com/kaspanet/kaspad/database/ffldb"
|
||||
// )
|
||||
|
||||
// Create a database and schedule it to be closed and removed on exit.
|
||||
// Typically you wouldn't want to remove the database right away like
|
||||
// this, nor put it in the temp directory, but it's done here to ensure
|
||||
// the example cleans up after itself.
|
||||
dbPath := filepath.Join(os.TempDir(), "exampleblkstorage")
|
||||
db, err := database.Create("ffldb", dbPath, wire.Mainnet)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
defer os.RemoveAll(dbPath)
|
||||
defer db.Close()
|
||||
|
||||
// Use the Update function of the database to perform a managed
|
||||
// read-write transaction and store a genesis block in the database as
|
||||
// and example.
|
||||
err = db.Update(func(dbTx database.Tx) error {
|
||||
genesisBlock := dagconfig.MainnetParams.GenesisBlock
|
||||
return dbTx.StoreBlock(util.NewBlock(genesisBlock))
|
||||
})
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Use the View function of the database to perform a managed read-only
|
||||
// transaction and fetch the block stored above.
|
||||
var loadedBlockBytes []byte
|
||||
err = db.Update(func(dbTx database.Tx) error {
|
||||
genesisHash := dagconfig.MainnetParams.GenesisHash
|
||||
blockBytes, err := dbTx.FetchBlock(genesisHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// As documented, all data fetched from the database is only
|
||||
// valid during a database transaction in order to support
|
||||
// zero-copy backends. Thus, make a copy of the data so it
|
||||
// can be used outside of the transaction.
|
||||
loadedBlockBytes = make([]byte, len(blockBytes))
|
||||
copy(loadedBlockBytes, blockBytes)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Typically at this point, the block could be deserialized via the
|
||||
// wire.MsgBlock.Deserialize function or used in its serialized form
|
||||
// depending on need. However, for this example, just display the
|
||||
// number of serialized bytes to show it was loaded as expected.
|
||||
fmt.Printf("Serialized block size: %d bytes\n", len(loadedBlockBytes))
|
||||
|
||||
// Output:
|
||||
// Serialized block size: 280 bytes
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
// Copyright (c) 2015-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
This test file is part of the database package rather than than the
|
||||
database_test package so it can bridge access to the internals to properly test
|
||||
cases which are either not possible or can't reliably be tested via the public
|
||||
interface. The functions, constants, and variables are only exported while the
|
||||
tests are being run.
|
||||
*/
|
||||
|
||||
package database
|
||||
|
||||
// TstNumErrorCodes makes the internal numErrorCodes parameter available to the
// test package. Like everything else in this file, it is only exported while
// the tests are being run.
const TstNumErrorCodes = numErrorCodes
|
||||
@@ -1,34 +0,0 @@
|
||||
ffldb
|
||||
=====
|
||||
|
||||
[](https://choosealicense.com/licenses/isc/)
|
||||
[](http://godoc.org/github.com/kaspanet/kaspad/database/ffldb)
|
||||
=======
|
||||
|
||||
Package ffldb implements a driver for the database package that uses leveldb for
|
||||
the backing metadata and flat files for block storage.
|
||||
|
||||
This driver is the recommended driver for use with kaspad. It makes use of leveldb
|
||||
for the metadata, flat files for block storage, and checksums in key areas to
|
||||
ensure data integrity.
|
||||
|
||||
## Usage
|
||||
|
||||
This package is a driver to the database package and provides the database type
|
||||
of "ffldb". The parameters the Open and Create functions take are the
|
||||
database path as a string and the block network.
|
||||
|
||||
```Go
|
||||
db, err := database.Open("ffldb", "path/to/database", wire.Mainnet)
|
||||
if err != nil {
|
||||
// Handle error
|
||||
}
|
||||
```
|
||||
|
||||
```Go
|
||||
db, err := database.Create("ffldb", "path/to/database", wire.Mainnet)
|
||||
if err != nil {
|
||||
// Handle error
|
||||
}
|
||||
```
|
||||
|
||||
@@ -1,97 +0,0 @@
|
||||
// Copyright (c) 2015-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ffldb
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
)
|
||||
|
||||
// BenchmarkBlockHeader benchmarks how long it takes to load the mainnet genesis
|
||||
// block header.
|
||||
func BenchmarkBlockHeader(b *testing.B) {
|
||||
// Start by creating a new database and populating it with the mainnet
|
||||
// genesis block.
|
||||
dbPath := filepath.Join(os.TempDir(), "ffldb-benchblkhdr")
|
||||
_ = os.RemoveAll(dbPath)
|
||||
db, err := database.Create("ffldb", dbPath, blockDataNet)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(dbPath)
|
||||
defer db.Close()
|
||||
err = db.Update(func(dbTx database.Tx) error {
|
||||
block := util.NewBlock(dagconfig.MainnetParams.GenesisBlock)
|
||||
return dbTx.StoreBlock(block)
|
||||
})
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
err = db.View(func(dbTx database.Tx) error {
|
||||
blockHash := dagconfig.MainnetParams.GenesisHash
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := dbTx.FetchBlockHeader(blockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
// Don't benchmark teardown.
|
||||
b.StopTimer()
|
||||
}
|
||||
|
||||
// BenchmarkBlock benchmarks how long it takes to load the mainnet genesis
// block.
func BenchmarkBlock(b *testing.B) {
	// Start by creating a new database and populating it with the mainnet
	// genesis block.
	dbPath := filepath.Join(os.TempDir(), "ffldb-benchblk")
	_ = os.RemoveAll(dbPath)
	db, err := database.Create("ffldb", dbPath, blockDataNet)
	if err != nil {
		b.Fatal(err)
	}
	defer os.RemoveAll(dbPath)
	defer db.Close()
	err = db.Update(func(dbTx database.Tx) error {
		block := util.NewBlock(dagconfig.MainnetParams.GenesisBlock)
		return dbTx.StoreBlock(block)
	})
	if err != nil {
		b.Fatal(err)
	}

	b.ReportAllocs()
	b.ResetTimer()
	err = db.View(func(dbTx database.Tx) error {
		blockHash := dagconfig.MainnetParams.GenesisHash
		for i := 0; i < b.N; i++ {
			_, err := dbTx.FetchBlock(blockHash)
			if err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		b.Fatal(err)
	}

	// Don't benchmark teardown.
	b.StopTimer()
}
|
||||
@@ -1,765 +0,0 @@
|
||||
// Copyright (c) 2015-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This file contains the implementation functions for reading, writing, and
|
||||
// otherwise working with the flat files that house the actual blocks.
|
||||
|
||||
package ffldb
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"github.com/pkg/errors"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
)
|
||||
|
||||
// Default limits for the flat-file block store. The blockStore struct carries
// its own copies of these values so the whitebox tests can override them.
const (
	// maxOpenFiles is the max number of open files to maintain in the
	// open blocks cache. Note that this does not include the current
	// write file, so there will typically be one more than this value open.
	maxOpenFiles = 25

	// maxBlockFileSize is the maximum size for each file used to store
	// blocks.
	//
	// NOTE: The current code uses uint32 for all offsets, so this value
	// must be less than 2^32 (4 GiB). This is also why it's a typed
	// constant.
	maxBlockFileSize uint32 = 512 * 1024 * 1024 // 512 MiB
)
|
||||
|
||||
var (
	// castagnoli houses the Castagnoli polynomial used for CRC-32 checksums.
	castagnoli = crc32.MakeTable(crc32.Castagnoli)
)
||||
|
||||
// filer is an interface which acts very similar to a *os.File and is typically
// implemented by it. It exists so the test code can provide mock files for
// properly testing corruption and file system issues. Only the subset of
// *os.File functionality used by the block store is included.
type filer interface {
	io.Closer
	io.WriterAt
	io.ReaderAt
	Truncate(size int64) error
	Sync() error
}
|
||||
|
||||
// lockableFile represents a block file on disk that has been opened for either
// read or read/write access. It also contains a read-write mutex to support
// multiple concurrent readers.
type lockableFile struct {
	sync.RWMutex

	// file is the underlying open file handle; callers lock the embedded
	// RWMutex before reading from or closing it.
	file filer
}
|
||||
|
||||
// writeCursor represents the current file and offset of the block file on disk
// for performing all writes. It also contains a read-write mutex to support
// multiple concurrent readers which can reuse the file handle.
type writeCursor struct {
	sync.RWMutex

	// curFile is the current block file that will be appended to when
	// writing new blocks.
	curFile *lockableFile

	// curFileNum is the current block file number and is used to allow
	// readers to use the same open file handle.
	curFileNum uint32

	// curOffset is the offset in the current write block file where the
	// next new block will be written. It is only read/changed during
	// writeBlock, which can only run during a write transaction.
	curOffset uint32
}
|
||||
|
||||
// blockStore houses information used to handle reading and writing blocks (and
// part of blocks) into flat files with support for multiple concurrent readers.
type blockStore struct {
	// network is the specific network to use in the flat files for each
	// block.
	network wire.KaspaNet

	// basePath is the base path used for the flat block files and metadata.
	basePath string

	// maxBlockFileSize is the maximum size for each file used to store
	// blocks. It is defined on the store so the whitebox tests can
	// override the value.
	maxBlockFileSize uint32

	// maxOpenFiles is the max number of open files to maintain in the
	// open blocks cache. Note that this does not include the current
	// write file, so there will typically be one more than this value open.
	// It is defined on the store so the whitebox tests can override the value.
	maxOpenFiles int

	// The following fields are related to the flat files which hold the
	// actual blocks. The number of open files is limited by maxOpenFiles.
	//
	// obfMutex protects concurrent access to the openBlockFiles map. It is
	// a RWMutex so multiple readers can simultaneously access open files.
	//
	// openBlockFiles houses the open file handles for existing block files
	// which have been opened read-only along with an individual RWMutex.
	// This scheme allows multiple concurrent readers to the same file while
	// preventing the file from being closed out from under them.
	//
	// lruMutex protects concurrent access to the least recently used list
	// and lookup map.
	//
	// openBlocksLRU tracks how the open files are referenced by pushing the
	// most recently used files to the front of the list thereby trickling
	// the least recently used files to end of the list. When a file needs
	// to be closed due to exceeding the max number of allowed open
	// files, the one at the end of the list is closed.
	//
	// fileNumToLRUElem is a mapping between a specific block file number
	// and the associated list element on the least recently used list.
	//
	// Thus, with the combination of these fields, the database supports
	// concurrent non-blocking reads across multiple and individual files
	// along with intelligently limiting the number of open file handles by
	// closing the least recently used files as needed.
	//
	// NOTE: The locking order used throughout is well-defined and MUST be
	// followed. Failure to do so could lead to deadlocks. In particular,
	// the locking order is as follows:
	// 1) obfMutex
	// 2) lruMutex
	// 3) writeCursor mutex
	// 4) specific file mutexes
	//
	// None of the mutexes are required to be locked at the same time, and
	// often aren't. However, if they are to be locked simultaneously, they
	// MUST be locked in the order previously specified.
	//
	// Due to the high performance and multi-read concurrency requirements,
	// write locks should only be held for the minimum time necessary.
	obfMutex         sync.RWMutex
	lruMutex         sync.Mutex
	openBlocksLRU    *list.List // Contains uint32 block file numbers.
	fileNumToLRUElem map[uint32]*list.Element
	openBlockFiles   map[uint32]*lockableFile

	// writeCursor houses the state for the current file and location that
	// new blocks are written to.
	writeCursor *writeCursor

	// These functions are set to openFile, openWriteFile, and deleteFile by
	// default, but are exposed here to allow the whitebox tests to replace
	// them when working with mock files.
	openFileFunc      func(fileNum uint32) (*lockableFile, error)
	openWriteFileFunc func(fileNum uint32) (filer, error)
	deleteFileFunc    func(fileNum uint32) error
}
|
||||
|
||||
// blockLocation identifies a particular block file and location.
type blockLocation struct {
	// blockFileNum is the number of the flat file that holds the block.
	blockFileNum uint32

	// fileOffset is the byte offset within the file where the block
	// record starts.
	fileOffset uint32

	// blockLen is the length in bytes of the full block record, including
	// the network, length, and checksum fields.
	blockLen uint32
}
|
||||
|
||||
// deserializeBlockLoc deserializes the passed serialized block location
|
||||
// information. This is data stored into the block index metadata for each
|
||||
// block. The serialized data passed to this function MUST be at least
|
||||
// blockLocSize bytes or it will panic. The error check is avoided here because
|
||||
// this information will always be coming from the block index which includes a
|
||||
// checksum to detect corruption. Thus it is safe to use this unchecked here.
|
||||
func deserializeBlockLoc(serializedLoc []byte) blockLocation {
|
||||
// The serialized block location format is:
|
||||
//
|
||||
// [0:4] Block file (4 bytes)
|
||||
// [4:8] File offset (4 bytes)
|
||||
// [8:12] Block length (4 bytes)
|
||||
return blockLocation{
|
||||
blockFileNum: byteOrder.Uint32(serializedLoc[0:4]),
|
||||
fileOffset: byteOrder.Uint32(serializedLoc[4:8]),
|
||||
blockLen: byteOrder.Uint32(serializedLoc[8:12]),
|
||||
}
|
||||
}
|
||||
|
||||
// serializeBlockLoc returns the serialization of the passed block location.
|
||||
// This is data to be stored into the block index metadata for each block.
|
||||
func serializeBlockLoc(loc blockLocation) []byte {
|
||||
// The serialized block location format is:
|
||||
//
|
||||
// [0:4] Block file (4 bytes)
|
||||
// [4:8] File offset (4 bytes)
|
||||
// [8:12] Block length (4 bytes)
|
||||
var serializedData [12]byte
|
||||
byteOrder.PutUint32(serializedData[0:4], loc.blockFileNum)
|
||||
byteOrder.PutUint32(serializedData[4:8], loc.fileOffset)
|
||||
byteOrder.PutUint32(serializedData[8:12], loc.blockLen)
|
||||
return serializedData[:]
|
||||
}
|
||||
|
||||
// blockFilePath returns the file path for the provided block file number.
func blockFilePath(dbPath string, fileNum uint32) string {
	// Nine digits of zero padding are used for the filenames. 9 digits
	// provide 10^9 files @ 512MiB each for a total of ~476.84PiB.
	return filepath.Join(dbPath, fmt.Sprintf("%09d.fdb", fileNum))
}
|
||||
|
||||
// openWriteFile returns a file handle for the passed flat file number in
|
||||
// read/write mode. The file will be created if needed. It is typically used
|
||||
// for the current file that will have all new data appended. Unlike openFile,
|
||||
// this function does not keep track of the open file and it is not subject to
|
||||
// the maxOpenFiles limit.
|
||||
func (s *blockStore) openWriteFile(fileNum uint32) (filer, error) {
|
||||
// The current block file needs to be read-write so it is possible to
|
||||
// append to it. Also, it shouldn't be part of the least recently used
|
||||
// file.
|
||||
filePath := blockFilePath(s.basePath, fileNum)
|
||||
file, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE, 0666)
|
||||
if err != nil {
|
||||
str := fmt.Sprintf("failed to open file %q: %s", filePath, err)
|
||||
return nil, makeDbErr(database.ErrDriverSpecific, str, err)
|
||||
}
|
||||
|
||||
return file, nil
|
||||
}
|
||||
|
||||
// openFile returns a read-only file handle for the passed flat file number.
// The function also keeps track of the open files, performs least recently
// used tracking, and limits the number of open files to maxOpenFiles by closing
// the least recently used file as needed.
//
// This function MUST be called with the overall files mutex (s.obfMutex) locked
// for WRITES.
func (s *blockStore) openFile(fileNum uint32) (*lockableFile, error) {
	// Open the appropriate file as read-only.
	filePath := blockFilePath(s.basePath, fileNum)
	file, err := os.Open(filePath)
	if err != nil {
		return nil, makeDbErr(database.ErrDriverSpecific, err.Error(),
			err)
	}
	blockFile := &lockableFile{file: file}

	// Close the least recently used file if the open file count exceeds
	// the max allowed open files. This is not done until after the file
	// open in case the file fails to open, in which case there is no need
	// to close any files.
	//
	// A write lock is required on the LRU list here to protect against
	// modifications happening as already open files are read from and
	// shuffled to the front of the list.
	//
	// Also, add the file that was just opened to the front of the least
	// recently used list to indicate it is the most recently used file and
	// therefore should be closed last.
	s.lruMutex.Lock()
	lruList := s.openBlocksLRU
	if lruList.Len() >= s.maxOpenFiles {
		lruFileNum := lruList.Remove(lruList.Back()).(uint32)
		oldBlockFile := s.openBlockFiles[lruFileNum]

		// Close the old file under the write lock for the file in case
		// any readers are currently reading from it so it's not closed
		// out from under them.
		oldBlockFile.Lock()
		_ = oldBlockFile.file.Close()
		oldBlockFile.Unlock()

		delete(s.openBlockFiles, lruFileNum)
		delete(s.fileNumToLRUElem, lruFileNum)
	}
	s.fileNumToLRUElem[fileNum] = lruList.PushFront(fileNum)
	s.lruMutex.Unlock()

	// Store a reference to it in the open block files map.
	s.openBlockFiles[fileNum] = blockFile

	return blockFile, nil
}
|
||||
|
||||
// deleteFile removes the block file for the passed flat file number. The file
|
||||
// must already be closed and it is the responsibility of the caller to do any
|
||||
// other state cleanup necessary.
|
||||
func (s *blockStore) deleteFile(fileNum uint32) error {
|
||||
filePath := blockFilePath(s.basePath, fileNum)
|
||||
if err := os.Remove(filePath); err != nil {
|
||||
return makeDbErr(database.ErrDriverSpecific, err.Error(), err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// blockFile attempts to return an existing file handle for the passed flat file
// number if it is already open as well as marking it as most recently used. It
// will also open the file when it's not already open subject to the rules
// described in openFile.
//
// NOTE: The returned block file will already have the read lock acquired and
// the caller MUST call .RUnlock() to release it once it has finished all read
// operations. This is necessary because otherwise it would be possible for a
// separate goroutine to close the file after it is returned from here, but
// before the caller has acquired a read lock.
func (s *blockStore) blockFile(fileNum uint32) (*lockableFile, error) {
	// When the requested block file is open for writes, return it.
	wc := s.writeCursor
	wc.RLock()
	if fileNum == wc.curFileNum && wc.curFile.file != nil {
		obf := wc.curFile
		obf.RLock()
		wc.RUnlock()
		return obf, nil
	}
	wc.RUnlock()

	// Try to return an open file under the overall files read lock.
	s.obfMutex.RLock()
	if obf, ok := s.openBlockFiles[fileNum]; ok {
		// Mark the file as most recently used.
		s.lruMutex.Lock()
		s.openBlocksLRU.MoveToFront(s.fileNumToLRUElem[fileNum])
		s.lruMutex.Unlock()

		obf.RLock()
		s.obfMutex.RUnlock()
		return obf, nil
	}
	s.obfMutex.RUnlock()

	// Since the file isn't open already, need to check the open block files
	// map again under write lock in case multiple readers got here and a
	// separate one is already opening the file.
	s.obfMutex.Lock()
	if obf, ok := s.openBlockFiles[fileNum]; ok {
		obf.RLock()
		s.obfMutex.Unlock()
		return obf, nil
	}

	// The file isn't open, so open it while potentially closing the least
	// recently used one as needed.
	obf, err := s.openFileFunc(fileNum)
	if err != nil {
		s.obfMutex.Unlock()
		return nil, err
	}
	obf.RLock()
	s.obfMutex.Unlock()
	return obf, nil
}
|
||||
|
||||
// writeData is a helper function for writeBlock which writes the provided data
// at the current write offset and updates the write cursor accordingly. The
// field name parameter is only used when there is an error to provide a nicer
// error message.
//
// The write cursor will be advanced the number of bytes actually written in the
// event of failure.
//
// NOTE: This function MUST be called with the write cursor current file lock
// held and must only be called during a write transaction so it is effectively
// locked for writes. Also, the write cursor current file must NOT be nil.
func (s *blockStore) writeData(data []byte, fieldName string) error {
	wc := s.writeCursor
	n, err := wc.curFile.file.WriteAt(data, int64(wc.curOffset))
	// Advance the cursor by the bytes actually written even when the write
	// failed partway through.
	wc.curOffset += uint32(n)
	if err != nil {
		// A full disk is treated as unrecoverable here: log and
		// terminate the process rather than returning an error.
		var pathErr *os.PathError
		if ok := errors.As(err, &pathErr); ok && pathErr.Err == syscall.ENOSPC {
			log.Errorf("No space left on the hard disk, exiting...")
			os.Exit(1)
		}
		str := fmt.Sprintf("failed to write %s to file %d at "+
			"offset %d: %s", fieldName, wc.curFileNum,
			wc.curOffset-uint32(n), err)
		return makeDbErr(database.ErrDriverSpecific, str, err)
	}

	return nil
}
|
||||
|
||||
// writeBlock appends the specified raw block bytes to the store's write cursor
// location and increments it accordingly. When the block would exceed the max
// file size for the current flat file, this function will close the current
// file, create the next file, update the write cursor, and write the block to
// the new file.
//
// The write cursor will also be advanced the number of bytes actually written
// in the event of failure.
//
// Format: <network><block length><serialized block><checksum>
func (s *blockStore) writeBlock(rawBlock []byte) (blockLocation, error) {
	// Compute how many bytes will be written.
	// 4 bytes for block network + 4 bytes for block length +
	// length of raw block + 4 bytes for checksum.
	blockLen := uint32(len(rawBlock))
	fullLen := blockLen + 12

	// Move to the next block file if adding the new block would exceed the
	// max allowed size for the current block file. Also detect overflow
	// to be paranoid, even though it isn't possible currently, numbers
	// might change in the future to make it possible.
	//
	// NOTE: The writeCursor.offset field isn't protected by the mutex
	// since it's only read/changed during this function which can only be
	// called during a write transaction, of which there can be only one at
	// a time.
	wc := s.writeCursor
	finalOffset := wc.curOffset + fullLen
	if finalOffset < wc.curOffset || finalOffset > s.maxBlockFileSize {
		// This is done under the write cursor lock since the curFileNum
		// field is accessed elsewhere by readers.
		//
		// Close the current write file to force a read-only reopen
		// with LRU tracking. The close is done under the write lock
		// for the file to prevent it from being closed out from under
		// any readers currently reading from it.
		wc.Lock()
		wc.curFile.Lock()
		if wc.curFile.file != nil {
			_ = wc.curFile.file.Close()
			wc.curFile.file = nil
		}
		wc.curFile.Unlock()

		// Start writes into next file.
		wc.curFileNum++
		wc.curOffset = 0
		wc.Unlock()
	}

	// All writes are done under the write lock for the file to ensure any
	// readers are finished and blocked first.
	wc.curFile.Lock()
	defer wc.curFile.Unlock()

	// Open the current file if needed. This will typically only be the
	// case when moving to the next file to write to or on initial database
	// load. However, it might also be the case if rollbacks happened after
	// file writes started during a transaction commit.
	if wc.curFile.file == nil {
		file, err := s.openWriteFileFunc(wc.curFileNum)
		if err != nil {
			return blockLocation{}, err
		}
		wc.curFile.file = file
	}

	// Kaspa network.
	origOffset := wc.curOffset
	hasher := crc32.New(castagnoli)
	var scratch [4]byte
	byteOrder.PutUint32(scratch[:], uint32(s.network))
	if err := s.writeData(scratch[:], "network"); err != nil {
		return blockLocation{}, err
	}
	_, _ = hasher.Write(scratch[:])

	// Block length.
	byteOrder.PutUint32(scratch[:], blockLen)
	if err := s.writeData(scratch[:], "block length"); err != nil {
		return blockLocation{}, err
	}
	_, _ = hasher.Write(scratch[:])

	// Serialized block.
	if err := s.writeData(rawBlock[:], "block"); err != nil {
		return blockLocation{}, err
	}
	_, _ = hasher.Write(rawBlock)

	// Castagnoli CRC-32 as a checksum of all the previous.
	if err := s.writeData(hasher.Sum(nil), "checksum"); err != nil {
		return blockLocation{}, err
	}

	// The location points at the start of the record (the network field)
	// and its length covers the full record including the checksum.
	loc := blockLocation{
		blockFileNum: wc.curFileNum,
		fileOffset:   origOffset,
		blockLen:     fullLen,
	}
	return loc, nil
}
|
||||
|
||||
// readBlock reads the specified block record and returns the serialized block.
// It ensures the integrity of the block data by checking that the serialized
// network matches the current network associated with the block store and
// comparing the calculated checksum against the one stored in the flat file.
// This function also automatically handles all file management such as opening
// and closing files as necessary to stay within the maximum allowed open files
// limit.
//
// Returns ErrDriverSpecific if the data fails to read for any reason and
// ErrCorruption if the checksum of the read data doesn't match the checksum
// read from the file.
//
// Format: <network><block length><serialized block><checksum>
func (s *blockStore) readBlock(hash *daghash.Hash, loc blockLocation) ([]byte, error) {
	// Get the referenced block file handle opening the file as needed. The
	// function also handles closing files as needed to avoid going over the
	// max allowed open files.
	blockFile, err := s.blockFile(loc.blockFileNum)
	if err != nil {
		return nil, err
	}

	// Read the full block record, releasing the file's read lock (which
	// blockFile acquired) as soon as the read completes.
	serializedData := make([]byte, loc.blockLen)
	n, err := blockFile.file.ReadAt(serializedData, int64(loc.fileOffset))
	blockFile.RUnlock()
	if err != nil {
		str := fmt.Sprintf("failed to read block %s from file %d, "+
			"offset %d: %s", hash, loc.blockFileNum, loc.fileOffset,
			err)
		return nil, makeDbErr(database.ErrDriverSpecific, str, err)
	}

	// Calculate the checksum of the read data and ensure it matches the
	// serialized checksum. This will detect any data corruption in the
	// flat file without having to do much more expensive merkle root
	// calculations on the loaded block.
	serializedChecksum := binary.BigEndian.Uint32(serializedData[n-4:])
	calculatedChecksum := crc32.Checksum(serializedData[:n-4], castagnoli)
	if serializedChecksum != calculatedChecksum {
		str := fmt.Sprintf("block data for block %s checksum "+
			"does not match - got %x, want %x", hash,
			calculatedChecksum, serializedChecksum)
		return nil, makeDbErr(database.ErrCorruption, str, nil)
	}

	// The network associated with the block must match the current active
	// network, otherwise somebody probably put the block files for the
	// wrong network in the directory.
	serializedNet := byteOrder.Uint32(serializedData[:4])
	if serializedNet != uint32(s.network) {
		str := fmt.Sprintf("block data for block %s is for the "+
			"wrong network - got %d, want %d", hash, serializedNet,
			uint32(s.network))
		return nil, makeDbErr(database.ErrDriverSpecific, str, nil)
	}

	// The raw block excludes the network, length of the block, and
	// checksum.
	return serializedData[8 : n-4], nil
}
|
||||
|
||||
// readBlockRegion reads the specified amount of data at the provided offset for
// a given block location. The offset is relative to the start of the
// serialized block (as opposed to the beginning of the block record). This
// function automatically handles all file management such as opening and
// closing files as necessary to stay within the maximum allowed open files
// limit.
//
// Returns ErrDriverSpecific if the data fails to read for any reason.
func (s *blockStore) readBlockRegion(loc blockLocation, offset, numBytes uint32) ([]byte, error) {
	// Get the referenced block file handle opening the file as needed. The
	// function also handles closing files as needed to avoid going over the
	// max allowed open files.
	//
	// NOTE(review): blockFile appears to return with the file's read-lock
	// held; it is released below after the read — confirm against
	// blockFile's contract.
	blockFile, err := s.blockFile(loc.blockFileNum)
	if err != nil {
		return nil, err
	}

	// Regions are offsets into the actual block, however the serialized
	// data for a block includes an initial 4 bytes for network + 4 bytes
	// for block length. Thus, add 8 bytes to adjust.
	readOffset := loc.fileOffset + 8 + offset
	serializedData := make([]byte, numBytes)
	_, err = blockFile.file.ReadAt(serializedData, int64(readOffset))
	blockFile.RUnlock()
	if err != nil {
		str := fmt.Sprintf("failed to read region from block file %d, "+
			"offset %d, len %d: %s", loc.blockFileNum, readOffset,
			numBytes, err)
		return nil, makeDbErr(database.ErrDriverSpecific, str, err)
	}

	return serializedData, nil
}
|
||||
|
||||
// syncBlocks performs a file system sync on the flat file associated with the
|
||||
// store's current write cursor. It is safe to call even when there is not a
|
||||
// current write file in which case it will have no effect.
|
||||
//
|
||||
// This is used when flushing cached metadata updates to disk to ensure all the
|
||||
// block data is fully written before updating the metadata. This ensures the
|
||||
// metadata and block data can be properly reconciled in failure scenarios.
|
||||
func (s *blockStore) syncBlocks() error {
|
||||
wc := s.writeCursor
|
||||
wc.RLock()
|
||||
defer wc.RUnlock()
|
||||
|
||||
// Nothing to do if there is no current file associated with the write
|
||||
// cursor.
|
||||
wc.curFile.RLock()
|
||||
defer wc.curFile.RUnlock()
|
||||
if wc.curFile.file == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sync the file to disk.
|
||||
if err := wc.curFile.file.Sync(); err != nil {
|
||||
str := fmt.Sprintf("failed to sync file %d: %s", wc.curFileNum,
|
||||
err)
|
||||
return makeDbErr(database.ErrDriverSpecific, str, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleRollback rolls the block files on disk back to the provided file number
// and offset. This involves potentially deleting and truncating the files that
// were partially written.
//
// There are effectively two scenarios to consider here:
// 1) Transient write failures from which recovery is possible
// 2) More permanent failures such as hard disk death and/or removal
//
// In either case, the write cursor will be repositioned to the old block file
// offset regardless of any other errors that occur while attempting to undo
// writes.
//
// For the first scenario, this will lead to any data which failed to be undone
// being overwritten and thus behaves as desired as the system continues to run.
//
// For the second scenario, the metadata which stores the current write cursor
// position within the block files will not have been updated yet and thus if
// the system eventually recovers (perhaps the hard drive is reconnected), it
// will also lead to any data which failed to be undone being overwritten and
// thus behaves as desired.
//
// Therefore, any errors are simply logged at a warning level rather than being
// returned since there is nothing more that could be done about it anyways.
func (s *blockStore) handleRollback(oldBlockFileNum, oldBlockOffset uint32) {
	// Grab the write cursor mutex since it is modified throughout this
	// function.
	wc := s.writeCursor
	wc.Lock()
	defer wc.Unlock()

	// Nothing to do if the rollback point is the same as the current write
	// cursor.
	if wc.curFileNum == oldBlockFileNum && wc.curOffset == oldBlockOffset {
		return
	}

	// Regardless of any failures that happen below, reposition the write
	// cursor to the old block file and offset. The deferred closure runs
	// after every early return below.
	defer func() {
		wc.curFileNum = oldBlockFileNum
		wc.curOffset = oldBlockOffset
	}()

	log.Debugf("ROLLBACK: Rolling back to file %d, offset %d",
		oldBlockFileNum, oldBlockOffset)

	// Close the current write file if it needs to be deleted. Then delete
	// all files that are newer than the provided rollback file while
	// also moving the write cursor file backwards accordingly.
	if wc.curFileNum > oldBlockFileNum {
		wc.curFile.Lock()
		if wc.curFile.file != nil {
			// The close error is deliberately ignored: the file is
			// about to be deleted anyway.
			_ = wc.curFile.file.Close()
			wc.curFile.file = nil
		}
		wc.curFile.Unlock()
	}
	for ; wc.curFileNum > oldBlockFileNum; wc.curFileNum-- {
		if err := s.deleteFileFunc(wc.curFileNum); err != nil {
			log.Warnf("ROLLBACK: Failed to delete block file "+
				"number %d: %s", wc.curFileNum, err)
			return
		}
	}

	// Open the file for the current write cursor if needed.
	wc.curFile.Lock()
	if wc.curFile.file == nil {
		obf, err := s.openWriteFileFunc(wc.curFileNum)
		if err != nil {
			wc.curFile.Unlock()
			log.Warnf("ROLLBACK: %s", err)
			return
		}
		wc.curFile.file = obf
	}

	// Truncate the file to the provided rollback offset.
	if err := wc.curFile.file.Truncate(int64(oldBlockOffset)); err != nil {
		wc.curFile.Unlock()
		log.Warnf("ROLLBACK: Failed to truncate file %d: %s",
			wc.curFileNum, err)
		return
	}

	// Sync the file to disk. The lock must be released before checking the
	// error so it is dropped on both the success and failure paths.
	err := wc.curFile.file.Sync()
	wc.curFile.Unlock()
	if err != nil {
		log.Warnf("ROLLBACK: Failed to sync file %d: %s",
			wc.curFileNum, err)
		return
	}
}
|
||||
|
||||
// scanBlockFiles searches the database directory for all flat block files to
|
||||
// find the end of the most recent file. This position is considered the
|
||||
// current write cursor which is also stored in the metadata. Thus, it is used
|
||||
// to detect unexpected shutdowns in the middle of writes so the block files
|
||||
// can be reconciled.
|
||||
func scanBlockFiles(dbPath string) (int, uint32) {
|
||||
lastFile := -1
|
||||
fileLen := uint32(0)
|
||||
for i := 0; ; i++ {
|
||||
filePath := blockFilePath(dbPath, uint32(i))
|
||||
st, err := os.Stat(filePath)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
lastFile = i
|
||||
|
||||
fileLen = uint32(st.Size())
|
||||
}
|
||||
|
||||
log.Tracef("Scan found latest block file #%d with length %d", lastFile,
|
||||
fileLen)
|
||||
return lastFile, fileLen
|
||||
}
|
||||
|
||||
// newBlockStore returns a new block store with the current block file number
|
||||
// and offset set and all fields initialized.
|
||||
func newBlockStore(basePath string, network wire.KaspaNet) *blockStore {
|
||||
// Look for the end of the latest block to file to determine what the
|
||||
// write cursor position is from the viewpoing of the block files on
|
||||
// disk.
|
||||
fileNum, fileOff := scanBlockFiles(basePath)
|
||||
if fileNum == -1 {
|
||||
fileNum = 0
|
||||
fileOff = 0
|
||||
}
|
||||
|
||||
store := &blockStore{
|
||||
network: network,
|
||||
basePath: basePath,
|
||||
maxBlockFileSize: maxBlockFileSize,
|
||||
maxOpenFiles: maxOpenFiles,
|
||||
openBlockFiles: make(map[uint32]*lockableFile),
|
||||
openBlocksLRU: list.New(),
|
||||
fileNumToLRUElem: make(map[uint32]*list.Element),
|
||||
|
||||
writeCursor: &writeCursor{
|
||||
curFile: &lockableFile{},
|
||||
curFileNum: uint32(fileNum),
|
||||
curOffset: fileOff,
|
||||
},
|
||||
}
|
||||
store.openFileFunc = store.openFile
|
||||
store.openWriteFileFunc = store.openWriteFile
|
||||
store.deleteFileFunc = store.deleteFile
|
||||
return store
|
||||
}
|
||||
@@ -1,108 +0,0 @@
|
||||
package ffldb
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
)
|
||||
|
||||
func TestDeleteFile(t *testing.T) {
|
||||
testBlock := util.NewBlock(wire.NewMsgBlock(
|
||||
wire.NewBlockHeader(1, []*daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, 0, 0)))
|
||||
|
||||
tests := []struct {
|
||||
fileNum uint32
|
||||
expectedErr bool
|
||||
}{
|
||||
{0, false},
|
||||
{1, true},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
func() {
|
||||
pdb := newTestDb("TestDeleteFile", t)
|
||||
defer func() {
|
||||
if !pdb.closed {
|
||||
pdb.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
err := pdb.Update(func(dbTx database.Tx) error {
|
||||
dbTx.StoreBlock(testBlock)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("TestDeleteFile: Error storing block: %s", err)
|
||||
}
|
||||
|
||||
err = pdb.Close()
|
||||
if err != nil {
|
||||
t.Fatalf("TestDeleteFile: Error closing file before deletion: %s", err)
|
||||
}
|
||||
|
||||
err = pdb.store.deleteFile(test.fileNum)
|
||||
if (err != nil) != test.expectedErr {
|
||||
t.Errorf("TestDeleteFile: %d: Expected error status: %t, but got: %t",
|
||||
test.fileNum, test.expectedErr, (err != nil))
|
||||
}
|
||||
if err == nil {
|
||||
filePath := blockFilePath(pdb.store.basePath, test.fileNum)
|
||||
if _, err := os.Stat(filePath); !os.IsNotExist(err) {
|
||||
t.Errorf("TestDeleteFile: %d: File %s still exists", test.fileNum, filePath)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// TestHandleRollbackErrors tests all error-cases in *blockStore.handleRollback().
|
||||
// The non-error-cases are tested in the more general tests.
|
||||
// Since handleRollback just logs errors, this test simply causes all error-cases to be hit,
|
||||
// and makes sure no panic occurs, as well as ensures the writeCursor was updated correctly.
|
||||
func TestHandleRollbackErrors(t *testing.T) {
|
||||
testBlock := util.NewBlock(wire.NewMsgBlock(
|
||||
wire.NewBlockHeader(1, []*daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, 0, 0)))
|
||||
|
||||
testBlockSize := uint32(testBlock.MsgBlock().SerializeSize())
|
||||
tests := []struct {
|
||||
name string
|
||||
fileNum uint32
|
||||
offset uint32
|
||||
}{
|
||||
// offset should be size of block + 12 bytes for block network, size and checksum
|
||||
{"Nothing to rollback", 1, testBlockSize + 12},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
func() {
|
||||
pdb := newTestDb("TestHandleRollbackErrors", t)
|
||||
defer pdb.Close()
|
||||
|
||||
// Set maxBlockFileSize to testBlockSize so that writeCursor.curFileNum increments
|
||||
pdb.store.maxBlockFileSize = testBlockSize
|
||||
|
||||
err := pdb.Update(func(dbTx database.Tx) error {
|
||||
return dbTx.StoreBlock(testBlock)
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("TestHandleRollbackErrors: %s: Error adding test block to database: %s", test.name, err)
|
||||
}
|
||||
|
||||
pdb.store.handleRollback(test.fileNum, test.offset)
|
||||
|
||||
if pdb.store.writeCursor.curFileNum != test.fileNum {
|
||||
t.Errorf("TestHandleRollbackErrors: %s: Expected fileNum: %d, but got: %d",
|
||||
test.name, test.fileNum, pdb.store.writeCursor.curFileNum)
|
||||
}
|
||||
|
||||
if pdb.store.writeCursor.curOffset != test.offset {
|
||||
t.Errorf("TestHandleRollbackErrors: %s: offset fileNum: %d, but got: %d",
|
||||
test.name, test.offset, pdb.store.writeCursor.curOffset)
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
@@ -1,43 +0,0 @@
|
||||
package ffldb
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
"github.com/syndtr/goleveldb/leveldb/filter"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
)
|
||||
|
||||
func newTestDb(testName string, t *testing.T) *db {
|
||||
dbPath := path.Join(os.TempDir(), "db_test", testName)
|
||||
err := os.RemoveAll(dbPath)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
t.Fatalf("%s: Error deleting database folder before starting: %s", testName, err)
|
||||
}
|
||||
|
||||
network := wire.Simnet
|
||||
|
||||
opts := opt.Options{
|
||||
ErrorIfExist: true,
|
||||
Strict: opt.DefaultStrict,
|
||||
Compression: opt.NoCompression,
|
||||
Filter: filter.NewBloomFilter(10),
|
||||
}
|
||||
metadataDbPath := filepath.Join(dbPath, metadataDbName)
|
||||
ldb, err := leveldb.OpenFile(metadataDbPath, &opts)
|
||||
if err != nil {
|
||||
t.Errorf("%s: Error opening metadataDbPath: %s", testName, err)
|
||||
}
|
||||
err = initDB(ldb)
|
||||
if err != nil {
|
||||
t.Errorf("%s: Error initializing metadata Db: %s", testName, err)
|
||||
}
|
||||
|
||||
store := newBlockStore(dbPath, network)
|
||||
cache := newDbCache(ldb, store, defaultCacheSize, defaultFlushSecs)
|
||||
return &db{store: store, cache: cache}
|
||||
}
|
||||
2074
database/ffldb/db.go
2074
database/ffldb/db.go
File diff suppressed because it is too large
Load Diff
@@ -1,658 +0,0 @@
|
||||
package ffldb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
)
|
||||
|
||||
// TestCursorDeleteErrors tests all error-cases in *cursor.Delete().
|
||||
// The non-error-cases are tested in the more general tests.
|
||||
func TestCursorDeleteErrors(t *testing.T) {
|
||||
pdb := newTestDb("TestCursorDeleteErrors", t)
|
||||
|
||||
nestedBucket := []byte("nestedBucket")
|
||||
key := []byte("key")
|
||||
value := []byte("value")
|
||||
|
||||
err := pdb.Update(func(dbTx database.Tx) error {
|
||||
metadata := dbTx.Metadata()
|
||||
_, err := metadata.CreateBucket(nestedBucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
metadata.Put(key, value)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("TestCursorDeleteErrors: Error setting up test-database: %s", err)
|
||||
}
|
||||
|
||||
// Check for error when attempted to delete a bucket
|
||||
err = pdb.Update(func(dbTx database.Tx) error {
|
||||
cursor := dbTx.Metadata().Cursor()
|
||||
found := false
|
||||
for ok := cursor.First(); ok; ok = cursor.Next() {
|
||||
if bytes.Equal(cursor.Key(), nestedBucket) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("TestCursorDeleteErrors: Key '%s' not found", string(nestedBucket))
|
||||
}
|
||||
|
||||
err := cursor.Delete()
|
||||
if !database.IsErrorCode(err, database.ErrIncompatibleValue) {
|
||||
t.Errorf("TestCursorDeleteErrors: Expected error of type ErrIncompatibleValue, "+
|
||||
"when deleting bucket, but got %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("TestCursorDeleteErrors: Unexpected error from pdb.Update "+
|
||||
"when attempting to delete bucket: %s", err)
|
||||
}
|
||||
|
||||
// Check for error when transaction is not writable
|
||||
err = pdb.View(func(dbTx database.Tx) error {
|
||||
cursor := dbTx.Metadata().Cursor()
|
||||
if !cursor.First() {
|
||||
t.Fatal("TestCursorDeleteErrors: Nothing in cursor when testing for delete in " +
|
||||
"non-writable transaction")
|
||||
}
|
||||
|
||||
err := cursor.Delete()
|
||||
if !database.IsErrorCode(err, database.ErrTxNotWritable) {
|
||||
t.Errorf("TestCursorDeleteErrors: Expected error of type ErrTxNotWritable "+
|
||||
"when calling .Delete() on non-writable transaction, but got '%v' instead", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("TestCursorDeleteErrors: Unexpected error from pdb.Update "+
|
||||
"when attempting to delete on non-writable transaction: %s", err)
|
||||
}
|
||||
|
||||
// Check for error when cursor was exhausted
|
||||
err = pdb.Update(func(dbTx database.Tx) error {
|
||||
cursor := dbTx.Metadata().Cursor()
|
||||
for ok := cursor.First(); ok; ok = cursor.Next() {
|
||||
}
|
||||
|
||||
err := cursor.Delete()
|
||||
if !database.IsErrorCode(err, database.ErrIncompatibleValue) {
|
||||
t.Errorf("TestCursorDeleteErrors: Expected error of type ErrIncompatibleValue "+
|
||||
"when calling .Delete() on exhausted cursor, but got '%v' instead", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("TestCursorDeleteErrors: Unexpected error from pdb.Update "+
|
||||
"when attempting to delete on exhausted cursor: %s", err)
|
||||
}
|
||||
|
||||
// Check for error when transaction is closed
|
||||
tx, err := pdb.Begin(true)
|
||||
if err != nil {
|
||||
t.Fatalf("TestCursorDeleteErrors: Error in pdb.Begin(): %s", err)
|
||||
}
|
||||
cursor := tx.Metadata().Cursor()
|
||||
err = tx.Commit()
|
||||
if err != nil {
|
||||
t.Fatalf("TestCursorDeleteErrors: Error in tx.Commit(): %s", err)
|
||||
}
|
||||
|
||||
err = cursor.Delete()
|
||||
if !database.IsErrorCode(err, database.ErrTxClosed) {
|
||||
t.Errorf("TestCursorDeleteErrors: Expected error of type ErrTxClosed "+
|
||||
"when calling .Delete() on with closed transaction, but got '%s' instead", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestSkipPendingUpdates verifies that cursor.skipPendingUpdates advances the
// underlying database iterator past keys that have a pending deletion or a
// pending update in the current transaction, in both iteration directions.
func TestSkipPendingUpdates(t *testing.T) {
	pdb := newTestDb("TestSkipPendingUpdates", t)
	defer pdb.Close()

	value := []byte("value")
	// Add numbered prefixes to keys so that they are in expected order, and before any other keys
	firstKey := []byte("1 - first")
	toDeleteKey := []byte("2 - toDelete")
	toUpdateKey := []byte("3 - toUpdate")
	secondKey := []byte("4 - second")

	// create initial metadata for test
	err := pdb.Update(func(dbTx database.Tx) error {
		metadata := dbTx.Metadata()
		if err := metadata.Put(firstKey, value); err != nil {
			return err
		}
		if err := metadata.Put(toDeleteKey, value); err != nil {
			return err
		}
		if err := metadata.Put(toUpdateKey, value); err != nil {
			return err
		}
		if err := metadata.Put(secondKey, value); err != nil {
			return err
		}

		return nil
	})
	if err != nil {
		t.Fatalf("TestSkipPendingUpdates: Error adding to metadata: %s", err)
	}

	// test skips
	err = pdb.Update(func(dbTx database.Tx) error {
		metadata := dbTx.Metadata()
		// Create a pending deletion and a pending update in this
		// transaction so the cursor has something to skip over.
		if err := metadata.Delete(toDeleteKey); err != nil {
			return err
		}
		if err := metadata.Put(toUpdateKey, value); err != nil {
			return err
		}
		cursor := metadata.Cursor().(*cursor)
		dbIter := cursor.dbIter

		// Check that first is ok
		dbIter.First()
		expectedKey := bucketizedKey(metadataBucketID, firstKey)
		if !bytes.Equal(dbIter.Key(), expectedKey) {
			t.Errorf("TestSkipPendingUpdates: 1: key expected to be %v but is %v", expectedKey, dbIter.Key())
		}

		// Go to the next key, which is toDelete
		dbIter.Next()
		expectedKey = bucketizedKey(metadataBucketID, toDeleteKey)
		if !bytes.Equal(dbIter.Key(), expectedKey) {
			t.Errorf("TestSkipPendingUpdates: 2: key expected to be %s but is %s", expectedKey, dbIter.Key())
		}

		// at this point toDeleteKey and toUpdateKey should be skipped
		cursor.skipPendingUpdates(true)
		expectedKey = bucketizedKey(metadataBucketID, secondKey)
		if !bytes.Equal(dbIter.Key(), expectedKey) {
			t.Errorf("TestSkipPendingUpdates: 3: key expected to be %s but is %s", expectedKey, dbIter.Key())
		}

		// now traverse backwards - should get toUpdate
		dbIter.Prev()
		expectedKey = bucketizedKey(metadataBucketID, toUpdateKey)
		if !bytes.Equal(dbIter.Key(), expectedKey) {
			t.Errorf("TestSkipPendingUpdates: 4: key expected to be %s but is %s", expectedKey, dbIter.Key())
		}

		// at this point toUpdateKey and toDeleteKey should be skipped
		cursor.skipPendingUpdates(false)
		expectedKey = bucketizedKey(metadataBucketID, firstKey)
		if !bytes.Equal(dbIter.Key(), expectedKey) {
			t.Errorf("TestSkipPendingUpdates: 5: key expected to be %s but is %s", expectedKey, dbIter.Key())
		}
		return nil
	})
	if err != nil {
		t.Fatalf("TestSkipPendingUpdates: Error running main part of test: %s", err)
	}
}
|
||||
|
||||
// TestCursor tests various edge-cases in cursor that were not hit by the more general tests
func TestCursor(t *testing.T) {
	pdb := newTestDb("TestCursor", t)
	defer pdb.Close()

	value := []byte("value")
	// Add numbered prefixes to keys so that they are in expected order, and before any other keys
	firstKey := []byte("1 - first")
	toDeleteKey := []byte("2 - toDelete")
	toUpdateKey := []byte("3 - toUpdate")
	secondKey := []byte("4 - second")

	// create initial metadata for test
	err := pdb.Update(func(dbTx database.Tx) error {
		metadata := dbTx.Metadata()
		if err := metadata.Put(firstKey, value); err != nil {
			return err
		}
		if err := metadata.Put(toDeleteKey, value); err != nil {
			return err
		}
		if err := metadata.Put(toUpdateKey, value); err != nil {
			return err
		}
		if err := metadata.Put(secondKey, value); err != nil {
			return err
		}

		return nil
	})
	if err != nil {
		t.Fatalf("Error adding to metadata: %s", err)
	}

	// run the actual tests
	err = pdb.Update(func(dbTx database.Tx) error {
		metadata := dbTx.Metadata()
		// Create a pending deletion and a pending update so the cursor
		// exercises its skip logic during iteration.
		if err := metadata.Delete(toDeleteKey); err != nil {
			return err
		}
		if err := metadata.Put(toUpdateKey, value); err != nil {
			return err
		}
		cursor := metadata.Cursor().(*cursor)

		// Check prev when currentIter == nil
		if ok := cursor.Prev(); ok {
			t.Error("1: .Prev() should have returned false, but have returned true")
		}
		// Same thing for .Next()
		for ok := cursor.First(); ok; ok = cursor.Next() {
		}
		if ok := cursor.Next(); ok {
			t.Error("2: .Next() should have returned false, but have returned true")
		}

		// Check that Key(), rawKey(), Value(), and rawValue() all return nil when currentIter == nil
		if key := cursor.Key(); key != nil {
			t.Errorf("3: .Key() should have returned nil, but have returned '%s' instead", key)
		}
		if key := cursor.rawKey(); key != nil {
			t.Errorf("4: .rawKey() should have returned nil, but have returned '%s' instead", key)
		}
		if value := cursor.Value(); value != nil {
			t.Errorf("5: .Value() should have returned nil, but have returned '%s' instead", value)
		}
		if value := cursor.rawValue(); value != nil {
			t.Errorf("6: .rawValue() should have returned nil, but have returned '%s' instead", value)
		}

		// Check rawValue in normal operation
		cursor.First()
		if rawValue := cursor.rawValue(); !bytes.Equal(rawValue, value) {
			t.Errorf("7: rawValue should have returned '%s' but have returned '%s' instead", value, rawValue)
		}

		return nil
	})
	if err != nil {
		t.Fatalf("Error running the actual tests: %s", err)
	}
}
|
||||
|
||||
// TestCreateBucketErrors tests all error-cases in *bucket.CreateBucket().
|
||||
// The non-error-cases are tested in the more general tests.
|
||||
func TestCreateBucketErrors(t *testing.T) {
|
||||
testKey := []byte("key")
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
key []byte
|
||||
isWritable bool
|
||||
isClosed bool
|
||||
expectedErr database.ErrorCode
|
||||
}{
|
||||
{"empty key", []byte{}, true, false, database.ErrBucketNameRequired},
|
||||
{"transaction is closed", testKey, true, true, database.ErrTxClosed},
|
||||
{"transaction is not writable", testKey, false, false, database.ErrTxNotWritable},
|
||||
{"key already exists", blockIdxBucketName, true, false, database.ErrBucketExists},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
func() {
|
||||
pdb := newTestDb("TestCreateBucketErrors", t)
|
||||
defer pdb.Close()
|
||||
|
||||
tx, err := pdb.Begin(test.isWritable)
|
||||
defer tx.Commit()
|
||||
if err != nil {
|
||||
t.Fatalf("TestCreateBucketErrors: %s: error from pdb.Begin: %s", test.name, err)
|
||||
}
|
||||
if test.isClosed {
|
||||
err = tx.Commit()
|
||||
if err != nil {
|
||||
t.Fatalf("TestCreateBucketErrors: %s: error from tx.Commit: %s", test.name, err)
|
||||
}
|
||||
}
|
||||
|
||||
metadata := tx.Metadata()
|
||||
|
||||
_, err = metadata.CreateBucket(test.key)
|
||||
|
||||
if !database.IsErrorCode(err, test.expectedErr) {
|
||||
t.Errorf("TestCreateBucketErrors: %s: Expected error of type %d "+
|
||||
"but got '%v'", test.name, test.expectedErr, err)
|
||||
}
|
||||
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// TestPutErrors tests all error-cases in *bucket.Put().
|
||||
// The non-error-cases are tested in the more general tests.
|
||||
func TestPutErrors(t *testing.T) {
|
||||
testKey := []byte("key")
|
||||
testValue := []byte("value")
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
key []byte
|
||||
isWritable bool
|
||||
isClosed bool
|
||||
expectedErr database.ErrorCode
|
||||
}{
|
||||
{"empty key", []byte{}, true, false, database.ErrKeyRequired},
|
||||
{"transaction is closed", testKey, true, true, database.ErrTxClosed},
|
||||
{"transaction is not writable", testKey, false, false, database.ErrTxNotWritable},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
func() {
|
||||
pdb := newTestDb("TestPutErrors", t)
|
||||
defer pdb.Close()
|
||||
|
||||
tx, err := pdb.Begin(test.isWritable)
|
||||
defer tx.Commit()
|
||||
if err != nil {
|
||||
t.Fatalf("TestPutErrors: %s: error from pdb.Begin: %s", test.name, err)
|
||||
}
|
||||
if test.isClosed {
|
||||
err = tx.Commit()
|
||||
if err != nil {
|
||||
t.Fatalf("TestPutErrors: %s: error from tx.Commit: %s", test.name, err)
|
||||
}
|
||||
}
|
||||
|
||||
metadata := tx.Metadata()
|
||||
|
||||
err = metadata.Put(test.key, testValue)
|
||||
|
||||
if !database.IsErrorCode(err, test.expectedErr) {
|
||||
t.Errorf("TestPutErrors: %s: Expected error of type %d "+
|
||||
"but got '%v'", test.name, test.expectedErr, err)
|
||||
}
|
||||
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetErrors tests all error-cases in *bucket.Get().
|
||||
// The non-error-cases are tested in the more general tests.
|
||||
func TestGetErrors(t *testing.T) {
|
||||
testKey := []byte("key")
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
key []byte
|
||||
isClosed bool
|
||||
}{
|
||||
{"empty key", []byte{}, false},
|
||||
{"transaction is closed", testKey, true},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
func() {
|
||||
pdb := newTestDb("TestGetErrors", t)
|
||||
defer pdb.Close()
|
||||
|
||||
tx, err := pdb.Begin(false)
|
||||
defer tx.Rollback()
|
||||
if err != nil {
|
||||
t.Fatalf("TestGetErrors: %s: error from pdb.Begin: %s", test.name, err)
|
||||
}
|
||||
if test.isClosed {
|
||||
err = tx.Rollback()
|
||||
if err != nil {
|
||||
t.Fatalf("TestGetErrors: %s: error from tx.Commit: %s", test.name, err)
|
||||
}
|
||||
}
|
||||
|
||||
metadata := tx.Metadata()
|
||||
|
||||
if result := metadata.Get(test.key); result != nil {
|
||||
t.Errorf("TestGetErrors: %s: Expected to return nil, but got %v", test.name, result)
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// TestDeleteErrors tests all error-cases in *bucket.Delete().
|
||||
// The non-error-cases are tested in the more general tests.
|
||||
func TestDeleteErrors(t *testing.T) {
|
||||
testKey := []byte("key")
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
key []byte
|
||||
isWritable bool
|
||||
isClosed bool
|
||||
expectedErr database.ErrorCode
|
||||
}{
|
||||
{"empty key", []byte{}, true, false, database.ErrKeyRequired},
|
||||
{"transaction is closed", testKey, true, true, database.ErrTxClosed},
|
||||
{"transaction is not writable", testKey, false, false, database.ErrTxNotWritable},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
func() {
|
||||
pdb := newTestDb("TestDeleteErrors", t)
|
||||
defer pdb.Close()
|
||||
|
||||
tx, err := pdb.Begin(test.isWritable)
|
||||
defer tx.Commit()
|
||||
if err != nil {
|
||||
t.Fatalf("TestDeleteErrors: %s: error from pdb.Begin: %s", test.name, err)
|
||||
}
|
||||
if test.isClosed {
|
||||
err = tx.Commit()
|
||||
if err != nil {
|
||||
t.Fatalf("TestDeleteErrors: %s: error from tx.Commit: %s", test.name, err)
|
||||
}
|
||||
}
|
||||
|
||||
metadata := tx.Metadata()
|
||||
|
||||
err = metadata.Delete(test.key)
|
||||
|
||||
if !database.IsErrorCode(err, test.expectedErr) {
|
||||
t.Errorf("TestDeleteErrors: %s: Expected error of type %d "+
|
||||
"but got '%v'", test.name, test.expectedErr, err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
func TestForEachBucket(t *testing.T) {
|
||||
pdb := newTestDb("TestForEachBucket", t)
|
||||
|
||||
// set-up test
|
||||
testKey := []byte("key")
|
||||
testValue := []byte("value")
|
||||
bucketKeys := [][]byte{{1}, {2}, {3}}
|
||||
|
||||
err := pdb.Update(func(dbTx database.Tx) error {
|
||||
metadata := dbTx.Metadata()
|
||||
for _, bucketKey := range bucketKeys {
|
||||
bucket, err := metadata.CreateBucket(bucketKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = bucket.Put(testKey, testValue)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("TestForEachBucket: Error setting up test-database: %s", err)
|
||||
}
|
||||
|
||||
// actual test
|
||||
err = pdb.View(func(dbTx database.Tx) error {
|
||||
i := 0
|
||||
metadata := dbTx.Metadata()
|
||||
|
||||
err := metadata.ForEachBucket(func(bucketKey []byte) error {
|
||||
if i >= len(bucketKeys) { // in case there are any other buckets in metadata
|
||||
return nil
|
||||
}
|
||||
|
||||
expectedBucketKey := bucketKeys[i]
|
||||
if !bytes.Equal(expectedBucketKey, bucketKey) {
|
||||
t.Errorf("TestForEachBucket: %d: Expected bucket key: %v, but got: %v",
|
||||
i, expectedBucketKey, bucketKey)
|
||||
return nil
|
||||
}
|
||||
bucket := metadata.Bucket(bucketKey)
|
||||
if bucket == nil {
|
||||
t.Errorf("TestForEachBucket: %d: Bucket is nil", i)
|
||||
return nil
|
||||
}
|
||||
|
||||
value := bucket.Get(testKey)
|
||||
if !bytes.Equal(testValue, value) {
|
||||
t.Errorf("TestForEachBucket: %d: Expected value: %s, but got: %s",
|
||||
i, testValue, value)
|
||||
return nil
|
||||
}
|
||||
|
||||
i++
|
||||
return nil
|
||||
})
|
||||
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("TestForEachBucket: Error running actual tests: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestStoreBlockErrors tests all error-cases in *tx.StoreBlock().
|
||||
// The non-error-cases are tested in the more general tests.
|
||||
func TestStoreBlockErrors(t *testing.T) {
|
||||
testBlock := util.NewBlock(wire.NewMsgBlock(wire.NewBlockHeader(1, []*daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, 0, 0)))
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
isWritable bool
|
||||
isClosed bool
|
||||
expectedErr database.ErrorCode
|
||||
}{
|
||||
{"transaction is closed", true, true, database.ErrTxClosed},
|
||||
{"transaction is not writable", false, false, database.ErrTxNotWritable},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
func() {
|
||||
pdb := newTestDb("TestStoreBlockErrors", t)
|
||||
defer pdb.Close()
|
||||
|
||||
tx, err := pdb.Begin(test.isWritable)
|
||||
defer tx.Commit()
|
||||
if err != nil {
|
||||
t.Fatalf("TestStoreBlockErrors: %s: error from pdb.Begin: %s", test.name, err)
|
||||
}
|
||||
if test.isClosed {
|
||||
err = tx.Commit()
|
||||
if err != nil {
|
||||
t.Fatalf("TestStoreBlockErrors: %s: error from tx.Commit: %s", test.name, err)
|
||||
}
|
||||
}
|
||||
|
||||
err = tx.StoreBlock(testBlock)
|
||||
if !database.IsErrorCode(err, test.expectedErr) {
|
||||
t.Errorf("TestStoreBlockErrors: %s: Expected error of type %d "+
|
||||
"but got '%v'", test.name, test.expectedErr, err)
|
||||
}
|
||||
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// TestDeleteDoubleNestedBucket tests what happens when bucket.DeleteBucket()
|
||||
// is invoked on a bucket that contains a nested bucket.
|
||||
func TestDeleteDoubleNestedBucket(t *testing.T) {
|
||||
pdb := newTestDb("TestDeleteDoubleNestedBucket", t)
|
||||
defer pdb.Close()
|
||||
|
||||
firstKey := []byte("first")
|
||||
secondKey := []byte("second")
|
||||
key := []byte("key")
|
||||
value := []byte("value")
|
||||
var rawKey, rawSecondKey []byte
|
||||
|
||||
// Test setup
|
||||
err := pdb.Update(func(dbTx database.Tx) error {
|
||||
metadata := dbTx.Metadata()
|
||||
firstBucket, err := metadata.CreateBucket(firstKey)
|
||||
if err != nil {
|
||||
return errors.Errorf("Error creating first bucket: %s", err)
|
||||
}
|
||||
secondBucket, err := firstBucket.CreateBucket(secondKey)
|
||||
if err != nil {
|
||||
return errors.Errorf("Error creating second bucket: %s", err)
|
||||
}
|
||||
secondBucket.Put(key, value)
|
||||
|
||||
// extract rawKey from cursor and make sure it's in raw database
|
||||
c := secondBucket.Cursor()
|
||||
for ok := c.First(); ok && !bytes.Equal(c.Key(), key); ok = c.Next() {
|
||||
}
|
||||
if !bytes.Equal(c.Key(), key) {
|
||||
return errors.Errorf("Couldn't find key to extract rawKey")
|
||||
}
|
||||
rawKey = c.(*cursor).rawKey()
|
||||
if dbTx.(*transaction).fetchKey(rawKey) == nil {
|
||||
return errors.Errorf("rawKey not found")
|
||||
}
|
||||
|
||||
// extract rawSecondKey from cursor and make sure it's in raw database
|
||||
c = firstBucket.Cursor()
|
||||
for ok := c.First(); ok && !bytes.Equal(c.Key(), secondKey); ok = c.Next() {
|
||||
}
|
||||
if !bytes.Equal(c.Key(), secondKey) {
|
||||
return errors.Errorf("Couldn't find secondKey to extract rawSecondKey")
|
||||
}
|
||||
rawSecondKey = c.(*cursor).rawKey()
|
||||
if dbTx.(*transaction).fetchKey(rawSecondKey) == nil {
|
||||
return errors.Errorf("rawSecondKey not found for some reason")
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("TestDeleteDoubleNestedBucket: Error in test setup pdb.Update: %s", err)
|
||||
}
|
||||
|
||||
// Actual test
|
||||
err = pdb.Update(func(dbTx database.Tx) error {
|
||||
metadata := dbTx.Metadata()
|
||||
err := metadata.DeleteBucket(firstKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if dbTx.(*transaction).fetchKey(rawSecondKey) != nil {
|
||||
t.Error("TestDeleteDoubleNestedBucket: secondBucket was not deleted")
|
||||
}
|
||||
|
||||
if dbTx.(*transaction).fetchKey(rawKey) != nil {
|
||||
t.Error("TestDeleteDoubleNestedBucket: value inside secondBucket was not deleted")
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("TestDeleteDoubleNestedBucket: Error in actual test pdb.Update: %s", err)
|
||||
}
|
||||
}
|
||||
@@ -1,647 +0,0 @@
|
||||
// Copyright (c) 2015-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ffldb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/database/internal/treap"
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
"github.com/syndtr/goleveldb/leveldb/iterator"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
const (
|
||||
// defaultCacheSize is the default size for the database cache.
|
||||
defaultCacheSize = 100 * 1024 * 1024 // 100 MB
|
||||
|
||||
// defaultFlushSecs is the default number of seconds to use as a
|
||||
// threshold in between database cache flushes when the cache size has
|
||||
// not been exceeded.
|
||||
defaultFlushSecs = 300 // 5 minutes
|
||||
)
|
||||
|
||||
// ldbCacheIter wraps a treap iterator to provide the additional functionality
|
||||
// needed to satisfy the leveldb iterator.Iterator interface.
|
||||
type ldbCacheIter struct {
|
||||
*treap.Iterator
|
||||
}
|
||||
|
||||
// Enforce ldbCacheIterator implements the leveldb iterator.Iterator interface.
|
||||
var _ iterator.Iterator = (*ldbCacheIter)(nil)
|
||||
|
||||
// Error is only provided to satisfy the iterator interface as there are no
|
||||
// errors for this memory-only structure.
|
||||
//
|
||||
// This is part of the leveldb iterator.Iterator interface implementation.
|
||||
func (iter *ldbCacheIter) Error() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetReleaser is only provided to satisfy the iterator interface as there is no
|
||||
// need to override it.
|
||||
//
|
||||
// This is part of the leveldb iterator.Iterator interface implementation.
|
||||
func (iter *ldbCacheIter) SetReleaser(releaser util.Releaser) {
|
||||
}
|
||||
|
||||
// Release is only provided to satisfy the iterator interface.
|
||||
//
|
||||
// This is part of the leveldb iterator.Iterator interface implementation.
|
||||
func (iter *ldbCacheIter) Release() {
|
||||
}
|
||||
|
||||
// newLdbCacheIter creates a new treap iterator for the given slice against the
|
||||
// pending keys for the passed cache snapshot and returns it wrapped in an
|
||||
// ldbCacheIter so it can be used as a leveldb iterator.
|
||||
func newLdbCacheIter(snap *dbCacheSnapshot, slice *util.Range) *ldbCacheIter {
|
||||
iter := snap.pendingKeys.Iterator(slice.Start, slice.Limit)
|
||||
return &ldbCacheIter{Iterator: iter}
|
||||
}
|
||||
|
||||
// dbCacheIterator defines an iterator over the key/value pairs in the database
|
||||
// cache and underlying database.
|
||||
type dbCacheIterator struct {
|
||||
cacheSnapshot *dbCacheSnapshot
|
||||
dbIter iterator.Iterator
|
||||
cacheIter iterator.Iterator
|
||||
currentIter iterator.Iterator
|
||||
released bool
|
||||
}
|
||||
|
||||
// Enforce dbCacheIterator implements the leveldb iterator.Iterator interface.
|
||||
var _ iterator.Iterator = (*dbCacheIterator)(nil)
|
||||
|
||||
// skipPendingUpdates skips any keys at the current database iterator position
|
||||
// that are being updated by the cache. The forwards flag indicates the
|
||||
// direction the iterator is moving.
|
||||
func (iter *dbCacheIterator) skipPendingUpdates(forwards bool) {
|
||||
for iter.dbIter.Valid() {
|
||||
var skip bool
|
||||
key := iter.dbIter.Key()
|
||||
if iter.cacheSnapshot.pendingRemove.Has(key) {
|
||||
skip = true
|
||||
} else if iter.cacheSnapshot.pendingKeys.Has(key) {
|
||||
skip = true
|
||||
}
|
||||
if !skip {
|
||||
break
|
||||
}
|
||||
|
||||
if forwards {
|
||||
iter.dbIter.Next()
|
||||
} else {
|
||||
iter.dbIter.Prev()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// chooseIterator first skips any entries in the database iterator that are
|
||||
// being updated by the cache and sets the current iterator to the appropriate
|
||||
// iterator depending on their validity and the order they compare in while taking
|
||||
// into account the direction flag. When the iterator is being moved forwards
|
||||
// and both iterators are valid, the iterator with the smaller key is chosen and
|
||||
// vice versa when the iterator is being moved backwards.
|
||||
func (iter *dbCacheIterator) chooseIterator(forwards bool) bool {
|
||||
// Skip any keys at the current database iterator position that are
|
||||
// being updated by the cache.
|
||||
iter.skipPendingUpdates(forwards)
|
||||
|
||||
// When both iterators are exhausted, the iterator is exhausted too.
|
||||
if !iter.dbIter.Valid() && !iter.cacheIter.Valid() {
|
||||
iter.currentIter = nil
|
||||
return false
|
||||
}
|
||||
|
||||
// Choose the database iterator when the cache iterator is exhausted.
|
||||
if !iter.cacheIter.Valid() {
|
||||
iter.currentIter = iter.dbIter
|
||||
return true
|
||||
}
|
||||
|
||||
// Choose the cache iterator when the database iterator is exhausted.
|
||||
if !iter.dbIter.Valid() {
|
||||
iter.currentIter = iter.cacheIter
|
||||
return true
|
||||
}
|
||||
|
||||
// Both iterators are valid, so choose the iterator with either the
|
||||
// smaller or larger key depending on the forwards flag.
|
||||
compare := bytes.Compare(iter.dbIter.Key(), iter.cacheIter.Key())
|
||||
if (forwards && compare > 0) || (!forwards && compare < 0) {
|
||||
iter.currentIter = iter.cacheIter
|
||||
} else {
|
||||
iter.currentIter = iter.dbIter
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// First positions the iterator at the first key/value pair and returns whether
|
||||
// or not the pair exists.
|
||||
//
|
||||
// This is part of the leveldb iterator.Iterator interface implementation.
|
||||
func (iter *dbCacheIterator) First() bool {
|
||||
// Seek to the first key in both the database and cache iterators and
|
||||
// choose the iterator that is both valid and has the smaller key.
|
||||
iter.dbIter.First()
|
||||
iter.cacheIter.First()
|
||||
return iter.chooseIterator(true)
|
||||
}
|
||||
|
||||
// Last positions the iterator at the last key/value pair and returns whether or
|
||||
// not the pair exists.
|
||||
//
|
||||
// This is part of the leveldb iterator.Iterator interface implementation.
|
||||
func (iter *dbCacheIterator) Last() bool {
|
||||
// Seek to the last key in both the database and cache iterators and
|
||||
// choose the iterator that is both valid and has the larger key.
|
||||
iter.dbIter.Last()
|
||||
iter.cacheIter.Last()
|
||||
return iter.chooseIterator(false)
|
||||
}
|
||||
|
||||
// Next moves the iterator one key/value pair forward and returns whether or not
|
||||
// the pair exists.
|
||||
//
|
||||
// This is part of the leveldb iterator.Iterator interface implementation.
|
||||
func (iter *dbCacheIterator) Next() bool {
|
||||
// Nothing to return if cursor is exhausted.
|
||||
if iter.currentIter == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Move the current iterator to the next entry and choose the iterator
|
||||
// that is both valid and has the smaller key.
|
||||
iter.currentIter.Next()
|
||||
return iter.chooseIterator(true)
|
||||
}
|
||||
|
||||
// Prev moves the iterator one key/value pair backward and returns whether or
|
||||
// not the pair exists.
|
||||
//
|
||||
// This is part of the leveldb iterator.Iterator interface implementation.
|
||||
func (iter *dbCacheIterator) Prev() bool {
|
||||
// Nothing to return if cursor is exhausted.
|
||||
if iter.currentIter == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Move the current iterator to the previous entry and choose the
|
||||
// iterator that is both valid and has the larger key.
|
||||
iter.currentIter.Prev()
|
||||
return iter.chooseIterator(false)
|
||||
}
|
||||
|
||||
// Seek positions the iterator at the first key/value pair that is greater than
|
||||
// or equal to the passed seek key. Returns false if no suitable key was found.
|
||||
//
|
||||
// This is part of the leveldb iterator.Iterator interface implementation.
|
||||
func (iter *dbCacheIterator) Seek(key []byte) bool {
|
||||
// Seek to the provided key in both the database and cache iterators
|
||||
// then choose the iterator that is both valid and has the larger key.
|
||||
iter.dbIter.Seek(key)
|
||||
iter.cacheIter.Seek(key)
|
||||
return iter.chooseIterator(true)
|
||||
}
|
||||
|
||||
// Valid indicates whether the iterator is positioned at a valid key/value pair.
|
||||
// It will be considered invalid when the iterator is newly created or exhausted.
|
||||
//
|
||||
// This is part of the leveldb iterator.Iterator interface implementation.
|
||||
func (iter *dbCacheIterator) Valid() bool {
|
||||
return iter.currentIter != nil
|
||||
}
|
||||
|
||||
// Key returns the current key the iterator is pointing to.
|
||||
//
|
||||
// This is part of the leveldb iterator.Iterator interface implementation.
|
||||
func (iter *dbCacheIterator) Key() []byte {
|
||||
// Nothing to return if iterator is exhausted.
|
||||
if iter.currentIter == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return iter.currentIter.Key()
|
||||
}
|
||||
|
||||
// Value returns the current value the iterator is pointing to.
|
||||
//
|
||||
// This is part of the leveldb iterator.Iterator interface implementation.
|
||||
func (iter *dbCacheIterator) Value() []byte {
|
||||
// Nothing to return if iterator is exhausted.
|
||||
if iter.currentIter == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return iter.currentIter.Value()
|
||||
}
|
||||
|
||||
// SetReleaser is only provided to satisfy the iterator interface as there is no
|
||||
// need to override it.
|
||||
//
|
||||
// This is part of the leveldb iterator.Iterator interface implementation.
|
||||
func (iter *dbCacheIterator) SetReleaser(releaser util.Releaser) {
|
||||
}
|
||||
|
||||
// Release releases the iterator by removing the underlying treap iterator from
|
||||
// the list of active iterators against the pending keys treap.
|
||||
//
|
||||
// This is part of the leveldb iterator.Iterator interface implementation.
|
||||
func (iter *dbCacheIterator) Release() {
|
||||
if !iter.released {
|
||||
iter.dbIter.Release()
|
||||
iter.cacheIter.Release()
|
||||
iter.currentIter = nil
|
||||
iter.released = true
|
||||
}
|
||||
}
|
||||
|
||||
// Error is only provided to satisfy the iterator interface as there are no
|
||||
// errors for this memory-only structure.
|
||||
//
|
||||
// This is part of the leveldb iterator.Iterator interface implementation.
|
||||
func (iter *dbCacheIterator) Error() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// dbCacheSnapshot defines a snapshot of the database cache and underlying
|
||||
// database at a particular point in time.
|
||||
type dbCacheSnapshot struct {
|
||||
dbSnapshot *leveldb.Snapshot
|
||||
pendingKeys *treap.Immutable
|
||||
pendingRemove *treap.Immutable
|
||||
}
|
||||
|
||||
// Has returns whether or not the passed key exists.
|
||||
func (snap *dbCacheSnapshot) Has(key []byte) bool {
|
||||
// Check the cached entries first.
|
||||
if snap.pendingRemove.Has(key) {
|
||||
return false
|
||||
}
|
||||
if snap.pendingKeys.Has(key) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Consult the database.
|
||||
hasKey, _ := snap.dbSnapshot.Has(key, nil)
|
||||
return hasKey
|
||||
}
|
||||
|
||||
// Get returns the value for the passed key. The function will return nil when
|
||||
// the key does not exist.
|
||||
func (snap *dbCacheSnapshot) Get(key []byte) []byte {
|
||||
// Check the cached entries first.
|
||||
if snap.pendingRemove.Has(key) {
|
||||
return nil
|
||||
}
|
||||
if value := snap.pendingKeys.Get(key); value != nil {
|
||||
return value
|
||||
}
|
||||
|
||||
// Consult the database.
|
||||
value, err := snap.dbSnapshot.Get(key, nil)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
// Release releases the snapshot.
|
||||
func (snap *dbCacheSnapshot) Release() {
|
||||
snap.dbSnapshot.Release()
|
||||
snap.pendingKeys = nil
|
||||
snap.pendingRemove = nil
|
||||
}
|
||||
|
||||
// NewIterator returns a new iterator for the snapshot. The newly returned
|
||||
// iterator is not pointing to a valid item until a call to one of the methods
|
||||
// to position it is made.
|
||||
//
|
||||
// The slice parameter allows the iterator to be limited to a range of keys.
|
||||
// The start key is inclusive and the limit key is exclusive. Either or both
|
||||
// can be nil if the functionality is not desired.
|
||||
func (snap *dbCacheSnapshot) NewIterator(slice *util.Range) *dbCacheIterator {
|
||||
return &dbCacheIterator{
|
||||
dbIter: snap.dbSnapshot.NewIterator(slice, nil),
|
||||
cacheIter: newLdbCacheIter(snap, slice),
|
||||
cacheSnapshot: snap,
|
||||
}
|
||||
}
|
||||
|
||||
// dbCache provides a database cache layer backed by an underlying database. It
|
||||
// allows a maximum cache size and flush interval to be specified such that the
|
||||
// cache is flushed to the database when the cache size exceeds the maximum
|
||||
// configured value or it has been longer than the configured interval since the
|
||||
// last flush. This effectively provides transaction batching so that callers
|
||||
// can commit transactions at will without incurring large performance hits due
|
||||
// to frequent disk syncs.
|
||||
type dbCache struct {
|
||||
// ldb is the underlying leveldb DB for metadata.
|
||||
ldb *leveldb.DB
|
||||
|
||||
// store is used to sync blocks to flat files.
|
||||
store *blockStore
|
||||
|
||||
// The following fields are related to flushing the cache to persistent
|
||||
// storage. Note that all flushing is performed in an opportunistic
|
||||
// fashion. This means that it is only flushed during a transaction or
|
||||
// when the database cache is closed.
|
||||
//
|
||||
// maxSize is the maximum size threshold the cache can grow to before
|
||||
// it is flushed.
|
||||
//
|
||||
// flushInterval is the threshold interval of time that is allowed to
|
||||
// pass before the cache is flushed.
|
||||
//
|
||||
// lastFlush is the time the cache was last flushed. It is used in
|
||||
// conjunction with the current time and the flush interval.
|
||||
//
|
||||
// NOTE: These flush related fields are protected by the database write
|
||||
// lock.
|
||||
maxSize uint64
|
||||
flushInterval time.Duration
|
||||
lastFlush time.Time
|
||||
|
||||
// The following fields hold the keys that need to be stored or deleted
|
||||
// from the underlying database once the cache is full, enough time has
|
||||
// passed, or when the database is shutting down. Note that these are
|
||||
// stored using immutable treaps to support O(1) MVCC snapshots against
|
||||
// the cached data. The cacheLock is used to protect concurrent access
|
||||
// for cache updates and snapshots.
|
||||
cacheLock sync.RWMutex
|
||||
cachedKeys *treap.Immutable
|
||||
cachedRemove *treap.Immutable
|
||||
}
|
||||
|
||||
// Snapshot returns a snapshot of the database cache and underlying database at
|
||||
// a particular point in time.
|
||||
//
|
||||
// The snapshot must be released after use by calling Release.
|
||||
func (c *dbCache) Snapshot() (*dbCacheSnapshot, error) {
|
||||
dbSnapshot, err := c.ldb.GetSnapshot()
|
||||
if err != nil {
|
||||
str := "failed to open transaction"
|
||||
return nil, convertErr(str, err)
|
||||
}
|
||||
|
||||
// Since the cached keys to be added and removed use an immutable treap,
|
||||
// a snapshot is simply obtaining the root of the tree under the lock
|
||||
// which is used to atomically swap the root.
|
||||
c.cacheLock.RLock()
|
||||
cacheSnapshot := &dbCacheSnapshot{
|
||||
dbSnapshot: dbSnapshot,
|
||||
pendingKeys: c.cachedKeys,
|
||||
pendingRemove: c.cachedRemove,
|
||||
}
|
||||
c.cacheLock.RUnlock()
|
||||
return cacheSnapshot, nil
|
||||
}
|
||||
|
||||
// updateDB invokes the passed function in the context of a managed leveldb
|
||||
// transaction. Any errors returned from the user-supplied function will cause
|
||||
// the transaction to be rolled back and are returned from this function.
|
||||
// Otherwise, the transaction is committed when the user-supplied function
|
||||
// returns a nil error.
|
||||
func (c *dbCache) updateDB(fn func(ldbTx *leveldb.Transaction) error) error {
|
||||
// Start a leveldb transaction.
|
||||
ldbTx, err := c.ldb.OpenTransaction()
|
||||
if err != nil {
|
||||
return convertErr("failed to open ldb transaction", err)
|
||||
}
|
||||
|
||||
if err := fn(ldbTx); err != nil {
|
||||
ldbTx.Discard()
|
||||
return err
|
||||
}
|
||||
|
||||
// Commit the leveldb transaction and convert any errors as needed.
|
||||
if err := ldbTx.Commit(); err != nil {
|
||||
return convertErr("failed to commit leveldb transaction", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// TreapForEacher is an interface which allows iteration of a treap in ascending
|
||||
// order using a user-supplied callback for each key/value pair. It mainly
|
||||
// exists so both mutable and immutable treaps can be atomically committed to
|
||||
// the database with the same function.
|
||||
type TreapForEacher interface {
|
||||
ForEach(func(k, v []byte) bool)
|
||||
}
|
||||
|
||||
// commitTreaps atomically commits all of the passed pending add/update/remove
|
||||
// updates to the underlying database.
|
||||
func (c *dbCache) commitTreaps(pendingKeys, pendingRemove TreapForEacher) error {
|
||||
// Perform all leveldb updates using an atomic transaction.
|
||||
return c.updateDB(func(ldbTx *leveldb.Transaction) error {
|
||||
var innerErr error
|
||||
pendingKeys.ForEach(func(k, v []byte) bool {
|
||||
if dbErr := ldbTx.Put(k, v, nil); dbErr != nil {
|
||||
str := fmt.Sprintf("failed to put key %q to "+
|
||||
"ldb transaction", k)
|
||||
innerErr = convertErr(str, dbErr)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
if innerErr != nil {
|
||||
return innerErr
|
||||
}
|
||||
|
||||
pendingRemove.ForEach(func(k, v []byte) bool {
|
||||
if dbErr := ldbTx.Delete(k, nil); dbErr != nil {
|
||||
str := fmt.Sprintf("failed to delete "+
|
||||
"key %q from ldb transaction",
|
||||
k)
|
||||
innerErr = convertErr(str, dbErr)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
return innerErr
|
||||
})
|
||||
}
|
||||
|
||||
// flush flushes the database cache to persistent storage. This involes syncing
|
||||
// the block store and replaying all transactions that have been applied to the
|
||||
// cache to the underlying database.
|
||||
//
|
||||
// This function MUST be called with the database write lock held.
|
||||
func (c *dbCache) flush() error {
|
||||
c.lastFlush = time.Now()
|
||||
|
||||
// Sync the current write file associated with the block store. This is
|
||||
// necessary before writing the metadata to prevent the case where the
|
||||
// metadata contains information about a block which actually hasn't
|
||||
// been written yet in unexpected shutdown scenarios.
|
||||
if err := c.store.syncBlocks(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Since the cached keys to be added and removed use an immutable treap,
|
||||
// a snapshot is simply obtaining the root of the tree under the lock
|
||||
// which is used to atomically swap the root.
|
||||
c.cacheLock.RLock()
|
||||
cachedKeys := c.cachedKeys
|
||||
cachedRemove := c.cachedRemove
|
||||
c.cacheLock.RUnlock()
|
||||
|
||||
// Nothing to do if there is no data to flush.
|
||||
if cachedKeys.Len() == 0 && cachedRemove.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Perform all leveldb updates using an atomic transaction.
|
||||
if err := c.commitTreaps(cachedKeys, cachedRemove); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Clear the cache since it has been flushed.
|
||||
c.cacheLock.Lock()
|
||||
c.cachedKeys = treap.NewImmutable()
|
||||
c.cachedRemove = treap.NewImmutable()
|
||||
c.cacheLock.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// needsFlush returns whether or not the database cache needs to be flushed to
|
||||
// persistent storage based on its current size, whether or not adding all of
|
||||
// the entries in the passed database transaction would cause it to exceed the
|
||||
// configured limit, and how much time has elapsed since the last time the cache
|
||||
// was flushed.
|
||||
//
|
||||
// This function MUST be called with the database write lock held.
|
||||
func (c *dbCache) needsFlush(tx *transaction) bool {
|
||||
// A flush is needed when more time has elapsed than the configured
|
||||
// flush interval.
|
||||
if time.Since(c.lastFlush) >= c.flushInterval {
|
||||
return true
|
||||
}
|
||||
|
||||
// A flush is needed when the size of the database cache exceeds the
|
||||
// specified max cache size. The total calculated size is multiplied by
|
||||
// 1.5 here to account for additional memory consumption that will be
|
||||
// needed during the flush as well as old nodes in the cache that are
|
||||
// referenced by the snapshot used by the transaction.
|
||||
snap := tx.snapshot
|
||||
totalSize := snap.pendingKeys.Size() + snap.pendingRemove.Size()
|
||||
totalSize = uint64(float64(totalSize) * 1.5)
|
||||
return totalSize > c.maxSize
|
||||
}
|
||||
|
||||
// commitTx atomically adds all of the pending keys to add and remove into the
|
||||
// database cache. When adding the pending keys would cause the size of the
|
||||
// cache to exceed the max cache size, or the time since the last flush exceeds
|
||||
// the configured flush interval, the cache will be flushed to the underlying
|
||||
// persistent database.
|
||||
//
|
||||
// This is an atomic operation with respect to the cache in that either all of
|
||||
// the pending keys to add and remove in the transaction will be applied or none
|
||||
// of them will.
|
||||
//
|
||||
// The database cache itself might be flushed to the underlying persistent
|
||||
// database even if the transaction fails to apply, but it will only be the
|
||||
// state of the cache without the transaction applied.
|
||||
//
|
||||
// This function MUST be called during a database write transaction which in
|
||||
// turn implies the database write lock will be held.
|
||||
func (c *dbCache) commitTx(tx *transaction) error {
|
||||
// Flush the cache and write the current transaction directly to the
|
||||
// database if a flush is needed.
|
||||
if c.needsFlush(tx) {
|
||||
if err := c.flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Perform all leveldb updates using an atomic transaction.
|
||||
err := c.commitTreaps(tx.pendingKeys, tx.pendingRemove)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Clear the transaction entries since they have been committed.
|
||||
tx.pendingKeys = nil
|
||||
tx.pendingRemove = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// At this point a database flush is not needed, so atomically commit
|
||||
// the transaction to the cache.
|
||||
|
||||
// Since the cached keys to be added and removed use an immutable treap,
|
||||
// a snapshot is simply obtaining the root of the tree under the lock
|
||||
// which is used to atomically swap the root.
|
||||
c.cacheLock.RLock()
|
||||
newCachedKeys := c.cachedKeys
|
||||
newCachedRemove := c.cachedRemove
|
||||
c.cacheLock.RUnlock()
|
||||
|
||||
// Apply every key to add in the database transaction to the cache.
|
||||
tx.pendingKeys.ForEach(func(k, v []byte) bool {
|
||||
newCachedRemove = newCachedRemove.Delete(k)
|
||||
newCachedKeys = newCachedKeys.Put(k, v)
|
||||
return true
|
||||
})
|
||||
tx.pendingKeys = nil
|
||||
|
||||
// Apply every key to remove in the database transaction to the cache.
|
||||
tx.pendingRemove.ForEach(func(k, v []byte) bool {
|
||||
newCachedKeys = newCachedKeys.Delete(k)
|
||||
newCachedRemove = newCachedRemove.Put(k, nil)
|
||||
return true
|
||||
})
|
||||
tx.pendingRemove = nil
|
||||
|
||||
// Atomically replace the immutable treaps which hold the cached keys to
|
||||
// add and delete.
|
||||
c.cacheLock.Lock()
|
||||
c.cachedKeys = newCachedKeys
|
||||
c.cachedRemove = newCachedRemove
|
||||
c.cacheLock.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close cleanly shuts down the database cache by syncing all data and closing
|
||||
// the underlying leveldb database.
|
||||
//
|
||||
// This function MUST be called with the database write lock held.
|
||||
func (c *dbCache) Close() error {
|
||||
// Flush any outstanding cached entries to disk.
|
||||
if err := c.flush(); err != nil {
|
||||
// Even if there is an error while flushing, attempt to close
|
||||
// the underlying database. The error is ignored since it would
|
||||
// mask the flush error.
|
||||
_ = c.ldb.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
// Close the underlying leveldb database.
|
||||
if err := c.ldb.Close(); err != nil {
|
||||
str := "failed to close underlying leveldb database"
|
||||
return convertErr(str, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// newDbCache returns a new database cache instance backed by the provided
|
||||
// leveldb instance. The cache will be flushed to leveldb when the max size
|
||||
// exceeds the provided value or it has been longer than the provided interval
|
||||
// since the last flush.
|
||||
func newDbCache(ldb *leveldb.DB, store *blockStore, maxSize uint64, flushIntervalSecs uint32) *dbCache {
|
||||
return &dbCache{
|
||||
ldb: ldb,
|
||||
store: store,
|
||||
maxSize: maxSize,
|
||||
flushInterval: time.Second * time.Duration(flushIntervalSecs),
|
||||
lastFlush: time.Now(),
|
||||
cachedKeys: treap.NewImmutable(),
|
||||
cachedRemove: treap.NewImmutable(),
|
||||
}
|
||||
}
|
||||
@@ -1,136 +0,0 @@
|
||||
package ffldb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
ldbutil "github.com/syndtr/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
func TestExhaustedDbCacheIterator(t *testing.T) {
|
||||
db := newTestDb("TestExhaustedDbCacheIterator", t)
|
||||
defer db.Close()
|
||||
|
||||
snapshot, err := db.cache.Snapshot()
|
||||
if err != nil {
|
||||
t.Fatalf("TestExhaustedDbCacheIterator: Error creating cache snapshot: %s", err)
|
||||
}
|
||||
iterator := snapshot.NewIterator(&ldbutil.Range{})
|
||||
|
||||
if next := iterator.Next(); next != false {
|
||||
t.Errorf("TestExhaustedDbCacheIterator: Expected .Next() = false, but got %v", next)
|
||||
}
|
||||
|
||||
if prev := iterator.Prev(); prev != false {
|
||||
t.Errorf("TestExhaustedDbCacheIterator: Expected .Prev() = false, but got %v", prev)
|
||||
}
|
||||
|
||||
if key := iterator.Key(); key != nil {
|
||||
t.Errorf("TestExhaustedDbCacheIterator: Expected .Key() = nil, but got %v", key)
|
||||
}
|
||||
|
||||
if value := iterator.Value(); value != nil {
|
||||
t.Errorf("TestExhaustedDbCacheIterator: Expected .Value() = nil, but got %v", value)
|
||||
}
|
||||
}
|
||||
|
||||
// TestLDBIteratorImplPlaceholders hits functions that are there to implement leveldb iterator.Iterator interface,
|
||||
// but surve no other purpose.
|
||||
func TestLDBIteratorImplPlaceholders(t *testing.T) {
|
||||
db := newTestDb("TestIteratorImplPlaceholders", t)
|
||||
defer db.Close()
|
||||
|
||||
snapshot, err := db.cache.Snapshot()
|
||||
if err != nil {
|
||||
t.Fatalf("TestLDBIteratorImplPlaceholders: Error creating cache snapshot: %s", err)
|
||||
}
|
||||
iterator := newLdbCacheIter(snapshot, &ldbutil.Range{})
|
||||
|
||||
if err = iterator.Error(); err != nil {
|
||||
t.Errorf("TestLDBIteratorImplPlaceholders: Expected .Error() = nil, but got %v", err)
|
||||
}
|
||||
|
||||
// Call SetReleaser to achieve coverage of it. Actually does nothing
|
||||
iterator.SetReleaser(nil)
|
||||
}
|
||||
|
||||
func TestSkipPendingUpdatesCache(t *testing.T) {
|
||||
pdb := newTestDb("TestSkipPendingUpdatesCache", t)
|
||||
defer pdb.Close()
|
||||
|
||||
value := []byte("value")
|
||||
// Add numbered prefixes to keys so that they are in expected order, and before any other keys
|
||||
firstKey := []byte("1 - first")
|
||||
toDeleteKey := []byte("2 - toDelete")
|
||||
toUpdateKey := []byte("3 - toUpdate")
|
||||
secondKey := []byte("4 - second")
|
||||
|
||||
// create initial metadata for test
|
||||
err := pdb.Update(func(dbTx database.Tx) error {
|
||||
metadata := dbTx.Metadata()
|
||||
if err := metadata.Put(firstKey, value); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := metadata.Put(toDeleteKey, value); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := metadata.Put(toUpdateKey, value); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := metadata.Put(secondKey, value); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Error adding to metadata: %s", err)
|
||||
}
|
||||
|
||||
err = pdb.cache.flush()
|
||||
if err != nil {
|
||||
t.Fatalf("Error flushing cache: %s", err)
|
||||
}
|
||||
|
||||
// test skips
|
||||
err = pdb.Update(func(dbTx database.Tx) error {
|
||||
snapshot, err := pdb.cache.Snapshot()
|
||||
if err != nil {
|
||||
t.Fatalf("TestSkipPendingUpdatesCache: Error getting snapshot: %s", err)
|
||||
}
|
||||
|
||||
iterator := snapshot.NewIterator(&ldbutil.Range{})
|
||||
snapshot.pendingRemove = snapshot.pendingRemove.Put(bucketizedKey(metadataBucketID, toDeleteKey), value)
|
||||
snapshot.pendingKeys = snapshot.pendingKeys.Put(bucketizedKey(metadataBucketID, toUpdateKey), value)
|
||||
|
||||
// Check that first is ok
|
||||
iterator.First()
|
||||
expectedKey := bucketizedKey(metadataBucketID, firstKey)
|
||||
actualKey := iterator.Key()
|
||||
if !bytes.Equal(actualKey, expectedKey) {
|
||||
t.Errorf("TestSkipPendingUpdatesCache: 1: key expected to be %v but is %v", expectedKey, actualKey)
|
||||
}
|
||||
|
||||
// Go to the next key, which is second, toDelete and toUpdate will be skipped
|
||||
iterator.Next()
|
||||
expectedKey = bucketizedKey(metadataBucketID, secondKey)
|
||||
actualKey = iterator.Key()
|
||||
if !bytes.Equal(actualKey, expectedKey) {
|
||||
t.Errorf("TestSkipPendingUpdatesCache: 2: key expected to be %s but is %s", expectedKey, actualKey)
|
||||
}
|
||||
|
||||
// now traverse backwards - should get first, toUpdate and toDelete will be skipped
|
||||
iterator.Prev()
|
||||
expectedKey = bucketizedKey(metadataBucketID, firstKey)
|
||||
actualKey = iterator.Key()
|
||||
if !bytes.Equal(actualKey, expectedKey) {
|
||||
t.Errorf("TestSkipPendingUpdatesCache: 4: key expected to be %s but is %s", expectedKey, actualKey)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("TestSkipPendingUpdatesCache: Error running main part of test: %s", err)
|
||||
}
|
||||
}
|
||||
@@ -1,25 +0,0 @@
|
||||
/*
|
||||
Package ffldb implements a driver for the database package that uses leveldb
|
||||
for the backing metadata and flat files for block storage.
|
||||
|
||||
This driver is the recommended driver for use with kaspad. It makes use leveldb
|
||||
for the metadata, flat files for block storage, and checksums in key areas to
|
||||
ensure data integrity.
|
||||
|
||||
Usage
|
||||
|
||||
This package is a driver to the database package and provides the database type
|
||||
of "ffldb". The parameters the Open and Create functions take are the
|
||||
database path as a string and the block network:
|
||||
|
||||
db, err := database.Open("ffldb", "path/to/database", wire.Mainnet)
|
||||
if err != nil {
|
||||
// Handle error
|
||||
}
|
||||
|
||||
db, err := database.Create("ffldb", "path/to/database", wire.Mainnet)
|
||||
if err != nil {
|
||||
// Handle error
|
||||
}
|
||||
*/
|
||||
package ffldb
|
||||
@@ -1,60 +0,0 @@
|
||||
// Copyright (c) 2015-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ffldb
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
dbType = "ffldb"
|
||||
)
|
||||
|
||||
// parseArgs parses the arguments from the database Open/Create methods.
|
||||
func parseArgs(funcName string, args ...interface{}) (string, wire.KaspaNet, error) {
|
||||
if len(args) != 2 {
|
||||
return "", 0, errors.Errorf("invalid arguments to %s.%s -- "+
|
||||
"expected database path and block network", dbType,
|
||||
funcName)
|
||||
}
|
||||
|
||||
dbPath, ok := args[0].(string)
|
||||
if !ok {
|
||||
return "", 0, errors.Errorf("first argument to %s.%s is invalid -- "+
|
||||
"expected database path string", dbType, funcName)
|
||||
}
|
||||
|
||||
network, ok := args[1].(wire.KaspaNet)
|
||||
if !ok {
|
||||
return "", 0, errors.Errorf("second argument to %s.%s is invalid -- "+
|
||||
"expected block network", dbType, funcName)
|
||||
}
|
||||
|
||||
return dbPath, network, nil
|
||||
}
|
||||
|
||||
// openDBDriver is the callback provided during driver registration that opens
|
||||
// an existing database for use.
|
||||
func openDBDriver(args ...interface{}) (database.DB, error) {
|
||||
dbPath, network, err := parseArgs("Open", args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return openDB(dbPath, network, false)
|
||||
}
|
||||
|
||||
// createDBDriver is the callback provided during driver registration that
|
||||
// creates, initializes, and opens a database for use.
|
||||
func createDBDriver(args ...interface{}) (database.DB, error) {
|
||||
dbPath, network, err := parseArgs("Create", args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return openDB(dbPath, network, true)
|
||||
}
|
||||
@@ -1,290 +0,0 @@
|
||||
// Copyright (c) 2015-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ffldb_test
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/database/ffldb"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
)
|
||||
|
||||
// dbType is the database type name for this driver.
|
||||
const dbType = "ffldb"
|
||||
|
||||
// TestCreateOpenFail ensures that errors related to creating and opening a
|
||||
// database are handled properly.
|
||||
func TestCreateOpenFail(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Ensure that attempting to open a database that doesn't exist returns
|
||||
// the expected error.
|
||||
wantErrCode := database.ErrDbDoesNotExist
|
||||
_, err := database.Open(dbType, "noexist", blockDataNet)
|
||||
if !checkDbError(t, "Open", err, wantErrCode) {
|
||||
return
|
||||
}
|
||||
|
||||
// Ensure that attempting to open a database with the wrong number of
|
||||
// parameters returns the expected error.
|
||||
wantErr := errors.Errorf("invalid arguments to %s.Open -- expected "+
|
||||
"database path and block network", dbType)
|
||||
_, err = database.Open(dbType, 1, 2, 3)
|
||||
if err.Error() != wantErr.Error() {
|
||||
t.Errorf("Open: did not receive expected error - got %v, "+
|
||||
"want %v", err, wantErr)
|
||||
return
|
||||
}
|
||||
|
||||
// Ensure that attempting to open a database with an invalid type for
|
||||
// the first parameter returns the expected error.
|
||||
wantErr = errors.Errorf("first argument to %s.Open is invalid -- "+
|
||||
"expected database path string", dbType)
|
||||
_, err = database.Open(dbType, 1, blockDataNet)
|
||||
if err.Error() != wantErr.Error() {
|
||||
t.Errorf("Open: did not receive expected error - got %v, "+
|
||||
"want %v", err, wantErr)
|
||||
return
|
||||
}
|
||||
|
||||
// Ensure that attempting to open a database with an invalid type for
|
||||
// the second parameter returns the expected error.
|
||||
wantErr = errors.Errorf("second argument to %s.Open is invalid -- "+
|
||||
"expected block network", dbType)
|
||||
_, err = database.Open(dbType, "noexist", "invalid")
|
||||
if err.Error() != wantErr.Error() {
|
||||
t.Errorf("Open: did not receive expected error - got %v, "+
|
||||
"want %v", err, wantErr)
|
||||
return
|
||||
}
|
||||
|
||||
// Ensure that attempting to create a database with the wrong number of
|
||||
// parameters returns the expected error.
|
||||
wantErr = errors.Errorf("invalid arguments to %s.Create -- expected "+
|
||||
"database path and block network", dbType)
|
||||
_, err = database.Create(dbType, 1, 2, 3)
|
||||
if err.Error() != wantErr.Error() {
|
||||
t.Errorf("Create: did not receive expected error - got %v, "+
|
||||
"want %v", err, wantErr)
|
||||
return
|
||||
}
|
||||
|
||||
// Ensure that attempting to create a database with an invalid type for
|
||||
// the first parameter returns the expected error.
|
||||
wantErr = errors.Errorf("first argument to %s.Create is invalid -- "+
|
||||
"expected database path string", dbType)
|
||||
_, err = database.Create(dbType, 1, blockDataNet)
|
||||
if err.Error() != wantErr.Error() {
|
||||
t.Errorf("Create: did not receive expected error - got %v, "+
|
||||
"want %v", err, wantErr)
|
||||
return
|
||||
}
|
||||
|
||||
// Ensure that attempting to create a database with an invalid type for
|
||||
// the second parameter returns the expected error.
|
||||
wantErr = errors.Errorf("second argument to %s.Create is invalid -- "+
|
||||
"expected block network", dbType)
|
||||
_, err = database.Create(dbType, "noexist", "invalid")
|
||||
if err.Error() != wantErr.Error() {
|
||||
t.Errorf("Create: did not receive expected error - got %v, "+
|
||||
"want %v", err, wantErr)
|
||||
return
|
||||
}
|
||||
|
||||
// Ensure operations against a closed database return the expected
|
||||
// error.
|
||||
dbPath := filepath.Join(os.TempDir(), "ffldb-createfail")
|
||||
_ = os.RemoveAll(dbPath)
|
||||
db, err := database.Create(dbType, dbPath, blockDataNet)
|
||||
if err != nil {
|
||||
t.Errorf("Create: unexpected error: %v", err)
|
||||
return
|
||||
}
|
||||
defer os.RemoveAll(dbPath)
|
||||
db.Close()
|
||||
|
||||
wantErrCode = database.ErrDbNotOpen
|
||||
err = db.View(func(dbTx database.Tx) error {
|
||||
return nil
|
||||
})
|
||||
if !checkDbError(t, "View", err, wantErrCode) {
|
||||
return
|
||||
}
|
||||
|
||||
wantErrCode = database.ErrDbNotOpen
|
||||
err = db.Update(func(dbTx database.Tx) error {
|
||||
return nil
|
||||
})
|
||||
if !checkDbError(t, "Update", err, wantErrCode) {
|
||||
return
|
||||
}
|
||||
|
||||
wantErrCode = database.ErrDbNotOpen
|
||||
_, err = db.Begin(false)
|
||||
if !checkDbError(t, "Begin(false)", err, wantErrCode) {
|
||||
return
|
||||
}
|
||||
|
||||
wantErrCode = database.ErrDbNotOpen
|
||||
_, err = db.Begin(true)
|
||||
if !checkDbError(t, "Begin(true)", err, wantErrCode) {
|
||||
return
|
||||
}
|
||||
|
||||
wantErrCode = database.ErrDbNotOpen
|
||||
err = db.Close()
|
||||
if !checkDbError(t, "Close", err, wantErrCode) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// TestPersistence ensures that values stored are still valid after closing and
|
||||
// reopening the database.
|
||||
func TestPersistence(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Create a new database to run tests against.
|
||||
dbPath := filepath.Join(os.TempDir(), "ffldb-persistencetest")
|
||||
_ = os.RemoveAll(dbPath)
|
||||
db, err := database.Create(dbType, dbPath, blockDataNet)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to create test database (%s) %v", dbType, err)
|
||||
return
|
||||
}
|
||||
defer os.RemoveAll(dbPath)
|
||||
defer db.Close()
|
||||
|
||||
// Create a bucket, put some values into it, and store a block so they
|
||||
// can be tested for existence on re-open.
|
||||
bucket1Key := []byte("bucket1")
|
||||
storeValues := map[string]string{
|
||||
"b1key1": "foo1",
|
||||
"b1key2": "foo2",
|
||||
"b1key3": "foo3",
|
||||
}
|
||||
genesisBlock := util.NewBlock(dagconfig.MainnetParams.GenesisBlock)
|
||||
genesisHash := dagconfig.MainnetParams.GenesisHash
|
||||
err = db.Update(func(dbTx database.Tx) error {
|
||||
metadataBucket := dbTx.Metadata()
|
||||
if metadataBucket == nil {
|
||||
return errors.Errorf("Metadata: unexpected nil bucket")
|
||||
}
|
||||
|
||||
bucket1, err := metadataBucket.CreateBucket(bucket1Key)
|
||||
if err != nil {
|
||||
return errors.Errorf("CreateBucket: unexpected error: %v",
|
||||
err)
|
||||
}
|
||||
|
||||
for k, v := range storeValues {
|
||||
err := bucket1.Put([]byte(k), []byte(v))
|
||||
if err != nil {
|
||||
return errors.Errorf("Put: unexpected error: %v",
|
||||
err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := dbTx.StoreBlock(genesisBlock); err != nil {
|
||||
return errors.Errorf("StoreBlock: unexpected error: %v",
|
||||
err)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Errorf("Update: unexpected error: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Close and reopen the database to ensure the values persist.
|
||||
db.Close()
|
||||
db, err = database.Open(dbType, dbPath, blockDataNet)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to open test database (%s) %v", dbType, err)
|
||||
return
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Ensure the values previously stored in the 3rd namespace still exist
|
||||
// and are correct.
|
||||
err = db.View(func(dbTx database.Tx) error {
|
||||
metadataBucket := dbTx.Metadata()
|
||||
if metadataBucket == nil {
|
||||
return errors.Errorf("Metadata: unexpected nil bucket")
|
||||
}
|
||||
|
||||
bucket1 := metadataBucket.Bucket(bucket1Key)
|
||||
if bucket1 == nil {
|
||||
return errors.Errorf("Bucket1: unexpected nil bucket")
|
||||
}
|
||||
|
||||
for k, v := range storeValues {
|
||||
gotVal := bucket1.Get([]byte(k))
|
||||
if !reflect.DeepEqual(gotVal, []byte(v)) {
|
||||
return errors.Errorf("Get: key '%s' does not "+
|
||||
"match expected value - got %s, want %s",
|
||||
k, gotVal, v)
|
||||
}
|
||||
}
|
||||
|
||||
genesisBlockBytes, _ := genesisBlock.Bytes()
|
||||
gotBytes, err := dbTx.FetchBlock(genesisHash)
|
||||
if err != nil {
|
||||
return errors.Errorf("FetchBlock: unexpected error: %v",
|
||||
err)
|
||||
}
|
||||
if !reflect.DeepEqual(gotBytes, genesisBlockBytes) {
|
||||
return errors.Errorf("FetchBlock: stored block mismatch")
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Errorf("View: unexpected error: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// TestInterface performs all interfaces tests for this database driver.
|
||||
func TestInterface(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Create a new database to run tests against.
|
||||
dbPath := filepath.Join(os.TempDir(), "ffldb-interfacetest")
|
||||
_ = os.RemoveAll(dbPath)
|
||||
db, err := database.Create(dbType, dbPath, blockDataNet)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to create test database (%s) %v", dbType, err)
|
||||
return
|
||||
}
|
||||
defer os.RemoveAll(dbPath)
|
||||
defer db.Close()
|
||||
|
||||
// Ensure the driver type is the expected value.
|
||||
gotDbType := db.Type()
|
||||
if gotDbType != dbType {
|
||||
t.Errorf("Type: unepxected driver type - got %v, want %v",
|
||||
gotDbType, dbType)
|
||||
return
|
||||
}
|
||||
|
||||
// Run all of the interface tests against the database.
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
|
||||
// Change the maximum file size to a small value to force multiple flat
|
||||
// files with the test data set.
|
||||
// Change maximum open files to small value to force shifts in the LRU
|
||||
// mechanism
|
||||
ffldb.TstRunWithMaxBlockFileSizeAndMaxOpenFiles(db, 2048, 10, func() {
|
||||
testInterface(t, db)
|
||||
})
|
||||
}
|
||||
@@ -1,29 +0,0 @@
|
||||
// Copyright (c) 2015-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
This test file is part of the ffldb package rather than than the ffldb_test
|
||||
package so it can bridge access to the internals to properly test cases which
|
||||
are either not possible or can't reliably be tested via the public interface.
|
||||
The functions are only exported while the tests are being run.
|
||||
*/
|
||||
|
||||
package ffldb
|
||||
|
||||
import "github.com/kaspanet/kaspad/database"
|
||||
|
||||
// TstRunWithMaxBlockFileSize runs the passed function with the maximum allowed
|
||||
// file size for the database set to the provided value. The value will be set
|
||||
// back to the original value upon completion.
|
||||
func TstRunWithMaxBlockFileSizeAndMaxOpenFiles(idb database.DB, size uint32, maxOpenFiles int, fn func()) {
|
||||
ffldb := idb.(*db)
|
||||
origSize := ffldb.store.maxBlockFileSize
|
||||
origMaxOpenFiles := ffldb.store.maxOpenFiles
|
||||
|
||||
ffldb.store.maxBlockFileSize = size
|
||||
ffldb.store.maxOpenFiles = maxOpenFiles
|
||||
fn()
|
||||
ffldb.store.maxBlockFileSize = origSize
|
||||
ffldb.store.maxOpenFiles = origMaxOpenFiles
|
||||
}
|
||||
229
database/ffldb/ff/flatfile.go
Normal file
229
database/ffldb/ff/flatfile.go
Normal file
@@ -0,0 +1,229 @@
|
||||
package ff
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"github.com/pkg/errors"
|
||||
"hash/crc32"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// maxOpenFiles is the max number of open files to maintain in each store's
|
||||
// cache. Note that this does not include the current/write file, so there
|
||||
// will typically be one more than this value open.
|
||||
maxOpenFiles = 25
|
||||
|
||||
// maxFileSize is the maximum size for each file used to store data.
|
||||
//
|
||||
// NOTE: The current code uses uint32 for all offsets, so this value
|
||||
// must be less than 2^32 (4 GiB). This is also why it's a typed
|
||||
// constant.
|
||||
maxFileSize uint32 = 512 * 1024 * 1024 // 512 MiB
|
||||
)
|
||||
|
||||
var (
|
||||
// byteOrder is the preferred byte order used through the flat files.
|
||||
// Sometimes big endian will be used to allow ordered byte sortable
|
||||
// integer values.
|
||||
byteOrder = binary.LittleEndian
|
||||
|
||||
// crc32ByteOrder is the byte order used for CRC-32 checksums.
|
||||
crc32ByteOrder = binary.BigEndian
|
||||
|
||||
// crc32ChecksumLength is the length in bytes of a CRC-32 checksum.
|
||||
crc32ChecksumLength = 4
|
||||
|
||||
// dataLengthLength is the length in bytes of the "data length" section
|
||||
// of a serialized entry in a flat file store.
|
||||
dataLengthLength = 4
|
||||
|
||||
// castagnoli houses the Catagnoli polynomial used for CRC-32 checksums.
|
||||
castagnoli = crc32.MakeTable(crc32.Castagnoli)
|
||||
)
|
||||
|
||||
// flatFileStore houses information used to handle reading and writing data
|
||||
// into flat files with support for multiple concurrent readers.
|
||||
type flatFileStore struct {
|
||||
// basePath is the base path used for the flat files.
|
||||
basePath string
|
||||
|
||||
// storeName is the name of this flat-file store.
|
||||
storeName string
|
||||
|
||||
// The following fields are related to the flat files which hold the
|
||||
// actual data. The number of open files is limited by maxOpenFiles.
|
||||
//
|
||||
// openFilesMutex protects concurrent access to the openFiles map. It
|
||||
// is a RWMutex so multiple readers can simultaneously access open
|
||||
// files.
|
||||
//
|
||||
// openFiles houses the open file handles for existing files which have
|
||||
// been opened read-only along with an individual RWMutex. This scheme
|
||||
// allows multiple concurrent readers to the same file while preventing
|
||||
// the file from being closed out from under them.
|
||||
//
|
||||
// lruMutex protects concurrent access to the least recently used list
|
||||
// and lookup map.
|
||||
//
|
||||
// openFilesLRU tracks how the open files are referenced by pushing the
|
||||
// most recently used files to the front of the list thereby trickling
|
||||
// the least recently used files to end of the list. When a file needs
|
||||
// to be closed due to exceeding the max number of allowed open
|
||||
// files, the one at the end of the list is closed.
|
||||
//
|
||||
// fileNumberToLRUElement is a mapping between a specific file number and
|
||||
// the associated list element on the least recently used list.
|
||||
//
|
||||
// Thus, with the combination of these fields, the database supports
|
||||
// concurrent non-blocking reads across multiple and individual files
|
||||
// along with intelligently limiting the number of open file handles by
|
||||
// closing the least recently used files as needed.
|
||||
//
|
||||
// NOTE: The locking order used throughout is well-defined and MUST be
|
||||
// followed. Failure to do so could lead to deadlocks. In particular,
|
||||
// the locking order is as follows:
|
||||
// 1) openFilesMutex
|
||||
// 2) lruMutex
|
||||
// 3) writeCursor mutex
|
||||
// 4) specific file mutexes
|
||||
//
|
||||
// None of the mutexes are required to be locked at the same time, and
|
||||
// often aren't. However, if they are to be locked simultaneously, they
|
||||
// MUST be locked in the order previously specified.
|
||||
//
|
||||
// Due to the high performance and multi-read concurrency requirements,
|
||||
// write locks should only be held for the minimum time necessary.
|
||||
openFilesMutex sync.RWMutex
|
||||
openFiles map[uint32]*lockableFile
|
||||
lruMutex sync.Mutex
|
||||
openFilesLRU *list.List // Contains uint32 file numbers.
|
||||
fileNumberToLRUElement map[uint32]*list.Element
|
||||
|
||||
// writeCursor houses the state for the current file and location that
|
||||
// new data is written to.
|
||||
writeCursor *writeCursor
|
||||
|
||||
// isClosed is true when the store is closed. Any operations on a closed
|
||||
// store will fail.
|
||||
isClosed bool
|
||||
}
|
||||
|
||||
// writeCursor represents the current file and offset of the flat file on disk
|
||||
// for performing all writes. It also contains a read-write mutex to support
|
||||
// multiple concurrent readers which can reuse the file handle.
|
||||
type writeCursor struct {
|
||||
sync.RWMutex
|
||||
|
||||
// currentFile is the current file that will be appended to when writing
|
||||
// new data.
|
||||
currentFile *lockableFile
|
||||
|
||||
// currentFileNumber is the current file number and is used to allow
|
||||
// readers to use the same open file handle.
|
||||
currentFileNumber uint32
|
||||
|
||||
// currentOffset is the offset in the current file where the next new
|
||||
// data will be written.
|
||||
currentOffset uint32
|
||||
}
|
||||
|
||||
// openFlatFileStore returns a new flat file store with the current file number
|
||||
// and offset set and all fields initialized.
|
||||
func openFlatFileStore(basePath string, storeName string) (*flatFileStore, error) {
|
||||
// Look for the end of the latest file to determine what the write cursor
|
||||
// position is from the viewpoint of the flat files on disk.
|
||||
fileNumber, fileOffset, err := findCurrentLocation(basePath, storeName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
store := &flatFileStore{
|
||||
basePath: basePath,
|
||||
storeName: storeName,
|
||||
openFiles: make(map[uint32]*lockableFile),
|
||||
openFilesLRU: list.New(),
|
||||
fileNumberToLRUElement: make(map[uint32]*list.Element),
|
||||
writeCursor: &writeCursor{
|
||||
currentFile: &lockableFile{},
|
||||
currentFileNumber: fileNumber,
|
||||
currentOffset: fileOffset,
|
||||
},
|
||||
isClosed: false,
|
||||
}
|
||||
return store, nil
|
||||
}
|
||||
|
||||
func (s *flatFileStore) Close() error {
|
||||
if s.isClosed {
|
||||
return errors.Errorf("cannot close a closed store %s",
|
||||
s.storeName)
|
||||
}
|
||||
s.isClosed = true
|
||||
|
||||
// Close the write cursor. We lock the write cursor here
|
||||
// to let it finish any undergoing writing.
|
||||
s.writeCursor.Lock()
|
||||
defer s.writeCursor.Unlock()
|
||||
err := s.writeCursor.currentFile.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Close all open files
|
||||
for _, openFile := range s.openFiles {
|
||||
err := openFile.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *flatFileStore) currentLocation() *flatFileLocation {
|
||||
return &flatFileLocation{
|
||||
fileNumber: s.writeCursor.currentFileNumber,
|
||||
fileOffset: s.writeCursor.currentOffset,
|
||||
dataLength: 0,
|
||||
}
|
||||
}
|
||||
|
||||
// findCurrentLocation searches the database directory for all flat files for a given
|
||||
// store to find the end of the most recent file. This position is considered
|
||||
// the current write cursor.
|
||||
func findCurrentLocation(dbPath string, storeName string) (fileNumber uint32, fileLength uint32, err error) {
|
||||
currentFileNumber := uint32(0)
|
||||
currentFileLength := uint32(0)
|
||||
for {
|
||||
currentFilePath := flatFilePath(dbPath, storeName, currentFileNumber)
|
||||
stat, err := os.Stat(currentFilePath)
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return 0, 0, errors.WithStack(err)
|
||||
}
|
||||
if currentFileNumber > 0 {
|
||||
fileNumber = currentFileNumber - 1
|
||||
}
|
||||
fileLength = currentFileLength
|
||||
break
|
||||
}
|
||||
currentFileLength = uint32(stat.Size())
|
||||
currentFileNumber++
|
||||
}
|
||||
|
||||
log.Tracef("Scan for store '%s' found latest file #%d with length %d",
|
||||
storeName, fileNumber, fileLength)
|
||||
return fileNumber, fileLength, nil
|
||||
}
|
||||
|
||||
// flatFilePath return the file path for the provided store's flat file number.
|
||||
func flatFilePath(dbPath string, storeName string, fileNumber uint32) string {
|
||||
// Choose 9 digits of precision for the filenames. 9 digits provide
|
||||
// 10^9 files @ 512MiB each a total of ~476.84PiB.
|
||||
|
||||
fileName := fmt.Sprintf("%s-%09d.fdb", storeName, fileNumber)
|
||||
return filepath.Join(dbPath, fileName)
|
||||
}
|
||||
74
database/ffldb/ff/flatfile_test.go
Normal file
74
database/ffldb/ff/flatfile_test.go
Normal file
@@ -0,0 +1,74 @@
|
||||
package ff
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFlatFileStoreSanity(t *testing.T) {
|
||||
// Open a test store
|
||||
path, err := ioutil.TempDir("", "TestFlatFileStoreSanity")
|
||||
if err != nil {
|
||||
t.Fatalf("TestFlatFileStoreSanity: TempDir unexpectedly "+
|
||||
"failed: %s", err)
|
||||
}
|
||||
name := "test"
|
||||
store, err := openFlatFileStore(path, name)
|
||||
if err != nil {
|
||||
t.Fatalf("TestFlatFileStoreSanity: openFlatFileStore "+
|
||||
"unexpectedly failed: %s", err)
|
||||
}
|
||||
|
||||
// Write something to the store
|
||||
writeData := []byte("Hello world!")
|
||||
location, err := store.write(writeData)
|
||||
if err != nil {
|
||||
t.Fatalf("TestFlatFileStoreSanity: Write returned "+
|
||||
"unexpected error: %s", err)
|
||||
}
|
||||
|
||||
// Read from the location previously written to
|
||||
readData, err := store.read(location)
|
||||
if err != nil {
|
||||
t.Fatalf("TestFlatFileStoreSanity: read returned "+
|
||||
"unexpected error: %s", err)
|
||||
}
|
||||
|
||||
// Make sure that the written data and the read data are equal
|
||||
if !reflect.DeepEqual(readData, writeData) {
|
||||
t.Fatalf("TestFlatFileStoreSanity: read data and "+
|
||||
"write data are not equal. Wrote: %s, read: %s",
|
||||
string(writeData), string(readData))
|
||||
}
|
||||
}
|
||||
|
||||
func TestFlatFilePath(t *testing.T) {
|
||||
tests := []struct {
|
||||
dbPath string
|
||||
storeName string
|
||||
fileNumber uint32
|
||||
expectedPath string
|
||||
}{
|
||||
{
|
||||
dbPath: "path",
|
||||
storeName: "store",
|
||||
fileNumber: 0,
|
||||
expectedPath: "path/store-000000000.fdb",
|
||||
},
|
||||
{
|
||||
dbPath: "path/to/database",
|
||||
storeName: "blocks",
|
||||
fileNumber: 123456789,
|
||||
expectedPath: "path/to/database/blocks-123456789.fdb",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
path := flatFilePath(test.dbPath, test.storeName, test.fileNumber)
|
||||
if path != test.expectedPath {
|
||||
t.Errorf("TestFlatFilePath: unexpected path. Want: %s, got: %s",
|
||||
test.expectedPath, path)
|
||||
}
|
||||
}
|
||||
}
|
||||
103
database/ffldb/ff/flatfiledb.go
Normal file
103
database/ffldb/ff/flatfiledb.go
Normal file
@@ -0,0 +1,103 @@
|
||||
package ff
|
||||
|
||||
// FlatFileDB is a flat-file database. It supports opening
|
||||
// multiple flat-file stores. See flatFileStore for further
|
||||
// details.
|
||||
type FlatFileDB struct {
|
||||
path string
|
||||
flatFileStores map[string]*flatFileStore
|
||||
}
|
||||
|
||||
// NewFlatFileDB opens the flat-file database defined by
|
||||
// the given path.
|
||||
func NewFlatFileDB(path string) *FlatFileDB {
|
||||
return &FlatFileDB{
|
||||
path: path,
|
||||
flatFileStores: make(map[string]*flatFileStore),
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes the flat-file database.
|
||||
func (ffdb *FlatFileDB) Close() error {
|
||||
for _, store := range ffdb.flatFileStores {
|
||||
err := store.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write appends the specified data bytes to the specified store.
|
||||
// It returns a serialized location handle that's meant to be
|
||||
// stored and later used when querying the data that has just now
|
||||
// been inserted.
|
||||
// See flatFileStore.write() for further details.
|
||||
func (ffdb *FlatFileDB) Write(storeName string, data []byte) ([]byte, error) {
|
||||
store, err := ffdb.store(storeName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
location, err := store.write(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return serializeLocation(location), nil
|
||||
}
|
||||
|
||||
// Read reads data from the specified flat file store at the
|
||||
// location specified by the given serialized location handle.
|
||||
// It returns ErrNotFound if the location does not exist.
|
||||
// See flatFileStore.read() for further details.
|
||||
func (ffdb *FlatFileDB) Read(storeName string, serializedLocation []byte) ([]byte, error) {
|
||||
store, err := ffdb.store(storeName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
location, err := deserializeLocation(serializedLocation)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return store.read(location)
|
||||
}
|
||||
|
||||
// CurrentLocation returns the serialized location handle to
|
||||
// the current location within the flat file store defined
|
||||
// storeName. It is mainly to be used to rollback flat-file
|
||||
// stores in case of data incongruency.
|
||||
func (ffdb *FlatFileDB) CurrentLocation(storeName string) ([]byte, error) {
|
||||
store, err := ffdb.store(storeName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
currentLocation := store.currentLocation()
|
||||
return serializeLocation(currentLocation), nil
|
||||
}
|
||||
|
||||
// Rollback truncates the flat-file store defined by the given
|
||||
// storeName to the location defined by the given serialized
|
||||
// location handle.
|
||||
func (ffdb *FlatFileDB) Rollback(storeName string, serializedLocation []byte) error {
|
||||
store, err := ffdb.store(storeName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
location, err := deserializeLocation(serializedLocation)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return store.rollback(location)
|
||||
}
|
||||
|
||||
func (ffdb *FlatFileDB) store(storeName string) (*flatFileStore, error) {
|
||||
store, ok := ffdb.flatFileStores[storeName]
|
||||
if !ok {
|
||||
var err error
|
||||
store, err = openFlatFileStore(ffdb.path, storeName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ffdb.flatFileStores[storeName] = store
|
||||
}
|
||||
return store, nil
|
||||
}
|
||||
44
database/ffldb/ff/location.go
Normal file
44
database/ffldb/ff/location.go
Normal file
@@ -0,0 +1,44 @@
|
||||
package ff
|
||||
|
||||
import "github.com/pkg/errors"
|
||||
|
||||
// flatFileLocationSerializedSize is the size in bytes of a serialized flat
// file location. See serializeLocation for further details.
const flatFileLocationSerializedSize = 12

// flatFileLocation identifies a particular flat file location.
type flatFileLocation struct {
	fileNumber uint32 // ordinal of the flat file within its store
	fileOffset uint32 // byte offset of the record within that file
	dataLength uint32 // total serialized record length in bytes
}
|
||||
|
||||
// serializeLocation returns the serialization of the passed flat file location
|
||||
// of certain data. This to later on be used for retrieval of said data.
|
||||
// The serialized location format is:
|
||||
//
|
||||
// [0:4] File Number (4 bytes)
|
||||
// [4:8] File offset (4 bytes)
|
||||
// [8:12] Data length (4 bytes)
|
||||
func serializeLocation(location *flatFileLocation) []byte {
|
||||
var serializedLocation [flatFileLocationSerializedSize]byte
|
||||
byteOrder.PutUint32(serializedLocation[0:4], location.fileNumber)
|
||||
byteOrder.PutUint32(serializedLocation[4:8], location.fileOffset)
|
||||
byteOrder.PutUint32(serializedLocation[8:12], location.dataLength)
|
||||
return serializedLocation[:]
|
||||
}
|
||||
|
||||
// deserializeLocation deserializes the passed serialized flat file location.
|
||||
// See serializeLocation for further details.
|
||||
func deserializeLocation(serializedLocation []byte) (*flatFileLocation, error) {
|
||||
if len(serializedLocation) != flatFileLocationSerializedSize {
|
||||
return nil, errors.Errorf("unexpected serializedLocation length: %d",
|
||||
len(serializedLocation))
|
||||
}
|
||||
location := &flatFileLocation{
|
||||
fileNumber: byteOrder.Uint32(serializedLocation[0:4]),
|
||||
fileOffset: byteOrder.Uint32(serializedLocation[4:8]),
|
||||
dataLength: byteOrder.Uint32(serializedLocation[8:12]),
|
||||
}
|
||||
return location, nil
|
||||
}
|
||||
40
database/ffldb/ff/lockablefile.go
Normal file
40
database/ffldb/ff/lockablefile.go
Normal file
@@ -0,0 +1,40 @@
|
||||
package ff
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// lockableFile represents a flat file on disk that has been opened for either
// read or read/write access. It also contains a read-write mutex to support
// multiple concurrent readers.
type lockableFile struct {
	sync.RWMutex
	file

	// isClosed reports whether Close has already been called on this file.
	isClosed bool
}

// file is an interface which acts very similar to a *os.File and is typically
// implemented by it. It exists so the test code can provide mock files for
// properly testing corruption and file system issues.
type file interface {
	io.Closer
	io.WriterAt
	io.ReaderAt
	Truncate(size int64) error
	Sync() error
}
|
||||
|
||||
func (lf *lockableFile) Close() error {
|
||||
if lf.isClosed {
|
||||
return errors.Errorf("cannot close an already closed file")
|
||||
}
|
||||
lf.isClosed = true
|
||||
|
||||
lf.Lock()
|
||||
defer lf.Unlock()
|
||||
|
||||
return errors.WithStack(lf.file.Close())
|
||||
}
|
||||
5
database/ffldb/ff/log.go
Normal file
5
database/ffldb/ff/log.go
Normal file
@@ -0,0 +1,5 @@
|
||||
package ff
|
||||
|
||||
import "github.com/kaspanet/kaspad/logger"
|
||||
|
||||
// log is the package-level logger for the flat-file database code,
// tagged with the KSDB subsystem.
var log, _ = logger.Get(logger.SubsystemTags.KSDB)
|
||||
167
database/ffldb/ff/read.go
Normal file
167
database/ffldb/ff/read.go
Normal file
@@ -0,0 +1,167 @@
|
||||
package ff
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/pkg/errors"
|
||||
"hash/crc32"
|
||||
"os"
|
||||
)
|
||||
|
||||
// read reads the specified flat file record and returns the data. It ensures
|
||||
// the integrity of the data by comparing the calculated checksum against the
|
||||
// one stored in the flat file. This function also automatically handles all
|
||||
// file management such as opening and closing files as necessary to stay
|
||||
// within the maximum allowed open files limit. It returns ErrNotFound if the
|
||||
// location does not exist.
|
||||
//
|
||||
// Format: <data length><data><checksum>
|
||||
func (s *flatFileStore) read(location *flatFileLocation) ([]byte, error) {
|
||||
if s.isClosed {
|
||||
return nil, errors.Errorf("cannot read from a closed store %s",
|
||||
s.storeName)
|
||||
}
|
||||
|
||||
// Return not-found if the location is greater than or equal to
|
||||
// the current write cursor.
|
||||
if s.writeCursor.currentFileNumber < location.fileNumber ||
|
||||
(s.writeCursor.currentFileNumber == location.fileNumber && s.writeCursor.currentOffset <= location.fileOffset) {
|
||||
return nil, database.ErrNotFound
|
||||
}
|
||||
|
||||
// Get the referenced flat file.
|
||||
flatFile, err := s.flatFile(location.fileNumber)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
data := make([]byte, location.dataLength)
|
||||
n, err := flatFile.file.ReadAt(data, int64(location.fileOffset))
|
||||
flatFile.RUnlock()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to read data in store '%s' "+
|
||||
"from file %d, offset %d", s.storeName, location.fileNumber,
|
||||
location.fileOffset)
|
||||
}
|
||||
|
||||
// Calculate the checksum of the read data and ensure it matches the
|
||||
// serialized checksum.
|
||||
serializedChecksum := crc32ByteOrder.Uint32(data[n-crc32ChecksumLength:])
|
||||
calculatedChecksum := crc32.Checksum(data[:n-crc32ChecksumLength], castagnoli)
|
||||
if serializedChecksum != calculatedChecksum {
|
||||
return nil, errors.Errorf("data in store '%s' does not match "+
|
||||
"checksum - got %x, want %x", s.storeName, calculatedChecksum,
|
||||
serializedChecksum)
|
||||
}
|
||||
|
||||
// The data excludes the length of the data and the checksum.
|
||||
return data[dataLengthLength : n-crc32ChecksumLength], nil
|
||||
}
|
||||
|
||||
// flatFile attempts to return an existing file handle for the passed flat file
|
||||
// number if it is already open as well as marking it as most recently used. It
|
||||
// will also open the file when it's not already open subject to the rules
|
||||
// described in openFile. Also handles closing files as needed to avoid going
|
||||
// over the max allowed open files.
|
||||
//
|
||||
// NOTE: The returned flat file will already have the read lock acquired and
|
||||
// the caller MUST call .RUnlock() to release it once it has finished all read
|
||||
// operations. This is necessary because otherwise it would be possible for a
|
||||
// separate goroutine to close the file after it is returned from here, but
|
||||
// before the caller has acquired a read lock.
|
||||
func (s *flatFileStore) flatFile(fileNumber uint32) (*lockableFile, error) {
	// When the requested flat file is open for writes, return it.
	s.writeCursor.RLock()
	if fileNumber == s.writeCursor.currentFileNumber && s.writeCursor.currentFile.file != nil {
		openFile := s.writeCursor.currentFile
		// Read-lock the file before releasing the cursor lock so
		// it cannot be closed in between.
		openFile.RLock()
		s.writeCursor.RUnlock()
		return openFile, nil
	}
	s.writeCursor.RUnlock()

	// Try to return an open file under the overall files read lock.
	s.openFilesMutex.RLock()
	if openFile, ok := s.openFiles[fileNumber]; ok {
		// Mark the file as most-recently used.
		s.lruMutex.Lock()
		s.openFilesLRU.MoveToFront(s.fileNumberToLRUElement[fileNumber])
		s.lruMutex.Unlock()

		// Read-lock the file before releasing the map lock so it
		// cannot be closed out from under the caller.
		openFile.RLock()
		s.openFilesMutex.RUnlock()
		return openFile, nil
	}
	s.openFilesMutex.RUnlock()

	// Since the file isn't open already, need to check the open files map
	// again under write lock in case multiple readers got here and a
	// separate one is already opening the file.
	s.openFilesMutex.Lock()
	if openFlatFile, ok := s.openFiles[fileNumber]; ok {
		openFlatFile.RLock()
		s.openFilesMutex.Unlock()
		return openFlatFile, nil
	}

	// The file isn't open, so open it while potentially closing the least
	// recently used one as needed.
	openFile, err := s.openFile(fileNumber)
	if err != nil {
		s.openFilesMutex.Unlock()
		return nil, err
	}
	openFile.RLock()
	s.openFilesMutex.Unlock()
	return openFile, nil
}
|
||||
|
||||
// openFile returns a read-only file handle for the passed flat file number.
|
||||
// The function also keeps track of the open files, performs least recently
|
||||
// used tracking, and limits the number of open files to maxOpenFiles by closing
|
||||
// the least recently used file as needed.
|
||||
//
|
||||
// This function MUST be called with the open files mutex (s.openFilesMutex)
|
||||
// locked for WRITES.
|
||||
func (s *flatFileStore) openFile(fileNumber uint32) (*lockableFile, error) {
	// Open the appropriate file as read-only.
	filePath := flatFilePath(s.basePath, s.storeName, fileNumber)
	file, err := os.Open(filePath)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	flatFile := &lockableFile{file: file}

	// Close the least recently used file if the file exceeds the max
	// allowed open files. This is not done until after the file open in
	// case the file fails to open, there is no need to close any files.
	//
	// A write lock is required on the LRU list here to protect against
	// modifications happening as already open files are read from and
	// shuffled to the front of the list.
	//
	// Also, add the file that was just opened to the front of the least
	// recently used list to indicate it is the most recently used file and
	// therefore should be closed last.
	s.lruMutex.Lock()
	lruList := s.openFilesLRU
	if lruList.Len() >= maxOpenFiles {
		lruFileNumber := lruList.Remove(lruList.Back()).(uint32)
		oldFile := s.openFiles[lruFileNumber]

		// Close the old file under the write lock for the file in case
		// any readers are currently reading from it so it's not closed
		// out from under them. The embedded handle is closed directly
		// (not via lockableFile.Close) while this goroutine holds the
		// file's write lock.
		oldFile.Lock()
		_ = oldFile.file.Close()
		oldFile.Unlock()

		delete(s.openFiles, lruFileNumber)
		delete(s.fileNumberToLRUElement, lruFileNumber)
	}
	s.fileNumberToLRUElement[fileNumber] = lruList.PushFront(fileNumber)
	s.lruMutex.Unlock()

	// Store a reference to it in the open files map.
	s.openFiles[fileNumber] = flatFile

	return flatFile, nil
}
|
||||
142
database/ffldb/ff/rollback.go
Normal file
142
database/ffldb/ff/rollback.go
Normal file
@@ -0,0 +1,142 @@
|
||||
package ff
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"os"
|
||||
)
|
||||
|
||||
// rollback rolls the flat files on disk back to the provided file number
|
||||
// and offset. This involves potentially deleting and truncating the files that
|
||||
// were partially written.
|
||||
//
|
||||
// There are effectively two scenarios to consider here:
|
||||
// 1) Transient write failures from which recovery is possible
|
||||
// 2) More permanent failures such as hard disk death and/or removal
|
||||
//
|
||||
// In either case, the write cursor will be repositioned to the old flat file
|
||||
// offset regardless of any other errors that occur while attempting to undo
|
||||
// writes.
|
||||
//
|
||||
// For the first scenario, this will lead to any data which failed to be undone
|
||||
// being overwritten and thus behaves as desired as the system continues to run.
|
||||
//
|
||||
// For the second scenario, the metadata which stores the current write cursor
|
||||
// position within the flat files will not have been updated yet and thus if
|
||||
// the system eventually recovers (perhaps the hard drive is reconnected), it
|
||||
// will also lead to any data which failed to be undone being overwritten and
|
||||
// thus behaves as desired.
|
||||
func (s *flatFileStore) rollback(targetLocation *flatFileLocation) error {
	if s.isClosed {
		return errors.Errorf("cannot rollback a closed store %s",
			s.storeName)
	}

	// Grab the write cursor mutex since it is modified throughout this
	// function.
	s.writeCursor.Lock()
	defer s.writeCursor.Unlock()

	// Nothing to do if the rollback point is the same as the current write
	// cursor.
	targetFileNumber := targetLocation.fileNumber
	targetFileOffset := targetLocation.fileOffset
	if s.writeCursor.currentFileNumber == targetFileNumber && s.writeCursor.currentOffset == targetFileOffset {
		return nil
	}

	// If the rollback point is greater than the current write cursor then
	// something has gone very wrong, e.g. database corruption.
	if s.writeCursor.currentFileNumber < targetFileNumber ||
		(s.writeCursor.currentFileNumber == targetFileNumber && s.writeCursor.currentOffset < targetFileOffset) {
		return errors.Errorf("targetLocation is greater than the " +
			"current write cursor")
	}

	// Regardless of any failures that happen below, reposition the write
	// cursor to the target flat file and offset. The deferred closure
	// runs even on the error paths below, by design.
	defer func() {
		s.writeCursor.currentFileNumber = targetFileNumber
		s.writeCursor.currentOffset = targetFileOffset
	}()

	log.Warnf("ROLLBACK: Rolling back to file %d, offset %d",
		targetFileNumber, targetFileOffset)

	// Close the current write file if it needs to be deleted. The close
	// error is deliberately ignored: the file is about to be deleted.
	if s.writeCursor.currentFileNumber > targetFileNumber {
		s.writeCursor.currentFile.Lock()
		if s.writeCursor.currentFile.file != nil {
			s.writeCursor.currentFile.file.Close()
			s.writeCursor.currentFile.file = nil
		}
		s.writeCursor.currentFile.Unlock()
	}

	// Delete all files that are newer than the provided rollback file
	// while also moving the write cursor file backwards accordingly.
	// Both locks are held for the whole loop, as deleteFile requires.
	s.lruMutex.Lock()
	defer s.lruMutex.Unlock()
	s.openFilesMutex.Lock()
	defer s.openFilesMutex.Unlock()
	for s.writeCursor.currentFileNumber > targetFileNumber {
		err := s.deleteFile(s.writeCursor.currentFileNumber)
		if err != nil {
			return errors.Wrapf(err, "ROLLBACK: Failed to delete file "+
				"number %d in store '%s'", s.writeCursor.currentFileNumber,
				s.storeName)
		}
		s.writeCursor.currentFileNumber--
	}

	// Open the file for the current write cursor if needed.
	s.writeCursor.currentFile.Lock()
	if s.writeCursor.currentFile.file == nil {
		openFile, err := s.openWriteFile(s.writeCursor.currentFileNumber)
		if err != nil {
			s.writeCursor.currentFile.Unlock()
			return err
		}
		s.writeCursor.currentFile.file = openFile
	}

	// Truncate the file to the provided target offset.
	err := s.writeCursor.currentFile.file.Truncate(int64(targetFileOffset))
	if err != nil {
		s.writeCursor.currentFile.Unlock()
		return errors.Wrapf(err, "ROLLBACK: Failed to truncate file %d "+
			"in store '%s'", s.writeCursor.currentFileNumber, s.storeName)
	}

	// Sync the file to disk.
	err = s.writeCursor.currentFile.file.Sync()
	s.writeCursor.currentFile.Unlock()
	if err != nil {
		return errors.Wrapf(err, "ROLLBACK: Failed to sync file %d in "+
			"store '%s'", s.writeCursor.currentFileNumber, s.storeName)
	}
	return nil
}
|
||||
|
||||
// deleteFile removes the file for the passed flat file number.
|
||||
// This function MUST be called with the lruMutex and the openFilesMutex
|
||||
// held for writes.
|
||||
func (s *flatFileStore) deleteFile(fileNumber uint32) error {
|
||||
// Cleanup the file before deleting it
|
||||
if file, ok := s.openFiles[fileNumber]; ok {
|
||||
file.Lock()
|
||||
defer file.Unlock()
|
||||
err := file.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
lruElement := s.fileNumberToLRUElement[fileNumber]
|
||||
s.openFilesLRU.Remove(lruElement)
|
||||
delete(s.openFiles, fileNumber)
|
||||
delete(s.fileNumberToLRUElement, fileNumber)
|
||||
}
|
||||
|
||||
// Delete the file from disk
|
||||
filePath := flatFilePath(s.basePath, s.storeName, fileNumber)
|
||||
return errors.WithStack(os.Remove(filePath))
|
||||
}
|
||||
165
database/ffldb/ff/write.go
Normal file
165
database/ffldb/ff/write.go
Normal file
@@ -0,0 +1,165 @@
|
||||
package ff
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"hash/crc32"
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// write appends the specified data bytes to the store's write cursor location
|
||||
// and increments it accordingly. When the data would exceed the max file size
|
||||
// for the current flat file, this function will close the current file, create
|
||||
// the next file, update the write cursor, and write the data to the new file.
|
||||
//
|
||||
// The write cursor will also be advanced the number of bytes actually written
|
||||
// in the event of failure.
|
||||
//
|
||||
// Format: <data length><data><checksum>
|
||||
func (s *flatFileStore) write(data []byte) (*flatFileLocation, error) {
	if s.isClosed {
		return nil, errors.Errorf("cannot write to a closed store %s",
			s.storeName)
	}

	// Compute how many bytes will be written.
	// 4 bytes for data length + length of the data + 4 bytes for checksum.
	dataLength := uint32(len(data))
	fullLength := uint32(dataLengthLength) + dataLength + uint32(crc32ChecksumLength)

	// Move to the next file if adding the new data would exceed the max
	// allowed size for the current flat file. Also detect overflow because
	// even though it isn't possible currently, numbers might change in
	// the future to make it possible.
	//
	// NOTE: The writeCursor.currentOffset field isn't protected by the
	// mutex since it's only read/changed during this function which can
	// only be called during a write transaction, of which there can be
	// only one at a time.
	cursor := s.writeCursor
	finalOffset := cursor.currentOffset + fullLength
	if finalOffset < cursor.currentOffset || finalOffset > maxFileSize {
		// This is done under the write cursor lock since the curFileNum
		// field is accessed elsewhere by readers.
		//
		// Close the current write file to force a read-only reopen
		// with LRU tracking. The close is done under the write lock
		// for the file to prevent it from being closed out from under
		// any readers currently reading from it.
		cursor.Lock()
		cursor.currentFile.Lock()
		if cursor.currentFile.file != nil {
			// The close error is deliberately ignored here.
			_ = cursor.currentFile.file.Close()
			cursor.currentFile.file = nil
		}
		cursor.currentFile.Unlock()

		// Start writes into next file.
		cursor.currentFileNumber++
		cursor.currentOffset = 0
		cursor.Unlock()
	}

	// All writes are done under the write lock for the file to ensure any
	// readers are finished and blocked first.
	cursor.currentFile.Lock()
	defer cursor.currentFile.Unlock()

	// Open the current file if needed. This will typically only be the
	// case when moving to the next file to write to or on initial database
	// load. However, it might also be the case if rollbacks happened after
	// file writes started during a transaction commit.
	if cursor.currentFile.file == nil {
		file, err := s.openWriteFile(cursor.currentFileNumber)
		if err != nil {
			return nil, err
		}
		cursor.currentFile.file = file
	}

	// Remember where the record starts; the writeData calls below
	// advance cursor.currentOffset as they go.
	originalOffset := cursor.currentOffset
	hasher := crc32.New(castagnoli)
	var scratch [4]byte

	// Data length.
	byteOrder.PutUint32(scratch[:], dataLength)
	err := s.writeData(scratch[:], "data length")
	if err != nil {
		return nil, err
	}
	_, _ = hasher.Write(scratch[:])

	// Data.
	err = s.writeData(data[:], "data")
	if err != nil {
		return nil, err
	}
	_, _ = hasher.Write(data)

	// Castagnoli CRC-32 as a checksum of all the previous.
	err = s.writeData(hasher.Sum(nil), "checksum")
	if err != nil {
		return nil, err
	}

	// Sync the file to disk.
	err = cursor.currentFile.file.Sync()
	if err != nil {
		return nil, errors.Wrapf(err, "failed to sync file %d "+
			"in store '%s'", cursor.currentFileNumber, s.storeName)
	}

	location := &flatFileLocation{
		fileNumber: cursor.currentFileNumber,
		fileOffset: originalOffset,
		dataLength: fullLength,
	}
	return location, nil
}
|
||||
|
||||
// openWriteFile returns a file handle for the passed flat file number in
|
||||
// read/write mode. The file will be created if needed. It is typically used
|
||||
// for the current file that will have all new data appended. Unlike openFile,
|
||||
// this function does not keep track of the open file and it is not subject to
|
||||
// the maxOpenFiles limit.
|
||||
func (s *flatFileStore) openWriteFile(fileNumber uint32) (file, error) {
|
||||
// The current flat file needs to be read-write so it is possible to
|
||||
// append to it. Also, it shouldn't be part of the least recently used
|
||||
// file.
|
||||
filePath := flatFilePath(s.basePath, s.storeName, fileNumber)
|
||||
file, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE, 0666)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to open file %q",
|
||||
filePath)
|
||||
}
|
||||
|
||||
return file, nil
|
||||
}
|
||||
|
||||
// writeData is a helper function for write which writes the provided data at
|
||||
// the current write offset and updates the write cursor accordingly. The field
|
||||
// name parameter is only used when there is an error to provide a nicer error
|
||||
// message.
|
||||
//
|
||||
// The write cursor will be advanced the number of bytes actually written in the
|
||||
// event of failure.
|
||||
//
|
||||
// NOTE: This function MUST be called with the write cursor current file lock
|
||||
// held and must only be called during a write transaction so it is effectively
|
||||
// locked for writes. Also, the write cursor current file must NOT be nil.
|
||||
func (s *flatFileStore) writeData(data []byte, fieldName string) error {
|
||||
cursor := s.writeCursor
|
||||
n, err := cursor.currentFile.file.WriteAt(data, int64(cursor.currentOffset))
|
||||
cursor.currentOffset += uint32(n)
|
||||
if err != nil {
|
||||
var pathErr *os.PathError
|
||||
if ok := errors.As(err, &pathErr); ok && pathErr.Err == syscall.ENOSPC {
|
||||
panic("No space left on the hard disk, exiting...")
|
||||
}
|
||||
return errors.Wrapf(err, "failed to write %s in store %s to file %d "+
|
||||
"at offset %d", fieldName, s.storeName, cursor.currentFileNumber,
|
||||
cursor.currentOffset-uint32(n))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
177
database/ffldb/ffldb.go
Normal file
177
database/ffldb/ffldb.go
Normal file
@@ -0,0 +1,177 @@
|
||||
package ffldb
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/database/ffldb/ff"
|
||||
"github.com/kaspanet/kaspad/database/ffldb/ldb"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
	// flatFilesBucket keeps an index of flat-file stores and their
	// current locations. Among other things, it is used to repair
	// the database in case a corruption occurs.
	flatFilesBucket = database.MakeBucket([]byte("flat-files"))
)
|
||||
|
||||
// ffldb is a database utilizing LevelDB for key-value data and
// flat-files for raw data storage.
type ffldb struct {
	flatFileDB *ff.FlatFileDB // flat-file backend for raw data
	levelDB    *ldb.LevelDB   // LevelDB backend for key-value data
}
|
||||
|
||||
// Open opens a new ffldb with the given path.
|
||||
func Open(path string) (database.Database, error) {
|
||||
flatFileDB := ff.NewFlatFileDB(path)
|
||||
levelDB, err := ldb.NewLevelDB(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
db := &ffldb{
|
||||
flatFileDB: flatFileDB,
|
||||
levelDB: levelDB,
|
||||
}
|
||||
|
||||
err = db.initialize()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// Close closes the database.
|
||||
// This method is part of the Database interface.
|
||||
func (db *ffldb) Close() error {
|
||||
err := db.flatFileDB.Close()
|
||||
if err != nil {
|
||||
ldbCloseErr := db.levelDB.Close()
|
||||
if ldbCloseErr != nil {
|
||||
return errors.Wrapf(err, "err occurred during leveldb close: %s", ldbCloseErr)
|
||||
}
|
||||
return err
|
||||
}
|
||||
return db.levelDB.Close()
|
||||
}
|
||||
|
||||
// Put sets the value for the given key. It overwrites
// any previous value for that key.
// This method is part of the DataAccessor interface.
func (db *ffldb) Put(key []byte, value []byte) error {
	// Key-value data is delegated to the LevelDB backend.
	return db.levelDB.Put(key, value)
}
|
||||
|
||||
// Get gets the value for the given key. It returns
// ErrNotFound if the given key does not exist.
// This method is part of the DataAccessor interface.
func (db *ffldb) Get(key []byte) ([]byte, error) {
	// Key-value data is delegated to the LevelDB backend.
	return db.levelDB.Get(key)
}
|
||||
|
||||
// Has returns true if the database contains the
// given key.
// This method is part of the DataAccessor interface.
func (db *ffldb) Has(key []byte) (bool, error) {
	// Key-value data is delegated to the LevelDB backend.
	return db.levelDB.Has(key)
}
|
||||
|
||||
// Delete deletes the value for the given key. Will not
// return an error if the key doesn't exist.
// This method is part of the DataAccessor interface.
func (db *ffldb) Delete(key []byte) error {
	// Key-value data is delegated to the LevelDB backend.
	return db.levelDB.Delete(key)
}
|
||||
|
||||
// AppendToStore appends the given data to the flat
// file store defined by storeName. This function
// returns a serialized location handle that's meant
// to be stored and later used when querying the data
// that has just now been inserted.
// This method is part of the DataAccessor interface.
func (db *ffldb) AppendToStore(storeName string, data []byte) ([]byte, error) {
	// The actual logic lives in the appendToStore helper, which is
	// parameterized on a DataAccessor so it can be reused.
	return appendToStore(db, db.flatFileDB, storeName, data)
}
|
||||
|
||||
func appendToStore(accessor database.DataAccessor, ffdb *ff.FlatFileDB, storeName string, data []byte) ([]byte, error) {
|
||||
// Save a reference to the current location in case
|
||||
// we fail and need to rollback.
|
||||
previousLocation, err := ffdb.CurrentLocation(storeName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rollback := func() error {
|
||||
return ffdb.Rollback(storeName, previousLocation)
|
||||
}
|
||||
|
||||
// Append the data to the store and rollback in case of an error.
|
||||
location, err := ffdb.Write(storeName, data)
|
||||
if err != nil {
|
||||
rollbackErr := rollback()
|
||||
if rollbackErr != nil {
|
||||
return nil, errors.Wrapf(err, "error occurred during rollback: %s", rollbackErr)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get the new location. If this fails we won't be able to update
|
||||
// the current store location, in which case we roll back.
|
||||
currentLocation, err := ffdb.CurrentLocation(storeName)
|
||||
if err != nil {
|
||||
rollbackErr := rollback()
|
||||
if rollbackErr != nil {
|
||||
return nil, errors.Wrapf(err, "error occurred during rollback: %s", rollbackErr)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Set the current store location and roll back in case an error.
|
||||
err = setCurrentStoreLocation(accessor, storeName, currentLocation)
|
||||
if err != nil {
|
||||
rollbackErr := rollback()
|
||||
if rollbackErr != nil {
|
||||
return nil, errors.Wrapf(err, "error occurred during rollback: %s", rollbackErr)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return location, err
|
||||
}
|
||||
|
||||
// setCurrentStoreLocation persists the given serialized location handle
// as the current location of the flat-file store named storeName, under
// the flat-files bucket.
func setCurrentStoreLocation(accessor database.DataAccessor, storeName string, location []byte) error {
	locationKey := flatFilesBucket.Key([]byte(storeName))
	return accessor.Put(locationKey, location)
}
|
||||
|
||||
// RetrieveFromStore retrieves data from the store defined by
// storeName using the given serialized location handle. It
// returns ErrNotFound if the location does not exist. See
// AppendToStore for further details.
// This method is part of the DataAccessor interface.
func (db *ffldb) RetrieveFromStore(storeName string, location []byte) ([]byte, error) {
	// Raw data is delegated to the flat-file backend.
	return db.flatFileDB.Read(storeName, location)
}
|
||||
|
||||
// Cursor begins a new cursor over the given bucket.
|
||||
// This method is part of the DataAccessor interface.
|
||||
func (db *ffldb) Cursor(bucket []byte) (database.Cursor, error) {
|
||||
ldbCursor := db.levelDB.Cursor(bucket)
|
||||
|
||||
return ldbCursor, nil
|
||||
}
|
||||
|
||||
// Begin begins a new ffldb transaction.
|
||||
// This method is part of the Database interface.
|
||||
func (db *ffldb) Begin() (database.Transaction, error) {
|
||||
ldbTx, err := db.levelDB.Begin()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
transaction := &transaction{
|
||||
ldbTx: ldbTx,
|
||||
ffdb: db.flatFileDB,
|
||||
}
|
||||
return transaction, nil
|
||||
}
|
||||
109
database/ffldb/ffldb_test.go
Normal file
109
database/ffldb/ffldb_test.go
Normal file
@@ -0,0 +1,109 @@
|
||||
package ffldb
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"io/ioutil"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestRepairFlatFiles(t *testing.T) {
|
||||
// Create a temp db to run tests against
|
||||
path, err := ioutil.TempDir("", "TestRepairFlatFiles")
|
||||
if err != nil {
|
||||
t.Fatalf("TestRepairFlatFiles: TempDir unexpectedly "+
|
||||
"failed: %s", err)
|
||||
}
|
||||
db, err := Open(path)
|
||||
if err != nil {
|
||||
t.Fatalf("TestRepairFlatFiles: Open unexpectedly "+
|
||||
"failed: %s", err)
|
||||
}
|
||||
isOpen := true
|
||||
defer func() {
|
||||
if isOpen {
|
||||
err := db.Close()
|
||||
if err != nil {
|
||||
t.Fatalf("TestRepairFlatFiles: Close unexpectedly "+
|
||||
"failed: %s", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Cast to ffldb since we're going to be messing with its internals
|
||||
ffldbInstance, ok := db.(*ffldb)
|
||||
if !ok {
|
||||
t.Fatalf("TestRepairFlatFiles: unexpectedly can't cast " +
|
||||
"db to ffldb")
|
||||
}
|
||||
|
||||
// Append data to the same store
|
||||
storeName := "test"
|
||||
_, err = ffldbInstance.AppendToStore(storeName, []byte("data1"))
|
||||
if err != nil {
|
||||
t.Fatalf("TestRepairFlatFiles: AppendToStore unexpectedly "+
|
||||
"failed: %s", err)
|
||||
}
|
||||
|
||||
// Grab the current location to test against later
|
||||
oldCurrentLocation, err := ffldbInstance.flatFileDB.CurrentLocation(storeName)
|
||||
if err != nil {
|
||||
t.Fatalf("TestRepairFlatFiles: CurrentStoreLocation "+
|
||||
"unexpectedly failed: %s", err)
|
||||
}
|
||||
|
||||
// Append more data to the same store. We expect this to disappear later.
|
||||
location2, err := ffldbInstance.AppendToStore(storeName, []byte("data2"))
|
||||
if err != nil {
|
||||
t.Fatalf("TestRepairFlatFiles: AppendToStore unexpectedly "+
|
||||
"failed: %s", err)
|
||||
}
|
||||
|
||||
// Manually update the current location to point to the first piece of data
|
||||
err = setCurrentStoreLocation(ffldbInstance, storeName, oldCurrentLocation)
|
||||
if err != nil {
|
||||
t.Fatalf("TestRepairFlatFiles: setCurrentStoreLocation "+
|
||||
"unexpectedly failed: %s", err)
|
||||
}
|
||||
|
||||
// Reopen the database
|
||||
err = ffldbInstance.Close()
|
||||
if err != nil {
|
||||
t.Fatalf("TestRepairFlatFiles: Close unexpectedly "+
|
||||
"failed: %s", err)
|
||||
}
|
||||
isOpen = false
|
||||
db, err = Open(path)
|
||||
if err != nil {
|
||||
t.Fatalf("TestRepairFlatFiles: Open unexpectedly "+
|
||||
"failed: %s", err)
|
||||
}
|
||||
isOpen = true
|
||||
ffldbInstance, ok = db.(*ffldb)
|
||||
if !ok {
|
||||
t.Fatalf("TestRepairFlatFiles: unexpectedly can't cast " +
|
||||
"db to ffldb")
|
||||
}
|
||||
|
||||
// Make sure that the current location rolled back as expected
|
||||
currentLocation, err := ffldbInstance.flatFileDB.CurrentLocation(storeName)
|
||||
if err != nil {
|
||||
t.Fatalf("TestRepairFlatFiles: CurrentStoreLocation "+
|
||||
"unexpectedly failed: %s", err)
|
||||
}
|
||||
if !reflect.DeepEqual(oldCurrentLocation, currentLocation) {
|
||||
t.Fatalf("TestRepairFlatFiles: currentLocation did " +
|
||||
"not roll back")
|
||||
}
|
||||
|
||||
// Make sure that we can't get data that no longer exists
|
||||
_, err = ffldbInstance.RetrieveFromStore(storeName, location2)
|
||||
if err == nil {
|
||||
t.Fatalf("TestRepairFlatFiles: RetrieveFromStore " +
|
||||
"unexpectedly succeeded")
|
||||
}
|
||||
if !database.IsNotFoundError(err) {
|
||||
t.Fatalf("TestRepairFlatFiles: RetrieveFromStore "+
|
||||
"returned wrong error: %s", err)
|
||||
}
|
||||
}
|
||||
@@ -1,23 +0,0 @@
|
||||
package ffldb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
)
|
||||
|
||||
func registerDriver() {
|
||||
driver := database.Driver{
|
||||
DbType: dbType,
|
||||
Create: createDBDriver,
|
||||
Open: openDBDriver,
|
||||
}
|
||||
if err := database.RegisterDriver(driver); err != nil {
|
||||
panic(fmt.Sprintf("Failed to regiser database driver '%s': %s",
|
||||
dbType, err))
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
registerDriver()
|
||||
}
|
||||
56
database/ffldb/initialize.go
Normal file
56
database/ffldb/initialize.go
Normal file
@@ -0,0 +1,56 @@
|
||||
package ffldb
|
||||
|
||||
// initialize initializes the database. If this function fails then the
|
||||
// database is irrecoverably corrupted.
|
||||
func (db *ffldb) initialize() error {
|
||||
flatFiles, err := db.flatFiles()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for storeName, currentLocation := range flatFiles {
|
||||
err := db.tryRepair(storeName, currentLocation)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *ffldb) flatFiles() (map[string][]byte, error) {
|
||||
flatFilesBucketPath := flatFilesBucket.Path()
|
||||
flatFilesCursor := db.levelDB.Cursor(flatFilesBucketPath)
|
||||
defer func() {
|
||||
err := flatFilesCursor.Close()
|
||||
if err != nil {
|
||||
log.Warnf("cursor failed to close")
|
||||
}
|
||||
}()
|
||||
|
||||
flatFiles := make(map[string][]byte)
|
||||
for flatFilesCursor.Next() {
|
||||
storeNameKey, err := flatFilesCursor.Key()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
storeName := string(storeNameKey)
|
||||
|
||||
currentLocation, err := flatFilesCursor.Value()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
flatFiles[storeName] = currentLocation
|
||||
}
|
||||
return flatFiles, nil
|
||||
}
|
||||
|
||||
// tryRepair attempts to sync the store with the current location value.
|
||||
// Possible scenarios:
|
||||
// a. currentLocation and the store are synced. Rollback does nothing.
|
||||
// b. currentLocation is smaller than the store's location. Rollback truncates
|
||||
// the store.
|
||||
// c. currentLocation is greater than the store's location. Rollback returns an
|
||||
// error. This indicates definite database corruption and is irrecoverable.
|
||||
func (db *ffldb) tryRepair(storeName string, currentLocation []byte) error {
|
||||
return db.flatFileDB.Rollback(storeName, currentLocation)
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
116
database/ffldb/ldb/cursor.go
Normal file
116
database/ffldb/ldb/cursor.go
Normal file
@@ -0,0 +1,116 @@
|
||||
package ldb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb/iterator"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
// LevelDBCursor is a thin wrapper around native leveldb iterators.
|
||||
type LevelDBCursor struct {
|
||||
ldbIterator iterator.Iterator
|
||||
prefix []byte
|
||||
|
||||
isClosed bool
|
||||
}
|
||||
|
||||
// Cursor begins a new cursor over the given prefix.
|
||||
func (db *LevelDB) Cursor(prefix []byte) *LevelDBCursor {
|
||||
ldbIterator := db.ldb.NewIterator(util.BytesPrefix(prefix), nil)
|
||||
return &LevelDBCursor{
|
||||
ldbIterator: ldbIterator,
|
||||
prefix: prefix,
|
||||
isClosed: false,
|
||||
}
|
||||
}
|
||||
|
||||
// Next moves the iterator to the next key/value pair. It returns whether the
|
||||
// iterator is exhausted. Returns false if the cursor is closed.
|
||||
func (c *LevelDBCursor) Next() bool {
|
||||
if c.isClosed {
|
||||
return false
|
||||
}
|
||||
return c.ldbIterator.Next()
|
||||
}
|
||||
|
||||
// First moves the iterator to the first key/value pair. It returns false if
|
||||
// such a pair does not exist or if the cursor is closed.
|
||||
func (c *LevelDBCursor) First() bool {
|
||||
if c.isClosed {
|
||||
return false
|
||||
}
|
||||
return c.ldbIterator.First()
|
||||
}
|
||||
|
||||
// Seek moves the iterator to the first key/value pair whose key is greater
|
||||
// than or equal to the given key. It returns ErrNotFound if such pair does not
|
||||
// exist.
|
||||
func (c *LevelDBCursor) Seek(key []byte) error {
|
||||
if c.isClosed {
|
||||
return errors.New("cannot seek a closed cursor")
|
||||
}
|
||||
|
||||
notFoundErr := errors.Wrapf(database.ErrNotFound, "key %s not "+
|
||||
"found", hex.EncodeToString(key))
|
||||
found := c.ldbIterator.Seek(key)
|
||||
if !found {
|
||||
return notFoundErr
|
||||
}
|
||||
|
||||
// Use c.ldbIterator.Key because c.Key removes the prefix from the key
|
||||
currentKey := c.ldbIterator.Key()
|
||||
if currentKey == nil {
|
||||
return notFoundErr
|
||||
}
|
||||
if !bytes.Equal(currentKey, key) {
|
||||
return notFoundErr
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Key returns the key of the current key/value pair, or ErrNotFound if done.
|
||||
// Note that the key is trimmed to not include the prefix the cursor was opened
|
||||
// with. The caller should not modify the contents of the returned slice, and
|
||||
// its contents may change on the next call to Next.
|
||||
func (c *LevelDBCursor) Key() ([]byte, error) {
|
||||
if c.isClosed {
|
||||
return nil, errors.New("cannot get the key of a closed cursor")
|
||||
}
|
||||
fullKeyPath := c.ldbIterator.Key()
|
||||
if fullKeyPath == nil {
|
||||
return nil, errors.Wrapf(database.ErrNotFound, "cannot get the "+
|
||||
"key of a done cursor")
|
||||
}
|
||||
key := bytes.TrimPrefix(fullKeyPath, c.prefix)
|
||||
return key, nil
|
||||
}
|
||||
|
||||
// Value returns the value of the current key/value pair, or ErrNotFound if done.
|
||||
// The caller should not modify the contents of the returned slice, and its
|
||||
// contents may change on the next call to Next.
|
||||
func (c *LevelDBCursor) Value() ([]byte, error) {
|
||||
if c.isClosed {
|
||||
return nil, errors.New("cannot get the value of a closed cursor")
|
||||
}
|
||||
value := c.ldbIterator.Value()
|
||||
if value == nil {
|
||||
return nil, errors.Wrapf(database.ErrNotFound, "cannot get the "+
|
||||
"value of a done cursor")
|
||||
}
|
||||
return value, nil
|
||||
}
|
||||
|
||||
// Close releases associated resources.
|
||||
func (c *LevelDBCursor) Close() error {
|
||||
if c.isClosed {
|
||||
return errors.New("cannot close an already closed cursor")
|
||||
}
|
||||
c.isClosed = true
|
||||
|
||||
c.ldbIterator.Release()
|
||||
return nil
|
||||
}
|
||||
89
database/ffldb/ldb/leveldb.go
Normal file
89
database/ffldb/ldb/leveldb.go
Normal file
@@ -0,0 +1,89 @@
|
||||
package ldb
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
ldbErrors "github.com/syndtr/goleveldb/leveldb/errors"
|
||||
)
|
||||
|
||||
// LevelDB defines a thin wrapper around leveldb.
|
||||
type LevelDB struct {
|
||||
ldb *leveldb.DB
|
||||
}
|
||||
|
||||
// NewLevelDB opens a leveldb instance defined by the given path.
|
||||
func NewLevelDB(path string) (*LevelDB, error) {
|
||||
// Open leveldb. If it doesn't exist, create it.
|
||||
ldb, err := leveldb.OpenFile(path, nil)
|
||||
|
||||
// If the database is corrupted, attempt to recover.
|
||||
if _, corrupted := err.(*ldbErrors.ErrCorrupted); corrupted {
|
||||
log.Warnf("LevelDB corruption detected for path %s: %s",
|
||||
path, err)
|
||||
var recoverErr error
|
||||
ldb, recoverErr = leveldb.RecoverFile(path, nil)
|
||||
if recoverErr != nil {
|
||||
return nil, errors.Wrapf(err, "failed recovering from "+
|
||||
"database corruption: %s", recoverErr)
|
||||
}
|
||||
log.Warnf("LevelDB recovered from corruption for path %s",
|
||||
path)
|
||||
}
|
||||
|
||||
// If the database cannot be opened for any other
|
||||
// reason, return the error as-is.
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
db := &LevelDB{
|
||||
ldb: ldb,
|
||||
}
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// Close closes the leveldb instance.
|
||||
func (db *LevelDB) Close() error {
|
||||
err := db.ldb.Close()
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
// Put sets the value for the given key. It overwrites
|
||||
// any previous value for that key.
|
||||
func (db *LevelDB) Put(key []byte, value []byte) error {
|
||||
err := db.ldb.Put(key, value, nil)
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
// Get gets the value for the given key. It returns
|
||||
// ErrNotFound if the given key does not exist.
|
||||
func (db *LevelDB) Get(key []byte) ([]byte, error) {
|
||||
data, err := db.ldb.Get(key, nil)
|
||||
if err != nil {
|
||||
if errors.Is(err, leveldb.ErrNotFound) {
|
||||
return nil, errors.Wrapf(database.ErrNotFound,
|
||||
"key %s not found", hex.EncodeToString(key))
|
||||
}
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Has returns true if the database does contains the
|
||||
// given key.
|
||||
func (db *LevelDB) Has(key []byte) (bool, error) {
|
||||
exists, err := db.ldb.Has(key, nil)
|
||||
if err != nil {
|
||||
return false, errors.WithStack(err)
|
||||
}
|
||||
return exists, nil
|
||||
}
|
||||
|
||||
// Delete deletes the value for the given key. Will not
|
||||
// return an error if the key doesn't exist.
|
||||
func (db *LevelDB) Delete(key []byte) error {
|
||||
err := db.ldb.Delete(key, nil)
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
162
database/ffldb/ldb/leveldb_test.go
Normal file
162
database/ffldb/ldb/leveldb_test.go
Normal file
@@ -0,0 +1,162 @@
|
||||
package ldb
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"io/ioutil"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestLevelDBSanity(t *testing.T) {
|
||||
// Open a test db
|
||||
path, err := ioutil.TempDir("", "TestLevelDBSanity")
|
||||
if err != nil {
|
||||
t.Fatalf("TestLevelDBSanity: TempDir unexpectedly "+
|
||||
"failed: %s", err)
|
||||
}
|
||||
ldb, err := NewLevelDB(path)
|
||||
if err != nil {
|
||||
t.Fatalf("TestLevelDBSanity: NewLevelDB "+
|
||||
"unexpectedly failed: %s", err)
|
||||
}
|
||||
defer func() {
|
||||
err := ldb.Close()
|
||||
if err != nil {
|
||||
t.Fatalf("TestLevelDBSanity: Close "+
|
||||
"unexpectedly failed: %s", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Put something into the db
|
||||
key := []byte("key")
|
||||
putData := []byte("Hello world!")
|
||||
err = ldb.Put(key, putData)
|
||||
if err != nil {
|
||||
t.Fatalf("TestLevelDBSanity: Put returned "+
|
||||
"unexpected error: %s", err)
|
||||
}
|
||||
|
||||
// Get from the key previously put to
|
||||
getData, err := ldb.Get(key)
|
||||
if err != nil {
|
||||
t.Fatalf("TestLevelDBSanity: Get returned "+
|
||||
"unexpected error: %s", err)
|
||||
}
|
||||
|
||||
// Make sure that the put data and the get data are equal
|
||||
if !reflect.DeepEqual(getData, putData) {
|
||||
t.Fatalf("TestLevelDBSanity: get data and "+
|
||||
"put data are not equal. Put: %s, got: %s",
|
||||
string(putData), string(getData))
|
||||
}
|
||||
}
|
||||
|
||||
func TestLevelDBTransactionSanity(t *testing.T) {
|
||||
// Open a test db
|
||||
path, err := ioutil.TempDir("", "TestLevelDBTransactionSanity")
|
||||
if err != nil {
|
||||
t.Fatalf("TestLevelDBTransactionSanity: TempDir unexpectedly "+
|
||||
"failed: %s", err)
|
||||
}
|
||||
ldb, err := NewLevelDB(path)
|
||||
if err != nil {
|
||||
t.Fatalf("TestLevelDBTransactionSanity: NewLevelDB "+
|
||||
"unexpectedly failed: %s", err)
|
||||
}
|
||||
defer func() {
|
||||
err := ldb.Close()
|
||||
if err != nil {
|
||||
t.Fatalf("TestLevelDBTransactionSanity: Close "+
|
||||
"unexpectedly failed: %s", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Case 1. Write in tx and then read directly from the DB
|
||||
// Begin a new transaction
|
||||
tx, err := ldb.Begin()
|
||||
if err != nil {
|
||||
t.Fatalf("TestLevelDBTransactionSanity: Begin "+
|
||||
"unexpectedly failed: %s", err)
|
||||
}
|
||||
|
||||
// Put something into the transaction
|
||||
key := []byte("key")
|
||||
putData := []byte("Hello world!")
|
||||
err = tx.Put(key, putData)
|
||||
if err != nil {
|
||||
t.Fatalf("TestLevelDBTransactionSanity: Put "+
|
||||
"returned unexpected error: %s", err)
|
||||
}
|
||||
|
||||
// Get from the key previously put to. Since the tx is not
|
||||
// yet committed, this should return ErrNotFound.
|
||||
getData, err := ldb.Get(key)
|
||||
if err == nil {
|
||||
t.Fatalf("TestLevelDBTransactionSanity: Get " +
|
||||
"unexpectedly succeeded")
|
||||
}
|
||||
if !database.IsNotFoundError(err) {
|
||||
t.Fatalf("TestLevelDBTransactionSanity: Get "+
|
||||
"returned wrong error: %s", err)
|
||||
}
|
||||
|
||||
// Commit the transaction
|
||||
err = tx.Commit()
|
||||
if err != nil {
|
||||
t.Fatalf("TestLevelDBTransactionSanity: Commit "+
|
||||
"returned unexpected error: %s", err)
|
||||
}
|
||||
|
||||
// Get from the key previously put to. Now that the tx was
|
||||
// committed, this should succeed.
|
||||
getData, err = ldb.Get(key)
|
||||
if err != nil {
|
||||
t.Fatalf("TestLevelDBTransactionSanity: Get "+
|
||||
"returned unexpected error: %s", err)
|
||||
}
|
||||
|
||||
// Make sure that the put data and the get data are equal
|
||||
if !reflect.DeepEqual(getData, putData) {
|
||||
t.Fatalf("TestLevelDBTransactionSanity: get "+
|
||||
"data and put data are not equal. Put: %s, got: %s",
|
||||
string(putData), string(getData))
|
||||
}
|
||||
|
||||
// Case 2. Write directly to the DB and then read from a tx
|
||||
// Put something into the db
|
||||
key = []byte("key2")
|
||||
putData = []byte("Goodbye world!")
|
||||
err = ldb.Put(key, putData)
|
||||
if err != nil {
|
||||
t.Fatalf("TestLevelDBTransactionSanity: Put "+
|
||||
"returned unexpected error: %s", err)
|
||||
}
|
||||
|
||||
// Begin a new transaction
|
||||
tx, err = ldb.Begin()
|
||||
if err != nil {
|
||||
t.Fatalf("TestLevelDBTransactionSanity: Begin "+
|
||||
"unexpectedly failed: %s", err)
|
||||
}
|
||||
|
||||
// Get from the key previously put to
|
||||
getData, err = tx.Get(key)
|
||||
if err != nil {
|
||||
t.Fatalf("TestLevelDBTransactionSanity: Get "+
|
||||
"returned unexpected error: %s", err)
|
||||
}
|
||||
|
||||
// Make sure that the put data and the get data are equal
|
||||
if !reflect.DeepEqual(getData, putData) {
|
||||
t.Fatalf("TestLevelDBTransactionSanity: get "+
|
||||
"data and put data are not equal. Put: %s, got: %s",
|
||||
string(putData), string(getData))
|
||||
}
|
||||
|
||||
// Rollback the transaction
|
||||
err = tx.Rollback()
|
||||
if err != nil {
|
||||
t.Fatalf("TestLevelDBTransactionSanity: rollback "+
|
||||
"returned unexpected error: %s", err)
|
||||
}
|
||||
}
|
||||
5
database/ffldb/ldb/log.go
Normal file
5
database/ffldb/ldb/log.go
Normal file
@@ -0,0 +1,5 @@
|
||||
package ldb
|
||||
|
||||
import "github.com/kaspanet/kaspad/logger"
|
||||
|
||||
var log, _ = logger.Get(logger.SubsystemTags.KSDB)
|
||||
140
database/ffldb/ldb/transaction.go
Normal file
140
database/ffldb/ldb/transaction.go
Normal file
@@ -0,0 +1,140 @@
|
||||
package ldb
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
)
|
||||
|
||||
// LevelDBTransaction is a thin wrapper around native leveldb
|
||||
// batches and snapshots. It supports both get and put.
|
||||
//
|
||||
// Snapshots provide a frozen view of the database at the moment
|
||||
// the transaction begins. On the other hand, batches provide a
|
||||
// mechanism to combine several database writes into one write,
|
||||
// which seamlessly rolls back the database in case any individual
|
||||
// write fails. Together the two forms a logic unit similar
|
||||
// to what one might expect from a classic database transaction.
|
||||
//
|
||||
// Note: Transactions provide data consistency over the state of
|
||||
// the database as it was when the transaction started. As it's
|
||||
// currently implemented, if one puts data into the transaction
|
||||
// then it will not be available to get within the same transaction.
|
||||
type LevelDBTransaction struct {
|
||||
db *LevelDB
|
||||
snapshot *leveldb.Snapshot
|
||||
batch *leveldb.Batch
|
||||
isClosed bool
|
||||
}
|
||||
|
||||
// Begin begins a new transaction.
|
||||
func (db *LevelDB) Begin() (*LevelDBTransaction, error) {
|
||||
snapshot, err := db.ldb.GetSnapshot()
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
batch := new(leveldb.Batch)
|
||||
|
||||
transaction := &LevelDBTransaction{
|
||||
db: db,
|
||||
snapshot: snapshot,
|
||||
batch: batch,
|
||||
isClosed: false,
|
||||
}
|
||||
return transaction, nil
|
||||
}
|
||||
|
||||
// Commit commits whatever changes were made to the database
|
||||
// within this transaction.
|
||||
func (tx *LevelDBTransaction) Commit() error {
|
||||
if tx.isClosed {
|
||||
return errors.New("cannot commit a closed transaction")
|
||||
}
|
||||
|
||||
tx.isClosed = true
|
||||
tx.snapshot.Release()
|
||||
return tx.db.ldb.Write(tx.batch, nil)
|
||||
}
|
||||
|
||||
// Rollback rolls back whatever changes were made to the
|
||||
// database within this transaction.
|
||||
func (tx *LevelDBTransaction) Rollback() error {
|
||||
if tx.isClosed {
|
||||
return errors.New("cannot rollback a closed transaction")
|
||||
}
|
||||
|
||||
tx.isClosed = true
|
||||
tx.snapshot.Release()
|
||||
tx.batch.Reset()
|
||||
return nil
|
||||
}
|
||||
|
||||
// RollbackUnlessClosed rolls back changes that were made to
|
||||
// the database within the transaction, unless the transaction
|
||||
// had already been closed using either Rollback or Commit.
|
||||
func (tx *LevelDBTransaction) RollbackUnlessClosed() error {
|
||||
if tx.isClosed {
|
||||
return nil
|
||||
}
|
||||
return tx.Rollback()
|
||||
}
|
||||
|
||||
// Put sets the value for the given key. It overwrites
|
||||
// any previous value for that key.
|
||||
func (tx *LevelDBTransaction) Put(key []byte, value []byte) error {
|
||||
if tx.isClosed {
|
||||
return errors.New("cannot put into a closed transaction")
|
||||
}
|
||||
|
||||
tx.batch.Put(key, value)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get gets the value for the given key. It returns
|
||||
// ErrNotFound if the given key does not exist.
|
||||
func (tx *LevelDBTransaction) Get(key []byte) ([]byte, error) {
|
||||
if tx.isClosed {
|
||||
return nil, errors.New("cannot get from a closed transaction")
|
||||
}
|
||||
|
||||
data, err := tx.snapshot.Get(key, nil)
|
||||
if err != nil {
|
||||
if errors.Is(err, leveldb.ErrNotFound) {
|
||||
return nil, errors.Wrapf(database.ErrNotFound,
|
||||
"key %s not found", hex.EncodeToString(key))
|
||||
}
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Has returns true if the database does contains the
|
||||
// given key.
|
||||
func (tx *LevelDBTransaction) Has(key []byte) (bool, error) {
|
||||
if tx.isClosed {
|
||||
return false, errors.New("cannot has from a closed transaction")
|
||||
}
|
||||
|
||||
return tx.snapshot.Has(key, nil)
|
||||
}
|
||||
|
||||
// Delete deletes the value for the given key. Will not
|
||||
// return an error if the key doesn't exist.
|
||||
func (tx *LevelDBTransaction) Delete(key []byte) error {
|
||||
if tx.isClosed {
|
||||
return errors.New("cannot delete from a closed transaction")
|
||||
}
|
||||
|
||||
tx.batch.Delete(key)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Cursor begins a new cursor over the given bucket.
|
||||
func (tx *LevelDBTransaction) Cursor(bucket []byte) (*LevelDBCursor, error) {
|
||||
if tx.isClosed {
|
||||
return nil, errors.New("cannot open a cursor from a closed transaction")
|
||||
}
|
||||
|
||||
return tx.db.Cursor(bucket), nil
|
||||
}
|
||||
@@ -1,58 +0,0 @@
|
||||
// Copyright (c) 2015-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ffldb
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/database/internal/treap"
|
||||
"github.com/syndtr/goleveldb/leveldb/iterator"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
// ldbTreapIter wraps a treap iterator to provide the additional functionality
|
||||
// needed to satisfy the leveldb iterator.Iterator interface.
|
||||
type ldbTreapIter struct {
|
||||
*treap.Iterator
|
||||
tx *transaction
|
||||
released bool
|
||||
}
|
||||
|
||||
// Enforce ldbTreapIter implements the leveldb iterator.Iterator interface.
|
||||
var _ iterator.Iterator = (*ldbTreapIter)(nil)
|
||||
|
||||
// Error is only provided to satisfy the iterator interface as there are no
|
||||
// errors for this memory-only structure.
|
||||
//
|
||||
// This is part of the leveldb iterator.Iterator interface implementation.
|
||||
func (iter *ldbTreapIter) Error() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetReleaser is only provided to satisfy the iterator interface as there is no
|
||||
// need to override it.
|
||||
//
|
||||
// This is part of the leveldb iterator.Iterator interface implementation.
|
||||
func (iter *ldbTreapIter) SetReleaser(releaser util.Releaser) {
|
||||
}
|
||||
|
||||
// Release releases the iterator by removing the underlying treap iterator from
|
||||
// the list of active iterators against the pending keys treap.
|
||||
//
|
||||
// This is part of the leveldb iterator.Iterator interface implementation.
|
||||
func (iter *ldbTreapIter) Release() {
|
||||
if !iter.released {
|
||||
iter.tx.removeActiveIter(iter.Iterator)
|
||||
iter.released = true
|
||||
}
|
||||
}
|
||||
|
||||
// newLdbTreapIter creates a new treap iterator for the given slice against the
|
||||
// pending keys for the passed transaction and returns it wrapped in an
|
||||
// ldbTreapIter so it can be used as a leveldb iterator. It also adds the new
|
||||
// iterator to the list of active iterators for the transaction.
|
||||
func newLdbTreapIter(tx *transaction, slice *util.Range) *ldbTreapIter {
|
||||
iter := tx.pendingKeys.Iterator(slice.Start, slice.Limit)
|
||||
tx.addActiveIter(iter)
|
||||
return &ldbTreapIter{Iterator: iter, tx: tx}
|
||||
}
|
||||
@@ -1,11 +1,5 @@
|
||||
// Copyright (c) 2013-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ffldb
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/logger"
|
||||
)
|
||||
import "github.com/kaspanet/kaspad/logger"
|
||||
|
||||
var log, _ = logger.Get(logger.SubsystemTags.KSDB)
|
||||
|
||||
@@ -1,163 +0,0 @@
|
||||
// Copyright (c) 2015-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This file is part of the ffldb package rather than the ffldb_test package as
|
||||
// it is part of the whitebox testing.
|
||||
|
||||
package ffldb
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Errors used for the mock file.
|
||||
var (
|
||||
// errMockFileClosed is used to indicate a mock file is closed.
|
||||
errMockFileClosed = errors.New("file closed")
|
||||
|
||||
// errInvalidOffset is used to indicate an offset that is out of range
|
||||
// for the file was provided.
|
||||
errInvalidOffset = errors.New("invalid offset")
|
||||
|
||||
// errSyncFail is used to indicate simulated sync failure.
|
||||
errSyncFail = errors.New("simulated sync failure")
|
||||
)
|
||||
|
||||
// mockFile implements the filer interface and used in order to force failures
|
||||
// the database code related to reading and writing from the flat block files.
|
||||
// A maxSize of -1 is unlimited.
|
||||
type mockFile struct {
|
||||
sync.RWMutex
|
||||
maxSize int64
|
||||
data []byte
|
||||
forceSyncErr bool
|
||||
closed bool
|
||||
}
|
||||
|
||||
// Close closes the mock file without releasing any data associated with it.
|
||||
// This allows it to be "reopened" without losing the data.
|
||||
//
|
||||
// This is part of the filer implementation.
|
||||
func (f *mockFile) Close() error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
|
||||
if f.closed {
|
||||
return errMockFileClosed
|
||||
}
|
||||
f.closed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReadAt reads len(b) bytes from the mock file starting at byte offset off. It
|
||||
// returns the number of bytes read and the error, if any. ReadAt always
|
||||
// returns a non-nil error when n < len(b). At end of file, that error is
|
||||
// io.EOF.
|
||||
//
|
||||
// This is part of the filer implementation.
|
||||
func (f *mockFile) ReadAt(b []byte, off int64) (int, error) {
|
||||
f.RLock()
|
||||
defer f.RUnlock()
|
||||
|
||||
if f.closed {
|
||||
return 0, errMockFileClosed
|
||||
}
|
||||
maxSize := int64(len(f.data))
|
||||
if f.maxSize > -1 && maxSize > f.maxSize {
|
||||
maxSize = f.maxSize
|
||||
}
|
||||
if off < 0 || off > maxSize {
|
||||
return 0, errInvalidOffset
|
||||
}
|
||||
|
||||
// Limit to the max size field, if set.
|
||||
numToRead := int64(len(b))
|
||||
endOffset := off + numToRead
|
||||
if endOffset > maxSize {
|
||||
numToRead = maxSize - off
|
||||
}
|
||||
|
||||
copy(b, f.data[off:off+numToRead])
|
||||
if numToRead < int64(len(b)) {
|
||||
return int(numToRead), io.EOF
|
||||
}
|
||||
return int(numToRead), nil
|
||||
}
|
||||
|
||||
// Truncate changes the size of the mock file.
|
||||
//
|
||||
// This is part of the filer implementation.
|
||||
func (f *mockFile) Truncate(size int64) error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
|
||||
if f.closed {
|
||||
return errMockFileClosed
|
||||
}
|
||||
maxSize := int64(len(f.data))
|
||||
if f.maxSize > -1 && maxSize > f.maxSize {
|
||||
maxSize = f.maxSize
|
||||
}
|
||||
if size > maxSize {
|
||||
return errInvalidOffset
|
||||
}
|
||||
|
||||
f.data = f.data[:size]
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write writes len(b) bytes to the mock file. It returns the number of bytes
|
||||
// written and an error, if any. Write returns a non-nil error any time
|
||||
// n != len(b).
|
||||
//
|
||||
// This is part of the filer implementation.
|
||||
func (f *mockFile) WriteAt(b []byte, off int64) (int, error) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
|
||||
if f.closed {
|
||||
return 0, errMockFileClosed
|
||||
}
|
||||
maxSize := f.maxSize
|
||||
if maxSize < 0 {
|
||||
maxSize = 100 * 1024 // 100KiB
|
||||
}
|
||||
if off < 0 || off > maxSize {
|
||||
return 0, errInvalidOffset
|
||||
}
|
||||
|
||||
// Limit to the max size field, if set, and grow the slice if needed.
|
||||
numToWrite := int64(len(b))
|
||||
if off+numToWrite > maxSize {
|
||||
numToWrite = maxSize - off
|
||||
}
|
||||
if off+numToWrite > int64(len(f.data)) {
|
||||
newData := make([]byte, off+numToWrite)
|
||||
copy(newData, f.data)
|
||||
f.data = newData
|
||||
}
|
||||
|
||||
copy(f.data[off:], b[:numToWrite])
|
||||
if numToWrite < int64(len(b)) {
|
||||
return int(numToWrite), io.EOF
|
||||
}
|
||||
return int(numToWrite), nil
|
||||
}
|
||||
|
||||
// Sync doesn't do anything for mock files. However, it will return an error if
|
||||
// the mock file's forceSyncErr flag is set.
|
||||
//
|
||||
// This is part of the filer implementation.
|
||||
func (f *mockFile) Sync() error {
|
||||
if f.forceSyncErr {
|
||||
return errSyncFail
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Ensure the mockFile type implements the filer interface.
|
||||
var _ filer = (*mockFile)(nil)
|
||||
@@ -1,117 +0,0 @@
|
||||
// Copyright (c) 2015-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ffldb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
)
|
||||
|
||||
// The serialized write cursor location format is:
|
||||
//
|
||||
// [0:4] Block file (4 bytes)
|
||||
// [4:8] File offset (4 bytes)
|
||||
// [8:12] Castagnoli CRC-32 checksum (4 bytes)
|
||||
|
||||
// serializeWriteRow serialize the current block file and offset where new
|
||||
// will be written into a format suitable for storage into the metadata.
|
||||
func serializeWriteRow(curBlockFileNum, curFileOffset uint32) []byte {
|
||||
var serializedRow [12]byte
|
||||
byteOrder.PutUint32(serializedRow[0:4], curBlockFileNum)
|
||||
byteOrder.PutUint32(serializedRow[4:8], curFileOffset)
|
||||
checksum := crc32.Checksum(serializedRow[:8], castagnoli)
|
||||
byteOrder.PutUint32(serializedRow[8:12], checksum)
|
||||
return serializedRow[:]
|
||||
}
|
||||
|
||||
// deserializeWriteRow deserializes the write cursor location stored in the
|
||||
// metadata. Returns ErrCorruption if the checksum of the entry doesn't match.
|
||||
func deserializeWriteRow(writeRow []byte) (uint32, uint32, error) {
|
||||
// Ensure the checksum matches. The checksum is at the end.
|
||||
gotChecksum := crc32.Checksum(writeRow[:8], castagnoli)
|
||||
wantChecksumBytes := writeRow[8:12]
|
||||
wantChecksum := byteOrder.Uint32(wantChecksumBytes)
|
||||
if gotChecksum != wantChecksum {
|
||||
str := fmt.Sprintf("metadata for write cursor does not match "+
|
||||
"the expected checksum - got %d, want %d", gotChecksum,
|
||||
wantChecksum)
|
||||
return 0, 0, makeDbErr(database.ErrCorruption, str, nil)
|
||||
}
|
||||
|
||||
fileNum := byteOrder.Uint32(writeRow[0:4])
|
||||
fileOffset := byteOrder.Uint32(writeRow[4:8])
|
||||
return fileNum, fileOffset, nil
|
||||
}
|
||||
|
||||
// reconcileDB reconciles the metadata with the flat block files on disk. It
// will also initialize the underlying database if the create flag is set.
//
// On success it returns pdb itself (as a database.DB). Recovery logic is
// deliberately ordered: the write cursor is read from metadata first, then
// compared against the cursor found by scanning the flat files.
func reconcileDB(pdb *db, create bool) (database.DB, error) {
	// Perform initial internal bucket and value creation during database
	// creation.
	if create {
		if err := initDB(pdb.cache.ldb); err != nil {
			return nil, err
		}
	}

	// Load the current write cursor position from the metadata.
	var curFileNum, curOffset uint32
	err := pdb.View(func(dbTx database.Tx) error {
		// A missing write row is unrecoverable corruption: it is written
		// during initDB and updated on every block write.
		writeRow := dbTx.Metadata().Get(writeLocKeyName)
		if writeRow == nil {
			str := "write cursor does not exist"
			return makeDbErr(database.ErrCorruption, str, nil)
		}

		var err error
		curFileNum, curOffset, err = deserializeWriteRow(writeRow)
		return err
	})
	if err != nil {
		return nil, err
	}

	// When the write cursor position found by scanning the block files on
	// disk is AFTER the position the metadata believes to be true, truncate
	// the files on disk to match the metadata. This can be a fairly common
	// occurrence in unclean shutdown scenarios while the block files are in
	// the middle of being written. Since the metadata isn't updated until
	// after the block data is written, this is effectively just a rollback
	// to the known good point before the unclean shutdown.
	wc := pdb.store.writeCursor
	if wc.curFileNum > curFileNum || (wc.curFileNum == curFileNum &&
		wc.curOffset > curOffset) {

		log.Info("Detected unclean shutdown - Repairing...")
		log.Debugf("Metadata claims file %d, offset %d. Block data is "+
			"at file %d, offset %d", curFileNum, curOffset,
			wc.curFileNum, wc.curOffset)
		pdb.store.handleRollback(curFileNum, curOffset)
		log.Infof("Database sync complete")
	}

	// When the write cursor position found by scanning the block files on
	// disk is BEFORE the position the metadata believes to be true, return
	// a corruption error. Since sync is called after each block is written
	// and before the metadata is updated, this should only happen in the
	// case of missing, deleted, or truncated block files, which generally
	// is not an easily recoverable scenario. In the future, it might be
	// possible to rescan and rebuild the metadata from the block files,
	// however, that would need to happen with coordination from a higher
	// layer since it could invalidate other metadata.
	if wc.curFileNum < curFileNum || (wc.curFileNum == curFileNum &&
		wc.curOffset < curOffset) {

		str := fmt.Sprintf("metadata claims file %d, offset %d, but "+
			"block data is at file %d, offset %d", curFileNum,
			curOffset, wc.curFileNum, wc.curOffset)
		log.Warnf("***Database corruption detected***: %s", str)
		return nil, makeDbErr(database.ErrCorruption, str, nil)
	}

	return pdb, nil
}
|
||||
@@ -1,144 +0,0 @@
|
||||
package ffldb
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestSerializeWriteRow(t *testing.T) {
|
||||
tests := []struct {
|
||||
curBlockFileNum uint32
|
||||
curFileOffset uint32
|
||||
expectedWriteRow []byte
|
||||
}{
|
||||
// WriteRow format:
|
||||
// First 4 bytes: curBlockFileNum
|
||||
// Next 4 bytes: curFileOffset
|
||||
// Next 4 bytes: Castagnoli CRC-32 checksum
|
||||
// One can easily calculate checksums using the following code:
|
||||
// https://play.golang.org/p/zoMKT-ORyF9
|
||||
{0, 0, []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8A, 0xB2, 0x28, 0x8C}},
|
||||
{10, 11, []byte{0x0A, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0xC1, 0xA6, 0x0D, 0xC8}},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
actualWriteRow := serializeWriteRow(test.curBlockFileNum, test.curFileOffset)
|
||||
|
||||
if !reflect.DeepEqual(test.expectedWriteRow, actualWriteRow) {
|
||||
t.Errorf("TestSerializeWriteRow: %d: Expected: %v, but got: %v",
|
||||
i, test.expectedWriteRow, actualWriteRow)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeserializeWriteRow(t *testing.T) {
|
||||
tests := []struct {
|
||||
writeRow []byte
|
||||
expectedCurBlockFileNum uint32
|
||||
expectedCurFileOffset uint32
|
||||
expectedError bool
|
||||
}{
|
||||
// WriteRow format:
|
||||
// First 4 bytes: curBlockFileNum
|
||||
// Next 4 bytes: curFileOffset
|
||||
// Next 4 bytes: Castagnoli CRC-32 checksum
|
||||
// One can easily calculate checksums using the following code:
|
||||
// https://play.golang.org/p/zoMKT-ORyF9
|
||||
{[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8A, 0xB2, 0x28, 0x8C}, 0, 0, false},
|
||||
{[]byte{0x0A, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0xC1, 0xA6, 0x0D, 0xC8}, 10, 11, false},
|
||||
{[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8A, 0xB2, 0x28, 0x8D}, 0, 0, true},
|
||||
{[]byte{0x0A, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, 0, 0, true},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
actualCurBlockFileNum, actualCurFileOffset, err := deserializeWriteRow(test.writeRow)
|
||||
|
||||
if (err != nil) != test.expectedError {
|
||||
t.Errorf("TestDeserializeWriteRow: %d: Expected error status: %t, but got: %t",
|
||||
i, test.expectedError, err != nil)
|
||||
}
|
||||
|
||||
if test.expectedCurBlockFileNum != actualCurBlockFileNum {
|
||||
t.Errorf("TestDeserializeWriteRow: %d: Expected curBlockFileNum: %d, but got: %d",
|
||||
i, test.expectedCurBlockFileNum, actualCurBlockFileNum)
|
||||
}
|
||||
|
||||
if test.expectedCurFileOffset != actualCurFileOffset {
|
||||
t.Errorf("TestDeserializeWriteRow: %d: Expected curFileOffset: %d, but got: %d",
|
||||
i, test.expectedCurFileOffset, actualCurFileOffset)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// setWriteRow is a low-level helper method to update the write row in the
|
||||
// metadata bucket to enable certain test-cases in TestReconcileErrors
|
||||
// if writeRow = nil deletes the write row altogether
|
||||
func setWriteRow(pdb *db, writeRow []byte, t *testing.T) {
|
||||
tx, err := pdb.begin(true)
|
||||
if err != nil {
|
||||
t.Fatalf("TestReconcileErrors: Error getting tx for setting "+
|
||||
"writeLoc in metadata: %s", err)
|
||||
}
|
||||
|
||||
if writeRow == nil {
|
||||
tx.metaBucket.Delete(writeLocKeyName)
|
||||
if err != nil {
|
||||
t.Fatalf("TestReconcileErrors: Error deleting writeLoc from metadata: %s",
|
||||
err)
|
||||
}
|
||||
} else {
|
||||
tx.metaBucket.Put(writeLocKeyName, writeRow)
|
||||
if err != nil {
|
||||
t.Fatalf("TestReconcileErrors: Error updating writeLoc in metadata: %s",
|
||||
err)
|
||||
}
|
||||
}
|
||||
|
||||
err = pdb.cache.commitTx(tx)
|
||||
if err != nil {
|
||||
t.Fatalf("TestReconcileErrors: Error commiting the update of "+
|
||||
"writeLoc in metadata: %s", err)
|
||||
}
|
||||
|
||||
pdb.writeLock.Unlock()
|
||||
pdb.closeLock.RUnlock()
|
||||
}
|
||||
|
||||
// TestReconcileErrors tests all error-cases in reconclieDB.
// The non-error-cases are tested in the more general tests.
func TestReconcileErrors(t *testing.T) {
	// Set-up tests
	pdb := newTestDb("TestReconcileErrors", t)

	// Test without writeLoc: a missing write row must be reported as an
	// error by reconcileDB.
	setWriteRow(pdb, nil, t)
	_, err := reconcileDB(pdb, false)
	if err == nil {
		t.Errorf("TestReconcileErrors: ReconcileDB() didn't error out when " +
			"running without a writeRowLoc")
	}

	// Test with writeLoc in metadata after the actual cursor position.
	// Both a larger file number and a larger offset must be rejected.
	setWriteRow(pdb, serializeWriteRow(1, 0), t)
	_, err = reconcileDB(pdb, false)
	if err == nil {
		t.Errorf("TestReconcileErrors: ReconcileDB() didn't error out when " +
			"curBlockFileNum after the actual cursor position")
	}
	setWriteRow(pdb, serializeWriteRow(0, 1), t)
	_, err = reconcileDB(pdb, false)
	if err == nil {
		t.Errorf("TestReconcileErrors: ReconcileDB() didn't error out when " +
			"curFileOffset after the actual cursor position")
	}

	// Restore previous writeRow
	setWriteRow(pdb, serializeWriteRow(0, 0), t)

	// Test create with closed DB to force initDB to fail
	pdb.Close()
	_, err = reconcileDB(pdb, true)
	if err == nil {
		t.Errorf("ReconcileDB didn't error out when running with closed db and create = true")
	}
}
|
||||
92
database/ffldb/transaction.go
Normal file
92
database/ffldb/transaction.go
Normal file
@@ -0,0 +1,92 @@
|
||||
package ffldb
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/database/ffldb/ff"
|
||||
"github.com/kaspanet/kaspad/database/ffldb/ldb"
|
||||
)
|
||||
|
||||
// transaction is an ffldb transaction.
//
// Note: Transactions provide data consistency over the state of
// the database as it was when the transaction started. There is
// NO guarantee that if one puts data into the transaction then
// it will be available to get within the same transaction.
type transaction struct {
	// ldbTx handles all key/value operations.
	ldbTx *ldb.LevelDBTransaction
	// ffdb handles all flat-file store operations.
	ffdb *ff.FlatFileDB
}

// Put sets the value for the given key. It overwrites
// any previous value for that key.
// This method is part of the DataAccessor interface.
func (tx *transaction) Put(key []byte, value []byte) error {
	return tx.ldbTx.Put(key, value)
}

// Get gets the value for the given key. It returns
// ErrNotFound if the given key does not exist.
// This method is part of the DataAccessor interface.
func (tx *transaction) Get(key []byte) ([]byte, error) {
	return tx.ldbTx.Get(key)
}

// Has returns true if the database contains the
// given key.
// This method is part of the DataAccessor interface.
func (tx *transaction) Has(key []byte) (bool, error) {
	return tx.ldbTx.Has(key)
}

// Delete deletes the value for the given key. Will not
// return an error if the key doesn't exist.
// This method is part of the DataAccessor interface.
func (tx *transaction) Delete(key []byte) error {
	return tx.ldbTx.Delete(key)
}

// AppendToStore appends the given data to the flat
// file store defined by storeName. This function
// returns a serialized location handle that's meant
// to be stored and later used when querying the data
// that has just now been inserted.
// This method is part of the DataAccessor interface.
func (tx *transaction) AppendToStore(storeName string, data []byte) ([]byte, error) {
	return appendToStore(tx, tx.ffdb, storeName, data)
}

// RetrieveFromStore retrieves data from the store defined by
// storeName using the given serialized location handle. It
// returns ErrNotFound if the location does not exist. See
// AppendToStore for further details.
// This method is part of the DataAccessor interface.
func (tx *transaction) RetrieveFromStore(storeName string, location []byte) ([]byte, error) {
	return tx.ffdb.Read(storeName, location)
}

// Cursor begins a new cursor over the given bucket.
// This method is part of the DataAccessor interface.
func (tx *transaction) Cursor(bucket []byte) (database.Cursor, error) {
	return tx.ldbTx.Cursor(bucket)
}

// Rollback rolls back whatever changes were made to the
// database within this transaction.
// This method is part of the Transaction interface.
func (tx *transaction) Rollback() error {
	return tx.ldbTx.Rollback()
}

// Commit commits whatever changes were made to the database
// within this transaction.
// This method is part of the Transaction interface.
func (tx *transaction) Commit() error {
	return tx.ldbTx.Commit()
}

// RollbackUnlessClosed rolls back changes that were made to
// the database within the transaction, unless the transaction
// had already been closed using either Rollback or Commit.
func (tx *transaction) RollbackUnlessClosed() error {
	return tx.ldbTx.RollbackUnlessClosed()
}
|
||||
@@ -1,707 +0,0 @@
|
||||
// Copyright (c) 2015-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This file is part of the ffldb package rather than the ffldb_test package as
|
||||
// it provides whitebox testing.
|
||||
|
||||
package ffldb
|
||||
|
||||
import (
|
||||
"compress/bzip2"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"github.com/pkg/errors"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/kaspanet/kaspad/database"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
ldberrors "github.com/syndtr/goleveldb/leveldb/errors"
|
||||
)
|
||||
|
||||
// Package-level fixtures shared by the whitebox tests in this file.
var (
	// blockDataNet is the expected network in the test block data.
	blockDataNet = wire.Mainnet

	// blockDataFile is the path to a file containing the first 256 blocks
	// of the block DAG.
	blockDataFile = filepath.Join("..", "testdata", "blocks1-256.bz2")

	// errSubTestFail is used to signal that a sub test returned false.
	errSubTestFail = errors.Errorf("sub test failure")
)
|
||||
|
||||
// loadBlocks loads the blocks contained in the testdata directory and returns
|
||||
// a slice of them.
|
||||
func loadBlocks(t *testing.T, dataFile string, network wire.KaspaNet) ([]*util.Block, error) {
|
||||
// Open the file that contains the blocks for reading.
|
||||
fi, err := os.Open(dataFile)
|
||||
if err != nil {
|
||||
t.Errorf("failed to open file %v, err %v", dataFile, err)
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err := fi.Close(); err != nil {
|
||||
t.Errorf("failed to close file %v %v", dataFile,
|
||||
err)
|
||||
}
|
||||
}()
|
||||
dr := bzip2.NewReader(fi)
|
||||
|
||||
// Set the first block as the genesis block.
|
||||
blocks := make([]*util.Block, 0, 256)
|
||||
genesis := util.NewBlock(dagconfig.MainnetParams.GenesisBlock)
|
||||
blocks = append(blocks, genesis)
|
||||
|
||||
// Load the remaining blocks.
|
||||
for height := 1; ; height++ {
|
||||
var net uint32
|
||||
err := binary.Read(dr, binary.LittleEndian, &net)
|
||||
if err == io.EOF {
|
||||
// Hit end of file at the expected offset. No error.
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf("Failed to load network type for block %d: %v",
|
||||
height, err)
|
||||
return nil, err
|
||||
}
|
||||
if net != uint32(network) {
|
||||
t.Errorf("Block doesn't match network: %v expects %v",
|
||||
net, network)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var blockLen uint32
|
||||
err = binary.Read(dr, binary.LittleEndian, &blockLen)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to load block size for block %d: %v",
|
||||
height, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Read the block.
|
||||
blockBytes := make([]byte, blockLen)
|
||||
_, err = io.ReadFull(dr, blockBytes)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to load block %d: %v", height, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Deserialize and store the block.
|
||||
block, err := util.NewBlockFromBytes(blockBytes)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to parse block %v: %v", height, err)
|
||||
return nil, err
|
||||
}
|
||||
blocks = append(blocks, block)
|
||||
}
|
||||
|
||||
return blocks, nil
|
||||
}
|
||||
|
||||
// checkDbError ensures the passed error is a database.Error with an error code
|
||||
// that matches the passed error code.
|
||||
func checkDbError(t *testing.T, testName string, gotErr error, wantErrCode database.ErrorCode) bool {
|
||||
dbErr, ok := gotErr.(database.Error)
|
||||
if !ok {
|
||||
t.Errorf("%s: unexpected error type - got %T, want %T",
|
||||
testName, gotErr, database.Error{})
|
||||
return false
|
||||
}
|
||||
if dbErr.ErrorCode != wantErrCode {
|
||||
t.Errorf("%s: unexpected error code - got %s (%s), want %s",
|
||||
testName, dbErr.ErrorCode, dbErr.Description,
|
||||
wantErrCode)
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// testContext is used to store context information about a running test which
// is passed into helper functions.
type testContext struct {
	t            *testing.T               // test being run
	db           database.DB              // database instance under test
	files        map[uint32]*lockableFile // mock block files, keyed by file number
	maxFileSizes map[uint32]int64         // per-file max sizes used to inject write errors
	blocks       []*util.Block            // test blocks loaded from the testdata file
}
|
||||
|
||||
// TestConvertErr ensures the leveldb error to database error conversion works
|
||||
// as expected.
|
||||
func TestConvertErr(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
err error
|
||||
wantErrCode database.ErrorCode
|
||||
}{
|
||||
{&ldberrors.ErrCorrupted{}, database.ErrCorruption},
|
||||
{leveldb.ErrClosed, database.ErrDbNotOpen},
|
||||
{leveldb.ErrSnapshotReleased, database.ErrTxClosed},
|
||||
{leveldb.ErrIterReleased, database.ErrTxClosed},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
gotErr := convertErr("test", test.err)
|
||||
if gotErr.ErrorCode != test.wantErrCode {
|
||||
t.Errorf("convertErr #%d unexpected error - got %v, "+
|
||||
"want %v", i, gotErr.ErrorCode, test.wantErrCode)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestCornerCases ensures several corner cases which can happen when opening
// a database and/or block files work as expected.
func TestCornerCases(t *testing.T) {
	t.Parallel()

	// Create a file at the datapase path to force the open below to fail.
	dbPath := filepath.Join(os.TempDir(), "ffldb-errors")
	_ = os.RemoveAll(dbPath)
	fi, err := os.Create(dbPath)
	if err != nil {
		t.Errorf("os.Create: unexpected error: %v", err)
		return
	}
	fi.Close()

	// Ensure creating a new database fails when a file exists where a
	// directory is needed.
	testName := "openDB: fail due to file at target location"
	wantErrCode := database.ErrDriverSpecific
	idb, err := openDB(dbPath, blockDataNet, true)
	if !checkDbError(t, testName, err, wantErrCode) {
		if err == nil {
			idb.Close()
		}
		_ = os.RemoveAll(dbPath)
		return
	}

	// Remove the file and create the database to run tests against. It
	// should be successful this time.
	_ = os.RemoveAll(dbPath)
	idb, err = openDB(dbPath, blockDataNet, true)
	if err != nil {
		t.Errorf("openDB: unexpected error: %v", err)
		return
	}
	defer os.RemoveAll(dbPath)
	defer idb.Close()

	// Ensure attempting to write to a file that can't be created returns
	// the expected error. A directory is created where the first block
	// file would be, so opening it as a file fails.
	testName = "writeBlock: open file failure"
	filePath := blockFilePath(dbPath, 0)
	if err := os.Mkdir(filePath, 0755); err != nil {
		t.Errorf("os.Mkdir: unexpected error: %v", err)
		return
	}
	store := idb.(*db).store
	_, err = store.writeBlock([]byte{0x00})
	if !checkDbError(t, testName, err, database.ErrDriverSpecific) {
		return
	}
	_ = os.RemoveAll(filePath)

	// Close the underlying leveldb database out from under the database.
	ldb := idb.(*db).cache.ldb
	ldb.Close()

	// Ensure initilization errors in the underlying database work as
	// expected.
	testName = "initDB: reinitialization"
	wantErrCode = database.ErrDbNotOpen
	err = initDB(ldb)
	if !checkDbError(t, testName, err, wantErrCode) {
		return
	}

	// Ensure the View handles errors in the underlying leveldb database
	// properly.
	testName = "View: underlying leveldb error"
	wantErrCode = database.ErrDbNotOpen
	err = idb.View(func(dbTx database.Tx) error {
		return nil
	})
	if !checkDbError(t, testName, err, wantErrCode) {
		return
	}

	// Ensure the Update handles errors in the underlying leveldb database
	// properly.
	testName = "Update: underlying leveldb error"
	err = idb.Update(func(dbTx database.Tx) error {
		return nil
	})
	if !checkDbError(t, testName, err, wantErrCode) {
		return
	}
}
|
||||
|
||||
// resetDatabase removes everything from the opened database associated with the
// test context including all metadata and the mock files. It returns false
// (after reporting on tc.t) if the reset could not be completed.
func resetDatabase(tc *testContext) bool {
	// Reset the metadata.
	err := tc.db.Update(func(dbTx database.Tx) error {
		// Remove all the keys using a cursor while also generating a
		// list of buckets. It's not safe to remove keys during ForEach
		// iteration nor is it safe to remove buckets during cursor
		// iteration, so this dual approach is needed.
		var bucketNames [][]byte
		cursor := dbTx.Metadata().Cursor()
		for ok := cursor.First(); ok; ok = cursor.Next() {
			if cursor.Value() != nil {
				if err := cursor.Delete(); err != nil {
					return err
				}
			} else {
				bucketNames = append(bucketNames, cursor.Key())
			}
		}

		// Remove the buckets.
		for _, k := range bucketNames {
			if err := dbTx.Metadata().DeleteBucket(k); err != nil {
				return err
			}
		}

		// Recreate the block index bucket expected by the database.
		_, err := dbTx.Metadata().CreateBucket(blockIdxBucketName)
		return err
	})
	if err != nil {
		tc.t.Errorf("Update: unexpected error: %v", err)
		return false
	}

	// Reset the mock files: close the current write file and rewind the
	// write cursor back to file 0, offset 0.
	store := tc.db.(*db).store
	wc := store.writeCursor
	wc.curFile.Lock()
	if wc.curFile.file != nil {
		wc.curFile.file.Close()
		wc.curFile.file = nil
	}
	wc.curFile.Unlock()
	wc.Lock()
	wc.curFileNum = 0
	wc.curOffset = 0
	wc.Unlock()
	tc.files = make(map[uint32]*lockableFile)
	tc.maxFileSizes = make(map[uint32]int64)
	return true
}
|
||||
|
||||
// testWriteFailures tests various failures paths when writing to the block
// files. It returns false (after reporting on tc.t) on the first failed
// sub-check.
func testWriteFailures(tc *testContext) bool {
	if !resetDatabase(tc) {
		return false
	}

	// Ensure file sync errors during flush return the expected error.
	store := tc.db.(*db).store
	testName := "flush: file sync failure"
	store.writeCursor.Lock()
	oldFile := store.writeCursor.curFile
	store.writeCursor.curFile = &lockableFile{
		file: &mockFile{forceSyncErr: true, maxSize: -1},
	}
	store.writeCursor.Unlock()
	err := tc.db.(*db).cache.flush()
	if !checkDbError(tc.t, testName, err, database.ErrDriverSpecific) {
		return false
	}
	// Restore the real write file so later sub-tests aren't affected.
	store.writeCursor.Lock()
	store.writeCursor.curFile = oldFile
	store.writeCursor.Unlock()

	// Force errors in the various error paths when writing data by using
	// mock files with a limited max size.
	block0Bytes, _ := tc.blocks[0].Bytes()
	tests := []struct {
		fileNum uint32
		maxSize int64
	}{
		// Force an error when writing the network bytes.
		{fileNum: 0, maxSize: 2},

		// Force an error when writing the block size.
		{fileNum: 0, maxSize: 6},

		// Force an error when writing the block.
		{fileNum: 0, maxSize: 17},

		// Force an error when writing the checksum.
		{fileNum: 0, maxSize: int64(len(block0Bytes)) + 10},

		// Force an error after writing enough blocks for force multiple
		// files.
		{fileNum: 15, maxSize: 1},
	}

	for i, test := range tests {
		if !resetDatabase(tc) {
			return false
		}

		// Ensure storing the specified number of blocks using a mock
		// file that fails the write fails when the transaction is
		// committed, not when the block is stored.
		tc.maxFileSizes = map[uint32]int64{test.fileNum: test.maxSize}
		err := tc.db.Update(func(dbTx database.Tx) error {
			for i, block := range tc.blocks {
				err := dbTx.StoreBlock(block)
				if err != nil {
					tc.t.Errorf("StoreBlock (%d): unexpected "+
						"error: %v", i, err)
					return errSubTestFail
				}
			}

			return nil
		})
		testName := fmt.Sprintf("Force update commit failure - test "+
			"%d, fileNum %d, maxsize %d", i, test.fileNum,
			test.maxSize)
		if !checkDbError(tc.t, testName, err, database.ErrDriverSpecific) {
			tc.t.Errorf("%v", err)
			return false
		}

		// Ensure the commit rollback removed all extra files and data.
		if len(tc.files) != 1 {
			tc.t.Errorf("Update rollback: new not removed - want "+
				"1 file, got %d", len(tc.files))
			return false
		}
		if _, ok := tc.files[0]; !ok {
			tc.t.Error("Update rollback: file 0 does not exist")
			return false
		}
		file := tc.files[0].file.(*mockFile)
		if len(file.data) != 0 {
			tc.t.Errorf("Update rollback: file did not truncate - "+
				"want len 0, got len %d", len(file.data))
			return false
		}
	}

	return true
}
|
||||
|
||||
// testBlockFileErrors ensures the database returns expected errors with various
// file-related issues such as closed and missing files. It returns false
// (after reporting on tc.t) on the first failed sub-check.
func testBlockFileErrors(tc *testContext) bool {
	if !resetDatabase(tc) {
		return false
	}

	// Ensure errors in blockFile and openFile when requesting invalid file
	// numbers.
	store := tc.db.(*db).store
	testName := "blockFile invalid file open"
	_, err := store.blockFile(^uint32(0))
	if !checkDbError(tc.t, testName, err, database.ErrDriverSpecific) {
		return false
	}
	testName = "openFile invalid file open"
	_, err = store.openFile(^uint32(0))
	if !checkDbError(tc.t, testName, err, database.ErrDriverSpecific) {
		return false
	}

	// Insert the first block into the mock file.
	err = tc.db.Update(func(dbTx database.Tx) error {
		err := dbTx.StoreBlock(tc.blocks[0])
		if err != nil {
			tc.t.Errorf("StoreBlock: unexpected error: %v", err)
			return errSubTestFail
		}

		return nil
	})
	if err != nil {
		if err != errSubTestFail {
			tc.t.Errorf("Update: unexpected error: %v", err)
		}
		return false
	}

	// Ensure errors in readBlock and readBlockRegion when requesting a file
	// number that doesn't exist.
	block0Hash := tc.blocks[0].Hash()
	testName = "readBlock invalid file number"
	invalidLoc := blockLocation{
		blockFileNum: ^uint32(0),
		blockLen:     80,
	}
	_, err = store.readBlock(block0Hash, invalidLoc)
	if !checkDbError(tc.t, testName, err, database.ErrDriverSpecific) {
		return false
	}
	testName = "readBlockRegion invalid file number"
	_, err = store.readBlockRegion(invalidLoc, 0, 80)
	if !checkDbError(tc.t, testName, err, database.ErrDriverSpecific) {
		return false
	}

	// Close the block file out from under the database.
	store.writeCursor.curFile.Lock()
	store.writeCursor.curFile.file.Close()
	store.writeCursor.curFile.Unlock()

	// Ensure failures in FetchBlock and FetchBlockRegion(s) since the
	// underlying file they need to read from has been closed.
	err = tc.db.View(func(dbTx database.Tx) error {
		testName = "FetchBlock closed file"
		wantErrCode := database.ErrDriverSpecific
		_, err := dbTx.FetchBlock(block0Hash)
		if !checkDbError(tc.t, testName, err, wantErrCode) {
			return errSubTestFail
		}

		testName = "FetchBlockRegion closed file"
		regions := []database.BlockRegion{
			{
				Hash:   block0Hash,
				Len:    80,
				Offset: 0,
			},
		}
		_, err = dbTx.FetchBlockRegion(&regions[0])
		if !checkDbError(tc.t, testName, err, wantErrCode) {
			return errSubTestFail
		}

		testName = "FetchBlockRegions closed file"
		_, err = dbTx.FetchBlockRegions(regions)
		if !checkDbError(tc.t, testName, err, wantErrCode) {
			return errSubTestFail
		}

		return nil
	})
	if err != nil {
		if err != errSubTestFail {
			tc.t.Errorf("View: unexpected error: %v", err)
		}
		return false
	}

	return true
}
|
||||
|
||||
// testCorruption ensures the database returns expected errors under various
// corruption scenarios. It returns false (after reporting on tc.t) on the
// first failed sub-check.
func testCorruption(tc *testContext) bool {
	if !resetDatabase(tc) {
		return false
	}

	// Insert the first block into the mock file.
	err := tc.db.Update(func(dbTx database.Tx) error {
		err := dbTx.StoreBlock(tc.blocks[0])
		if err != nil {
			tc.t.Errorf("StoreBlock: unexpected error: %v", err)
			return errSubTestFail
		}

		return nil
	})
	if err != nil {
		if err != errSubTestFail {
			tc.t.Errorf("Update: unexpected error: %v", err)
		}
		return false
	}

	// Ensure corruption is detected by intentionally modifying the bytes
	// stored to the mock file and reading the block.
	block0Bytes, _ := tc.blocks[0].Bytes()
	block0Hash := tc.blocks[0].Hash()
	tests := []struct {
		offset      uint32
		fixChecksum bool
		wantErrCode database.ErrorCode
	}{
		// One of the network bytes. The checksum needs to be fixed so
		// the invalid network is detected.
		{2, true, database.ErrDriverSpecific},

		// The same network byte, but this time don't fix the checksum
		// to ensure the corruption is detected.
		{2, false, database.ErrCorruption},

		// One of the block length bytes.
		{6, false, database.ErrCorruption},

		// Random header byte.
		{17, false, database.ErrCorruption},

		// Random transaction byte.
		{90, false, database.ErrCorruption},

		// Random checksum byte.
		{uint32(len(block0Bytes)) + 10, false, database.ErrCorruption},
	}
	err = tc.db.View(func(dbTx database.Tx) error {
		data := tc.files[0].file.(*mockFile).data
		for i, test := range tests {
			// Corrupt the byte at the offset by a single bit.
			data[test.offset] ^= 0x10

			// Fix the checksum if requested to force other errors.
			fileLen := len(data)
			var oldChecksumBytes [4]byte
			copy(oldChecksumBytes[:], data[fileLen-4:])
			if test.fixChecksum {
				toSum := data[:fileLen-4]
				cksum := crc32.Checksum(toSum, castagnoli)
				binary.BigEndian.PutUint32(data[fileLen-4:], cksum)
			}

			testName := fmt.Sprintf("FetchBlock (test #%d): "+
				"corruption", i)
			_, err := dbTx.FetchBlock(block0Hash)
			if !checkDbError(tc.t, testName, err, test.wantErrCode) {
				return errSubTestFail
			}

			// Reset the corrupted data back to the original.
			data[test.offset] ^= 0x10
			if test.fixChecksum {
				copy(data[fileLen-4:], oldChecksumBytes[:])
			}
		}

		return nil
	})
	if err != nil {
		if err != errSubTestFail {
			tc.t.Errorf("View: unexpected error: %v", err)
		}
		return false
	}

	return true
}
|
||||
|
||||
// TestFailureScenarios ensures several failure scenarios such as database
|
||||
// corruption, block file write failures, and rollback failures are handled
|
||||
// correctly.
|
||||
func TestFailureScenarios(t *testing.T) {
|
||||
// Create a new database to run tests against.
|
||||
dbPath := filepath.Join(os.TempDir(), "ffldb-failurescenarios")
|
||||
_ = os.RemoveAll(dbPath)
|
||||
idb, err := database.Create(dbType, dbPath, blockDataNet)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to create test database (%s) %v", dbType, err)
|
||||
return
|
||||
}
|
||||
defer os.RemoveAll(dbPath)
|
||||
defer idb.Close()
|
||||
|
||||
// Create a test context to pass around.
|
||||
tc := &testContext{
|
||||
t: t,
|
||||
db: idb,
|
||||
files: make(map[uint32]*lockableFile),
|
||||
maxFileSizes: make(map[uint32]int64),
|
||||
}
|
||||
|
||||
// Change the maximum file size to a small value to force multiple flat
|
||||
// files with the test data set and replace the file-related functions
|
||||
// to make use of mock files in memory. This allows injection of
|
||||
// various file-related errors.
|
||||
store := idb.(*db).store
|
||||
store.maxBlockFileSize = 1024 // 1KiB
|
||||
store.openWriteFileFunc = func(fileNum uint32) (filer, error) {
|
||||
if file, ok := tc.files[fileNum]; ok {
|
||||
// "Reopen" the file.
|
||||
file.Lock()
|
||||
mock := file.file.(*mockFile)
|
||||
mock.Lock()
|
||||
mock.closed = false
|
||||
mock.Unlock()
|
||||
file.Unlock()
|
||||
return mock, nil
|
||||
}
|
||||
|
||||
// Limit the max size of the mock file as specified in the test
|
||||
// context.
|
||||
maxSize := int64(-1)
|
||||
if maxFileSize, ok := tc.maxFileSizes[fileNum]; ok {
|
||||
maxSize = int64(maxFileSize)
|
||||
}
|
||||
file := &mockFile{maxSize: int64(maxSize)}
|
||||
tc.files[fileNum] = &lockableFile{file: file}
|
||||
return file, nil
|
||||
}
|
||||
store.openFileFunc = func(fileNum uint32) (*lockableFile, error) {
|
||||
// Force error when trying to open max file num.
|
||||
if fileNum == ^uint32(0) {
|
||||
return nil, makeDbErr(database.ErrDriverSpecific,
|
||||
"test", nil)
|
||||
}
|
||||
if file, ok := tc.files[fileNum]; ok {
|
||||
// "Reopen" the file.
|
||||
file.Lock()
|
||||
mock := file.file.(*mockFile)
|
||||
mock.Lock()
|
||||
mock.closed = false
|
||||
mock.Unlock()
|
||||
file.Unlock()
|
||||
return file, nil
|
||||
}
|
||||
file := &lockableFile{file: &mockFile{}}
|
||||
tc.files[fileNum] = file
|
||||
return file, nil
|
||||
}
|
||||
store.deleteFileFunc = func(fileNum uint32) error {
|
||||
if file, ok := tc.files[fileNum]; ok {
|
||||
file.Lock()
|
||||
file.file.Close()
|
||||
file.Unlock()
|
||||
delete(tc.files, fileNum)
|
||||
return nil
|
||||
}
|
||||
|
||||
str := fmt.Sprintf("file %d does not exist", fileNum)
|
||||
return makeDbErr(database.ErrDriverSpecific, str, nil)
|
||||
}
|
||||
|
||||
// Load the test blocks and save in the test context for use throughout
|
||||
// the tests.
|
||||
blocks, err := loadBlocks(t, blockDataFile, blockDataNet)
|
||||
if err != nil {
|
||||
t.Errorf("loadBlocks: Unexpected error: %v", err)
|
||||
return
|
||||
}
|
||||
tc.blocks = blocks
|
||||
|
||||
// Test various failures paths when writing to the block files.
|
||||
if !testWriteFailures(tc) {
|
||||
return
|
||||
}
|
||||
|
||||
// Test various file-related issues such as closed and missing files.
|
||||
if !testBlockFileErrors(tc) {
|
||||
return
|
||||
}
|
||||
|
||||
// Test various corruption scenarios.
|
||||
testCorruption(tc)
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user