Mirror of https://github.com/kaspanet/kaspad.git, synced 2025-06-08 07:06:43 +00:00
[DEV-81] 100% Database Coverage (#110)
* [DEV-81] Overwrite maxOpenFiles for testInterface to force tests to check the LRU-mechanism in openFile
* [DEV-81] Added database.UseLogger test
* [DEV-81] Completed coverage of reconcileDB()
* [DEV-81] Added some tests for dbcache
* [DEV-81] Moved init and UseLogger to separate file to make them more easily-testable + added tests
* [DEV-81] Added tests for deleteFile
* [DEV-81] Added tests to cursor.Delete + made sure it returns error when transaction is not writable
* [DEV-81] Moved database/error_test.go from database_test package to database package + added test for IsErrorCode
* [DEV-81] Added tests for handleRollback error-cases
* [DEV-81] Added tests for cursor.skipPendingUpdates
* [DEV-81] Added tests for various cursor edge-cases
* [DEV-81] tx.putKey no longer returns error, because there is no case when it does
* [DEV-81] Added tests to CreateBucket error cases
* [DEV-81] Added tests to bucket.Get and .Delete error cases + .Delete now returns error on empty key
* [DEV-81] Added test for ForEachBucket
* [DEV-81] Added tests to StoreBlock
* [DEV-81] Added test for deleting a double nested bucket
* [DEV-81] Removed log_test, as it is no longer necessary with the logging system re-design
* [DEV-81] Added test to some of writePendingAndCommit error-cases
* [DEV-81] Update references from btcutil to btcd/util
* [DEV-81] Add tests for dbCacheIterator{.Next(), .Prev(), .Key(), .Value()} in cases when iterator is exhausted
* [DEV-81] Added tests for ldbIterator placeholder functions
* [DEV-81] Added test name to Error messages in TestSkipPendingUpdates
* [DEV-81] Begin writing TestSkipPendingUpdatesCache
* [DEV-81] Added error-cases for DBCache.flush() and DBCache.commitTreaps()
* [DEV-81] Use monkey.patch from bou.ke and not from github
* [DEV-81] Rewrote IsErrorCode in both database and txscript packages to be more concise
* [DEV-81] Rename any database.Tx to dbTx instead of tx - to remove confusion with coin Tx
* [DEV-81] Fix typo
* [DEV-81] Use os.TempDir() instead of /tmp/ to be cross-platform
* [DEV-81] Use SimNet for database tests + Error if testDB exists after deleting it
* [DEV-81] Removed useLogger - it's redundant
* [DEV-81] Added comment on how CRC32 checksums are calculated in reconcile_test.go
* [DEV-81] Added comment that explains what setWriteRow does
* [DEV-81] Use constant instead of hard-coded value
* [DEV-81] Fixed some typos + better formatting
This commit is contained in:
parent 60afa37acf
commit 225f349e6a
@@ -777,7 +777,7 @@ func (p *provisionalNode) pastUTXO(virtual *virtualBlock, db database.DB) (pastU
 	// Fetch from the database all the transactions for this block's blue set (besides the selected parent)
 	var blueBlockTransactions []*TxWithBlockHash
 	transactionCount := 0
-	err = db.View(func(tx database.Tx) error {
+	err = db.View(func(dbTx database.Tx) error {
 		// Precalculate the amount of transactions in this block's blue set, besides the selected parent.
 		// This is to avoid an attack in which an attacker fabricates a block that will deliberately cause
 		// a lot of copying, causing a high cost to the whole network.
@@ -788,7 +788,7 @@ func (p *provisionalNode) pastUTXO(virtual *virtualBlock, db database.DB) (pastU
 				continue
 			}
 
-			blueBlock, err := dbFetchBlockByNode(tx, blueBlockNode)
+			blueBlock, err := dbFetchBlockByNode(dbTx, blueBlockNode)
 			if err != nil {
 				return err
 			}
@@ -43,10 +43,10 @@ func (cmd *fetchBlockCmd) Execute(args []string) error {
 	}
 	defer db.Close()
 
-	return db.View(func(tx database.Tx) error {
+	return db.View(func(dbTx database.Tx) error {
 		log.Infof("Fetching block %s", blockHash)
 		startTime := time.Now()
-		blockBytes, err := tx.FetchBlock(blockHash)
+		blockBytes, err := dbTx.FetchBlock(blockHash)
 		if err != nil {
 			return err
 		}
@@ -64,7 +64,7 @@ func (cmd *blockRegionCmd) Execute(args []string) error {
 	}
 	defer db.Close()
 
-	return db.View(func(tx database.Tx) error {
+	return db.View(func(dbTx database.Tx) error {
 		log.Infof("Fetching block region %s<%d:%d>", blockHash,
 			startOffset, startOffset+regionLen-1)
 		region := database.BlockRegion{
@@ -73,7 +73,7 @@ func (cmd *blockRegionCmd) Execute(args []string) error {
 			Len: uint32(regionLen),
 		}
 		startTime := time.Now()
-		regionBytes, err := tx.FetchBlockRegion(&region)
+		regionBytes, err := dbTx.FetchBlockRegion(&region)
 		if err != nil {
 			return err
 		}
@@ -119,8 +119,8 @@ func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
 
 	// Skip blocks that already exist.
 	var exists bool
-	err = bi.db.View(func(tx database.Tx) error {
-		exists, err = tx.HasBlock(block.Hash())
+	err = bi.db.View(func(dbTx database.Tx) error {
+		exists, err = dbTx.HasBlock(block.Hash())
 		return err
 	})
 	if err != nil {
@@ -134,8 +134,8 @@ func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
 	parentHashes := block.MsgBlock().Header.ParentHashes
 	for _, parentHash := range parentHashes {
 		var exists bool
-		err := bi.db.View(func(tx database.Tx) error {
-			exists, err = tx.HasBlock(&parentHash)
+		err := bi.db.View(func(dbTx database.Tx) error {
+			exists, err = dbTx.HasBlock(&parentHash)
 			return err
 		})
 		if err != nil {
@@ -149,8 +149,8 @@ func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
 	}
 
 	// Put the blocks into the database with no checking of chain rules.
-	err = bi.db.Update(func(tx database.Tx) error {
-		return tx.StoreBlock(block)
+	err = bi.db.Update(func(dbTx database.Tx) error {
+		return dbTx.StoreBlock(block)
 	})
 	if err != nil {
 		return false, err
@@ -41,9 +41,9 @@ func (cmd *headersCmd) Execute(args []string) error {
 	// the database would keep a metadata index of its own.
 	blockIdxName := []byte("ffldb-blockidx")
 	if !headersCfg.Bulk {
-		err = db.View(func(tx database.Tx) error {
+		err = db.View(func(dbTx database.Tx) error {
 			totalHdrs := 0
-			blockIdxBucket := tx.Metadata().Bucket(blockIdxName)
+			blockIdxBucket := dbTx.Metadata().Bucket(blockIdxName)
 			blockIdxBucket.ForEach(func(k, v []byte) error {
 				totalHdrs++
 				return nil
@@ -54,7 +54,7 @@ func (cmd *headersCmd) Execute(args []string) error {
 			blockIdxBucket.ForEach(func(k, v []byte) error {
 				var hash daghash.Hash
 				copy(hash[:], k)
-				_, err := tx.FetchBlockHeader(&hash)
+				_, err := dbTx.FetchBlockHeader(&hash)
 				if err != nil {
 					return err
 				}
@@ -69,8 +69,8 @@ func (cmd *headersCmd) Execute(args []string) error {
 	}
 
 	// Bulk load headers.
-	err = db.View(func(tx database.Tx) error {
-		blockIdxBucket := tx.Metadata().Bucket(blockIdxName)
+	err = db.View(func(dbTx database.Tx) error {
+		blockIdxBucket := dbTx.Metadata().Bucket(blockIdxName)
 		hashes := make([]daghash.Hash, 0, 500000)
 		blockIdxBucket.ForEach(func(k, v []byte) error {
 			var hash daghash.Hash
@@ -81,7 +81,7 @@ func (cmd *headersCmd) Execute(args []string) error {
 
 		log.Infof("Loading headers for %d blocks...", len(hashes))
 		startTime := time.Now()
-		hdrs, err := tx.FetchBlockHeaders(hashes)
+		hdrs, err := dbTx.FetchBlockHeaders(hashes)
 		if err != nil {
 			return err
 		}
@@ -195,3 +195,13 @@ func (e Error) Error() string {
 func makeError(c ErrorCode, desc string, err error) Error {
 	return Error{ErrorCode: c, Description: desc, Err: err}
 }
+
+// IsErrorCode returns whether or not the provided error is a script error with
+// the provided error code.
+func IsErrorCode(err error, c ErrorCode) bool {
+	if err, ok := err.(Error); ok {
+		return err.ErrorCode == c
+	}
+
+	return false
+}
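The new helper lets callers test for a specific error code without unwrapping database.Error by hand. Below is a minimal usage sketch; the ensureBucket helper, its package name, and the bucket name are hypothetical illustrations, not part of this commit:

package dbutil

import (
	"github.com/daglabs/btcd/database"
)

// ensureBucket creates a bucket, treating "already exists" as success.
// It relies on database.IsErrorCode instead of a manual type assertion.
func ensureBucket(db database.DB, name []byte) error {
	err := db.Update(func(dbTx database.Tx) error {
		_, err := dbTx.Metadata().CreateBucket(name)
		return err
	})
	if database.IsErrorCode(err, database.ErrBucketExists) {
		return nil // the bucket was created on an earlier run
	}
	return err
}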
@@ -2,48 +2,46 @@
 // Use of this source code is governed by an ISC
 // license that can be found in the LICENSE file.
 
-package database_test
+package database
 
 import (
 	"errors"
 	"testing"
-
-	"github.com/daglabs/btcd/database"
 )
 
 // TestErrorCodeStringer tests the stringized output for the ErrorCode type.
 func TestErrorCodeStringer(t *testing.T) {
 	tests := []struct {
-		in   database.ErrorCode
+		in   ErrorCode
 		want string
 	}{
-		{database.ErrDbTypeRegistered, "ErrDbTypeRegistered"},
+		{ErrDbTypeRegistered, "ErrDbTypeRegistered"},
-		{database.ErrDbUnknownType, "ErrDbUnknownType"},
+		{ErrDbUnknownType, "ErrDbUnknownType"},
-		{database.ErrDbDoesNotExist, "ErrDbDoesNotExist"},
+		{ErrDbDoesNotExist, "ErrDbDoesNotExist"},
-		{database.ErrDbExists, "ErrDbExists"},
+		{ErrDbExists, "ErrDbExists"},
-		{database.ErrDbNotOpen, "ErrDbNotOpen"},
+		{ErrDbNotOpen, "ErrDbNotOpen"},
-		{database.ErrDbAlreadyOpen, "ErrDbAlreadyOpen"},
+		{ErrDbAlreadyOpen, "ErrDbAlreadyOpen"},
-		{database.ErrInvalid, "ErrInvalid"},
+		{ErrInvalid, "ErrInvalid"},
-		{database.ErrCorruption, "ErrCorruption"},
+		{ErrCorruption, "ErrCorruption"},
-		{database.ErrTxClosed, "ErrTxClosed"},
+		{ErrTxClosed, "ErrTxClosed"},
-		{database.ErrTxNotWritable, "ErrTxNotWritable"},
+		{ErrTxNotWritable, "ErrTxNotWritable"},
-		{database.ErrBucketNotFound, "ErrBucketNotFound"},
+		{ErrBucketNotFound, "ErrBucketNotFound"},
-		{database.ErrBucketExists, "ErrBucketExists"},
+		{ErrBucketExists, "ErrBucketExists"},
-		{database.ErrBucketNameRequired, "ErrBucketNameRequired"},
+		{ErrBucketNameRequired, "ErrBucketNameRequired"},
-		{database.ErrKeyRequired, "ErrKeyRequired"},
+		{ErrKeyRequired, "ErrKeyRequired"},
-		{database.ErrKeyTooLarge, "ErrKeyTooLarge"},
+		{ErrKeyTooLarge, "ErrKeyTooLarge"},
-		{database.ErrValueTooLarge, "ErrValueTooLarge"},
+		{ErrValueTooLarge, "ErrValueTooLarge"},
-		{database.ErrIncompatibleValue, "ErrIncompatibleValue"},
+		{ErrIncompatibleValue, "ErrIncompatibleValue"},
-		{database.ErrBlockNotFound, "ErrBlockNotFound"},
+		{ErrBlockNotFound, "ErrBlockNotFound"},
-		{database.ErrBlockExists, "ErrBlockExists"},
+		{ErrBlockExists, "ErrBlockExists"},
-		{database.ErrBlockRegionInvalid, "ErrBlockRegionInvalid"},
+		{ErrBlockRegionInvalid, "ErrBlockRegionInvalid"},
-		{database.ErrDriverSpecific, "ErrDriverSpecific"},
+		{ErrDriverSpecific, "ErrDriverSpecific"},
 
 		{0xffff, "Unknown ErrorCode (65535)"},
 	}
 
 	// Detect additional error codes that don't have the stringer added.
-	if len(tests)-1 != int(database.TstNumErrorCodes) {
+	if len(tests)-1 != int(TstNumErrorCodes) {
 		t.Errorf("It appears an error code was added without adding " +
 			"an associated stringer test")
 	}
@@ -64,20 +62,20 @@ func TestError(t *testing.T) {
 	t.Parallel()
 
 	tests := []struct {
-		in   database.Error
+		in   Error
 		want string
 	}{
 		{
-			database.Error{Description: "some error"},
+			Error{Description: "some error"},
 			"some error",
 		},
 		{
-			database.Error{Description: "human-readable error"},
+			Error{Description: "human-readable error"},
 			"human-readable error",
 		},
 		{
-			database.Error{
-				ErrorCode:   database.ErrDriverSpecific,
+			Error{
+				ErrorCode:   ErrDriverSpecific,
 				Description: "some error",
 				Err:         errors.New("driver-specific error"),
 			},
@@ -95,3 +93,26 @@ func TestError(t *testing.T) {
 		}
 	}
 }
+
+func TestIsErrorCode(t *testing.T) {
+	dummyError := errors.New("")
+
+	tests := []struct {
+		err            error
+		code           ErrorCode
+		expectedResult bool
+	}{
+		{makeError(ErrBucketExists, "", dummyError), ErrBucketExists, true},
+		{makeError(ErrBucketExists, "", dummyError), ErrBlockExists, false},
+		{dummyError, ErrBlockExists, false},
+		{nil, ErrBlockExists, false},
+	}
+
+	for i, test := range tests {
+		actualResult := IsErrorCode(test.err, test.code)
+		if test.expectedResult != actualResult {
+			t.Errorf("TestIsErrorCode: %d: Expected: %t, but got: %t",
+				i, test.expectedResult, actualResult)
+		}
+	}
+}
@@ -68,25 +68,25 @@ func Example_basicUsage() {
 	// Use the Update function of the database to perform a managed
 	// read-write transaction. The transaction will automatically be rolled
 	// back if the supplied inner function returns a non-nil error.
-	err = db.Update(func(tx database.Tx) error {
+	err = db.Update(func(dbTx database.Tx) error {
 		// Store a key/value pair directly in the metadata bucket.
 		// Typically a nested bucket would be used for a given feature,
 		// but this example is using the metadata bucket directly for
 		// simplicity.
 		key := []byte("mykey")
 		value := []byte("myvalue")
-		if err := tx.Metadata().Put(key, value); err != nil {
+		if err := dbTx.Metadata().Put(key, value); err != nil {
 			return err
 		}
 
 		// Read the key back and ensure it matches.
-		if !bytes.Equal(tx.Metadata().Get(key), value) {
+		if !bytes.Equal(dbTx.Metadata().Get(key), value) {
 			return fmt.Errorf("unexpected value for key '%s'", key)
 		}
 
 		// Create a new nested bucket under the metadata bucket.
 		nestedBucketKey := []byte("mybucket")
-		nestedBucket, err := tx.Metadata().CreateBucket(nestedBucketKey)
+		nestedBucket, err := dbTx.Metadata().CreateBucket(nestedBucketKey)
 		if err != nil {
 			return err
 		}
@@ -134,9 +134,9 @@ func Example_blockStorageAndRetrieval() {
 	// Use the Update function of the database to perform a managed
 	// read-write transaction and store a genesis block in the database as
 	// and example.
-	err = db.Update(func(tx database.Tx) error {
+	err = db.Update(func(dbTx database.Tx) error {
 		genesisBlock := dagconfig.MainNetParams.GenesisBlock
-		return tx.StoreBlock(util.NewBlock(genesisBlock))
+		return dbTx.StoreBlock(util.NewBlock(genesisBlock))
 	})
 	if err != nil {
 		fmt.Println(err)
@@ -146,9 +146,9 @@ func Example_blockStorageAndRetrieval() {
 	// Use the View function of the database to perform a managed read-only
 	// transaction and fetch the block stored above.
 	var loadedBlockBytes []byte
-	err = db.Update(func(tx database.Tx) error {
+	err = db.Update(func(dbTx database.Tx) error {
 		genesisHash := dagconfig.MainNetParams.GenesisHash
-		blockBytes, err := tx.FetchBlock(genesisHash)
+		blockBytes, err := dbTx.FetchBlock(genesisHash)
 		if err != nil {
 			return err
 		}
@@ -27,9 +27,9 @@ func BenchmarkBlockHeader(b *testing.B) {
 	}
 	defer os.RemoveAll(dbPath)
 	defer db.Close()
-	err = db.Update(func(tx database.Tx) error {
+	err = db.Update(func(dbTx database.Tx) error {
 		block := util.NewBlock(dagconfig.MainNetParams.GenesisBlock)
-		return tx.StoreBlock(block)
+		return dbTx.StoreBlock(block)
 	})
 	if err != nil {
 		b.Fatal(err)
@@ -37,10 +37,10 @@ func BenchmarkBlockHeader(b *testing.B) {
 
 	b.ReportAllocs()
 	b.ResetTimer()
-	err = db.View(func(tx database.Tx) error {
+	err = db.View(func(dbTx database.Tx) error {
 		blockHash := dagconfig.MainNetParams.GenesisHash
 		for i := 0; i < b.N; i++ {
-			_, err := tx.FetchBlockHeader(blockHash)
+			_, err := dbTx.FetchBlockHeader(blockHash)
 			if err != nil {
 				return err
 			}
@@ -68,9 +68,9 @@ func BenchmarkBlock(b *testing.B) {
 	}
 	defer os.RemoveAll(dbPath)
 	defer db.Close()
-	err = db.Update(func(tx database.Tx) error {
+	err = db.Update(func(dbTx database.Tx) error {
 		block := util.NewBlock(dagconfig.MainNetParams.GenesisBlock)
-		return tx.StoreBlock(block)
+		return dbTx.StoreBlock(block)
 	})
 	if err != nil {
 		b.Fatal(err)
@@ -78,10 +78,10 @@ func BenchmarkBlock(b *testing.B) {
 
 	b.ReportAllocs()
 	b.ResetTimer()
-	err = db.View(func(tx database.Tx) error {
+	err = db.View(func(dbTx database.Tx) error {
 		blockHash := dagconfig.MainNetParams.GenesisHash
 		for i := 0; i < b.N; i++ {
-			_, err := tx.FetchBlock(blockHash)
+			_, err := dbTx.FetchBlock(blockHash)
 			if err != nil {
 				return err
 			}
@@ -116,6 +116,12 @@ type blockStore struct {
 	// override the value.
 	maxBlockFileSize uint32
 
+	// maxOpenFiles is the max number of open files to maintain in the
+	// open blocks cache. Note that this does not include the current
+	// write file, so there will typically be one more than this value open.
+	// It is defined on the store so the whitebox tests can override the value.
+	maxOpenFiles int
+
 	// The following fields are related to the flat files which hold the
 	// actual blocks. The number of open files is limited by maxOpenFiles.
 	//
@@ -272,7 +278,7 @@ func (s *blockStore) openFile(fileNum uint32) (*lockableFile, error) {
 	// therefore should be closed last.
 	s.lruMutex.Lock()
 	lruList := s.openBlocksLRU
-	if lruList.Len() >= maxOpenFiles {
+	if lruList.Len() >= s.maxOpenFiles {
 		lruFileNum := lruList.Remove(lruList.Back()).(uint32)
 		oldBlockFile := s.openBlockFiles[lruFileNum]
 
@@ -752,6 +758,7 @@ func newBlockStore(basePath string, network wire.BitcoinNet) *blockStore {
 		network:          network,
 		basePath:         basePath,
 		maxBlockFileSize: maxBlockFileSize,
+		maxOpenFiles:     maxOpenFiles,
 		openBlockFiles:   make(map[uint32]*lockableFile),
 		openBlocksLRU:    list.New(),
 		fileNumToLRUElem: make(map[uint32]*list.Element),
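Moving maxOpenFiles from a package-level constant onto blockStore is what lets the new tests drive the LRU eviction path cheaply. A whitebox sketch of the idea follows; the test name, body, and the value 2 are illustrative assumptions, not the commit's actual test:

func TestOpenFileLRUSketch(t *testing.T) {
	pdb := newTestDb("TestOpenFileLRUSketch", t)
	defer pdb.Close()

	// Shrink the cache so that opening a third block file forces
	// openFile to evict the least recently used entry.
	pdb.store.maxOpenFiles = 2

	// ... store blocks across several files (e.g. by also lowering
	// pdb.store.maxBlockFileSize) and read them back, hitting the
	// lruList.Len() >= s.maxOpenFiles branch above.
}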
database/ffldb/blockio_test.go (new file, 116 lines)
@@ -0,0 +1,116 @@
+package ffldb
+
+import (
+	"errors"
+	"os"
+	"testing"
+
+	"bou.ke/monkey"
+	"github.com/daglabs/btcd/dagconfig/daghash"
+	"github.com/daglabs/btcd/database"
+	"github.com/daglabs/btcd/util"
+	"github.com/daglabs/btcd/wire"
+)
+
+func TestDeleteFile(t *testing.T) {
+	testBlock := util.NewBlock(wire.NewMsgBlock(
+		wire.NewBlockHeader(1, []daghash.Hash{}, &daghash.Hash{}, 0, 0)))
+
+	tests := []struct {
+		fileNum     uint32
+		expectedErr bool
+	}{
+		{0, false},
+		{1, true},
+	}
+
+	for _, test := range tests {
+		func() {
+			pdb := newTestDb("TestDeleteFile", t)
+			defer pdb.Close()
+
+			err := pdb.Update(func(dbTx database.Tx) error {
+				dbTx.StoreBlock(testBlock)
+				return nil
+			})
+			if err != nil {
+				t.Fatalf("TestDeleteFile: Error storing block: %s", err)
+			}
+
+			err = pdb.store.deleteFile(test.fileNum)
+			if (err != nil) != test.expectedErr {
+				t.Errorf("TestDeleteFile: %d: Expected error status: %t, but got: %t",
+					test.fileNum, test.expectedErr, (err != nil))
+			}
+			if err == nil {
+				filePath := blockFilePath(pdb.store.basePath, test.fileNum)
+				if _, err := os.Stat(filePath); !os.IsNotExist(err) {
+					t.Errorf("TestDeleteFile: %d: File %s still exists", test.fileNum, filePath)
+				}
+			}
+		}()
+	}
+}
+
+// TestHandleRollbackErrors tests all error-cases in *blockStore.handleRollback().
+// The non-error-cases are tested in the more general tests.
+// Since handleRollback just logs errors, this test simply causes all error-cases to be hit,
+// and makes sure no panic occurs, as well as ensures the writeCursor was updated correctly.
+func TestHandleRollbackErrors(t *testing.T) {
+	testBlock := util.NewBlock(wire.NewMsgBlock(
+		wire.NewBlockHeader(1, []daghash.Hash{}, &daghash.Hash{}, 0, 0)))
+
+	testBlockSize := uint32(testBlock.MsgBlock().SerializeSize())
+	tests := []struct {
+		name        string
+		fileNum     uint32
+		offset      uint32
+		target      interface{}
+		replacement interface{}
+	}{
+		// offset should be size of block + 12 bytes for block network, size and checksum
+		{"Nothing to rollback", 1, testBlockSize + 12, nil, nil},
+		{"deleteFile fails", 0, 0, (*blockStore).deleteFile,
+			func(*blockStore, uint32) error { return errors.New("error in blockstore.deleteFile") }},
+		{"openWriteFile fails", 0, 0, (*blockStore).openWriteFile,
+			func(*blockStore, uint32) (filer, error) { return nil, errors.New("error in blockstore.openWriteFile") }},
+		{"file.Truncate fails", 0, 0, (*os.File).Truncate,
+			func(*os.File, int64) error { return errors.New("error in file.Truncate") }},
+		{"file.Sync fails", 0, 0, (*os.File).Sync,
+			func(*os.File) error { return errors.New("error in file.Sync") }},
+	}
+
+	for _, test := range tests {
+		func() {
+			pdb := newTestDb("TestHandleRollbackErrors", t)
+			defer pdb.Close()
+
+			// Set maxBlockFileSize to testBlockSize so that writeCursor.curFileNum increments
+			pdb.store.maxBlockFileSize = testBlockSize
+
+			err := pdb.Update(func(dbTx database.Tx) error {
+				return dbTx.StoreBlock(testBlock)
+			})
+			if err != nil {
+				t.Fatalf("TestHandleRollbackErrors: %s: Error adding test block to database: %s", test.name, err)
+			}
+
+			if test.target != nil && test.replacement != nil {
+				patch := monkey.Patch(test.target, test.replacement)
+				defer patch.Unpatch()
+			}
+
+			pdb.store.handleRollback(test.fileNum, test.offset)
+
+			if pdb.store.writeCursor.curFileNum != test.fileNum {
+				t.Errorf("TestHandleRollbackErrors: %s: Expected fileNum: %d, but got: %d",
+					test.name, test.fileNum, pdb.store.writeCursor.curFileNum)
+			}
+
+			if pdb.store.writeCursor.curOffset != test.offset {
+				t.Errorf("TestHandleRollbackErrors: %s: offset fileNum: %d, but got: %d",
+					test.name, test.offset, pdb.store.writeCursor.curOffset)
+			}
+		}()
+	}
+}
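These tests lean on bou.ke/monkey to swap methods such as (*os.File).Sync for failing stubs. Below is a standalone sketch of that patching pattern, independent of ffldb; note that monkey patching generally requires inlining to be disabled (e.g. go test -gcflags=-l), and the package/test names here are hypothetical:

package patchsketch

import (
	"errors"
	"os"
	"testing"

	"bou.ke/monkey"
)

func TestPatchedSyncFails(t *testing.T) {
	// Replace (*os.File).Sync for the duration of this test.
	patch := monkey.Patch((*os.File).Sync, func(*os.File) error {
		return errors.New("forced Sync failure")
	})
	defer patch.Unpatch()

	f, err := os.CreateTemp("", "patch-sketch")
	if err != nil {
		t.Fatal(err)
	}
	defer os.Remove(f.Name())

	// Any call to Sync anywhere in the process now returns the stub error.
	if err := f.Sync(); err == nil {
		t.Error("expected the patched Sync to fail")
	}
}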
database/ffldb/common_test.go (new file, 43 lines)
@@ -0,0 +1,43 @@
+package ffldb
+
+import (
+	"os"
+	"path"
+	"path/filepath"
+	"testing"
+
+	"github.com/btcsuite/goleveldb/leveldb"
+	"github.com/btcsuite/goleveldb/leveldb/filter"
+	"github.com/btcsuite/goleveldb/leveldb/opt"
+	"github.com/daglabs/btcd/wire"
+)
+
+func newTestDb(testName string, t *testing.T) *db {
+	dbPath := path.Join(os.TempDir(), "db_test")
+	err := os.RemoveAll(dbPath)
+	if err != nil && !os.IsNotExist(err) {
+		t.Fatalf("%s: Error deleting database folder before starting: %s", testName, err)
+	}
+
+	network := wire.SimNet
+
+	opts := opt.Options{
+		ErrorIfExist: true,
+		Strict:       opt.DefaultStrict,
+		Compression:  opt.NoCompression,
+		Filter:       filter.NewBloomFilter(10),
+	}
+	metadataDbPath := filepath.Join(dbPath, metadataDbName)
+	ldb, err := leveldb.OpenFile(metadataDbPath, &opts)
+	if err != nil {
+		t.Errorf("%s: Error opening metadataDbPath: %s", testName, err)
+	}
+	err = initDB(ldb)
+	if err != nil {
+		t.Errorf("%s: Error initializing metadata Db: %s", testName, err)
+	}
+
+	store := newBlockStore(dbPath, network)
+	cache := newDbCache(ldb, store, defaultCacheSize, defaultFlushSecs)
+	return &db{store: store, cache: cache}
+}
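Each test in this commit follows the same pattern around the helper above: build a fresh database per test and close it with defer so per-case closures clean up even on failure. A sketch with a hypothetical test name:

func TestSketch(t *testing.T) {
	pdb := newTestDb("TestSketch", t)
	defer pdb.Close()

	err := pdb.Update(func(dbTx database.Tx) error {
		return dbTx.Metadata().Put([]byte("k"), []byte("v"))
	})
	if err != nil {
		t.Fatalf("TestSketch: %s", err)
	}
}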
@@ -203,6 +203,12 @@ func (c *cursor) Delete() error {
 		return err
 	}
 
+	// Ensure the transaction is writable.
+	if !c.bucket.tx.writable {
+		str := "delete requires a writable database transaction"
+		return makeDbErr(database.ErrTxNotWritable, str, nil)
+	}
+
 	// Error if the cursor is exhausted.
 	if c.currentIter == nil {
 		str := "cursor is exhausted"
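With the guard above, cursor.Delete fails fast inside a read-only transaction. A sketch of the behavior from the caller's side; deleteFirstViaView is a hypothetical helper:

// deleteFirstViaView returns an error carrying database.ErrTxNotWritable,
// because View transactions are read-only and the new guard rejects the
// delete before any pending-update bookkeeping runs.
func deleteFirstViaView(db database.DB) error {
	return db.View(func(dbTx database.Tx) error {
		c := dbTx.Metadata().Cursor()
		c.First()
		return c.Delete()
	})
}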
@@ -652,10 +658,7 @@ func (b *bucket) CreateBucket(key []byte) (database.Bucket, error) {
 	}
 
 	// Add the new bucket to the bucket index.
-	if err := b.tx.putKey(bidxKey, childID[:]); err != nil {
-		str := fmt.Sprintf("failed to create bucket with key %q", key)
-		return nil, convertErr(str, err)
-	}
+	b.tx.putKey(bidxKey, childID[:])
 	return &bucket{tx: b.tx, id: childID}, nil
 }
 
@@ -881,7 +884,9 @@ func (b *bucket) Put(key, value []byte) error {
 		return makeDbErr(database.ErrKeyRequired, str, nil)
 	}
 
-	return b.tx.putKey(bucketizedKey(b.id, key), value)
+	b.tx.putKey(bucketizedKey(b.id, key), value)
+
+	return nil
 }
 
 // Get returns the value for the given key. Returns nil if the key does not
@@ -931,7 +936,8 @@ func (b *bucket) Delete(key []byte) error {
 
 	// Nothing to do if there is no key.
 	if len(key) == 0 {
-		return nil
+		str := "delete requires a key"
+		return makeDbErr(database.ErrKeyRequired, str, nil)
 	}
 
 	b.tx.deleteKey(bucketizedKey(b.id, key), true)
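This flips Delete on an empty key from a silent no-op into an ErrKeyRequired failure, matching Put. A sketch of the observable change; emptyKeyDelete is a hypothetical helper:

// emptyKeyDelete illustrates the change: before this commit the inner call
// returned nil and did nothing; now the returned error satisfies
// database.IsErrorCode(err, database.ErrKeyRequired).
func emptyKeyDelete(db database.DB) error {
	return db.Update(func(dbTx database.Tx) error {
		return dbTx.Metadata().Delete(nil)
	})
}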
@@ -1044,7 +1050,7 @@ func (tx *transaction) hasKey(key []byte) bool {
 //
 // NOTE: This function must only be called on a writable transaction. Since it
 // is an internal helper function, it does not check.
-func (tx *transaction) putKey(key, value []byte) error {
+func (tx *transaction) putKey(key, value []byte) {
 	// Prevent the key from being deleted if it was previously scheduled
 	// to be deleted on transaction commit.
 	tx.pendingRemove.Delete(key)
@@ -1053,7 +1059,6 @@ func (tx *transaction) putKey(key, value []byte) {
 	// commit.
 	tx.pendingKeys.Put(key, value)
 	tx.notifyActiveIters()
-	return nil
 }
 
 // fetchKey attempts to fetch the provided key from the database cache (and
@@ -1107,9 +1112,7 @@ func (tx *transaction) nextBucketID() ([4]byte, error) {
 	// Increment and update the current bucket ID and return it.
 	var nextBucketID [4]byte
 	binary.BigEndian.PutUint32(nextBucketID[:], curBucketNum+1)
-	if err := tx.putKey(curBucketIDKeyName, nextBucketID[:]); err != nil {
-		return [4]byte{}, err
-	}
+	tx.putKey(curBucketIDKeyName, nextBucketID[:])
 	return nextBucketID, nil
 }
 
database/ffldb/db_test.go (new file, 727 lines)
@@ -0,0 +1,727 @@
|
|||||||
|
package ffldb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"bou.ke/monkey"
|
||||||
|
"github.com/daglabs/btcd/dagconfig/daghash"
|
||||||
|
"github.com/daglabs/btcd/database"
|
||||||
|
"github.com/daglabs/btcd/util"
|
||||||
|
"github.com/daglabs/btcd/wire"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestCursorDeleteErrors tests all error-cases in *cursor.Delete().
|
||||||
|
// The non-error-cases are tested in the more general tests.
|
||||||
|
func TestCursorDeleteErrors(t *testing.T) {
|
||||||
|
pdb := newTestDb("TestCursorDeleteErrors", t)
|
||||||
|
|
||||||
|
nestedBucket := []byte("nestedBucket")
|
||||||
|
key := []byte("key")
|
||||||
|
value := []byte("value")
|
||||||
|
|
||||||
|
err := pdb.Update(func(dbTx database.Tx) error {
|
||||||
|
metadata := dbTx.Metadata()
|
||||||
|
_, err := metadata.CreateBucket(nestedBucket)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
metadata.Put(key, value)
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("TestCursorDeleteErrors: Error setting up test-database: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for error when attempted to delete a bucket
|
||||||
|
err = pdb.Update(func(dbTx database.Tx) error {
|
||||||
|
cursor := dbTx.Metadata().Cursor()
|
||||||
|
found := false
|
||||||
|
for ok := cursor.First(); ok; ok = cursor.Next() {
|
||||||
|
if bytes.Equal(cursor.Key(), nestedBucket) {
|
||||||
|
found = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !found {
|
||||||
|
t.Errorf("TestCursorDeleteErrors: Key '%s' not found", string(nestedBucket))
|
||||||
|
}
|
||||||
|
|
||||||
|
err := cursor.Delete()
|
||||||
|
if !database.IsErrorCode(err, database.ErrIncompatibleValue) {
|
||||||
|
t.Errorf("TestCursorDeleteErrors: Expected error of type ErrIncompatibleValue, "+
|
||||||
|
"when deleting bucket, but got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("TestCursorDeleteErrors: Unexpected error from pdb.Update "+
|
||||||
|
"when attempting to delete bucket: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for error when transaction is not writable
|
||||||
|
err = pdb.View(func(dbTx database.Tx) error {
|
||||||
|
cursor := dbTx.Metadata().Cursor()
|
||||||
|
if !cursor.First() {
|
||||||
|
t.Fatal("TestCursorDeleteErrors: Nothing in cursor when testing for delete in " +
|
||||||
|
"non-writable transaction")
|
||||||
|
}
|
||||||
|
|
||||||
|
err := cursor.Delete()
|
||||||
|
if !database.IsErrorCode(err, database.ErrTxNotWritable) {
|
||||||
|
t.Errorf("TestCursorDeleteErrors: Expected error of type ErrTxNotWritable "+
|
||||||
|
"when calling .Delete() on non-writable transaction, but got '%v' instead", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("TestCursorDeleteErrors: Unexpected error from pdb.Update "+
|
||||||
|
"when attempting to delete on non-writable transaction: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for error when cursor was exhausted
|
||||||
|
err = pdb.Update(func(dbTx database.Tx) error {
|
||||||
|
cursor := dbTx.Metadata().Cursor()
|
||||||
|
for ok := cursor.First(); ok; ok = cursor.Next() {
|
||||||
|
}
|
||||||
|
|
||||||
|
err := cursor.Delete()
|
||||||
|
if !database.IsErrorCode(err, database.ErrIncompatibleValue) {
|
||||||
|
t.Errorf("TestCursorDeleteErrors: Expected error of type ErrIncompatibleValue "+
|
||||||
|
"when calling .Delete() on exhausted cursor, but got '%v' instead", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("TestCursorDeleteErrors: Unexpected error from pdb.Update "+
|
||||||
|
"when attempting to delete on exhausted cursor: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for error when transaction is closed
|
||||||
|
tx, err := pdb.Begin(true)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("TestCursorDeleteErrors: Error in pdb.Begin(): %s", err)
|
||||||
|
}
|
||||||
|
cursor := tx.Metadata().Cursor()
|
||||||
|
err = tx.Commit()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("TestCursorDeleteErrors: Error in tx.Commit(): %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = cursor.Delete()
|
||||||
|
if !database.IsErrorCode(err, database.ErrTxClosed) {
|
||||||
|
t.Errorf("TestCursorDeleteErrors: Expected error of type ErrTxClosed "+
|
||||||
|
"when calling .Delete() on with closed transaction, but got '%s' instead", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSkipPendingUpdates(t *testing.T) {
|
||||||
|
pdb := newTestDb("TestSkipPendingUpdates", t)
|
||||||
|
defer pdb.Close()
|
||||||
|
|
||||||
|
value := []byte("value")
|
||||||
|
// Add numbered prefixes to keys so that they are in expected order, and before any other keys
|
||||||
|
firstKey := []byte("1 - first")
|
||||||
|
toDeleteKey := []byte("2 - toDelete")
|
||||||
|
toUpdateKey := []byte("3 - toUpdate")
|
||||||
|
secondKey := []byte("4 - second")
|
||||||
|
|
||||||
|
// create initial metadata for test
|
||||||
|
err := pdb.Update(func(dbTx database.Tx) error {
|
||||||
|
metadata := dbTx.Metadata()
|
||||||
|
if err := metadata.Put(firstKey, value); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := metadata.Put(toDeleteKey, value); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := metadata.Put(toUpdateKey, value); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := metadata.Put(secondKey, value); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("TestSkipPendingUpdates: Error adding to metadata: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// test skips
|
||||||
|
err = pdb.Update(func(dbTx database.Tx) error {
|
||||||
|
metadata := dbTx.Metadata()
|
||||||
|
if err := metadata.Delete(toDeleteKey); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := metadata.Put(toUpdateKey, value); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
cursor := metadata.Cursor().(*cursor)
|
||||||
|
dbIter := cursor.dbIter
|
||||||
|
|
||||||
|
// Check that first is ok
|
||||||
|
dbIter.First()
|
||||||
|
expectedKey := bucketizedKey(metadataBucketID, firstKey)
|
||||||
|
if !bytes.Equal(dbIter.Key(), expectedKey) {
|
||||||
|
t.Errorf("TestSkipPendingUpdates: 1: key expected to be %v but is %v", expectedKey, dbIter.Key())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Go to the next key, which is toDelete
|
||||||
|
dbIter.Next()
|
||||||
|
expectedKey = bucketizedKey(metadataBucketID, toDeleteKey)
|
||||||
|
if !bytes.Equal(dbIter.Key(), expectedKey) {
|
||||||
|
t.Errorf("TestSkipPendingUpdates: 2: key expected to be %s but is %s", expectedKey, dbIter.Key())
|
||||||
|
}
|
||||||
|
|
||||||
|
// at this point toDeleteKey and toUpdateKey should be skipped
|
||||||
|
cursor.skipPendingUpdates(true)
|
||||||
|
expectedKey = bucketizedKey(metadataBucketID, secondKey)
|
||||||
|
if !bytes.Equal(dbIter.Key(), expectedKey) {
|
||||||
|
t.Errorf("TestSkipPendingUpdates: 3: key expected to be %s but is %s", expectedKey, dbIter.Key())
|
||||||
|
}
|
||||||
|
|
||||||
|
// now traverse backwards - should get toUpdate
|
||||||
|
dbIter.Prev()
|
||||||
|
expectedKey = bucketizedKey(metadataBucketID, toUpdateKey)
|
||||||
|
if !bytes.Equal(dbIter.Key(), expectedKey) {
|
||||||
|
t.Errorf("TestSkipPendingUpdates: 4: key expected to be %s but is %s", expectedKey, dbIter.Key())
|
||||||
|
}
|
||||||
|
|
||||||
|
// at this point toUpdateKey and toDeleteKey should be skipped
|
||||||
|
cursor.skipPendingUpdates(false)
|
||||||
|
expectedKey = bucketizedKey(metadataBucketID, firstKey)
|
||||||
|
if !bytes.Equal(dbIter.Key(), expectedKey) {
|
||||||
|
t.Errorf("TestSkipPendingUpdates: 5: key expected to be %s but is %s", expectedKey, dbIter.Key())
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("TestSkipPendingUpdates: Error running main part of test: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestCursor tests various edge-cases in cursor that were not hit by the more general tests
|
||||||
|
func TestCursor(t *testing.T) {
|
||||||
|
pdb := newTestDb("TestCursor", t)
|
||||||
|
defer pdb.Close()
|
||||||
|
|
||||||
|
value := []byte("value")
|
||||||
|
// Add numbered prefixes to keys so that they are in expected order, and before any other keys
|
||||||
|
firstKey := []byte("1 - first")
|
||||||
|
toDeleteKey := []byte("2 - toDelete")
|
||||||
|
toUpdateKey := []byte("3 - toUpdate")
|
||||||
|
secondKey := []byte("4 - second")
|
||||||
|
|
||||||
|
// create initial metadata for test
|
||||||
|
err := pdb.Update(func(dbTx database.Tx) error {
|
||||||
|
metadata := dbTx.Metadata()
|
||||||
|
if err := metadata.Put(firstKey, value); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := metadata.Put(toDeleteKey, value); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := metadata.Put(toUpdateKey, value); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := metadata.Put(secondKey, value); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Error adding to metadata: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// run the actual tests
|
||||||
|
err = pdb.Update(func(dbTx database.Tx) error {
|
||||||
|
metadata := dbTx.Metadata()
|
||||||
|
if err := metadata.Delete(toDeleteKey); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := metadata.Put(toUpdateKey, value); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
cursor := metadata.Cursor().(*cursor)
|
||||||
|
|
||||||
|
// Check prev when currentIter == nil
|
||||||
|
if ok := cursor.Prev(); ok {
|
||||||
|
t.Error("1: .Prev() should have returned false, but have returned true")
|
||||||
|
}
|
||||||
|
// Same thing for .Next()
|
||||||
|
for ok := cursor.First(); ok; ok = cursor.Next() {
|
||||||
|
}
|
||||||
|
if ok := cursor.Next(); ok {
|
||||||
|
t.Error("2: .Next() should have returned false, but have returned true")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check that Key(), rawKey(), Value(), and rawValue() all return nil when currentIter == nil
|
||||||
|
if key := cursor.Key(); key != nil {
|
||||||
|
t.Errorf("3: .Key() should have returned nil, but have returned '%s' instead", key)
|
||||||
|
}
|
||||||
|
if key := cursor.rawKey(); key != nil {
|
||||||
|
t.Errorf("4: .rawKey() should have returned nil, but have returned '%s' instead", key)
|
||||||
|
}
|
||||||
|
if value := cursor.Value(); value != nil {
|
||||||
|
t.Errorf("5: .Value() should have returned nil, but have returned '%s' instead", value)
|
||||||
|
}
|
||||||
|
if value := cursor.rawValue(); value != nil {
|
||||||
|
t.Errorf("6: .rawValue() should have returned nil, but have returned '%s' instead", value)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check rawValue in normal operation
|
||||||
|
cursor.First()
|
||||||
|
if rawValue := cursor.rawValue(); !bytes.Equal(rawValue, value) {
|
||||||
|
t.Errorf("7: rawValue should have returned '%s' but have returned '%s' instead", value, rawValue)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Error running the actual tests: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestCreateBucketErrors tests all error-cases in *bucket.CreateBucket().
|
||||||
|
// The non-error-cases are tested in the more general tests.
|
||||||
|
func TestCreateBucketErrors(t *testing.T) {
|
||||||
|
testKey := []byte("key")
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
key []byte
|
||||||
|
target interface{}
|
||||||
|
replacement interface{}
|
||||||
|
isWritable bool
|
||||||
|
isClosed bool
|
||||||
|
expectedErr database.ErrorCode
|
||||||
|
}{
|
||||||
|
{"empty key", []byte{}, nil, nil, true, false, database.ErrBucketNameRequired},
|
||||||
|
{"transaction is closed", testKey, nil, nil, true, true, database.ErrTxClosed},
|
||||||
|
{"transaction is not writable", testKey, nil, nil, false, false, database.ErrTxNotWritable},
|
||||||
|
{"key already exists", blockIdxBucketName, nil, nil, true, false, database.ErrBucketExists},
|
||||||
|
{"nextBucketID error", testKey, (*transaction).nextBucketID,
|
||||||
|
func(*transaction) ([4]byte, error) {
|
||||||
|
return [4]byte{}, makeDbErr(database.ErrTxClosed, "error in newBucketID", nil)
|
||||||
|
},
|
||||||
|
true, false, database.ErrTxClosed},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
func() {
|
||||||
|
pdb := newTestDb("TestCreateBucketErrors", t)
|
||||||
|
defer pdb.Close()
|
||||||
|
|
||||||
|
if test.target != nil && test.replacement != nil {
|
||||||
|
patch := monkey.Patch(test.target, test.replacement)
|
||||||
|
defer patch.Unpatch()
|
||||||
|
}
|
||||||
|
|
||||||
|
tx, err := pdb.Begin(test.isWritable)
|
||||||
|
defer tx.Commit()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("TestCreateBucketErrors: %s: error from pdb.Begin: %s", test.name, err)
|
||||||
|
}
|
||||||
|
if test.isClosed {
|
||||||
|
err = tx.Commit()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("TestCreateBucketErrors: %s: error from tx.Commit: %s", test.name, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
metadata := tx.Metadata()
|
||||||
|
|
||||||
|
_, err = metadata.CreateBucket(test.key)
|
||||||
|
|
||||||
|
if !database.IsErrorCode(err, test.expectedErr) {
|
||||||
|
t.Errorf("TestCreateBucketErrors: %s: Expected error of type %d "+
|
||||||
|
"but got '%v'", test.name, test.expectedErr, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestPutErrors tests all error-cases in *bucket.Put().
|
||||||
|
// The non-error-cases are tested in the more general tests.
|
||||||
|
func TestPutErrors(t *testing.T) {
|
||||||
|
testKey := []byte("key")
|
||||||
|
testValue := []byte("value")
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
key []byte
|
||||||
|
isWritable bool
|
||||||
|
isClosed bool
|
||||||
|
expectedErr database.ErrorCode
|
||||||
|
}{
|
||||||
|
{"empty key", []byte{}, true, false, database.ErrKeyRequired},
|
||||||
|
{"transaction is closed", testKey, true, true, database.ErrTxClosed},
|
||||||
|
{"transaction is not writable", testKey, false, false, database.ErrTxNotWritable},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
func() {
|
||||||
|
pdb := newTestDb("TestPutErrors", t)
|
||||||
|
defer pdb.Close()
|
||||||
|
|
||||||
|
tx, err := pdb.Begin(test.isWritable)
|
||||||
|
defer tx.Commit()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("TestPutErrors: %s: error from pdb.Begin: %s", test.name, err)
|
||||||
|
}
|
||||||
|
if test.isClosed {
|
||||||
|
err = tx.Commit()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("TestPutErrors: %s: error from tx.Commit: %s", test.name, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
metadata := tx.Metadata()
|
||||||
|
|
||||||
|
err = metadata.Put(test.key, testValue)
|
||||||
|
|
||||||
|
if !database.IsErrorCode(err, test.expectedErr) {
|
||||||
|
t.Errorf("TestPutErrors: %s: Expected error of type %d "+
|
||||||
|
"but got '%v'", test.name, test.expectedErr, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestGetErrors tests all error-cases in *bucket.Get().
|
||||||
|
// The non-error-cases are tested in the more general tests.
|
||||||
|
func TestGetErrors(t *testing.T) {
|
||||||
|
testKey := []byte("key")
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
key []byte
|
||||||
|
isClosed bool
|
||||||
|
}{
|
||||||
|
{"empty key", []byte{}, false},
|
||||||
|
{"transaction is closed", testKey, true},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
func() {
|
||||||
|
pdb := newTestDb("TestGetErrors", t)
|
||||||
|
defer pdb.Close()
|
||||||
|
|
||||||
|
tx, err := pdb.Begin(false)
|
||||||
|
defer tx.Rollback()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("TestGetErrors: %s: error from pdb.Begin: %s", test.name, err)
|
||||||
|
}
|
||||||
|
if test.isClosed {
|
||||||
|
err = tx.Rollback()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("TestGetErrors: %s: error from tx.Commit: %s", test.name, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
metadata := tx.Metadata()
|
||||||
|
|
||||||
|
if result := metadata.Get(test.key); result != nil {
|
||||||
|
t.Errorf("TestGetErrors: %s: Expected to return nil, but got %v", test.name, result)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestDeleteErrors tests all error-cases in *bucket.Delete().
|
||||||
|
// The non-error-cases are tested in the more general tests.
|
||||||
|
func TestDeleteErrors(t *testing.T) {
|
||||||
|
testKey := []byte("key")
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
key []byte
|
||||||
|
isWritable bool
|
||||||
|
isClosed bool
|
||||||
|
expectedErr database.ErrorCode
|
||||||
|
}{
|
||||||
|
{"empty key", []byte{}, true, false, database.ErrKeyRequired},
|
||||||
|
{"transaction is closed", testKey, true, true, database.ErrTxClosed},
|
||||||
|
{"transaction is not writable", testKey, false, false, database.ErrTxNotWritable},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
func() {
|
||||||
|
pdb := newTestDb("TestDeleteErrors", t)
|
||||||
|
defer pdb.Close()
|
||||||
|
|
||||||
|
tx, err := pdb.Begin(test.isWritable)
|
||||||
|
defer tx.Commit()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("TestDeleteErrors: %s: error from pdb.Begin: %s", test.name, err)
|
||||||
|
}
|
||||||
|
if test.isClosed {
|
||||||
|
err = tx.Commit()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("TestDeleteErrors: %s: error from tx.Commit: %s", test.name, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
metadata := tx.Metadata()
|
||||||
|
|
||||||
|
err = metadata.Delete(test.key)
|
||||||
|
|
||||||
|
if !database.IsErrorCode(err, test.expectedErr) {
|
||||||
|
t.Errorf("TestDeleteErrors: %s: Expected error of type %d "+
|
||||||
|
"but got '%v'", test.name, test.expectedErr, err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestForEachBucket(t *testing.T) {
|
||||||
|
pdb := newTestDb("TestForEachBucket", t)
|
||||||
|
|
||||||
|
// set-up test
|
||||||
|
testKey := []byte("key")
|
||||||
|
testValue := []byte("value")
|
||||||
|
bucketKeys := [][]byte{{1}, {2}, {3}}
|
||||||
|
|
||||||
|
err := pdb.Update(func(dbTx database.Tx) error {
|
||||||
|
metadata := dbTx.Metadata()
|
||||||
|
for _, bucketKey := range bucketKeys {
|
||||||
|
bucket, err := metadata.CreateBucket(bucketKey)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = bucket.Put(testKey, testValue)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("TestForEachBucket: Error setting up test-database: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// actual test
|
||||||
|
err = pdb.View(func(dbTx database.Tx) error {
|
||||||
|
i := 0
|
||||||
|
metadata := dbTx.Metadata()
|
||||||
|
|
||||||
|
err := metadata.ForEachBucket(func(bucketKey []byte) error {
|
||||||
|
if i >= len(bucketKeys) { // in case there are any other buckets in metadata
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedBucketKey := bucketKeys[i]
|
||||||
|
if !bytes.Equal(expectedBucketKey, bucketKey) {
|
||||||
|
t.Errorf("TestForEachBucket: %d: Expected bucket key: %v, but got: %v",
|
||||||
|
i, expectedBucketKey, bucketKey)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
bucket := metadata.Bucket(bucketKey)
|
||||||
|
if bucket == nil {
|
||||||
|
t.Errorf("TestForEachBucket: %d: Bucket is nil", i)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
value := bucket.Get(testKey)
|
||||||
|
if !bytes.Equal(testValue, value) {
|
||||||
|
t.Errorf("TestForEachBucket: %d: Expected value: %s, but got: %s",
|
||||||
|
i, testValue, value)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
i++
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("TestForEachBucket: Error running actual tests: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestStoreBlockErrors tests all error-cases in *tx.StoreBlock().
// The non-error-cases are tested in the more general tests.
func TestStoreBlockErrors(t *testing.T) {
	testBlock := util.NewBlock(wire.NewMsgBlock(wire.NewBlockHeader(1, []daghash.Hash{}, &daghash.Hash{}, 0, 0)))

	tests := []struct {
		name        string
		target      interface{}
		replacement interface{}
		isWritable  bool
		isClosed    bool
		expectedErr database.ErrorCode
	}{
		{"transaction is closed", nil, nil, true, true, database.ErrTxClosed},
		{"transaction is not writable", nil, nil, false, false, database.ErrTxNotWritable},
		{"block exists", (*transaction).hasBlock,
			func(*transaction, *daghash.Hash) bool { return true },
			true, false, database.ErrBlockExists},
		{"error in block.Bytes", (*util.Block).Bytes,
			func(*util.Block) ([]byte, error) { return nil, errors.New("Error in block.Bytes()") },
			true, false, database.ErrDriverSpecific},
	}

	for _, test := range tests {
		func() {
			pdb := newTestDb("TestStoreBlockErrors", t)
			defer pdb.Close()

			if test.target != nil && test.replacement != nil {
				patch := monkey.Patch(test.target, test.replacement)
				defer patch.Unpatch()
			}

			tx, err := pdb.Begin(test.isWritable)
			if err != nil {
				t.Fatalf("TestStoreBlockErrors: %s: error from pdb.Begin: %s", test.name, err)
			}
			defer tx.Commit()
			if test.isClosed {
				err = tx.Commit()
				if err != nil {
					t.Fatalf("TestStoreBlockErrors: %s: error from tx.Commit: %s", test.name, err)
				}
			}

			err = tx.StoreBlock(testBlock)
			if !database.IsErrorCode(err, test.expectedErr) {
				t.Errorf("TestStoreBlockErrors: %s: Expected error of type %d "+
					"but got '%v'", test.name, test.expectedErr, err)
			}
		}()
	}
}
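
The table above leans on bou.ke/monkey, which rewrites a function's machine code at runtime so a test can stub it out. A minimal sketch of the mechanism, with a hypothetical slowLookup function that is not part of this change; note that monkey only works reliably when the target is not inlined, so such tests are typically run with -gcflags=-l:

package main

import (
	"fmt"

	"bou.ke/monkey"
)

// slowLookup stands in for any function a test wants to replace.
func slowLookup(key string) string { return "real:" + key }

func main() {
	// Patch redirects calls to slowLookup into the replacement function.
	patch := monkey.Patch(slowLookup, func(key string) string {
		return "stub:" + key
	})
	fmt.Println(slowLookup("a")) // prints "stub:a"

	// Unpatch restores the original implementation.
	patch.Unpatch()
	fmt.Println(slowLookup("a")) // prints "real:a"
}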

// TestDeleteDoubleNestedBucket tests what happens when bucket.DeleteBucket()
// is invoked on a bucket that contains a nested bucket.
func TestDeleteDoubleNestedBucket(t *testing.T) {
	pdb := newTestDb("TestDeleteDoubleNestedBucket", t)
	defer pdb.Close()

	firstKey := []byte("first")
	secondKey := []byte("second")
	key := []byte("key")
	value := []byte("value")
	var rawKey, rawSecondKey []byte

	// Test setup
	err := pdb.Update(func(dbTx database.Tx) error {
		metadata := dbTx.Metadata()
		firstBucket, err := metadata.CreateBucket(firstKey)
		if err != nil {
			return fmt.Errorf("Error creating first bucket: %s", err)
		}
		secondBucket, err := firstBucket.CreateBucket(secondKey)
		if err != nil {
			return fmt.Errorf("Error creating second bucket: %s", err)
		}
		if err := secondBucket.Put(key, value); err != nil {
			return fmt.Errorf("Error putting value into second bucket: %s", err)
		}

		// extract rawKey from cursor and make sure it's in the raw database
		c := secondBucket.Cursor()
		for ok := c.First(); ok && !bytes.Equal(c.Key(), key); ok = c.Next() {
		}
		if !bytes.Equal(c.Key(), key) {
			return fmt.Errorf("Couldn't find key to extract rawKey")
		}
		rawKey = c.(*cursor).rawKey()
		if dbTx.(*transaction).fetchKey(rawKey) == nil {
			return fmt.Errorf("rawKey not found")
		}

		// extract rawSecondKey from cursor and make sure it's in the raw database
		c = firstBucket.Cursor()
		for ok := c.First(); ok && !bytes.Equal(c.Key(), secondKey); ok = c.Next() {
		}
		if !bytes.Equal(c.Key(), secondKey) {
			return fmt.Errorf("Couldn't find secondKey to extract rawSecondKey")
		}
		rawSecondKey = c.(*cursor).rawKey()
		if dbTx.(*transaction).fetchKey(rawSecondKey) == nil {
			return fmt.Errorf("rawSecondKey not found")
		}

		return nil
	})
	if err != nil {
		t.Fatalf("TestDeleteDoubleNestedBucket: Error in test setup pdb.Update: %s", err)
	}

	// Actual test
	err = pdb.Update(func(dbTx database.Tx) error {
		metadata := dbTx.Metadata()
		err := metadata.DeleteBucket(firstKey)
		if err != nil {
			return err
		}

		if dbTx.(*transaction).fetchKey(rawSecondKey) != nil {
			t.Error("TestDeleteDoubleNestedBucket: secondBucket was not deleted")
		}

		if dbTx.(*transaction).fetchKey(rawKey) != nil {
			t.Error("TestDeleteDoubleNestedBucket: value inside secondBucket was not deleted")
		}

		return nil
	})
	if err != nil {
		t.Fatalf("TestDeleteDoubleNestedBucket: Error in actual test pdb.Update: %s", err)
	}
}
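
The cascading behavior this test pins down - deleting a parent bucket removes nested buckets and their keys - can also be observed through the public API alone. A minimal sketch against the same database.Tx interface, with illustrative bucket names:

func deleteParentCascades(pdb database.DB) error {
	return pdb.Update(func(dbTx database.Tx) error {
		parent, err := dbTx.Metadata().CreateBucket([]byte("parent"))
		if err != nil {
			return err
		}
		child, err := parent.CreateBucket([]byte("child"))
		if err != nil {
			return err
		}
		if err := child.Put([]byte("k"), []byte("v")); err != nil {
			return err
		}

		// Deleting the parent is expected to cascade: the child bucket
		// and every key inside it become unreachable.
		if err := dbTx.Metadata().DeleteBucket([]byte("parent")); err != nil {
			return err
		}
		if dbTx.Metadata().Bucket([]byte("parent")) != nil {
			return fmt.Errorf("parent bucket still present after DeleteBucket")
		}
		return nil
	})
}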

// TestWritePendingAndCommitErrors tests some error-cases in *tx.writePendingAndCommit().
// The non-error-cases are tested in the more general tests.
func TestWritePendingAndCommitErrors(t *testing.T) {
	putPatch := monkey.Patch((*bucket).Put,
		func(_ *bucket, _, _ []byte) error { return errors.New("Error in bucket.Put") })
	defer putPatch.Unpatch()

	rollbackCalled := false
	var rollbackPatch *monkey.PatchGuard
	rollbackPatch = monkey.Patch((*blockStore).handleRollback,
		func(s *blockStore, oldBlockFileNum, oldBlockOffset uint32) {
			rollbackPatch.Unpatch()
			defer rollbackPatch.Restore()

			rollbackCalled = true
			s.handleRollback(oldBlockFileNum, oldBlockOffset)
		})
	defer rollbackPatch.Unpatch()

	pdb := newTestDb("TestWritePendingAndCommitErrors", t)
	defer pdb.Close()

	err := pdb.Update(func(dbTx database.Tx) error { return nil })
	if err == nil {
		t.Errorf("No error returned when metaBucket.Put() should have returned an error")
	}
	if !rollbackCalled {
		t.Errorf("No rollback called when metaBucket.Put() returned an error")
	}

	rollbackCalled = false
	err = pdb.Update(func(dbTx database.Tx) error {
		return dbTx.StoreBlock(util.NewBlock(wire.NewMsgBlock(
			wire.NewBlockHeader(1, []daghash.Hash{}, &daghash.Hash{}, 0, 0))))
	})
	if err == nil {
		t.Errorf("No error returned when blockIdx.Put() should have returned an error")
	}
	if !rollbackCalled {
		t.Errorf("No rollback called when blockIdx.Put() returned an error")
	}
}
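
The PatchGuard dance above - Unpatch inside the replacement, Restore on the way out - is the standard monkey idiom for spying on a function while still calling through to the original; without it, the recursive call would hit the stub again. A condensed sketch of just that pattern, using a hypothetical process function and the same inlining caveat as before:

package main

import "bou.ke/monkey"

// process is a hypothetical function we want to observe without losing
// its real behavior.
func process(n int) int { return n * 2 }

func main() {
	var guard *monkey.PatchGuard
	called := false
	guard = monkey.Patch(process, func(n int) int {
		guard.Unpatch()       // temporarily put the real process back
		defer guard.Restore() // re-arm the patch for subsequent calls

		called = true
		return process(n) // dispatches to the original, not the stub
	})
	defer guard.Unpatch()

	_ = process(21) // runs the spy, which then runs the real implementation
	_ = called      // true
}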

database/ffldb/dbcache_test.go (new file, +319)
@ -0,0 +1,319 @@
package ffldb

import (
	"bytes"
	"errors"
	"testing"

	"bou.ke/monkey"
	"github.com/btcsuite/goleveldb/leveldb"
	"github.com/btcsuite/goleveldb/leveldb/opt"
	ldbutil "github.com/btcsuite/goleveldb/leveldb/util"
	"github.com/daglabs/btcd/database"
)

// TestDBCacheCloseErrors tests all error-cases in *dbCache.Close().
// The non-error-cases are tested in the more general tests.
func TestDBCacheCloseErrors(t *testing.T) {
	cache := newTestDb("TestDBCacheCloseErrors", t).cache
	defer cache.Close()

	closeCalled := false
	closePatch := monkey.Patch((*leveldb.DB).Close, func(*leveldb.DB) error { closeCalled = true; return nil })
	defer closePatch.Unpatch()

	expectedErr := errors.New("error on flush")

	flushPatch := monkey.Patch((*dbCache).flush, func(*dbCache) error { return expectedErr })
	defer flushPatch.Unpatch()

	err := cache.Close()
	if err != expectedErr {
		t.Errorf("TestDBCacheCloseErrors: Expected error on bad flush is %s but got %s", expectedErr, err)
	}
	if !closeCalled {
		t.Errorf("TestDBCacheCloseErrors: ldb.Close was not called when error flushing")
	}
}

// TestUpdateDBErrors tests all error-cases in *dbCache.UpdateDB().
// The non-error-cases are tested in the more general tests.
func TestUpdateDBErrors(t *testing.T) {
	// Test when ldb.OpenTransaction returns an error
	func() {
		cache := newTestDb("TestUpdateDBErrors", t).cache
		defer cache.Close()

		patch := monkey.Patch((*leveldb.DB).OpenTransaction,
			func(*leveldb.DB) (*leveldb.Transaction, error) { return nil, errors.New("error in OpenTransaction") })
		defer patch.Unpatch()

		err := cache.updateDB(func(ldbTx *leveldb.Transaction) error { return nil })
		if err == nil {
			t.Errorf("No error in updateDB when ldb.OpenTransaction returns an error")
		}
	}()

	// Test when ldbTx.Commit returns an error
	func() {
		cache := newTestDb("TestUpdateDBErrors", t).cache
		defer cache.Close()

		patch := monkey.Patch((*leveldb.Transaction).Commit,
			func(*leveldb.Transaction) error { return errors.New("error in Commit") })
		defer patch.Unpatch()

		err := cache.updateDB(func(ldbTx *leveldb.Transaction) error { return nil })
		if err == nil {
			t.Errorf("No error in updateDB when ldbTx.Commit returns an error")
		}
	}()

	cache := newTestDb("TestUpdateDBErrors", t).cache
	defer cache.Close()

	// Test when the function passed to updateDB returns an error
	err := cache.updateDB(func(ldbTx *leveldb.Transaction) error { return errors.New("Error in fn") })
	if err == nil {
		t.Errorf("No error in updateDB when passed function returns an error")
	}
}
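
updateDB wraps goleveldb's transactional API; the underlying flow it exercises looks roughly like this. A sketch against a throwaway path using the github.com/btcsuite/goleveldb/leveldb package imported above, not ffldb code:

func putAtomically(dbPath string, key, value []byte) error {
	ldb, err := leveldb.OpenFile(dbPath, nil)
	if err != nil {
		return err
	}
	defer ldb.Close()

	// OpenTransaction returns an exclusive write transaction.
	ldbTx, err := ldb.OpenTransaction()
	if err != nil {
		return err
	}
	if err := ldbTx.Put(key, value, nil); err != nil {
		ldbTx.Discard() // roll back on failure
		return err
	}
	return ldbTx.Commit() // atomically apply all writes
}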

// TestCommitTxFlushNeeded tests the *dbCache.commitTx function when a flush is needed,
// including error-cases.
// The case where no flush is needed is tested in the more general tests.
func TestCommitTxFlushNeeded(t *testing.T) {
	tests := []struct {
		name          string
		target        interface{}
		replacement   interface{}
		expectedError bool
	}{
		{"No errors", nil, nil, false},
		{"Error in flush", (*dbCache).flush, func(*dbCache) error { return errors.New("error") }, true},
		{"Error in commitTreaps", (*dbCache).commitTreaps,
			func(*dbCache, TreapForEacher, TreapForEacher) error { return errors.New("error") }, true},
	}

	for _, test := range tests {
		func() {
			db := newTestDb("TestCommitTxFlushNeeded", t)
			defer db.Close()
			cache := db.cache

			cache.flushInterval = 0 // set flushInterval to 0 so that a flush is always required

			if test.target != nil && test.replacement != nil {
				patch := monkey.Patch(test.target, test.replacement)
				defer patch.Unpatch()
			}

			tx, err := db.Begin(true)
			if err != nil {
				t.Fatalf("Error beginning transaction: %s", err)
			}
			cache.commitTx(tx.(*transaction))
			db.closeLock.RUnlock()
		}()
	}
}

func TestExhaustedDbCacheIterator(t *testing.T) {
	db := newTestDb("TestExhaustedDbCacheIterator", t)
	defer db.Close()

	snapshot, err := db.cache.Snapshot()
	if err != nil {
		t.Fatalf("TestExhaustedDbCacheIterator: Error creating cache snapshot: %s", err)
	}
	iterator := snapshot.NewIterator(&ldbutil.Range{})

	if next := iterator.Next(); next != false {
		t.Errorf("TestExhaustedDbCacheIterator: Expected .Next() = false, but got %v", next)
	}

	if prev := iterator.Prev(); prev != false {
		t.Errorf("TestExhaustedDbCacheIterator: Expected .Prev() = false, but got %v", prev)
	}

	if key := iterator.Key(); key != nil {
		t.Errorf("TestExhaustedDbCacheIterator: Expected .Key() = nil, but got %v", key)
	}

	if value := iterator.Value(); value != nil {
		t.Errorf("TestExhaustedDbCacheIterator: Expected .Value() = nil, but got %v", value)
	}
}

// TestLDBIteratorImplPlaceholders hits functions that exist only to implement the
// leveldb iterator.Iterator interface, but serve no other purpose.
func TestLDBIteratorImplPlaceholders(t *testing.T) {
	db := newTestDb("TestLDBIteratorImplPlaceholders", t)
	defer db.Close()

	snapshot, err := db.cache.Snapshot()
	if err != nil {
		t.Fatalf("TestLDBIteratorImplPlaceholders: Error creating cache snapshot: %s", err)
	}
	iterator := newLdbCacheIter(snapshot, &ldbutil.Range{})

	if err = iterator.Error(); err != nil {
		t.Errorf("TestLDBIteratorImplPlaceholders: Expected .Error() = nil, but got %v", err)
	}

	// Call SetReleaser to achieve coverage of it. It actually does nothing.
	iterator.SetReleaser(nil)
}

func TestSkipPendingUpdatesCache(t *testing.T) {
	pdb := newTestDb("TestSkipPendingUpdatesCache", t)
	defer pdb.Close()

	value := []byte("value")
	// Add numbered prefixes to the keys so that they are in the expected order, and before any other keys
	firstKey := []byte("1 - first")
	toDeleteKey := []byte("2 - toDelete")
	toUpdateKey := []byte("3 - toUpdate")
	secondKey := []byte("4 - second")

	// create initial metadata for the test
	err := pdb.Update(func(dbTx database.Tx) error {
		metadata := dbTx.Metadata()
		if err := metadata.Put(firstKey, value); err != nil {
			return err
		}
		if err := metadata.Put(toDeleteKey, value); err != nil {
			return err
		}
		if err := metadata.Put(toUpdateKey, value); err != nil {
			return err
		}
		if err := metadata.Put(secondKey, value); err != nil {
			return err
		}

		return nil
	})
	if err != nil {
		t.Fatalf("Error adding to metadata: %s", err)
	}

	err = pdb.cache.flush()
	if err != nil {
		t.Fatalf("Error flushing cache: %s", err)
	}

	// test skips
	err = pdb.Update(func(dbTx database.Tx) error {
		snapshot, err := pdb.cache.Snapshot()
		if err != nil {
			t.Fatalf("TestSkipPendingUpdatesCache: Error getting snapshot: %s", err)
		}

		iterator := snapshot.NewIterator(&ldbutil.Range{})
		snapshot.pendingRemove = snapshot.pendingRemove.Put(bucketizedKey(metadataBucketID, toDeleteKey), value)
		snapshot.pendingKeys = snapshot.pendingKeys.Put(bucketizedKey(metadataBucketID, toUpdateKey), value)

		// Check that the first key is ok
		iterator.First()
		expectedKey := bucketizedKey(metadataBucketID, firstKey)
		actualKey := iterator.Key()
		if !bytes.Equal(actualKey, expectedKey) {
			t.Errorf("TestSkipPendingUpdatesCache: 1: key expected to be %s but is %s", expectedKey, actualKey)
		}

		// Go to the next key, which is secondKey; toDelete and toUpdate will be skipped
		iterator.Next()
		expectedKey = bucketizedKey(metadataBucketID, secondKey)
		actualKey = iterator.Key()
		if !bytes.Equal(actualKey, expectedKey) {
			t.Errorf("TestSkipPendingUpdatesCache: 2: key expected to be %s but is %s", expectedKey, actualKey)
		}

		// now traverse backwards - we should get firstKey; toUpdate and toDelete will be skipped
		iterator.Prev()
		expectedKey = bucketizedKey(metadataBucketID, firstKey)
		actualKey = iterator.Key()
		if !bytes.Equal(actualKey, expectedKey) {
			t.Errorf("TestSkipPendingUpdatesCache: 3: key expected to be %s but is %s", expectedKey, actualKey)
		}

		return nil
	})
	if err != nil {
		t.Fatalf("TestSkipPendingUpdatesCache: Error running main part of test: %s", err)
	}
}
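
Conceptually, the snapshot iterator merges the flushed leveldb contents with the two in-memory treaps, hiding any key that has a pending update or removal. A stripped-down model of that skip rule, using maps instead of treaps and strings instead of bucketized keys, purely for illustration:

// visibleKeys models the skip behavior TestSkipPendingUpdatesCache checks:
// keys with pending state are skipped on the database side, because the
// in-memory side of the merged iterator is responsible for surfacing
// (or dropping) them.
func visibleKeys(dbKeys []string, pendingKeys, pendingRemove map[string]bool) []string {
	var visible []string
	for _, key := range dbKeys {
		if pendingKeys[key] || pendingRemove[key] {
			continue
		}
		visible = append(visible, key)
	}
	return visible
}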

// TestFlushCommitTreapsErrors tests error-cases in *dbCache.flush() when commitTreaps returns an error.
// The non-error-cases are tested in the more general tests.
func TestFlushCommitTreapsErrors(t *testing.T) {
	pdb := newTestDb("TestFlushCommitTreapsErrors", t)
	defer pdb.Close()

	key := []byte("key")
	value := []byte("value")

	// Before setting the flush interval to zero - put some data so that there's something to flush
	err := pdb.Update(func(dbTx database.Tx) error {
		metadata := dbTx.Metadata()
		return metadata.Put(key, value)
	})
	if err != nil {
		t.Fatalf("TestFlushCommitTreapsErrors: Error putting some data to flush: %s", err)
	}

	cache := pdb.cache
	cache.flushInterval = 0 // set flushInterval to 0 so that a flush is always required

	// Test for correctness when Put encounters an error
	func() {
		patch := monkey.Patch((*leveldb.Transaction).Put,
			func(*leveldb.Transaction, []byte, []byte, *opt.WriteOptions) error { return errors.New("error") })
		defer patch.Unpatch()

		err := pdb.Update(func(dbTx database.Tx) error {
			metadata := dbTx.Metadata()
			return metadata.Put(key, value)
		})

		if err == nil {
			t.Errorf("TestFlushCommitTreapsErrors: No error from pdb.Update when ldbTx.Put returned an error")
		}
	}()

	// Test for correctness when Delete encounters an error

	// First put some data we can later "fail" to delete
	err = pdb.Update(func(dbTx database.Tx) error {
		metadata := dbTx.Metadata()
		return metadata.Put(key, value)
	})
	if err != nil {
		t.Fatalf("TestFlushCommitTreapsErrors: Error putting some data to delete: %s", err)
	}

	// Now "fail" to delete it
	func() {
		patch := monkey.Patch((*leveldb.Transaction).Delete,
			func(*leveldb.Transaction, []byte, *opt.WriteOptions) error { return errors.New("error") })
		defer patch.Unpatch()

		err := pdb.Update(func(dbTx database.Tx) error {
			metadata := dbTx.Metadata()
			return metadata.Delete(key)
		})

		if err == nil {
			t.Errorf("TestFlushCommitTreapsErrors: No error from pdb.Update when ldbTx.Delete returned an error")
		}
	}()
}

@ -59,16 +59,3 @@ func createDBDriver(args ...interface{}) (database.DB, error) {
 	return openDB(dbPath, network, true)
 }
-
-func init() {
-	// Register the driver.
-	driver := database.Driver{
-		DbType: dbType,
-		Create: createDBDriver,
-		Open:   openDBDriver,
-	}
-	if err := database.RegisterDriver(driver); err != nil {
-		panic(fmt.Sprintf("Failed to regiser database driver '%s': %v",
-			dbType, err))
-	}
-}
@ -113,7 +113,7 @@ func TestCreateOpenFail(t *testing.T) {
 	db.Close()

 	wantErrCode = database.ErrDbNotOpen
-	err = db.View(func(tx database.Tx) error {
+	err = db.View(func(dbTx database.Tx) error {
 		return nil
 	})
 	if !checkDbError(t, "View", err, wantErrCode) {
@ -121,7 +121,7 @@ func TestCreateOpenFail(t *testing.T) {
 	}

 	wantErrCode = database.ErrDbNotOpen
-	err = db.Update(func(tx database.Tx) error {
+	err = db.Update(func(dbTx database.Tx) error {
 		return nil
 	})
 	if !checkDbError(t, "Update", err, wantErrCode) {
@ -173,8 +173,8 @@ func TestPersistence(t *testing.T) {
 	}
 	genesisBlock := util.NewBlock(dagconfig.MainNetParams.GenesisBlock)
 	genesisHash := dagconfig.MainNetParams.GenesisHash
-	err = db.Update(func(tx database.Tx) error {
-		metadataBucket := tx.Metadata()
+	err = db.Update(func(dbTx database.Tx) error {
+		metadataBucket := dbTx.Metadata()
 		if metadataBucket == nil {
 			return fmt.Errorf("Metadata: unexpected nil bucket")
 		}
@ -193,7 +193,7 @@ func TestPersistence(t *testing.T) {
 			}
 		}

-		if err := tx.StoreBlock(genesisBlock); err != nil {
+		if err := dbTx.StoreBlock(genesisBlock); err != nil {
 			return fmt.Errorf("StoreBlock: unexpected error: %v",
 				err)
 		}
@ -216,8 +216,8 @@ func TestPersistence(t *testing.T) {

 	// Ensure the values previously stored in the 3rd namespace still exist
 	// and are correct.
-	err = db.View(func(tx database.Tx) error {
-		metadataBucket := tx.Metadata()
+	err = db.View(func(dbTx database.Tx) error {
+		metadataBucket := dbTx.Metadata()
 		if metadataBucket == nil {
 			return fmt.Errorf("Metadata: unexpected nil bucket")
 		}
@ -237,7 +237,7 @@ func TestPersistence(t *testing.T) {
 		}

 		genesisBlockBytes, _ := genesisBlock.Bytes()
-		gotBytes, err := tx.FetchBlock(genesisHash)
+		gotBytes, err := dbTx.FetchBlock(genesisHash)
 		if err != nil {
 			return fmt.Errorf("FetchBlock: unexpected error: %v",
 				err)
@ -282,7 +282,9 @@ func TestInterface(t *testing.T) {

 	// Change the maximum file size to a small value to force multiple flat
 	// files with the test data set.
-	ffldb.TstRunWithMaxBlockFileSize(db, 2048, func() {
+	// Change maximum open files to small value to force shifts in the LRU
+	// mechanism
+	ffldb.TstRunWithMaxBlockFileSizeAndMaxOpenFiles(db, 2048, 10, func() {
 		testInterface(t, db)
 	})
 }

@ -16,11 +16,14 @@ import "github.com/daglabs/btcd/database"
-// TstRunWithMaxBlockFileSize runs the passed function with the maximum allowed
-// file size for the database set to the provided value. The value will be set
-// back to the original value upon completion.
-func TstRunWithMaxBlockFileSize(idb database.DB, size uint32, fn func()) {
+// TstRunWithMaxBlockFileSizeAndMaxOpenFiles runs the passed function with the
+// maximum allowed file size and maximum open files for the database set to
+// the provided values. The values will be set back to the original values
+// upon completion.
+func TstRunWithMaxBlockFileSizeAndMaxOpenFiles(idb database.DB, size uint32, maxOpenFiles int, fn func()) {
 	ffldb := idb.(*db)
 	origSize := ffldb.store.maxBlockFileSize
+	origMaxOpenFiles := ffldb.store.maxOpenFiles

 	ffldb.store.maxBlockFileSize = size
+	ffldb.store.maxOpenFiles = maxOpenFiles
 	fn()
 	ffldb.store.maxBlockFileSize = origSize
+	ffldb.store.maxOpenFiles = origMaxOpenFiles
 }

database/ffldb/init.go (new file, +23)
@ -0,0 +1,23 @@
package ffldb

import (
	"fmt"

	"github.com/daglabs/btcd/database"
)

func registerDriver() {
	driver := database.Driver{
		DbType: dbType,
		Create: createDBDriver,
		Open:   openDBDriver,
	}
	if err := database.RegisterDriver(driver); err != nil {
		panic(fmt.Sprintf("Failed to register database driver '%s': %v",
			dbType, err))
	}
}

func init() {
	registerDriver()
}
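
With init in place, importing the package is enough to make the driver available; callers then go through the database front end rather than this package directly. A minimal sketch, assuming the btcd-style database.Create/database.Open signatures and that the two driver arguments are, as createDBDriver expects, a path and a network constant (wire.SimNet here is assumed from the test changes in this commit):

import (
	_ "github.com/daglabs/btcd/database/ffldb" // registers the "ffldb" driver

	"github.com/daglabs/btcd/database"
	"github.com/daglabs/btcd/wire"
)

func openTestDb(path string) (database.DB, error) {
	db, err := database.Create("ffldb", path, wire.SimNet)
	if err != nil {
		// Fall back to Open if the database already exists at path.
		return database.Open("ffldb", path, wire.SimNet)
	}
	return db, nil
}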

database/ffldb/init_test.go (new file, +26)
@ -0,0 +1,26 @@
package ffldb

import (
	"errors"
	"testing"

	"bou.ke/monkey"
	"github.com/daglabs/btcd/database"
)

// TestInitErrors tests all error-cases in registerDriver().
// The non-error-cases are tested in the more general tests.
func TestInitErrors(t *testing.T) {
	patch := monkey.Patch(database.RegisterDriver,
		func(driver database.Driver) error { return errors.New("Error in database.RegisterDriver") })
	defer patch.Unpatch()

	defer func() {
		err := recover()
		if err == nil {
			t.Errorf("TestInitErrors: No panic on init when database.RegisterDriver returned an error")
		}
	}()

	registerDriver()
}

@ -626,10 +626,10 @@ func testBucketInterface(tc *testContext, bucket database.Bucket) bool {
 // would leave any manually created transactions with the database mutex locked
 // thereby leading to a deadlock and masking the real reason for the panic. It
 // also logs a test error and repanics so the original panic can be traced.
-func rollbackOnPanic(t *testing.T, tx database.Tx) {
+func rollbackOnPanic(t *testing.T, dbTx database.Tx) {
 	if err := recover(); err != nil {
 		t.Errorf("Unexpected panic: %v", err)
-		_ = tx.Rollback()
+		_ = dbTx.Rollback()
 		panic(err)
 	}
 }
@ -851,8 +851,8 @@ func testManagedTxPanics(tc *testContext) bool {

 	// Ensure calling Commit on a managed read-only transaction panics.
 	paniced := testPanic(func() {
-		tc.db.View(func(tx database.Tx) error {
-			tx.Commit()
+		tc.db.View(func(dbTx database.Tx) error {
+			dbTx.Commit()
 			return nil
 		})
 	})
@ -863,8 +863,8 @@ func testManagedTxPanics(tc *testContext) bool {

 	// Ensure calling Rollback on a managed read-only transaction panics.
 	paniced = testPanic(func() {
-		tc.db.View(func(tx database.Tx) error {
-			tx.Rollback()
+		tc.db.View(func(dbTx database.Tx) error {
+			dbTx.Rollback()
 			return nil
 		})
 	})
@ -875,8 +875,8 @@ func testManagedTxPanics(tc *testContext) bool {

 	// Ensure calling Commit on a managed read-write transaction panics.
 	paniced = testPanic(func() {
-		tc.db.Update(func(tx database.Tx) error {
-			tx.Commit()
+		tc.db.Update(func(dbTx database.Tx) error {
+			dbTx.Commit()
 			return nil
 		})
 	})
@ -887,8 +887,8 @@ func testManagedTxPanics(tc *testContext) bool {

 	// Ensure calling Rollback on a managed read-write transaction panics.
 	paniced = testPanic(func() {
-		tc.db.Update(func(tx database.Tx) error {
-			tx.Rollback()
+		tc.db.Update(func(dbTx database.Tx) error {
+			dbTx.Rollback()
 			return nil
 		})
 	})
@ -909,8 +909,8 @@ func testMetadataTxInterface(tc *testContext) bool {
 	}

 	bucket1Name := []byte("bucket1")
-	err := tc.db.Update(func(tx database.Tx) error {
-		_, err := tx.Metadata().CreateBucket(bucket1Name)
+	err := tc.db.Update(func(dbTx database.Tx) error {
+		_, err := dbTx.Metadata().CreateBucket(bucket1Name)
 		return err
 	})
 	if err != nil {
@ -932,8 +932,8 @@ func testMetadataTxInterface(tc *testContext) bool {
 	}

 	// Test the bucket interface via a managed read-only transaction.
-	err = tc.db.View(func(tx database.Tx) error {
-		metadataBucket := tx.Metadata()
+	err = tc.db.View(func(dbTx database.Tx) error {
+		metadataBucket := dbTx.Metadata()
 		if metadataBucket == nil {
 			return fmt.Errorf("Metadata: unexpected nil bucket")
 		}
@ -960,7 +960,7 @@ func testMetadataTxInterface(tc *testContext) bool {
 	// Ensure errors returned from the user-supplied View function are
 	// returned.
 	viewError := fmt.Errorf("example view error")
-	err = tc.db.View(func(tx database.Tx) error {
+	err = tc.db.View(func(dbTx database.Tx) error {
 		return viewError
 	})
 	if err != viewError {
@ -973,8 +973,8 @@ func testMetadataTxInterface(tc *testContext) bool {
 	// Also, put a series of values and force a rollback so the following
 	// code can ensure the values were not stored.
 	forceRollbackError := fmt.Errorf("force rollback")
-	err = tc.db.Update(func(tx database.Tx) error {
-		metadataBucket := tx.Metadata()
+	err = tc.db.Update(func(dbTx database.Tx) error {
+		metadataBucket := dbTx.Metadata()
 		if metadataBucket == nil {
 			return fmt.Errorf("Metadata: unexpected nil bucket")
 		}
@ -1008,8 +1008,8 @@ func testMetadataTxInterface(tc *testContext) bool {

 	// Ensure the values that should not have been stored due to the forced
 	// rollback above were not actually stored.
-	err = tc.db.View(func(tx database.Tx) error {
-		metadataBucket := tx.Metadata()
+	err = tc.db.View(func(dbTx database.Tx) error {
+		metadataBucket := dbTx.Metadata()
 		if metadataBucket == nil {
 			return fmt.Errorf("Metadata: unexpected nil bucket")
 		}
@ -1028,8 +1028,8 @@ func testMetadataTxInterface(tc *testContext) bool {
 	}

 	// Store a series of values via a managed read-write transaction.
-	err = tc.db.Update(func(tx database.Tx) error {
-		metadataBucket := tx.Metadata()
+	err = tc.db.Update(func(dbTx database.Tx) error {
+		metadataBucket := dbTx.Metadata()
 		if metadataBucket == nil {
 			return fmt.Errorf("Metadata: unexpected nil bucket")
 		}
@ -1053,8 +1053,8 @@ func testMetadataTxInterface(tc *testContext) bool {
 	}

 	// Ensure the values stored above were committed as expected.
-	err = tc.db.View(func(tx database.Tx) error {
-		metadataBucket := tx.Metadata()
+	err = tc.db.View(func(dbTx database.Tx) error {
+		metadataBucket := dbTx.Metadata()
 		if metadataBucket == nil {
 			return fmt.Errorf("Metadata: unexpected nil bucket")
 		}
@ -1078,8 +1078,8 @@ func testMetadataTxInterface(tc *testContext) bool {
 	}

 	// Clean up the values stored above in a managed read-write transaction.
-	err = tc.db.Update(func(tx database.Tx) error {
-		metadataBucket := tx.Metadata()
+	err = tc.db.Update(func(dbTx database.Tx) error {
+		metadataBucket := dbTx.Metadata()
 		if metadataBucket == nil {
 			return fmt.Errorf("Metadata: unexpected nil bucket")
 		}
|
|||||||
|
|
||||||
// testFetchBlockIOMissing ensures that all of the block retrieval API functions
|
// testFetchBlockIOMissing ensures that all of the block retrieval API functions
|
||||||
// work as expected when requesting blocks that don't exist.
|
// work as expected when requesting blocks that don't exist.
|
||||||
func testFetchBlockIOMissing(tc *testContext, tx database.Tx) bool {
|
func testFetchBlockIOMissing(tc *testContext, dbTx database.Tx) bool {
|
||||||
wantErrCode := database.ErrBlockNotFound
|
wantErrCode := database.ErrBlockNotFound
|
||||||
|
|
||||||
// ---------------------
|
// ---------------------
|
||||||
@ -1132,7 +1132,7 @@ func testFetchBlockIOMissing(tc *testContext, tx database.Tx) bool {
|
|||||||
|
|
||||||
// Ensure FetchBlock returns expected error.
|
// Ensure FetchBlock returns expected error.
|
||||||
testName := fmt.Sprintf("FetchBlock #%d on missing block", i)
|
testName := fmt.Sprintf("FetchBlock #%d on missing block", i)
|
||||||
_, err = tx.FetchBlock(blockHash)
|
_, err = dbTx.FetchBlock(blockHash)
|
||||||
if !checkDbError(tc.t, testName, err, wantErrCode) {
|
if !checkDbError(tc.t, testName, err, wantErrCode) {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
@ -1140,7 +1140,7 @@ func testFetchBlockIOMissing(tc *testContext, tx database.Tx) bool {
|
|||||||
// Ensure FetchBlockHeader returns expected error.
|
// Ensure FetchBlockHeader returns expected error.
|
||||||
testName = fmt.Sprintf("FetchBlockHeader #%d on missing block",
|
testName = fmt.Sprintf("FetchBlockHeader #%d on missing block",
|
||||||
i)
|
i)
|
||||||
_, err = tx.FetchBlockHeader(blockHash)
|
_, err = dbTx.FetchBlockHeader(blockHash)
|
||||||
if !checkDbError(tc.t, testName, err, wantErrCode) {
|
if !checkDbError(tc.t, testName, err, wantErrCode) {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
@ -1153,13 +1153,13 @@ func testFetchBlockIOMissing(tc *testContext, tx database.Tx) bool {
|
|||||||
Len: uint32(txLocs[0].TxLen),
|
Len: uint32(txLocs[0].TxLen),
|
||||||
}
|
}
|
||||||
allBlockRegions[i] = region
|
allBlockRegions[i] = region
|
||||||
_, err = tx.FetchBlockRegion(®ion)
|
_, err = dbTx.FetchBlockRegion(®ion)
|
||||||
if !checkDbError(tc.t, testName, err, wantErrCode) {
|
if !checkDbError(tc.t, testName, err, wantErrCode) {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ensure HasBlock returns false.
|
// Ensure HasBlock returns false.
|
||||||
hasBlock, err := tx.HasBlock(blockHash)
|
hasBlock, err := dbTx.HasBlock(blockHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
tc.t.Errorf("HasBlock #%d: unexpected err: %v", i, err)
|
tc.t.Errorf("HasBlock #%d: unexpected err: %v", i, err)
|
||||||
return false
|
return false
|
||||||
@ -1176,27 +1176,27 @@ func testFetchBlockIOMissing(tc *testContext, tx database.Tx) bool {
|
|||||||
|
|
||||||
// Ensure FetchBlocks returns expected error.
|
// Ensure FetchBlocks returns expected error.
|
||||||
testName := "FetchBlocks on missing blocks"
|
testName := "FetchBlocks on missing blocks"
|
||||||
_, err := tx.FetchBlocks(allBlockHashes)
|
_, err := dbTx.FetchBlocks(allBlockHashes)
|
||||||
if !checkDbError(tc.t, testName, err, wantErrCode) {
|
if !checkDbError(tc.t, testName, err, wantErrCode) {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ensure FetchBlockHeaders returns expected error.
|
// Ensure FetchBlockHeaders returns expected error.
|
||||||
testName = "FetchBlockHeaders on missing blocks"
|
testName = "FetchBlockHeaders on missing blocks"
|
||||||
_, err = tx.FetchBlockHeaders(allBlockHashes)
|
_, err = dbTx.FetchBlockHeaders(allBlockHashes)
|
||||||
if !checkDbError(tc.t, testName, err, wantErrCode) {
|
if !checkDbError(tc.t, testName, err, wantErrCode) {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ensure FetchBlockRegions returns expected error.
|
// Ensure FetchBlockRegions returns expected error.
|
||||||
testName = "FetchBlockRegions on missing blocks"
|
testName = "FetchBlockRegions on missing blocks"
|
||||||
_, err = tx.FetchBlockRegions(allBlockRegions)
|
_, err = dbTx.FetchBlockRegions(allBlockRegions)
|
||||||
if !checkDbError(tc.t, testName, err, wantErrCode) {
|
if !checkDbError(tc.t, testName, err, wantErrCode) {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ensure HasBlocks returns false for all blocks.
|
// Ensure HasBlocks returns false for all blocks.
|
||||||
hasBlocks, err := tx.HasBlocks(allBlockHashes)
|
hasBlocks, err := dbTx.HasBlocks(allBlockHashes)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
tc.t.Errorf("HasBlocks: unexpected err: %v", err)
|
tc.t.Errorf("HasBlocks: unexpected err: %v", err)
|
||||||
}
|
}
|
||||||
@ -1215,7 +1215,7 @@ func testFetchBlockIOMissing(tc *testContext, tx database.Tx) bool {
 // the database, or at least stored into the passed transaction. It also
 // tests several error conditions such as ensuring the expected errors are
 // returned when fetching blocks, headers, and regions that don't exist.
-func testFetchBlockIO(tc *testContext, tx database.Tx) bool {
+func testFetchBlockIO(tc *testContext, dbTx database.Tx) bool {
 	// ---------------------
 	// Non-bulk Block IO API
 	// ---------------------
@ -1251,7 +1251,7 @@ func testFetchBlockIO(tc *testContext, tx database.Tx) bool {

 		// Ensure the block data fetched from the database matches the
 		// expected bytes.
-		gotBlockBytes, err := tx.FetchBlock(blockHash)
+		gotBlockBytes, err := dbTx.FetchBlock(blockHash)
 		if err != nil {
 			tc.t.Errorf("FetchBlock(%s): unexpected error: %v",
 				blockHash, err)
@ -1264,7 +1264,7 @@ func testFetchBlockIO(tc *testContext, tx database.Tx) bool {
 		}

 		wantHeaderBytes := blockBytes[0:allBlockHeaderSizes[i]]
-		gotHeaderBytes, err := tx.FetchBlockHeader(blockHash)
+		gotHeaderBytes, err := dbTx.FetchBlockHeader(blockHash)
 		if err != nil {
 			tc.t.Errorf("FetchBlockHeader(%s): unexpected error: %v",
 				blockHash, err)
@ -1287,7 +1287,7 @@ func testFetchBlockIO(tc *testContext, tx database.Tx) bool {
 		allBlockRegions[i] = region
 		endRegionOffset := region.Offset + region.Len
 		wantRegionBytes := blockBytes[region.Offset:endRegionOffset]
-		gotRegionBytes, err := tx.FetchBlockRegion(&region)
+		gotRegionBytes, err := dbTx.FetchBlockRegion(&region)
 		if err != nil {
 			tc.t.Errorf("FetchBlockRegion(%s): unexpected error: %v",
 				blockHash, err)
@ -1302,7 +1302,7 @@ func testFetchBlockIO(tc *testContext, tx database.Tx) bool {

 		// Ensure the block header fetched from the database matches the
 		// expected bytes.
-		hasBlock, err := tx.HasBlock(blockHash)
+		hasBlock, err := dbTx.HasBlock(blockHash)
 		if err != nil {
 			tc.t.Errorf("HasBlock(%s): unexpected error: %v",
 				blockHash, err)
@ -1324,7 +1324,7 @@ func testFetchBlockIO(tc *testContext, tx database.Tx) bool {
 		testName := fmt.Sprintf("FetchBlock(%s) invalid block",
 			badBlockHash)
 		wantErrCode := database.ErrBlockNotFound
-		_, err = tx.FetchBlock(badBlockHash)
+		_, err = dbTx.FetchBlock(badBlockHash)
 		if !checkDbError(tc.t, testName, err, wantErrCode) {
 			return false
 		}
@ -1333,7 +1333,7 @@ func testFetchBlockIO(tc *testContext, tx database.Tx) bool {
 		// the expected error.
 		testName = fmt.Sprintf("FetchBlockHeader(%s) invalid block",
 			badBlockHash)
-		_, err = tx.FetchBlockHeader(badBlockHash)
+		_, err = dbTx.FetchBlockHeader(badBlockHash)
 		if !checkDbError(tc.t, testName, err, wantErrCode) {
 			return false
 		}
@ -1345,7 +1345,7 @@ func testFetchBlockIO(tc *testContext, tx database.Tx) bool {
 		wantErrCode = database.ErrBlockNotFound
 		region.Hash = badBlockHash
 		region.Offset = ^uint32(0)
-		_, err = tx.FetchBlockRegion(&region)
+		_, err = dbTx.FetchBlockRegion(&region)
 		if !checkDbError(tc.t, testName, err, wantErrCode) {
 			return false
 		}
@ -1357,7 +1357,7 @@ func testFetchBlockIO(tc *testContext, tx database.Tx) bool {
 		wantErrCode = database.ErrBlockRegionInvalid
 		region.Hash = blockHash
 		region.Offset = ^uint32(0)
-		_, err = tx.FetchBlockRegion(&region)
+		_, err = dbTx.FetchBlockRegion(&region)
 		if !checkDbError(tc.t, testName, err, wantErrCode) {
 			return false
 		}
@ -1369,7 +1369,7 @@ func testFetchBlockIO(tc *testContext, tx database.Tx) bool {

 	// Ensure the bulk block data fetched from the database matches the
 	// expected bytes.
-	blockData, err := tx.FetchBlocks(allBlockHashes)
+	blockData, err := dbTx.FetchBlocks(allBlockHashes)
 	if err != nil {
 		tc.t.Errorf("FetchBlocks: unexpected error: %v", err)
 		return false
@ -1393,7 +1393,7 @@ func testFetchBlockIO(tc *testContext, tx database.Tx) bool {

 	// Ensure the bulk block headers fetched from the database match the
 	// expected bytes.
-	blockHeaderData, err := tx.FetchBlockHeaders(allBlockHashes)
+	blockHeaderData, err := dbTx.FetchBlockHeaders(allBlockHashes)
 	if err != nil {
 		tc.t.Errorf("FetchBlockHeaders: unexpected error: %v", err)
 		return false
@ -1418,7 +1418,7 @@ func testFetchBlockIO(tc *testContext, tx database.Tx) bool {

 	// Ensure the first transaction of every block fetched in bulk block
 	// regions from the database matches the expected bytes.
-	allRegionBytes, err := tx.FetchBlockRegions(allBlockRegions)
+	allRegionBytes, err := dbTx.FetchBlockRegions(allBlockRegions)
 	if err != nil {
 		tc.t.Errorf("FetchBlockRegions: unexpected error: %v", err)
 		return false
@ -1444,7 +1444,7 @@ func testFetchBlockIO(tc *testContext, tx database.Tx) bool {

 	// Ensure the bulk determination of whether a set of block hashes are in
 	// the database returns true for all loaded blocks.
-	hasBlocks, err := tx.HasBlocks(allBlockHashes)
+	hasBlocks, err := dbTx.HasBlocks(allBlockHashes)
 	if err != nil {
 		tc.t.Errorf("HasBlocks: unexpected error: %v", err)
 		return false
@ -1467,7 +1467,7 @@ func testFetchBlockIO(tc *testContext, tx database.Tx) bool {
 	copy(badBlockHashes, allBlockHashes)
 	badBlockHashes[len(badBlockHashes)-1] = daghash.Hash{}
 	wantErrCode := database.ErrBlockNotFound
-	_, err = tx.FetchBlocks(badBlockHashes)
+	_, err = dbTx.FetchBlocks(badBlockHashes)
 	if !checkDbError(tc.t, testName, err, wantErrCode) {
 		return false
 	}
@ -1475,7 +1475,7 @@ func testFetchBlockIO(tc *testContext, tx database.Tx) bool {
 	// Ensure fetching block headers for which one doesn't exist returns the
 	// expected error.
 	testName = "FetchBlockHeaders invalid hash"
-	_, err = tx.FetchBlockHeaders(badBlockHashes)
+	_, err = dbTx.FetchBlockHeaders(badBlockHashes)
 	if !checkDbError(tc.t, testName, err, wantErrCode) {
 		return false
 	}
@ -1487,7 +1487,7 @@ func testFetchBlockIO(tc *testContext, tx database.Tx) bool {
 	copy(badBlockRegions, allBlockRegions)
 	badBlockRegions[len(badBlockRegions)-1].Hash = &daghash.Hash{}
 	wantErrCode = database.ErrBlockNotFound
-	_, err = tx.FetchBlockRegions(badBlockRegions)
+	_, err = dbTx.FetchBlockRegions(badBlockRegions)
 	if !checkDbError(tc.t, testName, err, wantErrCode) {
 		return false
 	}
@ -1500,7 +1500,7 @@ func testFetchBlockIO(tc *testContext, tx database.Tx) bool {
 		badBlockRegions[i].Offset = ^uint32(0)
 	}
 	wantErrCode = database.ErrBlockRegionInvalid
-	_, err = tx.FetchBlockRegions(badBlockRegions)
+	_, err = dbTx.FetchBlockRegions(badBlockRegions)
 	return checkDbError(tc.t, testName, err, wantErrCode)
 }
@ -1510,11 +1510,11 @@ func testFetchBlockIO(tc *testContext, tx database.Tx) bool {
 func testBlockIOTxInterface(tc *testContext) bool {
 	// Ensure attempting to store a block with a read-only transaction fails
 	// with the expected error.
-	err := tc.db.View(func(tx database.Tx) error {
+	err := tc.db.View(func(dbTx database.Tx) error {
 		wantErrCode := database.ErrTxNotWritable
 		for i, block := range tc.blocks {
 			testName := fmt.Sprintf("StoreBlock(%d) on ro tx", i)
-			err := tx.StoreBlock(block)
+			err := dbTx.StoreBlock(block)
 			if !checkDbError(tc.t, testName, err, wantErrCode) {
 				return errSubTestFail
 			}
@ -1534,10 +1534,10 @@ func testBlockIOTxInterface(tc *testContext) bool {
 	// commit or rollback. Then, force a rollback so the code below can
 	// ensure none of the data actually gets stored.
 	forceRollbackError := fmt.Errorf("force rollback")
-	err = tc.db.Update(func(tx database.Tx) error {
+	err = tc.db.Update(func(dbTx database.Tx) error {
 		// Store all blocks in the same transaction.
 		for i, block := range tc.blocks {
-			err := tx.StoreBlock(block)
+			err := dbTx.StoreBlock(block)
 			if err != nil {
 				tc.t.Errorf("StoreBlock #%d: unexpected error: "+
 					"%v", i, err)
@ -1551,7 +1551,7 @@ func testBlockIOTxInterface(tc *testContext) bool {
 		for i, block := range tc.blocks {
 			testName := fmt.Sprintf("duplicate block entry #%d "+
 				"(before commit)", i)
-			err := tx.StoreBlock(block)
+			err := dbTx.StoreBlock(block)
 			if !checkDbError(tc.t, testName, err, wantErrCode) {
 				return errSubTestFail
 			}
@ -1559,7 +1559,7 @@ func testBlockIOTxInterface(tc *testContext) bool {

 		// Ensure that all data fetches from the stored blocks before
 		// the transaction has been committed work as expected.
-		if !testFetchBlockIO(tc, tx) {
+		if !testFetchBlockIO(tc, dbTx) {
 			return errSubTestFail
 		}

@ -1576,8 +1576,8 @@ func testBlockIOTxInterface(tc *testContext) bool {
 	}

 	// Ensure rollback was successful
-	err = tc.db.View(func(tx database.Tx) error {
-		if !testFetchBlockIOMissing(tc, tx) {
+	err = tc.db.View(func(dbTx database.Tx) error {
+		if !testFetchBlockIOMissing(tc, dbTx) {
 			return errSubTestFail
 		}
 		return nil
@ -1591,10 +1591,10 @@ func testBlockIOTxInterface(tc *testContext) bool {

 	// Populate the database with loaded blocks and ensure all of the data
 	// fetching APIs work properly.
-	err = tc.db.Update(func(tx database.Tx) error {
+	err = tc.db.Update(func(dbTx database.Tx) error {
 		// Store a bunch of blocks in the same transaction.
 		for i, block := range tc.blocks {
-			err := tx.StoreBlock(block)
+			err := dbTx.StoreBlock(block)
 			if err != nil {
 				tc.t.Errorf("StoreBlock #%d: unexpected error: "+
 					"%v", i, err)
@ -1609,7 +1609,7 @@ func testBlockIOTxInterface(tc *testContext) bool {
 			testName := fmt.Sprintf("duplicate block entry #%d "+
 				"(before commit)", i)
 			wantErrCode := database.ErrBlockExists
-			err := tx.StoreBlock(block)
+			err := dbTx.StoreBlock(block)
 			if !checkDbError(tc.t, testName, err, wantErrCode) {
 				return errSubTestFail
 			}
@ -1617,7 +1617,7 @@ func testBlockIOTxInterface(tc *testContext) bool {

 		// Ensure that all data fetches from the stored blocks before
 		// the transaction has been committed work as expected.
-		if !testFetchBlockIO(tc, tx) {
+		if !testFetchBlockIO(tc, dbTx) {
 			return errSubTestFail
 		}

@ -1633,8 +1633,8 @@ func testBlockIOTxInterface(tc *testContext) bool {
 	// Ensure all data fetch tests work as expected using a managed
 	// read-only transaction after the data was successfully committed
 	// above.
-	err = tc.db.View(func(tx database.Tx) error {
-		if !testFetchBlockIO(tc, tx) {
+	err = tc.db.View(func(dbTx database.Tx) error {
+		if !testFetchBlockIO(tc, dbTx) {
 			return errSubTestFail
 		}

@ -1650,8 +1650,8 @@ func testBlockIOTxInterface(tc *testContext) bool {
 	// Ensure all data fetch tests work as expected using a managed
 	// read-write transaction after the data was successfully committed
 	// above.
-	err = tc.db.Update(func(tx database.Tx) error {
-		if !testFetchBlockIO(tc, tx) {
+	err = tc.db.Update(func(dbTx database.Tx) error {
+		if !testFetchBlockIO(tc, dbTx) {
 			return errSubTestFail
 		}

@ -1663,7 +1663,7 @@ func testBlockIOTxInterface(tc *testContext) bool {
 		for i, block := range tc.blocks {
 			testName := fmt.Sprintf("duplicate block entry #%d "+
 				"(before commit)", i)
-			err := tx.StoreBlock(block)
+			err := dbTx.StoreBlock(block)
 			if !checkDbError(tc.t, testName, err, wantErrCode) {
 				return errSubTestFail
 			}
@ -1683,10 +1683,10 @@ func testBlockIOTxInterface(tc *testContext) bool {

 // testClosedTxInterface ensures that both the metadata and block IO API
 // functions behave as expected when attempted against a closed transaction.
-func testClosedTxInterface(tc *testContext, tx database.Tx) bool {
+func testClosedTxInterface(tc *testContext, dbTx database.Tx) bool {
 	wantErrCode := database.ErrTxClosed
-	bucket := tx.Metadata()
-	cursor := tx.Metadata().Cursor()
+	bucket := dbTx.Metadata()
+	cursor := dbTx.Metadata().Cursor()
 	bucketName := []byte("closedtxbucket")
 	keyName := []byte("closedtxkey")

@ -1852,21 +1852,21 @@ func testClosedTxInterface(tc *testContext, tx database.Tx) bool {

 		// Ensure StoreBlock returns expected error.
 		testName = "StoreBlock on closed tx"
-		err = tx.StoreBlock(block)
+		err = dbTx.StoreBlock(block)
 		if !checkDbError(tc.t, testName, err, wantErrCode) {
 			return false
 		}

 		// Ensure FetchBlock returns expected error.
 		testName = fmt.Sprintf("FetchBlock #%d on closed tx", i)
-		_, err = tx.FetchBlock(blockHash)
+		_, err = dbTx.FetchBlock(blockHash)
 		if !checkDbError(tc.t, testName, err, wantErrCode) {
 			return false
 		}

 		// Ensure FetchBlockHeader returns expected error.
 		testName = fmt.Sprintf("FetchBlockHeader #%d on closed tx", i)
-		_, err = tx.FetchBlockHeader(blockHash)
+		_, err = dbTx.FetchBlockHeader(blockHash)
 		if !checkDbError(tc.t, testName, err, wantErrCode) {
 			return false
 		}
@ -1879,14 +1879,14 @@ func testClosedTxInterface(tc *testContext, tx database.Tx) bool {
 			Len: uint32(txLocs[0].TxLen),
 		}
 		allBlockRegions[i] = region
-		_, err = tx.FetchBlockRegion(&region)
+		_, err = dbTx.FetchBlockRegion(&region)
 		if !checkDbError(tc.t, testName, err, wantErrCode) {
 			return false
 		}

 		// Ensure HasBlock returns expected error.
 		testName = fmt.Sprintf("HasBlock #%d on closed tx", i)
-		_, err = tx.HasBlock(blockHash)
+		_, err = dbTx.HasBlock(blockHash)
 		if !checkDbError(tc.t, testName, err, wantErrCode) {
 			return false
 		}
@ -1898,28 +1898,28 @@ func testClosedTxInterface(tc *testContext, tx database.Tx) bool {

 	// Ensure FetchBlocks returns expected error.
 	testName = "FetchBlocks on closed tx"
-	_, err = tx.FetchBlocks(allBlockHashes)
+	_, err = dbTx.FetchBlocks(allBlockHashes)
 	if !checkDbError(tc.t, testName, err, wantErrCode) {
 		return false
 	}

 	// Ensure FetchBlockHeaders returns expected error.
 	testName = "FetchBlockHeaders on closed tx"
-	_, err = tx.FetchBlockHeaders(allBlockHashes)
+	_, err = dbTx.FetchBlockHeaders(allBlockHashes)
 	if !checkDbError(tc.t, testName, err, wantErrCode) {
 		return false
 	}

 	// Ensure FetchBlockRegions returns expected error.
 	testName = "FetchBlockRegions on closed tx"
-	_, err = tx.FetchBlockRegions(allBlockRegions)
+	_, err = dbTx.FetchBlockRegions(allBlockRegions)
 	if !checkDbError(tc.t, testName, err, wantErrCode) {
 		return false
 	}

 	// Ensure HasBlocks returns expected error.
 	testName = "HasBlocks on closed tx"
-	_, err = tx.HasBlocks(allBlockHashes)
+	_, err = dbTx.HasBlocks(allBlockHashes)
 	if !checkDbError(tc.t, testName, err, wantErrCode) {
 		return false
 	}
@ -1930,11 +1930,11 @@ func testClosedTxInterface(tc *testContext, tx database.Tx) bool {

 	// Ensure that attempting to rollback or commit a transaction that is
 	// already closed returns the expected error.
-	err = tx.Rollback()
+	err = dbTx.Rollback()
 	if !checkDbError(tc.t, "closed tx rollback", err, wantErrCode) {
 		return false
 	}
-	err = tx.Commit()
+	err = dbTx.Commit()
 	return checkDbError(tc.t, "closed tx commit", err, wantErrCode)
 }

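Every assertion above funnels through the suite's checkDbError helper. A minimal sketch of such a helper, assuming the btcd-style database.Error type with an ErrorCode field (the helper name, messages, and import path are illustrative assumptions, not the suite's own code):

package ffldb_test

import (
	"testing"

	"github.com/daglabs/btcd/database" // import path is an assumption
)

// assertDbErrorCode is a hypothetical stand-in for the suite's checkDbError:
// it verifies err is a database.Error carrying the expected error code.
func assertDbErrorCode(t *testing.T, testName string, err error, wantCode database.ErrorCode) bool {
	dbErr, ok := err.(database.Error)
	if !ok {
		t.Errorf("%s: unexpected error type %T: %v", testName, err, err)
		return false
	}
	if dbErr.ErrorCode != wantCode {
		t.Errorf("%s: unexpected error code - got %v, want %v",
			testName, dbErr.ErrorCode, wantCode)
		return false
	}
	return true
}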
@ -2003,8 +2003,8 @@ func testConcurrecy(tc *testContext) bool {
 	// help prevent durations that are too short which would cause erroneous
 	// test failures on slower systems.
 	startTime := time.Now()
-	err := tc.db.View(func(tx database.Tx) error {
-		_, err := tx.FetchBlock(tc.blocks[0].Hash())
+	err := tc.db.View(func(dbTx database.Tx) error {
+		_, err := dbTx.FetchBlock(tc.blocks[0].Hash())
 		return err
 	})
 	if err != nil {
@ -2024,9 +2024,9 @@ func testConcurrecy(tc *testContext) bool {
 	numReaders := len(tc.blocks)
 	resultChan := make(chan bool, numReaders)
 	reader := func(blockNum int) {
-		err := tc.db.View(func(tx database.Tx) error {
+		err := tc.db.View(func(dbTx database.Tx) error {
 			time.Sleep(sleepTime)
-			_, err := tx.FetchBlock(tc.blocks[blockNum].Hash())
+			_, err := dbTx.FetchBlock(tc.blocks[blockNum].Hash())
 			return err
 		})
 		if err != nil {
@ -2092,7 +2092,7 @@ func testConcurrecy(tc *testContext) bool {
 	started := make(chan struct{})
 	writeComplete := make(chan struct{})
 	reader = func(blockNum int) {
-		err := tc.db.View(func(tx database.Tx) error {
+		err := tc.db.View(func(dbTx database.Tx) error {
 			started <- struct{}{}

 			// Wait for the writer to complete.
@ -2100,7 +2100,7 @@ func testConcurrecy(tc *testContext) bool {

 			// Since this reader was created before the write took
 			// place, the data it added should not be visible.
-			val := tx.Metadata().Get(concurrentKey)
+			val := dbTx.Metadata().Get(concurrentKey)
 			if val != nil {
 				return fmt.Errorf("%s should not be visible",
 					concurrentKey)
@ -2124,8 +2124,8 @@ func testConcurrecy(tc *testContext) bool {
 	// All readers are started and waiting for completion of the writer.
 	// Set some data the readers are expecting to not find and signal the
 	// readers the write is done by closing the writeComplete channel.
-	err = tc.db.Update(func(tx database.Tx) error {
-		return tx.Metadata().Put(concurrentKey, concurrentVal)
+	err = tc.db.Update(func(dbTx database.Tx) error {
+		return dbTx.Metadata().Put(concurrentKey, concurrentVal)
 	})
 	if err != nil {
 		tc.t.Errorf("Unexpected error in update: %v", err)
@ -2145,7 +2145,7 @@ func testConcurrecy(tc *testContext) bool {
 	// can be active at a time.
 	writeSleepTime := time.Millisecond * 250
 	writer := func() {
-		err := tc.db.Update(func(tx database.Tx) error {
+		err := tc.db.Update(func(dbTx database.Tx) error {
 			time.Sleep(writeSleepTime)
 			return nil
 		})
@ -2195,7 +2195,7 @@ func testConcurrentClose(tc *testContext) bool {
 	finishReaders := make(chan struct{})
 	resultChan := make(chan bool, numReaders+1)
 	reader := func() {
-		err := tc.db.View(func(tx database.Tx) error {
+		err := tc.db.View(func(dbTx database.Tx) error {
 			atomic.AddInt32(&activeReaders, 1)
 			started <- struct{}{}
 			<-finishReaders
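The snapshot-isolation checks above depend on channel choreography: every reader signals started, then blocks until the writer closes writeComplete, so the write provably happens while all readers still hold their transactions. A self-contained Go sketch of that pattern without the database (all names are illustrative):

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	const numReaders = 3
	started := make(chan struct{})
	writeComplete := make(chan struct{})
	done := make(chan struct{})
	var visible int32 // stands in for data a writer publishes

	for i := 0; i < numReaders; i++ {
		go func(id int) {
			started <- struct{}{} // signal this reader is running
			<-writeComplete       // block until the writer finishes
			// A reader that snapshotted state before the write would still
			// see the old value here; this toy simply reads it live.
			fmt.Printf("reader %d sees %d\n", id, atomic.LoadInt32(&visible))
			done <- struct{}{}
		}(i)
	}

	for i := 0; i < numReaders; i++ {
		<-started // wait until every reader has started
	}
	atomic.StoreInt32(&visible, 1) // the "write"
	close(writeComplete)           // release all readers at once

	for i := 0; i < numReaders; i++ {
		<-done
	}
}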
@ -60,8 +60,8 @@ func reconcileDB(pdb *db, create bool) (database.DB, error) {

 	// Load the current write cursor position from the metadata.
 	var curFileNum, curOffset uint32
-	err := pdb.View(func(tx database.Tx) error {
-		writeRow := tx.Metadata().Get(writeLocKeyName)
+	err := pdb.View(func(dbTx database.Tx) error {
+		writeRow := dbTx.Metadata().Get(writeLocKeyName)
 		if writeRow == nil {
 			str := "write cursor does not exist"
 			return makeDbErr(database.ErrCorruption, str, nil)
database/ffldb/reconcile_test.go (new file, 180 lines)
@ -0,0 +1,180 @@
+package ffldb
+
+import (
+	"reflect"
+	"testing"
+
+	"bou.ke/monkey"
+)
+
+func TestSerializeWriteRow(t *testing.T) {
+	tests := []struct {
+		curBlockFileNum  uint32
+		curFileOffset    uint32
+		expectedWriteRow []byte
+	}{
+		// WriteRow format:
+		// First 4 bytes: curBlockFileNum
+		// Next 4 bytes: curFileOffset
+		// Next 4 bytes: Castagnoli CRC-32 checksum
+		// One can easily calculate checksums using the following code:
+		// https://play.golang.org/p/zoMKT-ORyF9
+		{0, 0, []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8A, 0xB2, 0x28, 0x8C}},
+		{10, 11, []byte{0x0A, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0xC1, 0xA6, 0x0D, 0xC8}},
+	}
+
+	for i, test := range tests {
+		actualWriteRow := serializeWriteRow(test.curBlockFileNum, test.curFileOffset)
+
+		if !reflect.DeepEqual(test.expectedWriteRow, actualWriteRow) {
+			t.Errorf("TestSerializeWriteRow: %d: Expected: %v, but got: %v",
+				i, test.expectedWriteRow, actualWriteRow)
+		}
+	}
+}
+
+func TestDeserializeWriteRow(t *testing.T) {
+	tests := []struct {
+		writeRow                []byte
+		expectedCurBlockFileNum uint32
+		expectedCurFileOffset   uint32
+		expectedError           bool
+	}{
+		// WriteRow format:
+		// First 4 bytes: curBlockFileNum
+		// Next 4 bytes: curFileOffset
+		// Next 4 bytes: Castagnoli CRC-32 checksum
+		// One can easily calculate checksums using the following code:
+		// https://play.golang.org/p/zoMKT-ORyF9
+		{[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8A, 0xB2, 0x28, 0x8C}, 0, 0, false},
+		{[]byte{0x0A, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0xC1, 0xA6, 0x0D, 0xC8}, 10, 11, false},
+		{[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8A, 0xB2, 0x28, 0x8D}, 0, 0, true},
+		{[]byte{0x0A, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, 0, 0, true},
+	}
+
+	for i, test := range tests {
+		actualCurBlockFileNum, actualCurFileOffset, err := deserializeWriteRow(test.writeRow)
+
+		if (err != nil) != test.expectedError {
+			t.Errorf("TestDeserializeWriteRow: %d: Expected error status: %t, but got: %t",
+				i, test.expectedError, err != nil)
+		}
+
+		if test.expectedCurBlockFileNum != actualCurBlockFileNum {
+			t.Errorf("TestDeserializeWriteRow: %d: Expected curBlockFileNum: %d, but got: %d",
+				i, test.expectedCurBlockFileNum, actualCurBlockFileNum)
+		}
+
+		if test.expectedCurFileOffset != actualCurFileOffset {
+			t.Errorf("TestDeserializeWriteRow: %d: Expected curFileOffset: %d, but got: %d",
+				i, test.expectedCurFileOffset, actualCurFileOffset)
+		}
+	}
+}
+
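The test comments above point to a playground link for computing these checksums. A self-contained sketch of the layout they describe, two little-endian uint32 values followed by a Castagnoli CRC-32 of those eight bytes, reproduces the second test vector (this mirrors what serializeWriteRow is expected to do, not the function itself):

package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

func main() {
	castagnoli := crc32.MakeTable(crc32.Castagnoli)

	curBlockFileNum, curFileOffset := uint32(10), uint32(11)

	// Build the 12-byte write row: two LE uint32 fields, then the CRC-32C
	// of the first 8 bytes, also stored little-endian.
	row := make([]byte, 12)
	binary.LittleEndian.PutUint32(row[0:4], curBlockFileNum)
	binary.LittleEndian.PutUint32(row[4:8], curFileOffset)
	binary.LittleEndian.PutUint32(row[8:12], crc32.Checksum(row[0:8], castagnoli))

	// Matches the second test vector above:
	// 0A 00 00 00 0B 00 00 00 C1 A6 0D C8
	fmt.Printf("% X\n", row)
}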
+// setWriteRow is a low-level helper method to update the write row in the
+// metadata bucket to enable certain test-cases in TestReconcileErrors.
+// If writeRow is nil, it deletes the write row altogether.
+func setWriteRow(pdb *db, writeRow []byte, t *testing.T) {
+	tx, err := pdb.begin(true)
+	if err != nil {
+		t.Fatalf("TestReconcileErrors: Error getting tx for setting "+
+			"writeLoc in metadata: %s", err)
+	}
+
+	if writeRow == nil {
+		err := tx.metaBucket.Delete(writeLocKeyName)
+		if err != nil {
+			t.Fatalf("TestReconcileErrors: Error deleting writeLoc from metadata: %s",
+				err)
+		}
+	} else {
+		err := tx.metaBucket.Put(writeLocKeyName, writeRow)
+		if err != nil {
+			t.Fatalf("TestReconcileErrors: Error updating writeLoc in metadata: %s",
+				err)
+		}
+	}
+
+	err = pdb.cache.commitTx(tx)
+	if err != nil {
+		t.Fatalf("TestReconcileErrors: Error committing the update of "+
+			"writeLoc in metadata: %s", err)
+	}
+
+	pdb.writeLock.Unlock()
+	pdb.closeLock.RUnlock()
+}
+
+// TestReconcileErrors tests all error-cases in reconcileDB.
+// The non-error-cases are tested in the more general tests.
+func TestReconcileErrors(t *testing.T) {
+	// Set-up tests
+	pdb := newTestDb("TestReconcileErrors", t)
+
+	// Test without writeLoc
+	setWriteRow(pdb, nil, t)
+	_, err := reconcileDB(pdb, false)
+	if err == nil {
+		t.Errorf("TestReconcileErrors: reconcileDB() didn't error out when " +
+			"running without a writeRowLoc")
+	}
+
+	// Test with writeLoc in metadata after the actual cursor position
+	setWriteRow(pdb, serializeWriteRow(1, 0), t)
+	_, err = reconcileDB(pdb, false)
+	if err == nil {
+		t.Errorf("TestReconcileErrors: reconcileDB() didn't error out when " +
+			"curBlockFileNum is after the actual cursor position")
+	}
+	setWriteRow(pdb, serializeWriteRow(0, 1), t)
+	_, err = reconcileDB(pdb, false)
+	if err == nil {
+		t.Errorf("TestReconcileErrors: reconcileDB() didn't error out when " +
+			"curFileOffset is after the actual cursor position")
+	}
+
+	// Restore previous writeRow
+	setWriteRow(pdb, serializeWriteRow(0, 0), t)
+
+	// Test with writeLoc in metadata before the actual cursor position
+	handleRollbackCalled := false
+	patch := monkey.Patch((*blockStore).handleRollback,
+		func(s *blockStore, oldBlockFileNum, oldBlockOffset uint32) {
+			handleRollbackCalled = true
+		})
+	defer patch.Unpatch()
+
+	pdb.store.writeCursor.curFileNum = 1
+	_, err = reconcileDB(pdb, false)
+	if err != nil {
+		t.Errorf("TestReconcileErrors: Error in reconcileDB() when curFileNum is before " +
+			"the actual cursor position")
+	}
+	if !handleRollbackCalled {
+		t.Errorf("TestReconcileErrors: handleRollback was not called when curFileNum is before " +
+			"the actual cursor position")
+	}
+
+	pdb.store.writeCursor.curFileNum = 0
+	pdb.store.writeCursor.curOffset = 1
+	_, err = reconcileDB(pdb, false)
+	if err != nil {
+		t.Errorf("TestReconcileErrors: Error in reconcileDB() when curOffset is before " +
+			"the actual cursor position")
+	}
+	if !handleRollbackCalled {
+		t.Errorf("TestReconcileErrors: handleRollback was not called when curOffset is before " +
+			"the actual cursor position")
+	}
+
+	// Restore previous writeCursor location
+	pdb.store.writeCursor.curFileNum = 0
+	pdb.store.writeCursor.curOffset = 0
+	// Test create with closed DB to force initDB to fail
+	pdb.Close()
+	_, err = reconcileDB(pdb, true)
+	if err == nil {
+		t.Errorf("reconcileDB didn't error out when running with closed db and create = true")
+	}
+}
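TestReconcileErrors stubs out (*blockStore).handleRollback with bou.ke/monkey so it can assert the rollback path was taken without executing it. A self-contained sketch of the same technique on a toy type (monkey rewrites function code at runtime, so build with -gcflags=-l to prevent inlining; the type and method here are illustrative):

package main

import (
	"fmt"

	"bou.ke/monkey"
)

type store struct{}

// The real method we want to stub out during a test.
func (s *store) handleRollback(fileNum, offset uint32) {
	fmt.Println("real rollback logic runs here")
}

func main() {
	called := false

	// Replace the method body at runtime; the replacement takes the
	// receiver as its first parameter, matching the method expression.
	patch := monkey.Patch((*store).handleRollback,
		func(s *store, fileNum, offset uint32) {
			called = true
		})
	defer patch.Unpatch()

	(&store{}).handleRollback(1, 0)
	fmt.Println("patched handler called:", called) // true
}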
@ -231,7 +231,7 @@ func TestCornerCases(t *testing.T) {
 	// properly.
 	testName = "View: underlying leveldb error"
 	wantErrCode = database.ErrDbNotOpen
-	err = idb.View(func(tx database.Tx) error {
+	err = idb.View(func(dbTx database.Tx) error {
 		return nil
 	})
 	if !checkDbError(t, testName, err, wantErrCode) {
@ -241,7 +241,7 @@ func TestCornerCases(t *testing.T) {
 	// Ensure the Update handles errors in the underlying leveldb database
 	// properly.
 	testName = "Update: underlying leveldb error"
-	err = idb.Update(func(tx database.Tx) error {
+	err = idb.Update(func(dbTx database.Tx) error {
 		return nil
 	})
 	if !checkDbError(t, testName, err, wantErrCode) {
@ -253,13 +253,13 @@ func TestCornerCases(t *testing.T) {
 // test context including all metadata and the mock files.
 func resetDatabase(tc *testContext) bool {
 	// Reset the metadata.
-	err := tc.db.Update(func(tx database.Tx) error {
+	err := tc.db.Update(func(dbTx database.Tx) error {
 		// Remove all the keys using a cursor while also generating a
 		// list of buckets. It's not safe to remove keys during ForEach
 		// iteration nor is it safe to remove buckets during cursor
 		// iteration, so this dual approach is needed.
 		var bucketNames [][]byte
-		cursor := tx.Metadata().Cursor()
+		cursor := dbTx.Metadata().Cursor()
 		for ok := cursor.First(); ok; ok = cursor.Next() {
 			if cursor.Value() != nil {
 				if err := cursor.Delete(); err != nil {
@ -272,12 +272,12 @@ func resetDatabase(tc *testContext) bool {

 		// Remove the buckets.
 		for _, k := range bucketNames {
-			if err := tx.Metadata().DeleteBucket(k); err != nil {
+			if err := dbTx.Metadata().DeleteBucket(k); err != nil {
 				return err
 			}
 		}

-		_, err := tx.Metadata().CreateBucket(blockIdxBucketName)
+		_, err := dbTx.Metadata().CreateBucket(blockIdxBucketName)
 		return err
 	})
 	if err != nil {
@ -360,9 +360,9 @@ func testWriteFailures(tc *testContext) bool {
 	// file that fails the write fails when the transaction is
 	// committed, not when the block is stored.
 	tc.maxFileSizes = map[uint32]int64{test.fileNum: test.maxSize}
-	err := tc.db.Update(func(tx database.Tx) error {
+	err := tc.db.Update(func(dbTx database.Tx) error {
 		for i, block := range tc.blocks {
-			err := tx.StoreBlock(block)
+			err := dbTx.StoreBlock(block)
 			if err != nil {
 				tc.t.Errorf("StoreBlock (%d): unexpected "+
 					"error: %v", i, err)
@ -423,8 +423,8 @@ func testBlockFileErrors(tc *testContext) bool {
 	}

 	// Insert the first block into the mock file.
-	err = tc.db.Update(func(tx database.Tx) error {
-		err := tx.StoreBlock(tc.blocks[0])
+	err = tc.db.Update(func(dbTx database.Tx) error {
+		err := dbTx.StoreBlock(tc.blocks[0])
 		if err != nil {
 			tc.t.Errorf("StoreBlock: unexpected error: %v", err)
 			return errSubTestFail
@ -464,10 +464,10 @@ func testBlockFileErrors(tc *testContext) bool {

 	// Ensure failures in FetchBlock and FetchBlockRegion(s) since the
 	// underlying file they need to read from has been closed.
-	err = tc.db.View(func(tx database.Tx) error {
+	err = tc.db.View(func(dbTx database.Tx) error {
 		testName = "FetchBlock closed file"
 		wantErrCode := database.ErrDriverSpecific
-		_, err := tx.FetchBlock(block0Hash)
+		_, err := dbTx.FetchBlock(block0Hash)
 		if !checkDbError(tc.t, testName, err, wantErrCode) {
 			return errSubTestFail
 		}
@ -480,13 +480,13 @@ func testBlockFileErrors(tc *testContext) bool {
 				Offset: 0,
 			},
 		}
-		_, err = tx.FetchBlockRegion(&regions[0])
+		_, err = dbTx.FetchBlockRegion(&regions[0])
 		if !checkDbError(tc.t, testName, err, wantErrCode) {
 			return errSubTestFail
 		}

 		testName = "FetchBlockRegions closed file"
-		_, err = tx.FetchBlockRegions(regions)
+		_, err = dbTx.FetchBlockRegions(regions)
 		if !checkDbError(tc.t, testName, err, wantErrCode) {
 			return errSubTestFail
 		}
@ -511,8 +511,8 @@ func testCorruption(tc *testContext) bool {
 	}

 	// Insert the first block into the mock file.
-	err := tc.db.Update(func(tx database.Tx) error {
-		err := tx.StoreBlock(tc.blocks[0])
+	err := tc.db.Update(func(dbTx database.Tx) error {
+		err := dbTx.StoreBlock(tc.blocks[0])
 		if err != nil {
 			tc.t.Errorf("StoreBlock: unexpected error: %v", err)
 			return errSubTestFail
@ -556,7 +556,7 @@ func testCorruption(tc *testContext) bool {
 		// Random checksum byte.
 		{uint32(len(block0Bytes)) + 10, false, database.ErrCorruption},
 	}
-	err = tc.db.View(func(tx database.Tx) error {
+	err = tc.db.View(func(dbTx database.Tx) error {
 		data := tc.files[0].file.(*mockFile).data
 		for i, test := range tests {
 			// Corrupt the byte at the offset by a single bit.
@ -574,7 +574,7 @@ func testCorruption(tc *testContext) bool {

 			testName := fmt.Sprintf("FetchBlock (test #%d): "+
 				"corruption", i)
-			_, err := tx.FetchBlock(block0Hash)
+			_, err := dbTx.FetchBlock(block0Hash)
 			if !checkDbError(tc.t, testName, err, test.wantErrCode) {
 				return errSubTestFail
 			}
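testCorruption flips a single bit at a chosen offset and expects the per-block Castagnoli checksum to surface database.ErrCorruption on the next fetch. A toy round trip of the flip-and-detect idea (payload and offset are illustrative, not the on-disk format):

package main

import (
	"fmt"
	"hash/crc32"
)

func main() {
	castagnoli := crc32.MakeTable(crc32.Castagnoli)

	// A "stored block": payload plus its CRC-32C, in the spirit of
	// ffldb's checksummed on-disk rows.
	payload := []byte("serialized block bytes")
	checksum := crc32.Checksum(payload, castagnoli)

	// Corrupt the byte at some offset by a single bit.
	offset := 3
	payload[offset] ^= 0x01

	// Re-verifying now fails, which is how reads detect corruption.
	fmt.Println("checksum still matches:",
		crc32.Checksum(payload, castagnoli) == checksum) // false
}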
@ -2132,8 +2132,8 @@ func (s *Server) Start() {
 func (s *Server) Stop() error {

 	// Save fee estimator state in the database.
-	s.db.Update(func(tx database.Tx) error {
-		metadata := tx.Metadata()
+	s.db.Update(func(dbTx database.Tx) error {
+		metadata := dbTx.Metadata()
 		metadata.Put(mempool.EstimateFeeDatabaseKey, s.FeeEstimator.Save())

 		return nil
@ -2396,8 +2396,8 @@ func NewServer(listenAddrs []string, db database.DB, dagParams *dagconfig.Params

 	// Search for a FeeEstimator state in the database. If none can be found
 	// or if it cannot be loaded, create a new one.
-	db.Update(func(tx database.Tx) error {
-		metadata := tx.Metadata()
+	db.Update(func(dbTx database.Tx) error {
+		metadata := dbTx.Metadata()
 		feeEstimationData := metadata.Get(mempool.EstimateFeeDatabaseKey)
 		if feeEstimationData != nil {
 			// delete it from the database so that we don't try to restore the
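These two hunks form a save/load pair: Stop serializes the fee estimator into a single metadata key, and NewServer reads that key back and deletes it so stale state is not restored twice. A toy sketch of the round trip, with a map standing in for the metadata bucket (the real code does this inside db.Update with dbTx.Metadata(); the key name below is illustrative):

package main

import "fmt"

// metadata stands in for the database's metadata bucket.
var metadata = map[string][]byte{}

const estimateFeeDatabaseKey = "estimatefee" // illustrative key name

func saveState(state []byte) { metadata[estimateFeeDatabaseKey] = state }

func loadAndClearState() []byte {
	state := metadata[estimateFeeDatabaseKey]
	// Delete after loading so a later startup cannot restore stale state.
	delete(metadata, estimateFeeDatabaseKey)
	return state
}

func main() {
	saveState([]byte("serialized fee estimator"))
	fmt.Printf("restored: %q\n", loadAndClearState())
	fmt.Println("still stored:", metadata[estimateFeeDatabaseKey] != nil) // false
}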
|
@ -314,6 +314,9 @@ func scriptError(c ErrorCode, desc string) Error {
|
|||||||
// IsErrorCode returns whether or not the provided error is a script error with
|
// IsErrorCode returns whether or not the provided error is a script error with
|
||||||
// the provided error code.
|
// the provided error code.
|
||||||
func IsErrorCode(err error, c ErrorCode) bool {
|
func IsErrorCode(err error, c ErrorCode) bool {
|
||||||
serr, ok := err.(Error)
|
if err, ok := err.(Error); ok {
|
||||||
return ok && serr.ErrorCode == c
|
return err.ErrorCode == c
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
}
|
}
|
||||||
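The rewritten IsErrorCode scopes the type assertion to the if statement, shadowing err with its asserted form. A self-contained sketch of the pattern on a toy Error type, with typical call sites (the ErrorCode value is illustrative, not txscript's real set):

package main

import "fmt"

type ErrorCode int

const ErrEvalFalse ErrorCode = iota // illustrative code

// Error mirrors the shape of txscript's Error: a code plus a description.
type Error struct {
	ErrorCode   ErrorCode
	Description string
}

func (e Error) Error() string { return e.Description }

// IsErrorCode follows the diff's pattern: the type-asserted value shadows
// err inside the if, so the assertion stays scoped to the success branch.
func IsErrorCode(err error, c ErrorCode) bool {
	if err, ok := err.(Error); ok {
		return err.ErrorCode == c
	}

	return false
}

func main() {
	var err error = Error{ErrorCode: ErrEvalFalse, Description: "script returned false"}
	fmt.Println(IsErrorCode(err, ErrEvalFalse))                  // true
	fmt.Println(IsErrorCode(fmt.Errorf("other"), ErrEvalFalse))  // false
}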