Mirror of https://github.com/kaspanet/kaspad.git, synced 2025-03-30 15:08:33 +00:00

* [DEV-81] Overwrite maxOpenFiles for testInterface to force tests to check the LRU mechanism in openFile
* [DEV-81] Added database.UseLogger test
* [DEV-81] Completed coverage of reconcileDB()
* [DEV-81] Added some tests for dbcache
* [DEV-81] Moved init and UseLogger to a separate file to make them more easily testable + added tests
* [DEV-81] Added tests for deleteFile
* [DEV-81] Added tests for cursor.Delete + made sure it returns an error when the transaction is not writable
* [DEV-81] Moved database/error_test.go from the database_test package to the database package + added a test for IsErrorCode
* [DEV-81] Added tests for handleRollback error cases
* [DEV-81] Added tests for cursor.skipPendingUpdates
* [DEV-81] Added tests for various cursor edge cases
* [DEV-81] tx.putKey no longer returns an error, because there is no case in which it fails
* [DEV-81] Added tests for CreateBucket error cases
* [DEV-81] Added tests for bucket.Get and .Delete error cases + .Delete now returns an error on an empty key
* [DEV-81] Added a test for ForEachBucket
* [DEV-81] Added tests for StoreBlock
* [DEV-81] Added a test for deleting a double-nested bucket
* [DEV-81] Removed log_test, as it is no longer necessary with the logging-system redesign
* [DEV-81] Added tests for some of the writePendingAndCommit error cases
* [DEV-81] Updated references from btcutil to btcd/util
* [DEV-81] Added tests for dbCacheIterator{.Next(), .Prev(), .Key, .Value()} for cases where the iterator is exhausted
* [DEV-81] Added tests for ldbIterator placeholder functions
* [DEV-81] Added the test name to error messages in TestSkipPendingUpdates
* [DEV-81] Began writing TestSkipPendingUpdatesCache
* [DEV-81] Added error cases for DBCache.flush() and DBCache.commitTreaps()
* [DEV-81] Used monkey.patch from bou.ke rather than from GitHub
* [DEV-81] Rewrote IsErrorCode in both the database and txscript packages to be more concise (see the sketch after this list)
* [DEV-81] Renamed any database.Tx to dbTx instead of tx, to avoid confusion with coin Tx
* [DEV-81] Fixed a typo
* [DEV-81] Used os.TempDir() instead of /tmp/ to be cross-platform
* [DEV-81] Used SimNet for database tests + error if testDB still exists after deleting it
* [DEV-81] Removed useLogger - it's redundant
* [DEV-81] Added a comment on how CRC32 checksums are calculated in reconcile_test.go
* [DEV-81] Added a comment that explains what setWriteRow does
* [DEV-81] Used a constant instead of a hard-coded value
* [DEV-81] Fixed some typos + better formatting
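One of the items above rewrites IsErrorCode in the database package to be more concise. A minimal sketch of what such a rewrite typically looks like, assuming the btcd-style Error struct with an ErrorCode field; the exact body is an assumption rather than a quote from the commit:

// IsErrorCode returns whether or not the provided error is a database Error
// with the given error code. A single type assertion plus one boolean
// expression replaces the more verbose branching of the earlier version.
func IsErrorCode(err error, c ErrorCode) bool {
	dbErr, ok := err.(Error)
	return ok && dbErr.ErrorCode == c
}

The txscript rewrite would have the same shape, using txscript's own Error and ErrorCode types.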
94 lines · 2.3 KiB · Go
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package main

import (
	"time"

	"github.com/daglabs/btcd/dagconfig/daghash"
	"github.com/daglabs/btcd/database"
)

// headersCmd defines the configuration options for the loadheaders command.
type headersCmd struct {
	Bulk bool `long:"bulk" description:"Use bulk loading of headers instead of one at a time"`
}

var (
	// headersCfg defines the configuration options for the command.
	headersCfg = headersCmd{
		Bulk: false,
	}
)

// Execute is the main entry point for the command. It's invoked by the parser.
func (cmd *headersCmd) Execute(args []string) error {
	// Set up the global config options and ensure they are valid.
	if err := setupGlobalConfig(); err != nil {
		return err
	}

	// Load the block database.
	db, err := loadBlockDB()
	if err != nil {
		return err
	}
	defer db.Close()

	// NOTE: This code will only work for ffldb. Ideally the package using
	// the database would keep a metadata index of its own.
	blockIdxName := []byte("ffldb-blockidx")
	if !headersCfg.Bulk {
		err = db.View(func(dbTx database.Tx) error {
			// Count the entries in the block index so the progress
			// message can report the total number of headers.
			totalHdrs := 0
			blockIdxBucket := dbTx.Metadata().Bucket(blockIdxName)
			err := blockIdxBucket.ForEach(func(k, v []byte) error {
				totalHdrs++
				return nil
			})
			if err != nil {
				return err
			}

			log.Infof("Loading headers for %d blocks...", totalHdrs)
			numLoaded := 0
			startTime := time.Now()

			// The block index keys are block hashes, so convert each
			// key back into a hash and fetch its header one at a time.
			err = blockIdxBucket.ForEach(func(k, v []byte) error {
				var hash daghash.Hash
				copy(hash[:], k)
				_, err := dbTx.FetchBlockHeader(&hash)
				if err != nil {
					return err
				}
				numLoaded++
				return nil
			})
			if err != nil {
				return err
			}

			log.Infof("Loaded %d headers in %v", numLoaded,
				time.Since(startTime))
			return nil
		})
		return err
	}

	// Bulk load headers.
	err = db.View(func(dbTx database.Tx) error {
		// Collect every block hash from the index up front so all
		// headers can be fetched with a single bulk call.
		blockIdxBucket := dbTx.Metadata().Bucket(blockIdxName)
		hashes := make([]daghash.Hash, 0, 500000)
		err := blockIdxBucket.ForEach(func(k, v []byte) error {
			var hash daghash.Hash
			copy(hash[:], k)
			hashes = append(hashes, hash)
			return nil
		})
		if err != nil {
			return err
		}

		log.Infof("Loading headers for %d blocks...", len(hashes))
		startTime := time.Now()
		hdrs, err := dbTx.FetchBlockHeaders(hashes)
		if err != nil {
			return err
		}
		log.Infof("Loaded %d headers in %v", len(hdrs),
			time.Since(startTime))
		return nil
	})
	return err
}
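Execute relies on setupGlobalConfig, loadBlockDB, and log, which are defined elsewhere in the same package. A minimal sketch of how the command might be wired into the tool, assuming jessevdk/go-flags registration in the style of btcd's dbtool (the parser name and description strings are assumptions):

package main

import (
	"os"

	flags "github.com/jessevdk/go-flags"
)

func main() {
	// Register the loadheaders command; the parser dispatches to
	// headersCfg.Execute when the user invokes "dbtool loadheaders".
	parser := flags.NewNamedParser("dbtool", flags.Default)
	parser.AddCommand("loadheaders",
		"Time how long to load headers for all blocks in the database",
		"", &headersCfg)
	if _, err := parser.Parse(); err != nil {
		os.Exit(1)
	}
}

With that wiring, "dbtool loadheaders --bulk" exercises the FetchBlockHeaders path timed above, while the default form fetches each header individually.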