[NOD-1413] Remove /cmd/addblock (#951)

stasatdaglabs 2020-10-12 13:23:19 +03:00 committed by GitHub
parent e9951bc34a
commit 04ead57731
3 changed files with 0 additions and 507 deletions


@@ -1,89 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"os"
"runtime"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/infrastructure/os/limits"
"github.com/kaspanet/kaspad/util/panics"
)
const (
// blockDBNamePrefix is the prefix for the kaspad block database.
blockDBNamePrefix = "blocks"
)
var (
cfg *ConfigFlags
log *logger.Logger
spawn func(string, func())
)
// realMain is the real main function for the utility. It is necessary to work
// around the fact that deferred functions do not run when os.Exit() is called.
func realMain() error {
// Load configuration and parse command line.
tcfg, _, err := loadConfig()
if err != nil {
return err
}
cfg = tcfg
// Setup logging.
backendLogger := logger.NewBackend()
defer os.Stdout.Sync()
log = backendLogger.Logger("MAIN")
spawn = panics.GoroutineWrapperFunc(log)
fi, err := os.Open(cfg.InFile)
if err != nil {
log.Errorf("Failed to open file %s: %s", cfg.InFile, err)
return err
}
defer fi.Close()
// Create a block importer for the input file.
importer, err := newBlockImporter(fi)
if err != nil {
log.Errorf("Failed create block importer: %s", err)
return err
}
// Perform the import asynchronously. This allows blocks to be
// processed and read in parallel. The results channel returned from
// Import contains the statistics about the import including an error
// if something went wrong.
log.Info("Starting import")
resultsChan := importer.Import()
results := <-resultsChan
if results.err != nil {
log.Errorf("%s", results.err)
return results.err
}
log.Infof("Processed a total of %d blocks (%d imported, %d already "+
"known)", results.blocksProcessed, results.blocksImported,
results.blocksProcessed-results.blocksImported)
return nil
}
func main() {
// Use all processor cores and raise some limits.
runtime.GOMAXPROCS(runtime.NumCPU())
if err := limits.SetLimits(nil); err != nil {
os.Exit(1)
}
// Work around defer not working after os.Exit()
if err := realMain(); err != nil {
os.Exit(1)
}
}
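
For reference, the file above relies on the realMain wrapper pattern: os.Exit skips deferred functions, so all work that needs deferred cleanup lives in realMain and main only exits after it returns. A minimal, self-contained sketch of the pattern (the input file name and the work inside are illustrative, not taken from the removed tool):

package main

import (
	"fmt"
	"os"
)

// realMain holds the actual work so deferred cleanup runs on every
// return path. os.Exit skips deferred functions, which is why main
// calls it only after realMain has fully returned.
func realMain() error {
	f, err := os.Open("input.dat") // hypothetical input file
	if err != nil {
		return err
	}
	defer f.Close() // guaranteed to run before main calls os.Exit

	// ... do the real work with f ...
	return nil
}

func main() {
	if err := realMain(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}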


@@ -1,97 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"fmt"
flags "github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
"os"
"path/filepath"
)
const (
defaultDataFile = "bootstrap.dat"
defaultProgress = 10
)
var (
kaspadHomeDir = util.AppDataDir("kaspad", false)
defaultDataDir = filepath.Join(kaspadHomeDir, "data")
activeConfig *ConfigFlags
)
// ActiveConfig returns the active configuration struct
func ActiveConfig() *ConfigFlags {
return activeConfig
}
// ConfigFlags defines the configuration options for addblock.
//
// See loadConfig for details on the configuration load process.
type ConfigFlags struct {
DataDir string `short:"b" long:"datadir" description:"Location of the kaspad data directory"`
InFile string `short:"i" long:"infile" description:"File containing the block(s)"`
Progress int `short:"p" long:"progress" description:"Show a progress message each time this number of seconds have passed -- Use 0 to disable progress announcements"`
AcceptanceIndex bool `long:"acceptanceindex" description:"Maintain a full hash-based acceptance index which makes the getChainFromBlock RPC available"`
config.NetworkFlags
}
// fileExists reports whether the named file or directory exists.
func fileExists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}
// loadConfig initializes and parses the config using command line options.
func loadConfig() (*ConfigFlags, []string, error) {
// Default config.
activeConfig = &ConfigFlags{
DataDir: defaultDataDir,
InFile: defaultDataFile,
Progress: defaultProgress,
}
// Parse command line options.
parser := flags.NewParser(&activeConfig, flags.Default)
remainingArgs, err := parser.Parse()
if err != nil {
var flagsErr *flags.Error
if ok := errors.As(err, &flagsErr); !ok || flagsErr.Type != flags.ErrHelp {
parser.WriteHelp(os.Stderr)
}
return nil, nil, err
}
err = activeConfig.ResolveNetwork(parser)
if err != nil {
return nil, nil, err
}
// Append the network type to the data directory so it is "namespaced"
// per network. In addition to the block database, there are other
// pieces of data that are saved to disk such as address manager state.
// All data is specific to a network, so namespacing the data directory
// means each individual piece of serialized data does not have to
// worry about changing names per network and such.
activeConfig.DataDir = filepath.Join(activeConfig.DataDir, ActiveConfig().NetParams().Name)
// Ensure the specified block file exists.
if !fileExists(activeConfig.InFile) {
str := "%s: The specified block file [%s] does not exist"
err := errors.Errorf(str, "loadConfig", activeConfig.InFile)
fmt.Fprintln(os.Stderr, err)
parser.WriteHelp(os.Stderr)
return nil, nil, err
}
return activeConfig, remainingArgs, nil
}
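
The config file above is a thin wrapper around github.com/jessevdk/go-flags, which builds the command line from struct tags. A minimal sketch of that usage with a made-up options struct (only the go-flags calls mirror the removed file; the fields and defaults are illustrative):

package main

import (
	"fmt"
	"os"

	flags "github.com/jessevdk/go-flags"
)

// options is a hypothetical flag set; go-flags derives the CLI from
// the struct tags.
type options struct {
	InFile   string `short:"i" long:"infile" description:"Input file"`
	Progress int    `short:"p" long:"progress" description:"Progress interval in seconds"`
}

func main() {
	opts := &options{InFile: "bootstrap.dat", Progress: 10}

	// NewParser takes the pointer to the options struct itself, not a
	// pointer to that pointer -- the same fix applied in loadConfig above.
	parser := flags.NewParser(opts, flags.Default)
	remaining, err := parser.Parse()
	if err != nil {
		os.Exit(1)
	}
	fmt.Println(opts.InFile, opts.Progress, remaining)
}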


@@ -1,321 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"encoding/binary"
"github.com/kaspanet/kaspad/domain/blockdag/indexers"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/pkg/errors"
"io"
"sync"
"time"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/util"
)
// importResults houses the stats and result of an import operation.
type importResults struct {
blocksProcessed int64
blocksImported int64
err error
}
// blockImporter houses information about an ongoing import from a block data
// file to the block database.
type blockImporter struct {
dag *blockdag.BlockDAG
r io.ReadSeeker
processQueue chan []byte
doneChan chan bool
errChan chan error
quit chan struct{}
wg sync.WaitGroup
blocksProcessed int64
blocksImported int64
receivedLogBlocks int64
receivedLogTx int64
lastHeight int64
lastBlockTime mstime.Time
lastLogTime mstime.Time
}
// readBlock reads the next block from the input file.
func (bi *blockImporter) readBlock() ([]byte, error) {
// The block file format is:
// <network> <block length> <serialized block>
var net uint32
err := binary.Read(bi.r, binary.LittleEndian, &net)
if err != nil {
if err != io.EOF {
return nil, err
}
// No block and no error means there are no more blocks to read.
return nil, nil
}
if net != uint32(ActiveConfig().NetParams().Net) {
return nil, errors.Errorf("network mismatch -- got %x, want %x",
net, uint32(ActiveConfig().NetParams().Net))
}
// Read the block length and ensure it is sane.
var blockLen uint32
if err := binary.Read(bi.r, binary.LittleEndian, &blockLen); err != nil {
return nil, err
}
if blockLen > appmessage.MaxMessagePayload {
return nil, errors.Errorf("block payload of %d bytes is larger "+
"than the max allowed %d bytes", blockLen,
appmessage.MaxMessagePayload)
}
serializedBlock := make([]byte, blockLen)
if _, err := io.ReadFull(bi.r, serializedBlock); err != nil {
return nil, err
}
return serializedBlock, nil
}
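
readBlock above consumes the bootstrap file format <network> <block length> <serialized block>, with both integers little-endian. A minimal round-trip sketch of that framing (the magic constant and payload are placeholders, not kaspad's real values):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// writeRecord frames one serialized block: a 4-byte network magic, a
// 4-byte little-endian length, then the payload itself.
func writeRecord(w io.Writer, net uint32, block []byte) error {
	if err := binary.Write(w, binary.LittleEndian, net); err != nil {
		return err
	}
	if err := binary.Write(w, binary.LittleEndian, uint32(len(block))); err != nil {
		return err
	}
	_, err := w.Write(block)
	return err
}

// readRecord mirrors readBlock: io.EOF on the magic means a clean end
// of file, so it returns (nil, nil); any other error is a failure.
func readRecord(r io.Reader, wantNet uint32) ([]byte, error) {
	var net uint32
	if err := binary.Read(r, binary.LittleEndian, &net); err != nil {
		if err == io.EOF {
			return nil, nil
		}
		return nil, err
	}
	if net != wantNet {
		return nil, fmt.Errorf("network mismatch -- got %x, want %x", net, wantNet)
	}
	var blockLen uint32
	if err := binary.Read(r, binary.LittleEndian, &blockLen); err != nil {
		return nil, err
	}
	block := make([]byte, blockLen)
	if _, err := io.ReadFull(r, block); err != nil {
		return nil, err
	}
	return block, nil
}

func main() {
	const magic = 0x12345678 // placeholder network magic
	var buf bytes.Buffer
	if err := writeRecord(&buf, magic, []byte("serialized block bytes")); err != nil {
		panic(err)
	}
	block, err := readRecord(&buf, magic)
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes back\n", len(block))
}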
// processBlock potentially imports the block into the database. It first
// deserializes the raw block while checking for errors. Already known blocks
// are skipped and orphan blocks are considered errors. Finally, it runs the
// block through the DAG rules to ensure it follows all rules.
// Returns whether the block was imported along with any potential errors.
func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
// Deserialize the block which includes checks for malformed blocks.
block, err := util.NewBlockFromBytes(serializedBlock)
if err != nil {
return false, err
}
// Update progress statistics.
bi.lastBlockTime = block.MsgBlock().Header.Timestamp
bi.receivedLogTx += int64(len(block.MsgBlock().Transactions))
// Skip blocks that already exist.
blockHash := block.Hash()
if bi.dag.IsKnownBlock(blockHash) {
return false, nil
}
// Don't bother trying to process orphans.
parentHashes := block.MsgBlock().Header.ParentHashes
if len(parentHashes) > 0 {
if !bi.dag.AreKnownBlocks(parentHashes) {
return false, errors.Errorf("import file contains block "+
"%v which does not link to the available "+
"block DAG", parentHashes)
}
}
// Ensure the block follows all of the DAG rules.
isOrphan, isDelayed, err := bi.dag.ProcessBlock(block,
blockdag.BFFastAdd)
if err != nil {
return false, err
}
if isDelayed {
return false, errors.Errorf("import file contains a block that is too far in the future")
}
if isOrphan {
return false, errors.Errorf("import file contains an orphan "+
"block: %s", blockHash)
}
return true, nil
}
// readHandler is the main handler for reading blocks from the import file.
// This allows block processing to take place in parallel with block reads.
// It must be run as a goroutine.
func (bi *blockImporter) readHandler() {
out:
for {
// Read the next block from the file and if anything goes wrong
// notify the status handler with the error and bail.
serializedBlock, err := bi.readBlock()
if err != nil {
bi.errChan <- errors.Errorf("Error reading from input "+
"file: %s", err.Error())
break out
}
// A nil block with no error means we're done.
if serializedBlock == nil {
break out
}
// Send the block or quit if we've been signalled to exit by
// the status handler due to an error elsewhere.
select {
case bi.processQueue <- serializedBlock:
case <-bi.quit:
break out
}
}
// Close the processing channel to signal no more blocks are coming.
close(bi.processQueue)
bi.wg.Done()
}
// logProgress logs block progress as an information message. In order to
// prevent spam, it limits logging to one message every cfg.Progress seconds
// with duration and totals included.
func (bi *blockImporter) logProgress() {
bi.receivedLogBlocks++
now := mstime.Now()
duration := now.Sub(bi.lastLogTime)
if duration < time.Second*time.Duration(cfg.Progress) {
return
}
// Truncate the duration to tens of milliseconds.
durationMillis := int64(duration / time.Millisecond)
tDuration := 10 * time.Millisecond * time.Duration(durationMillis/10)
// Log information about new block height.
blockStr := "blocks"
if bi.receivedLogBlocks == 1 {
blockStr = "block"
}
txStr := "transactions"
if bi.receivedLogTx == 1 {
txStr = "transaction"
}
log.Infof("Processed %d %s in the last %s (%d %s, height %d, %s)",
bi.receivedLogBlocks, blockStr, tDuration, bi.receivedLogTx,
txStr, bi.lastHeight, bi.lastBlockTime)
bi.receivedLogBlocks = 0
bi.receivedLogTx = 0
bi.lastLogTime = now
}
// processHandler is the main handler for processing blocks. This allows block
// processing to take place in parallel with block reads from the import file.
// It must be run as a goroutine.
func (bi *blockImporter) processHandler() {
out:
for {
select {
case serializedBlock, ok := <-bi.processQueue:
// We're done when the channel is closed.
if !ok {
break out
}
bi.blocksProcessed++
bi.lastHeight++
imported, err := bi.processBlock(serializedBlock)
if err != nil {
bi.errChan <- err
break out
}
if imported {
bi.blocksImported++
}
bi.logProgress()
case <-bi.quit:
break out
}
}
bi.wg.Done()
}
// statusHandler waits for updates from the import operation and notifies
// the passed doneChan with the results of the import. It also causes all
// goroutines to exit if an error is reported from any of them.
func (bi *blockImporter) statusHandler(resultsChan chan *importResults) {
select {
// An error from either of the goroutines means we're done so signal
// caller with the error and signal all goroutines to quit.
case err := <-bi.errChan:
resultsChan <- &importResults{
blocksProcessed: bi.blocksProcessed,
blocksImported: bi.blocksImported,
err: err,
}
close(bi.quit)
// The import finished normally.
case <-bi.doneChan:
resultsChan <- &importResults{
blocksProcessed: bi.blocksProcessed,
blocksImported: bi.blocksImported,
err: nil,
}
}
}
// Import is the core function which handles importing the blocks from the file
// associated with the block importer to the database. It returns a channel
// on which the results will be returned when the operation has completed.
func (bi *blockImporter) Import() chan *importResults {
// Start up the read and process handling goroutines. This setup allows
// blocks to be read from disk in parallel while being processed.
bi.wg.Add(2)
spawn("blockImporter.readHandler", bi.readHandler)
spawn("blockImporter.processHandler", bi.processHandler)
// Wait for the import to finish in a separate goroutine and signal
// the status handler when done.
spawn("blockImporter.sendToDoneChan", func() {
bi.wg.Wait()
bi.doneChan <- true
})
// Start the status handler and return the result channel that it will
// send the results on when the import is done.
resultChan := make(chan *importResults)
spawn("blockImporter.statusHandler", func() {
bi.statusHandler(resultChan)
})
return resultChan
}
// newBlockImporter returns a new importer for the provided file reader seeker.
func newBlockImporter(r io.ReadSeeker) (*blockImporter, error) {
// Create the acceptance index if needed.
var indexes []indexers.Indexer
if cfg.AcceptanceIndex {
log.Info("Acceptance index is enabled")
indexes = append(indexes, indexers.NewAcceptanceIndex())
}
// Create an index manager if any of the optional indexes are enabled.
var indexManager blockdag.IndexManager
if len(indexes) > 0 {
indexManager = indexers.NewManager(indexes)
}
dag, err := blockdag.New(&blockdag.Config{
DAGParams: ActiveConfig().NetParams(),
TimeSource: blockdag.NewTimeSource(),
IndexManager: indexManager,
})
if err != nil {
return nil, err
}
return &blockImporter{
r: r,
processQueue: make(chan []byte, 2),
doneChan: make(chan bool),
errChan: make(chan error),
quit: make(chan struct{}),
dag: dag,
lastLogTime: mstime.Now(),
}, nil
}
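
Import above wires four goroutines together: a reader feeding a small bounded queue, a processor draining it, a WaitGroup watcher that signals completion, and a status handler that reports the first error or clean completion. A stripped-down sketch of the same pipeline pattern with placeholder work items (everything here is illustrative):

package main

import (
	"fmt"
	"sync"
)

// runPipeline mirrors the Import wiring: a reader fills a bounded
// queue, a processor drains it, and a status goroutine reports either
// the first error or a nil result once both workers finish.
func runPipeline(items []string) chan error {
	queue := make(chan string, 2)  // small buffer, like processQueue above
	errChan := make(chan error)    // workers would send failures here
	done := make(chan struct{})
	var wg sync.WaitGroup

	wg.Add(2)
	go func() { // reader: feed the queue, then close it
		defer wg.Done()
		defer close(queue)
		for _, item := range items {
			queue <- item
		}
	}()
	go func() { // processor: drain the queue until it is closed
		defer wg.Done()
		for item := range queue {
			fmt.Println("processed", item)
		}
	}()
	go func() { // watcher: signal when both workers are done
		wg.Wait()
		close(done)
	}()

	results := make(chan error)
	go func() { // status handler: first error wins, otherwise success
		select {
		case err := <-errChan:
			results <- err
		case <-done:
			results <- nil
		}
	}()
	return results
}

func main() {
	if err := <-runPipeline([]string{"a", "b", "c"}); err != nil {
		fmt.Println("import failed:", err)
		return
	}
	fmt.Println("import finished")
}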