[NOD-1119] Refactor main and remove the p2p layer from it (#785)

* [NOD-1119] Removed the p2p server from all server initialization

* [NOD-1119] Removed all calls to the p2p server from main

* [NOD-1119] Simplified some functions so they no longer take both dag and dagParams

* [NOD-1119] Simplified creation of the mempool and RPC server

* [NOD-1119] Set up indexes in a separate function

* [NOD-1119] Some cleanup in NewServer

* [NOD-1119] Fixed the mempool test

* [NOD-1119] Fixed go format

* [NOD-1119] Unexport dag.timeSource

* [NOD-1119] Removed the server package, renamed the Server object to Kaspad, and made it minimal

* [NOD-1119] Delete redundant functions

* Unexported kaspad and related methods

* [NOD-1119] Unexported newKaspad

* [NOD-1119] Revised comments and removed a redundant function

* [NOD-1119] Made comments on unexported methods lower-case

* [NOD-1119] Some more refactoring in newKaspad
Svarog 2020-07-06 18:00:28 +03:00 committed by GitHub
parent 580e37943b
commit 1a43cabfb9
36 changed files with 615 additions and 559 deletions


@ -223,7 +223,7 @@ func (node *blockNode) isGenesis() bool {
}
func (node *blockNode) finalityScore(dag *BlockDAG) uint64 {
return node.blueScore / uint64(dag.dagParams.FinalityInterval)
return node.blueScore / uint64(dag.Params.FinalityInterval)
}
// String returns a string that contains the block hash.


@ -177,7 +177,7 @@ func coinbaseOutputForBlueBlock(dag *BlockDAG, blueBlock *blockNode,
}
}
totalReward := CalcBlockSubsidy(blueBlock.blueScore, dag.dagParams) + totalFees
totalReward := CalcBlockSubsidy(blueBlock.blueScore, dag.Params) + totalFees
if totalReward == 0 {
return nil, nil


@ -7,7 +7,6 @@ package blockdag
import (
"compress/bzip2"
"encoding/binary"
"github.com/kaspanet/kaspad/util/mstime"
"io"
"os"
"path/filepath"
@ -15,6 +14,8 @@ import (
"testing"
"time"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/dagconfig"
@ -87,7 +88,7 @@ func loadUTXOSet(filename string) (UTXOSet, error) {
// TestSetCoinbaseMaturity makes the ability to set the coinbase maturity
// available when running tests.
func (dag *BlockDAG) TestSetCoinbaseMaturity(maturity uint64) {
dag.dagParams.BlockCoinbaseMaturity = maturity
dag.Params.BlockCoinbaseMaturity = maturity
}
// newTestDAG returns a DAG that is usable for synthetic tests. It is
@ -98,7 +99,7 @@ func newTestDAG(params *dagconfig.Params) *BlockDAG {
index := newBlockIndex(params)
targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second)
dag := &BlockDAG{
dagParams: params,
Params: params,
timeSource: NewTimeSource(),
targetTimePerBlock: targetTimePerBlock,
difficultyAdjustmentWindowSize: params.DifficultyAdjustmentWindowSize,


@ -6,12 +6,13 @@ package blockdag
import (
"fmt"
"github.com/kaspanet/kaspad/util/mstime"
"math"
"sort"
"sync"
"time"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/pkg/errors"
@ -62,7 +63,7 @@ type BlockDAG struct {
// The following fields are set when the instance is created and can't
// be changed afterwards, so there is no need to protect them with a
// separate mutex.
dagParams *dagconfig.Params
Params *dagconfig.Params
timeSource TimeSource
sigCache *txscript.SigCache
indexManager IndexManager
@ -889,7 +890,7 @@ func (dag *BlockDAG) finalizeNodesBelowFinalityPoint(deleteDiffData bool) {
}
var nodesToDelete []*blockNode
if deleteDiffData {
nodesToDelete = make([]*blockNode, 0, dag.dagParams.FinalityInterval)
nodesToDelete = make([]*blockNode, 0, dag.Params.FinalityInterval)
}
for len(queue) > 0 {
var current *blockNode
@ -1341,7 +1342,7 @@ func (dag *BlockDAG) isSynced() bool {
var dagTimestamp int64
selectedTip := dag.selectedTip()
if selectedTip == nil {
dagTimestamp = dag.dagParams.GenesisBlock.Header.Timestamp.UnixMilliseconds()
dagTimestamp = dag.Params.GenesisBlock.Header.Timestamp.UnixMilliseconds()
} else {
dagTimestamp = selectedTip.timestamp
}
@ -2042,7 +2043,7 @@ func New(config *Config) (*BlockDAG, error) {
index := newBlockIndex(params)
dag := &BlockDAG{
dagParams: params,
Params: params,
timeSource: config.TimeSource,
sigCache: config.SigCache,
indexManager: config.IndexManager,
@ -2087,7 +2088,7 @@ func New(config *Config) (*BlockDAG, error) {
genesis, ok := index.LookupNode(params.GenesisHash)
if !ok {
genesisBlock := util.NewBlock(dag.dagParams.GenesisBlock)
genesisBlock := util.NewBlock(dag.Params.GenesisBlock)
// To prevent the creation of a new err variable unintentionally so the
// deferred function above could read err - declare isOrphan and isDelayed explicitly.
var isOrphan, isDelayed bool
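The hunks above expose BlockDAG.dagParams as the exported Params field, so code outside the blockdag package can read network parameters straight off the DAG instead of carrying a separate *dagconfig.Params around. A minimal sketch of a call site under that assumption (the helper name is hypothetical; the field and parameter names appear in the diff):

// finalityIntervalOf is a hypothetical helper: dag.Params replaces the
// old unexported dag.dagParams, so no separate params argument is needed.
func finalityIntervalOf(dag *blockdag.BlockDAG) uint64 {
    return dag.Params.FinalityInterval
}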


@ -699,7 +699,7 @@ func TestConfirmations(t *testing.T) {
// Add a chain of blocks
chainBlocks := make([]*wire.MsgBlock, 5)
chainBlocks[0] = dag.dagParams.GenesisBlock
chainBlocks[0] = dag.Params.GenesisBlock
for i := uint32(1); i < 5; i++ {
chainBlocks[i] = prepareAndProcessBlockByParentMsgBlocks(t, dag, chainBlocks[i-1])
}
@ -808,7 +808,7 @@ func TestAcceptingBlock(t *testing.T) {
numChainBlocks := uint32(10)
chainBlocks := make([]*wire.MsgBlock, numChainBlocks)
chainBlocks[0] = dag.dagParams.GenesisBlock
chainBlocks[0] = dag.Params.GenesisBlock
for i := uint32(1); i <= numChainBlocks-1; i++ {
chainBlocks[i] = prepareAndProcessBlockByParentMsgBlocks(t, dag, chainBlocks[i-1])
}
@ -954,7 +954,7 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
flushUTXODiffStore()
return node
}
finalityInterval := dag.dagParams.FinalityInterval
finalityInterval := dag.Params.FinalityInterval
nodes := make([]*blockNode, 0, finalityInterval)
currentNode := dag.genesis
nodes = append(nodes, currentNode)


@ -165,8 +165,8 @@ func saveDAGState(dbContext dbaccess.Context, state *dagState) error {
// genesis block and the node's local subnetwork id.
func (dag *BlockDAG) createDAGState(localSubnetworkID *subnetworkid.SubnetworkID) error {
return saveDAGState(dbaccess.NoTx(), &dagState{
TipHashes: []*daghash.Hash{dag.dagParams.GenesisHash},
LastFinalityPoint: dag.dagParams.GenesisHash,
TipHashes: []*daghash.Hash{dag.Params.GenesisHash},
LastFinalityPoint: dag.Params.GenesisHash,
LocalSubnetworkID: localSubnetworkID,
})
}
@ -293,7 +293,7 @@ func (dag *BlockDAG) initBlockIndex() (unprocessedBlockNodes []*blockNode, err e
}
if dag.blockCount == 0 {
if !node.hash.IsEqual(dag.dagParams.GenesisHash) {
if !node.hash.IsEqual(dag.Params.GenesisHash) {
return nil, errors.Errorf("Expected "+
"first entry in block index to be genesis block, "+
"found %s", node.hash)


@ -34,7 +34,7 @@ func (dag *BlockDAG) requiredDifficulty(bluestParent *blockNode, newBlockTime ms
defer bigintpool.Release(newTarget)
windowTimeStampDifference := bigintpool.Acquire(windowMaxTimeStamp - windowMinTimestamp)
defer bigintpool.Release(windowTimeStampDifference)
targetTimePerBlock := bigintpool.Acquire(dag.dagParams.TargetTimePerBlock.Milliseconds())
targetTimePerBlock := bigintpool.Acquire(dag.Params.TargetTimePerBlock.Milliseconds())
defer bigintpool.Release(targetTimePerBlock)
difficultyAdjustmentWindowSize := bigintpool.Acquire(int64(dag.difficultyAdjustmentWindowSize))
defer bigintpool.Release(difficultyAdjustmentWindowSize)
@ -44,7 +44,7 @@ func (dag *BlockDAG) requiredDifficulty(bluestParent *blockNode, newBlockTime ms
Mul(newTarget, windowTimeStampDifference).
Div(newTarget, targetTimePerBlock).
Div(newTarget, difficultyAdjustmentWindowSize)
if newTarget.Cmp(dag.dagParams.PowMax) > 0 {
if newTarget.Cmp(dag.Params.PowMax) > 0 {
return dag.powMaxBits
}
newTargetBits := util.BigToCompact(newTarget)


@ -2,11 +2,12 @@ package blockdag_test
import (
"fmt"
"github.com/pkg/errors"
"math"
"strings"
"testing"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/util/daghash"
@ -49,7 +50,7 @@ func TestFinality(t *testing.T) {
}
defer teardownFunc()
buildNodeToDag := func(parentHashes []*daghash.Hash) (*util.Block, error) {
msgBlock, err := mining.PrepareBlockForTest(dag, &params, parentHashes, nil, false)
msgBlock, err := mining.PrepareBlockForTest(dag, parentHashes, nil, false)
if err != nil {
return nil, err
}
@ -221,7 +222,7 @@ func TestChainedTransactions(t *testing.T) {
}
defer teardownFunc()
block1, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{params.GenesisHash}, nil, false)
block1, err := mining.PrepareBlockForTest(dag, []*daghash.Hash{params.GenesisHash}, nil, false)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
@ -269,7 +270,7 @@ func TestChainedTransactions(t *testing.T) {
}
chainedTx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{chainedTxIn}, []*wire.TxOut{chainedTxOut})
block2, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{tx}, false)
block2, err := mining.PrepareBlockForTest(dag, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{tx}, false)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
@ -315,7 +316,7 @@ func TestChainedTransactions(t *testing.T) {
}
nonChainedTx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{nonChainedTxIn}, []*wire.TxOut{nonChainedTxOut})
block3, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{nonChainedTx}, false)
block3, err := mining.PrepareBlockForTest(dag, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{nonChainedTx}, false)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
@ -372,7 +373,7 @@ func TestOrderInDiffFromAcceptanceData(t *testing.T) {
}
// Create the block
msgBlock, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{previousBlock.Hash()}, txs, false)
msgBlock, err := mining.PrepareBlockForTest(dag, []*daghash.Hash{previousBlock.Hash()}, txs, false)
if err != nil {
t.Fatalf("TestOrderInDiffFromAcceptanceData: Failed to prepare block: %s", err)
}
@ -429,7 +430,7 @@ func TestGasLimit(t *testing.T) {
cbTxs := []*wire.MsgTx{}
for i := 0; i < 4; i++ {
fundsBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), nil, false)
fundsBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), nil, false)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
@ -481,7 +482,7 @@ func TestGasLimit(t *testing.T) {
tx2 := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{tx2In}, []*wire.TxOut{tx2Out}, subnetworkID, 10000, []byte{})
// Here we check that we can't process a block that has transactions that exceed the gas limit
overLimitBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1, tx2}, true)
overLimitBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), []*wire.MsgTx{tx1, tx2}, true)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
@ -516,7 +517,7 @@ func TestGasLimit(t *testing.T) {
subnetworkID, math.MaxUint64, []byte{})
// Here we check that we can't process a block that its transactions' gas overflows uint64
overflowGasBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1, overflowGasTx}, true)
overflowGasBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), []*wire.MsgTx{tx1, overflowGasTx}, true)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
@ -550,7 +551,7 @@ func TestGasLimit(t *testing.T) {
nonExistentSubnetworkTx := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{nonExistentSubnetworkTxIn},
[]*wire.TxOut{nonExistentSubnetworkTxOut}, nonExistentSubnetwork, 1, []byte{})
nonExistentSubnetworkBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{nonExistentSubnetworkTx, overflowGasTx}, true)
nonExistentSubnetworkBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), []*wire.MsgTx{nonExistentSubnetworkTx, overflowGasTx}, true)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
@ -571,7 +572,7 @@ func TestGasLimit(t *testing.T) {
}
// Here we check that we can process a block with a transaction that doesn't exceed the gas limit
validBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1}, true)
validBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), []*wire.MsgTx{tx1}, true)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}


@ -78,13 +78,13 @@ func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blo
}
candidateAnticoneSize++
if candidateAnticoneSize > dag.dagParams.K {
if candidateAnticoneSize > dag.Params.K {
// k-cluster violation: The candidate's blue anticone exceeded k
possiblyBlue = false
break
}
if candidateBluesAnticoneSizes[block] == dag.dagParams.K {
if candidateBluesAnticoneSizes[block] == dag.Params.K {
// k-cluster violation: A block in candidate's blue anticone already
// has k blue blocks in its own anticone
possiblyBlue = false
@ -93,7 +93,7 @@ func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blo
// This is a sanity check that validates that a blue
// block's blue anticone is not already larger than K.
if candidateBluesAnticoneSizes[block] > dag.dagParams.K {
if candidateBluesAnticoneSizes[block] > dag.Params.K {
return nil, errors.New("found blue anticone size larger than k")
}
}
@ -109,7 +109,7 @@ func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blo
// The maximum length of node.blues can be K+1 because
// it contains the selected parent.
if dagconfig.KType(len(newNode.blues)) == dag.dagParams.K+1 {
if dagconfig.KType(len(newNode.blues)) == dag.Params.K+1 {
break
}
}


@ -294,15 +294,15 @@ func TestBlueAnticoneSizeErrors(t *testing.T) {
defer teardownFunc()
// Prepare a block chain with size K beginning with the genesis block
currentBlockA := dag.dagParams.GenesisBlock
for i := dagconfig.KType(0); i < dag.dagParams.K; i++ {
currentBlockA := dag.Params.GenesisBlock
for i := dagconfig.KType(0); i < dag.Params.K; i++ {
newBlock := prepareAndProcessBlockByParentMsgBlocks(t, dag, currentBlockA)
currentBlockA = newBlock
}
// Prepare another block chain with size K beginning with the genesis block
currentBlockB := dag.dagParams.GenesisBlock
for i := dagconfig.KType(0); i < dag.dagParams.K; i++ {
currentBlockB := dag.Params.GenesisBlock
for i := dagconfig.KType(0); i < dag.Params.K; i++ {
newBlock := prepareAndProcessBlockByParentMsgBlocks(t, dag, currentBlockB)
currentBlockB = newBlock
}
@ -342,8 +342,8 @@ func TestGHOSTDAGErrors(t *testing.T) {
defer teardownFunc()
// Add two child blocks to the genesis
block1 := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.dagParams.GenesisBlock)
block2 := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.dagParams.GenesisBlock)
block1 := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.Params.GenesisBlock)
block2 := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.Params.GenesisBlock)
// Add a child block to the previous two blocks
block3 := prepareAndProcessBlockByParentMsgBlocks(t, dag, block1, block2)


@ -221,7 +221,7 @@ func (dag *BlockDAG) processBlockNoLock(block *util.Block, flags BehaviorFlags)
// The number K*2 was chosen since in peace times anticone is limited to K blocks,
// while some red block can make it a bit bigger, but much more than that indicates
// there might be some problem with the netsync process.
if flags&BFIsSync == BFIsSync && dagconfig.KType(len(dag.orphans)) < dag.dagParams.K*2 {
if flags&BFIsSync == BFIsSync && dagconfig.KType(len(dag.orphans)) < dag.Params.K*2 {
log.Debugf("Adding orphan block %s. This is normal part of netsync process", blockHash)
} else {
log.Infof("Adding orphan block %s", blockHash)


@ -1,11 +1,12 @@
package blockdag
import (
"github.com/kaspanet/kaspad/util"
"path/filepath"
"testing"
"time"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util/daghash"
)
@ -89,18 +90,18 @@ func TestProcessDelayedBlocks(t *testing.T) {
}
}()
initialTime := dag1.dagParams.GenesisBlock.Header.Timestamp
initialTime := dag1.Params.GenesisBlock.Header.Timestamp
// Here we use a fake time source that returns a timestamp
// one hour into the future to make delayedBlock artificially
// valid.
dag1.timeSource = newFakeTimeSource(initialTime.Add(time.Hour))
delayedBlock, err := PrepareBlockForTest(dag1, []*daghash.Hash{dag1.dagParams.GenesisBlock.BlockHash()}, nil)
delayedBlock, err := PrepareBlockForTest(dag1, []*daghash.Hash{dag1.Params.GenesisBlock.BlockHash()}, nil)
if err != nil {
t.Fatalf("error in PrepareBlockForTest: %s", err)
}
blockDelay := time.Duration(dag1.dagParams.TimestampDeviationTolerance*uint64(dag1.targetTimePerBlock)+5) * time.Second
blockDelay := time.Duration(dag1.Params.TimestampDeviationTolerance*uint64(dag1.targetTimePerBlock)+5) * time.Second
delayedBlock.Header.Timestamp = initialTime.Add(blockDelay)
isOrphan, isDelayed, err := dag1.ProcessBlock(util.NewBlock(delayedBlock), BFNoPoWCheck)
@ -177,7 +178,7 @@ func TestProcessDelayedBlocks(t *testing.T) {
t.Errorf("dag.IsKnownBlock should return true for a child of a delayed block")
}
blockBeforeDelay, err := PrepareBlockForTest(dag2, []*daghash.Hash{dag2.dagParams.GenesisBlock.BlockHash()}, nil)
blockBeforeDelay, err := PrepareBlockForTest(dag2, []*daghash.Hash{dag2.Params.GenesisBlock.BlockHash()}, nil)
if err != nil {
t.Fatalf("error in PrepareBlockForTest: %s", err)
}
@ -210,7 +211,7 @@ func TestProcessDelayedBlocks(t *testing.T) {
dag2.timeSource = newFakeTimeSource(initialTime.Add(timeUntilDelayedBlockIsValid))
blockAfterDelay, err := PrepareBlockForTest(dag2,
[]*daghash.Hash{dag2.dagParams.GenesisBlock.BlockHash()},
[]*daghash.Hash{dag2.Params.GenesisBlock.BlockHash()},
nil)
if err != nil {
t.Fatalf("error in PrepareBlockForTest: %s", err)


@ -903,7 +903,7 @@ func (rt *reachabilityTree) init(dbContext dbaccess.Context) error {
if !dbaccess.IsNotFoundError(err) {
return err
}
reindexRootHash = rt.dag.dagParams.GenesisHash
reindexRootHash = rt.dag.Params.GenesisHash
}
// Init the reindex root


@ -52,7 +52,7 @@ func (dag *BlockDAG) IsSyncRateBelowThreshold(maxDeviation float64) bool {
return false
}
return dag.syncRate() < 1/dag.dagParams.TargetTimePerBlock.Seconds()*maxDeviation
return dag.syncRate() < 1/dag.Params.TargetTimePerBlock.Seconds()*maxDeviation
}
func (dag *BlockDAG) uptime() time.Duration {


@ -249,7 +249,7 @@ func PrepareBlockForTest(dag *BlockDAG, parentHashes []*daghash.Hash, transactio
oldVirtual := SetVirtualForTest(dag, newVirtual)
defer SetVirtualForTest(dag, oldVirtual)
OpTrueAddr, err := opTrueAddress(dag.dagParams.Prefix)
OpTrueAddr, err := opTrueAddress(dag.Params.Prefix)
if err != nil {
return nil, err
}


@ -297,11 +297,11 @@ func (dag *BlockDAG) IsDeploymentActive(deploymentID uint32) (bool, error) {
//
// This function MUST be called with the DAG state lock held (for writes).
func (dag *BlockDAG) deploymentState(prevNode *blockNode, deploymentID uint32) (ThresholdState, error) {
if deploymentID > uint32(len(dag.dagParams.Deployments)) {
if deploymentID > uint32(len(dag.Params.Deployments)) {
return ThresholdFailed, errors.Errorf("deployment ID %d does not exist", deploymentID)
}
deployment := &dag.dagParams.Deployments[deploymentID]
deployment := &dag.Params.Deployments[deploymentID]
checker := deploymentChecker{deployment: deployment, dag: dag}
cache := &dag.deploymentCaches[deploymentID]
@ -325,8 +325,8 @@ func (dag *BlockDAG) initThresholdCaches() error {
return err
}
}
for id := 0; id < len(dag.dagParams.Deployments); id++ {
deployment := &dag.dagParams.Deployments[id]
for id := 0; id < len(dag.Params.Deployments); id++ {
deployment := &dag.Params.Deployments[id]
cache := &dag.deploymentCaches[id]
checker := deploymentChecker{deployment: deployment, dag: dag}
_, err := dag.thresholdState(prevNode, checker, cache)


@ -265,9 +265,9 @@ func (dag *BlockDAG) checkProofOfWork(header *wire.BlockHeader, flags BehaviorFl
}
// The target difficulty must be less than the maximum allowed.
if target.Cmp(dag.dagParams.PowMax) > 0 {
if target.Cmp(dag.Params.PowMax) > 0 {
str := fmt.Sprintf("block target difficulty of %064x is "+
"higher than max of %064x", target, dag.dagParams.PowMax)
"higher than max of %064x", target, dag.Params.PowMax)
return ruleError(ErrUnexpectedDifficulty, str)
}
@ -403,7 +403,7 @@ func (dag *BlockDAG) checkBlockHeaderSanity(block *util.Block, flags BehaviorFla
}
if len(header.ParentHashes) == 0 {
if !header.BlockHash().IsEqual(dag.dagParams.GenesisHash) {
if !header.BlockHash().IsEqual(dag.Params.GenesisHash) {
return 0, ruleError(ErrNoParents, "block has no parents")
}
} else {
@ -554,7 +554,7 @@ func (dag *BlockDAG) checkBlockTransactionOrder(block *util.Block) error {
func (dag *BlockDAG) checkNoNonNativeTransactions(block *util.Block) error {
// Disallow non-native/coinbase subnetworks in networks that don't allow them
if !dag.dagParams.EnableNonNativeSubnetworks {
if !dag.Params.EnableNonNativeSubnetworks {
transactions := block.Transactions()
for _, tx := range transactions {
if !(tx.MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDNative) ||
@ -939,7 +939,7 @@ func (dag *BlockDAG) checkConnectToPastUTXO(block *blockNode, pastUTXO UTXOSet,
for _, tx := range transactions {
txFee, err := CheckTransactionInputsAndCalulateFee(tx, block.blueScore, pastUTXO,
dag.dagParams, fastAdd)
dag.Params, fastAdd)
if err != nil {
return nil, err
}


@ -5,12 +5,13 @@
package blockdag
import (
"github.com/kaspanet/kaspad/util/mstime"
"math"
"path/filepath"
"testing"
"time"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/dagconfig"
@ -554,9 +555,9 @@ func TestValidateParents(t *testing.T) {
}
defer teardownFunc()
a := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.dagParams.GenesisBlock)
a := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.Params.GenesisBlock)
b := prepareAndProcessBlockByParentMsgBlocks(t, dag, a)
c := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.dagParams.GenesisBlock)
c := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.Params.GenesisBlock)
aNode := nodeByMsgBlock(t, dag, a)
bNode := nodeByMsgBlock(t, dag, b)


@ -78,7 +78,7 @@ func (c bitConditionChecker) EndTime() uint64 {
//
// This is part of the thresholdConditionChecker interface implementation.
func (c bitConditionChecker) RuleChangeActivationThreshold() uint64 {
return c.dag.dagParams.RuleChangeActivationThreshold
return c.dag.Params.RuleChangeActivationThreshold
}
// MinerConfirmationWindow is the number of blocks in each threshold state
@ -89,7 +89,7 @@ func (c bitConditionChecker) RuleChangeActivationThreshold() uint64 {
//
// This is part of the thresholdConditionChecker interface implementation.
func (c bitConditionChecker) MinerConfirmationWindow() uint64 {
return c.dag.dagParams.MinerConfirmationWindow
return c.dag.Params.MinerConfirmationWindow
}
// Condition returns true when the specific bit associated with the checker is
@ -159,7 +159,7 @@ func (c deploymentChecker) EndTime() uint64 {
//
// This is part of the thresholdConditionChecker interface implementation.
func (c deploymentChecker) RuleChangeActivationThreshold() uint64 {
return c.dag.dagParams.RuleChangeActivationThreshold
return c.dag.Params.RuleChangeActivationThreshold
}
// MinerConfirmationWindow is the number of blocks in each threshold state
@ -170,7 +170,7 @@ func (c deploymentChecker) RuleChangeActivationThreshold() uint64 {
//
// This is part of the thresholdConditionChecker interface implementation.
func (c deploymentChecker) MinerConfirmationWindow() uint64 {
return c.dag.dagParams.MinerConfirmationWindow
return c.dag.Params.MinerConfirmationWindow
}
// Condition returns true when the specific bit defined by the deployment
@ -198,8 +198,8 @@ func (dag *BlockDAG) calcNextBlockVersion(prevNode *blockNode) (int32, error) {
// that is either in the process of being voted on, or locked in for the
// activation at the next threshold window change.
expectedVersion := uint32(vbTopBits)
for id := 0; id < len(dag.dagParams.Deployments); id++ {
deployment := &dag.dagParams.Deployments[id]
for id := 0; id < len(dag.Params.Deployments); id++ {
deployment := &dag.Params.Deployments[id]
cache := &dag.deploymentCaches[id]
checker := deploymentChecker{deployment: deployment, dag: dag}
state, err := dag.thresholdState(prevNode, checker, cache)

kaspad.go

@ -1,291 +1,162 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"fmt"
_ "net/http/pprof"
"os"
"path/filepath"
"runtime"
"runtime/pprof"
"strings"
"time"
"github.com/kaspanet/kaspad/dbaccess"
"sync/atomic"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/blockdag/indexers"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/limits"
"github.com/kaspanet/kaspad/server"
"github.com/kaspanet/kaspad/mempool"
"github.com/kaspanet/kaspad/mining"
"github.com/kaspanet/kaspad/server/rpc"
"github.com/kaspanet/kaspad/signal"
"github.com/kaspanet/kaspad/util/fs"
"github.com/kaspanet/kaspad/util/panics"
"github.com/kaspanet/kaspad/util/profiling"
"github.com/kaspanet/kaspad/version"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
)
const (
// blockDbNamePrefix is the prefix for the block database name. The
// database type is appended to this value to form the full block
// database name.
blockDbNamePrefix = "blocks"
)
// kaspad is a wrapper for all the kaspad services
type kaspad struct {
rpcServer *rpc.Server
var (
cfg *config.Config
)
started, shutdown int32
}
// winServiceMain is only invoked on Windows. It detects when kaspad is running
// as a service and reacts accordingly.
var winServiceMain func() (bool, error)
// kaspadMain is the real main function for kaspad. It is necessary to work
// around the fact that deferred functions do not run when os.Exit() is called.
// The optional serverChan parameter is mainly used by the service code to be
// notified with the server once it is setup so it can gracefully stop it when
// requested from the service control manager.
func kaspadMain(serverChan chan<- *server.Server) error {
interrupt := signal.InterruptListener()
// Load configuration and parse command line. This function also
// initializes logging and configures it accordingly.
err := config.LoadAndSetActiveConfig()
if err != nil {
return err
// start launches all the kaspad services.
func (s *kaspad) start() {
// Already started?
if atomic.AddInt32(&s.started, 1) != 1 {
return
}
cfg = config.ActiveConfig()
defer panics.HandlePanic(kasdLog, nil)
// Get a channel that will be closed when a shutdown signal has been
// triggered either from an OS signal such as SIGINT (Ctrl+C) or from
// another subsystem such as the RPC server.
defer kasdLog.Info("Shutdown complete")
log.Trace("Starting kaspad")
// Show version at startup.
kasdLog.Infof("Version %s", version.Version())
cfg := config.ActiveConfig()
// Enable http profiling server if requested.
if cfg.Profile != "" {
if !cfg.DisableRPC {
s.rpcServer.Start()
}
}
// stop gracefully shuts down all the kaspad services.
func (s *kaspad) stop() error {
// Make sure this only happens once.
if atomic.AddInt32(&s.shutdown, 1) != 1 {
log.Infof("Kaspad is already in the process of shutting down")
return nil
}
log.Warnf("Kaspad shutting down")
// Shutdown the RPC server if it's not disabled.
if !config.ActiveConfig().DisableRPC {
s.rpcServer.Stop()
}
return nil
}
// newKaspad returns a new kaspad instance configured to listen on addr for the
// kaspa network type specified by dagParams. Use start to begin accepting
// connections from peers.
func newKaspad(listenAddrs []string, interrupt <-chan struct{}) (*kaspad, error) {
indexManager, acceptanceIndex := setupIndexes()
sigCache := txscript.NewSigCache(config.ActiveConfig().SigCacheMaxSize)
// Create a new block DAG instance with the appropriate configuration.
dag, err := setupDAG(interrupt, sigCache, indexManager)
if err != nil {
return nil, err
}
txMempool := setupMempool(dag, sigCache)
rpcServer, err := setupRPC(dag, txMempool, sigCache, acceptanceIndex)
if err != nil {
return nil, err
}
return &kaspad{
rpcServer: rpcServer,
}, nil
}
func setupDAG(interrupt <-chan struct{}, sigCache *txscript.SigCache, indexManager blockdag.IndexManager) (*blockdag.BlockDAG, error) {
dag, err := blockdag.New(&blockdag.Config{
Interrupt: interrupt,
DAGParams: config.ActiveConfig().NetParams(),
TimeSource: blockdag.NewTimeSource(),
SigCache: sigCache,
IndexManager: indexManager,
SubnetworkID: config.ActiveConfig().SubnetworkID,
})
return dag, err
}
func setupIndexes() (blockdag.IndexManager, *indexers.AcceptanceIndex) {
// Create indexes if needed.
var indexes []indexers.Indexer
var acceptanceIndex *indexers.AcceptanceIndex
if config.ActiveConfig().AcceptanceIndex {
log.Info("acceptance index is enabled")
acceptanceIndex = indexers.NewAcceptanceIndex()
indexes = append(indexes, acceptanceIndex)
}
// Create an index manager if any of the optional indexes are enabled.
if len(indexes) == 0 {
return nil, nil
}
indexManager := indexers.NewManager(indexes)
return indexManager, acceptanceIndex
}
func setupMempool(dag *blockdag.BlockDAG, sigCache *txscript.SigCache) *mempool.TxPool {
mempoolConfig := mempool.Config{
Policy: mempool.Policy{
AcceptNonStd: config.ActiveConfig().RelayNonStd,
MaxOrphanTxs: config.ActiveConfig().MaxOrphanTxs,
MaxOrphanTxSize: config.DefaultMaxOrphanTxSize,
MinRelayTxFee: config.ActiveConfig().MinRelayTxFee,
MaxTxVersion: 1,
},
CalcSequenceLockNoLock: func(tx *util.Tx, utxoSet blockdag.UTXOSet) (*blockdag.SequenceLock, error) {
return dag.CalcSequenceLockNoLock(tx, utxoSet, true)
},
IsDeploymentActive: dag.IsDeploymentActive,
SigCache: sigCache,
DAG: dag,
}
return mempool.New(&mempoolConfig)
}
func setupRPC(dag *blockdag.BlockDAG, txMempool *mempool.TxPool, sigCache *txscript.SigCache,
acceptanceIndex *indexers.AcceptanceIndex) (*rpc.Server, error) {
cfg := config.ActiveConfig()
if !cfg.DisableRPC {
policy := mining.Policy{
BlockMaxMass: cfg.BlockMaxMass,
}
blockTemplateGenerator := mining.NewBlkTmplGenerator(&policy, txMempool, dag, sigCache)
rpcServer, err := rpc.NewRPCServer(dag, txMempool, acceptanceIndex, blockTemplateGenerator)
if err != nil {
return nil, err
}
// Signal process shutdown when the RPC server requests it.
spawn(func() {
profiling.Start(cfg.Profile, kasdLog)
<-rpcServer.RequestedProcessShutdown()
signal.ShutdownRequestChannel <- struct{}{}
})
return rpcServer, nil
}
// Write cpu profile if requested.
if cfg.CPUProfile != "" {
f, err := os.Create(cfg.CPUProfile)
if err != nil {
kasdLog.Errorf("Unable to create cpu profile: %s", err)
return err
}
pprof.StartCPUProfile(f)
defer f.Close()
defer pprof.StopCPUProfile()
}
// Perform upgrades to kaspad as new versions require it.
if err := doUpgrades(); err != nil {
kasdLog.Errorf("%s", err)
return err
}
// Return now if an interrupt signal was triggered.
if signal.InterruptRequested(interrupt) {
return nil
}
if cfg.ResetDatabase {
err := removeDatabase()
if err != nil {
kasdLog.Errorf("%s", err)
return err
}
}
// Open the database
err = openDB()
if err != nil {
kasdLog.Errorf("%s", err)
return err
}
defer func() {
kasdLog.Infof("Gracefully shutting down the database...")
err := dbaccess.Close()
if err != nil {
kasdLog.Errorf("Failed to close the database: %s", err)
}
}()
// Return now if an interrupt signal was triggered.
if signal.InterruptRequested(interrupt) {
return nil
}
// Drop indexes and exit if requested.
if cfg.DropAcceptanceIndex {
if err := indexers.DropAcceptanceIndex(); err != nil {
kasdLog.Errorf("%s", err)
return err
}
return nil
}
// Create server and start it.
server, err := server.NewServer(cfg.Listeners, config.ActiveConfig().NetParams(),
interrupt)
if err != nil {
kasdLog.Errorf("Unable to start server on %s: %+v",
strings.Join(cfg.Listeners, ", "), err)
return err
}
defer func() {
kasdLog.Infof("Gracefully shutting down the server...")
server.Stop()
shutdownDone := make(chan struct{})
go func() {
server.WaitForShutdown()
shutdownDone <- struct{}{}
}()
const shutdownTimeout = 2 * time.Minute
select {
case <-shutdownDone:
case <-time.After(shutdownTimeout):
kasdLog.Criticalf("Graceful shutdown timed out %s. Terminating...", shutdownTimeout)
}
srvrLog.Infof("Server shutdown complete")
}()
server.Start()
if serverChan != nil {
serverChan <- server
}
// Wait until the interrupt signal is received from an OS signal or
// shutdown is requested through one of the subsystems such as the RPC
// server.
<-interrupt
return nil
return nil, nil
}
func removeDatabase() error {
dbPath := blockDbPath(cfg.DbType)
return os.RemoveAll(dbPath)
}
// removeRegressionDB removes the existing regression test database if running
// in regression test mode and it already exists.
func removeRegressionDB(dbPath string) error {
// Don't do anything if not in regression test mode.
if !cfg.RegressionTest {
return nil
}
// Remove the old regression test database if it already exists.
fi, err := os.Stat(dbPath)
if err == nil {
kasdLog.Infof("Removing regression test database from '%s'", dbPath)
if fi.IsDir() {
err := os.RemoveAll(dbPath)
if err != nil {
return err
}
} else {
err := os.Remove(dbPath)
if err != nil {
return err
}
}
}
return nil
}
// dbPath returns the path to the block database given a database type.
func blockDbPath(dbType string) string {
// The database name is based on the database type.
dbName := blockDbNamePrefix + "_" + dbType
if dbType == "sqlite" {
dbName = dbName + ".db"
}
dbPath := filepath.Join(cfg.DataDir, dbName)
return dbPath
}
// warnMultipleDBs shows a warning if multiple block database types are detected.
// This is not a situation most users want. It is handy for development however
// to support multiple side-by-side databases.
func warnMultipleDBs() {
// This is intentionally not using the known db types which depend
// on the database types compiled into the binary since we want to
// detect legacy db types as well.
dbTypes := []string{"ffldb", "leveldb", "sqlite"}
duplicateDbPaths := make([]string, 0, len(dbTypes)-1)
for _, dbType := range dbTypes {
if dbType == cfg.DbType {
continue
}
// Store db path as a duplicate db if it exists.
dbPath := blockDbPath(dbType)
if fs.FileExists(dbPath) {
duplicateDbPaths = append(duplicateDbPaths, dbPath)
}
}
// Warn if there are extra databases.
if len(duplicateDbPaths) > 0 {
selectedDbPath := blockDbPath(cfg.DbType)
kasdLog.Warnf("WARNING: There are multiple block DAG databases "+
"using different database types.\nYou probably don't "+
"want to waste disk space by having more than one.\n"+
"Your current database is located at [%s].\nThe "+
"additional database is located at %s", selectedDbPath,
strings.Join(duplicateDbPaths, ", "))
}
}
func openDB() error {
dbPath := filepath.Join(cfg.DataDir, "db")
kasdLog.Infof("Loading database from '%s'", dbPath)
err := dbaccess.Open(dbPath)
if err != nil {
return err
}
return nil
}
func main() {
// Use all processor cores.
runtime.GOMAXPROCS(runtime.NumCPU())
// Up some limits.
if err := limits.SetLimits(); err != nil {
fmt.Fprintf(os.Stderr, "failed to set limits: %s\n", err)
os.Exit(1)
}
// Call serviceMain on Windows to handle running as a service. When
// the return isService flag is true, exit now since we ran as a
// service. Otherwise, just fall through to normal operation.
if runtime.GOOS == "windows" {
isService, err := winServiceMain()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
if isService {
os.Exit(0)
}
}
// Work around defer not working after os.Exit()
if err := kaspadMain(nil); err != nil {
os.Exit(1)
}
// WaitForShutdown blocks until the main listener and peer handlers are stopped.
func (s *kaspad) WaitForShutdown() {
// TODO(libp2p)
// s.p2pServer.WaitForShutdown()
}

log.go

@ -10,6 +10,5 @@ import (
"github.com/kaspanet/kaspad/util/panics"
)
var kasdLog, _ = logger.Get(logger.SubsystemTags.KASD)
var spawn = panics.GoroutineWrapperFunc(kasdLog)
var srvrLog, _ = logger.Get(logger.SubsystemTags.SRVR)
var log, _ = logger.Get(logger.SubsystemTags.KASD)
var spawn = panics.GoroutineWrapperFunc(log)

main.go (new file)

@ -0,0 +1,253 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"fmt"
_ "net/http/pprof"
"os"
"path/filepath"
"runtime"
"runtime/pprof"
"strings"
"time"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/blockdag/indexers"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/limits"
"github.com/kaspanet/kaspad/signal"
"github.com/kaspanet/kaspad/util/panics"
"github.com/kaspanet/kaspad/util/profiling"
"github.com/kaspanet/kaspad/version"
)
const (
// blockDbNamePrefix is the prefix for the block database name. The
// database type is appended to this value to form the full block
// database name.
blockDbNamePrefix = "blocks"
)
var (
cfg *config.Config
)
// winServiceMain is only invoked on Windows. It detects when kaspad is running
// as a service and reacts accordingly.
var winServiceMain func() (bool, error)
// kaspadMain is the real main function for kaspad. It is necessary to work
// around the fact that deferred functions do not run when os.Exit() is called.
// The optional startedChan is written to once all services have started.
func kaspadMain(startedChan chan<- struct{}) error {
interrupt := signal.InterruptListener()
// Load configuration and parse command line. This function also
// initializes logging and configures it accordingly.
err := config.LoadAndSetActiveConfig()
if err != nil {
return err
}
cfg = config.ActiveConfig()
defer panics.HandlePanic(log, nil)
// Get a channel that will be closed when a shutdown signal has been
// triggered either from an OS signal such as SIGINT (Ctrl+C) or from
// another subsystem such as the RPC server.
defer log.Info("Shutdown complete")
// Show version at startup.
log.Infof("Version %s", version.Version())
// Enable http profiling server if requested.
if cfg.Profile != "" {
spawn(func() {
profiling.Start(cfg.Profile, log)
})
}
// Write cpu profile if requested.
if cfg.CPUProfile != "" {
f, err := os.Create(cfg.CPUProfile)
if err != nil {
log.Errorf("Unable to create cpu profile: %s", err)
return err
}
pprof.StartCPUProfile(f)
defer f.Close()
defer pprof.StopCPUProfile()
}
// Perform upgrades to kaspad as new versions require it.
if err := doUpgrades(); err != nil {
log.Errorf("%s", err)
return err
}
// Return now if an interrupt signal was triggered.
if signal.InterruptRequested(interrupt) {
return nil
}
if cfg.ResetDatabase {
err := removeDatabase()
if err != nil {
log.Errorf("%s", err)
return err
}
}
// Open the database
err = openDB()
if err != nil {
log.Errorf("%s", err)
return err
}
defer func() {
log.Infof("Gracefully shutting down the database...")
err := dbaccess.Close()
if err != nil {
log.Errorf("Failed to close the database: %s", err)
}
}()
// Return now if an interrupt signal was triggered.
if signal.InterruptRequested(interrupt) {
return nil
}
// Drop indexes and exit if requested.
if cfg.DropAcceptanceIndex {
if err := indexers.DropAcceptanceIndex(); err != nil {
log.Errorf("%s", err)
return err
}
return nil
}
// Create kaspad and start it.
kaspad, err := newKaspad(cfg.Listeners, interrupt)
if err != nil {
log.Errorf("Unable to start kaspad on %s: %+v",
strings.Join(cfg.Listeners, ", "), err)
return err
}
defer func() {
log.Infof("Gracefully shutting down kaspad...")
kaspad.stop()
shutdownDone := make(chan struct{})
go func() {
kaspad.WaitForShutdown()
shutdownDone <- struct{}{}
}()
const shutdownTimeout = 2 * time.Minute
select {
case <-shutdownDone:
case <-time.After(shutdownTimeout):
log.Criticalf("Graceful shutdown timed out %s. Terminating...", shutdownTimeout)
}
log.Infof("Kaspad shutdown complete")
}()
kaspad.start()
if startedChan != nil {
startedChan <- struct{}{}
}
// Wait until the interrupt signal is received from an OS signal or
// shutdown is requested through one of the subsystems such as the RPC
// server.
<-interrupt
return nil
}
func removeDatabase() error {
dbPath := blockDbPath(cfg.DbType)
return os.RemoveAll(dbPath)
}
// removeRegressionDB removes the existing regression test database if running
// in regression test mode and it already exists.
func removeRegressionDB(dbPath string) error {
// Don't do anything if not in regression test mode.
if !cfg.RegressionTest {
return nil
}
// Remove the old regression test database if it already exists.
fi, err := os.Stat(dbPath)
if err == nil {
log.Infof("Removing regression test database from '%s'", dbPath)
if fi.IsDir() {
err := os.RemoveAll(dbPath)
if err != nil {
return err
}
} else {
err := os.Remove(dbPath)
if err != nil {
return err
}
}
}
return nil
}
// dbPath returns the path to the block database given a database type.
func blockDbPath(dbType string) string {
// The database name is based on the database type.
dbName := blockDbNamePrefix + "_" + dbType
if dbType == "sqlite" {
dbName = dbName + ".db"
}
dbPath := filepath.Join(cfg.DataDir, dbName)
return dbPath
}
func openDB() error {
dbPath := filepath.Join(cfg.DataDir, "db")
log.Infof("Loading database from '%s'", dbPath)
err := dbaccess.Open(dbPath)
if err != nil {
return err
}
return nil
}
func main() {
// Use all processor cores.
runtime.GOMAXPROCS(runtime.NumCPU())
// Up some limits.
if err := limits.SetLimits(); err != nil {
fmt.Fprintf(os.Stderr, "failed to set limits: %s\n", err)
os.Exit(1)
}
// Call serviceMain on Windows to handle running as a service. When
// the return isService flag is true, exit now since we ran as a
// service. Otherwise, just fall through to normal operation.
if runtime.GOOS == "windows" {
isService, err := winServiceMain()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
if isService {
os.Exit(0)
}
}
// Work around defer not working after os.Exit()
if err := kaspadMain(nil); err != nil {
os.Exit(1)
}
}


@ -7,14 +7,14 @@ package mempool
import (
"container/list"
"fmt"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/pkg/errors"
"sync"
"sync/atomic"
"time"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/logger"
"github.com/kaspanet/kaspad/mining"
"github.com/kaspanet/kaspad/rpcmodel"
@ -54,15 +54,6 @@ type Config struct {
// to policy.
Policy Policy
// DAGParams identifies which DAG parameters the txpool is
// associated with.
DAGParams *dagconfig.Params
// MedianTimePast defines the function to use in order to access the
// median time past calculated from the point-of-view of the current
// selected tip.
MedianTimePast func() mstime.Time
// CalcSequenceLockNoLock defines the function to use in order to generate
// the current sequence lock for the given transaction using the passed
// utxo set.
@ -825,7 +816,7 @@ func (mp *TxPool) maybeAcceptTransaction(tx *util.Tx, rejectDupOrphans bool) ([]
}
// Disallow non-native/coinbase subnetworks in networks that don't allow them
if !mp.cfg.DAGParams.EnableNonNativeSubnetworks {
if !mp.cfg.DAG.Params.EnableNonNativeSubnetworks {
if !(tx.MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDNative) ||
tx.MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase)) {
return nil, nil, txRuleError(wire.RejectInvalid, "non-native/coinbase subnetworks are not allowed")
@ -876,7 +867,7 @@ func (mp *TxPool) maybeAcceptTransaction(tx *util.Tx, rejectDupOrphans bool) ([]
// the transaction as though it was mined on top of the current tips
nextBlockBlueScore := mp.cfg.DAG.VirtualBlueScore()
medianTimePast := mp.cfg.MedianTimePast()
medianTimePast := mp.cfg.DAG.CalcPastMedianTime()
// Don't allow non-standard transactions if the network parameters
// forbid their acceptance.
@ -978,7 +969,7 @@ func (mp *TxPool) maybeAcceptTransaction(tx *util.Tx, rejectDupOrphans bool) ([]
// Also returns the fees associated with the transaction which will be
// used later.
txFee, err := blockdag.CheckTransactionInputsAndCalulateFee(tx, nextBlockBlueScore,
mp.mpUTXOSet, mp.cfg.DAGParams, false)
mp.mpUTXOSet, mp.cfg.DAG.Params, false)
if err != nil {
var dagRuleErr blockdag.RuleError
if ok := errors.As(err, &dagRuleErr); ok {
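With DAGParams and MedianTimePast removed from mempool.Config, the pool derives both from the DAG it is given (mp.cfg.DAG.Params and mp.cfg.DAG.CalcPastMedianTime() in the hunks above). A sketch of the slimmed-down construction, assuming the field names shown in this diff and in setupMempool earlier:

txMempool := mempool.New(&mempool.Config{
    Policy: mempool.Policy{MaxTxVersion: 1}, // remaining policy fields elided
    CalcSequenceLockNoLock: func(tx *util.Tx, utxoSet blockdag.UTXOSet) (*blockdag.SequenceLock, error) {
        return dag.CalcSequenceLockNoLock(tx, utxoSet, true)
    },
    IsDeploymentActive: dag.IsDeploymentActive,
    SigCache:           sigCache,
    DAG:                dag, // network params and median time past now come from here
})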


@ -7,8 +7,6 @@ package mempool
import (
"bytes"
"fmt"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/pkg/errors"
"math"
"reflect"
"runtime"
@ -16,6 +14,9 @@ import (
"sync"
"testing"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/util/testtools"
@ -254,7 +255,7 @@ func (tc *testContext) mineTransactions(transactions []*util.Tx, numberOfBlocks
if i == 0 {
blockTxs = msgTxs
}
block, err := mining.PrepareBlockForTest(tc.harness.txPool.cfg.DAG, tc.harness.txPool.cfg.DAGParams, tc.harness.txPool.cfg.DAG.TipHashes(), blockTxs, false)
block, err := mining.PrepareBlockForTest(tc.harness.txPool.cfg.DAG, tc.harness.txPool.cfg.DAG.TipHashes(), blockTxs, false)
if err != nil {
tc.t.Fatalf("PrepareBlockForTest: %s", err)
}
@ -347,8 +348,6 @@ func newPoolHarness(t *testing.T, dagParams *dagconfig.Params, numOutputs uint32
MinRelayTxFee: 1000, // 1 sompi per byte
MaxTxVersion: 1,
},
DAGParams: &params,
MedianTimePast: fDAG.MedianTimePast,
CalcSequenceLockNoLock: calcSequenceLock,
SigCache: nil,
}),


@ -9,7 +9,6 @@ import (
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
@ -86,12 +85,10 @@ type BlockTemplate struct {
// It also houses additional state required in order to ensure the templates
// are built on top of the current DAG and adhere to the consensus rules.
type BlkTmplGenerator struct {
policy *Policy
dagParams *dagconfig.Params
txSource TxSource
dag *blockdag.BlockDAG
timeSource blockdag.TimeSource
sigCache *txscript.SigCache
policy *Policy
txSource TxSource
dag *blockdag.BlockDAG
sigCache *txscript.SigCache
}
// NewBlkTmplGenerator returns a new block template generator for the given
@ -100,18 +97,15 @@ type BlkTmplGenerator struct {
// The additional state-related fields are required in order to ensure the
// templates are built on top of the current DAG and adhere to the
// consensus rules.
func NewBlkTmplGenerator(policy *Policy, params *dagconfig.Params,
func NewBlkTmplGenerator(policy *Policy,
txSource TxSource, dag *blockdag.BlockDAG,
timeSource blockdag.TimeSource,
sigCache *txscript.SigCache) *BlkTmplGenerator {
return &BlkTmplGenerator{
policy: policy,
dagParams: params,
txSource: txSource,
dag: dag,
timeSource: timeSource,
sigCache: sigCache,
policy: policy,
txSource: txSource,
dag: dag,
sigCache: sigCache,
}
}


@ -3,7 +3,6 @@ package mining
// This file's functions are not considered safe for regular use and should be used for test purposes only.
import (
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/pkg/errors"
@ -37,7 +36,9 @@ func (txs *fakeTxSource) HaveTransaction(txID *daghash.TxID) bool {
}
// PrepareBlockForTest generates a block with the proper merkle roots, coinbase transaction etc. This function is used for test purposes only
func PrepareBlockForTest(dag *blockdag.BlockDAG, params *dagconfig.Params, parentHashes []*daghash.Hash, transactions []*wire.MsgTx, forceTransactions bool) (*wire.MsgBlock, error) {
func PrepareBlockForTest(dag *blockdag.BlockDAG, parentHashes []*daghash.Hash, transactions []*wire.MsgTx, forceTransactions bool,
) (*wire.MsgBlock, error) {
newVirtual, err := blockdag.GetVirtualFromParentsForTest(dag, parentHashes)
if err != nil {
return nil, err
@ -59,10 +60,9 @@ func PrepareBlockForTest(dag *blockdag.BlockDAG, params *dagconfig.Params, paren
}
}
blockTemplateGenerator := NewBlkTmplGenerator(&policy,
params, txSource, dag, blockdag.NewTimeSource(), txscript.NewSigCache(100000))
blockTemplateGenerator := NewBlkTmplGenerator(&policy, txSource, dag, txscript.NewSigCache(100000))
OpTrueAddr, err := OpTrueAddress(params.Prefix)
OpTrueAddr, err := OpTrueAddress(dag.Params.Prefix)
if err != nil {
return nil, err
}
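Since the block template generator now reads dag.Params itself, test callers drop the explicit params argument. A sketch of the updated call, matching the call sites earlier in this diff:

// Build a test block on top of the current tips; network parameters are
// taken from dag.Params rather than passed in.
msgBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), nil, false)
if err != nil {
    t.Fatalf("PrepareBlockForTest: %v", err)
}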


@ -1,12 +1,13 @@
package mining
import (
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/subnetworkid"
"math"
"math/rand"
"sort"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/subnetworkid"
)
const (
@ -136,7 +137,7 @@ func (g *BlkTmplGenerator) collectCandidatesTxs(sourceTxs []*TxDesc) []*candidat
// A block can't contain non-finalized transactions.
if !blockdag.IsFinalizedTransaction(tx, nextBlockBlueScore,
g.timeSource.Now()) {
g.dag.Now()) {
log.Debugf("Skipping non-finalized tx %s", tx.ID())
continue
}


@ -1,15 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package server
import (
"github.com/kaspanet/kaspad/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var (
log, _ = logger.Get(logger.SubsystemTags.SRVR)
spawn = panics.GoroutineWrapperFunc(log)
)


@ -9,7 +9,6 @@ import (
"crypto/rand"
"encoding/binary"
"fmt"
"github.com/kaspanet/kaspad/util/mstime"
"math"
"net"
"runtime"
@ -217,8 +216,8 @@ type Server struct {
DAGParams *dagconfig.Params
AddrManager *addrmgr.AddrManager
connManager *connmgr.ConnManager
SigCache *txscript.SigCache
SyncManager *netsync.SyncManager
SigCache *txscript.SigCache
DAG *blockdag.BlockDAG
TxMemPool *mempool.TxPool
@ -1540,8 +1539,6 @@ func NewServer(listenAddrs []string, dagParams *dagconfig.Params, interrupt <-ch
MinRelayTxFee: config.ActiveConfig().MinRelayTxFee,
MaxTxVersion: 1,
},
DAGParams: dagParams,
MedianTimePast: func() mstime.Time { return s.DAG.CalcPastMedianTime() },
CalcSequenceLockNoLock: func(tx *util.Tx, utxoSet blockdag.UTXOSet) (*blockdag.SequenceLock, error) {
return s.DAG.CalcSequenceLockNoLock(tx, utxoSet, true)
},


@ -4,12 +4,13 @@ import (
"bytes"
"encoding/hex"
"fmt"
"github.com/kaspanet/kaspad/util/mstime"
"strconv"
"strings"
"sync"
"time"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/mining"
@ -61,16 +62,14 @@ type gbtWorkState struct {
minTimestamp mstime.Time
template *mining.BlockTemplate
notifyMap map[string]map[int64]chan struct{}
timeSource blockdag.TimeSource
payAddress util.Address
}
// newGbtWorkState returns a new instance of a gbtWorkState with all internal
// fields initialized and ready to use.
func newGbtWorkState(timeSource blockdag.TimeSource) *gbtWorkState {
func newGbtWorkState() *gbtWorkState {
return &gbtWorkState{
notifyMap: make(map[string]map[int64]chan struct{}),
timeSource: timeSource,
notifyMap: make(map[string]map[int64]chan struct{}),
}
}
@ -636,7 +635,7 @@ func (state *gbtWorkState) blockTemplateResult(s *Server) (*rpcmodel.GetBlockTem
template := state.template
msgBlock := template.Block
header := &msgBlock.Header
adjustedTime := state.timeSource.Now()
adjustedTime := dag.Now()
maxTime := adjustedTime.Add(time.Millisecond * time.Duration(dag.TimestampDeviationTolerance))
if header.Timestamp.After(maxTime) {
return nil, &rpcmodel.RPCError{


@ -12,7 +12,6 @@ import (
"encoding/base64"
"encoding/json"
"fmt"
"github.com/kaspanet/kaspad/addrmgr"
"io"
"io/ioutil"
"math/rand"
@ -23,6 +22,10 @@ import (
"sync/atomic"
"time"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/addrmgr"
"github.com/pkg/errors"
"github.com/btcsuite/websocket"
@ -34,11 +37,11 @@ import (
"github.com/kaspanet/kaspad/mining"
"github.com/kaspanet/kaspad/peer"
"github.com/kaspanet/kaspad/rpcmodel"
"github.com/kaspanet/kaspad/server/p2p"
"github.com/kaspanet/kaspad/server/serverutils"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/fs"
"github.com/kaspanet/kaspad/util/network"
"github.com/kaspanet/kaspad/wire"
)
@ -769,9 +772,8 @@ type rpcserverConfig struct {
// These fields allow the RPC server to interface with the local block
// DAG data and state.
TimeSource blockdag.TimeSource
DAG *blockdag.BlockDAG
DAGParams *dagconfig.Params
DAG *blockdag.BlockDAG
DAGParams *dagconfig.Params
// TxMemPool defines the transaction memory pool to interact with.
TxMemPool *mempool.TxPool
@ -821,7 +823,7 @@ func setupRPCListeners() ([]net.Listener, error) {
}
}
netAddrs, err := p2p.ParseListeners(config.ActiveConfig().RPCListeners)
netAddrs, err := network.ParseListeners(config.ActiveConfig().RPCListeners)
if err != nil {
return nil, err
}
@ -841,10 +843,10 @@ func setupRPCListeners() ([]net.Listener, error) {
// NewRPCServer returns a new instance of the rpcServer struct.
func NewRPCServer(
startupTime int64,
p2pServer *p2p.Server,
dag *blockdag.BlockDAG,
txMempool *mempool.TxPool,
acceptanceIndex *indexers.AcceptanceIndex,
blockTemplateGenerator *mining.BlkTmplGenerator,
) (*Server, error) {
// Setup listeners for the configured RPC listen addresses and
// TLS settings.
@ -857,21 +859,17 @@ func NewRPCServer(
}
cfg := &rpcserverConfig{
Listeners: rpcListeners,
StartupTime: startupTime,
ConnMgr: &rpcConnManager{p2pServer},
SyncMgr: &rpcSyncMgr{p2pServer, p2pServer.SyncManager},
addressManager: p2pServer.AddrManager,
TimeSource: p2pServer.TimeSource,
DAGParams: p2pServer.DAGParams,
TxMemPool: p2pServer.TxMemPool,
StartupTime: mstime.Now().UnixMilliseconds(),
DAGParams: dag.Params,
TxMemPool: txMempool,
Generator: blockTemplateGenerator,
AcceptanceIndex: p2pServer.AcceptanceIndex,
DAG: p2pServer.DAG,
AcceptanceIndex: acceptanceIndex,
DAG: dag,
}
rpc := Server{
cfg: *cfg,
statusLines: make(map[int]string),
gbtWorkState: newGbtWorkState(cfg.TimeSource),
gbtWorkState: newGbtWorkState(),
helpCacher: newHelpCacher(),
requestProcessShutdown: make(chan struct{}),
quit: make(chan int),
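The RPC server is now assembled from the DAG, the mempool, the acceptance index, and a block-template generator instead of from the p2p server, and it stamps its own startup time with mstime.Now(). A sketch of the new construction, mirroring setupRPC in kaspad.go above:

policy := mining.Policy{BlockMaxMass: cfg.BlockMaxMass}
blockTemplateGenerator := mining.NewBlkTmplGenerator(&policy, txMempool, dag, sigCache)
rpcServer, err := rpc.NewRPCServer(dag, txMempool, acceptanceIndex, blockTemplateGenerator)
if err != nil {
    return nil, err
}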


@ -1,118 +0,0 @@
package server
import (
"github.com/kaspanet/kaspad/util/mstime"
"sync/atomic"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/mempool"
"github.com/kaspanet/kaspad/mining"
"github.com/kaspanet/kaspad/server/p2p"
"github.com/kaspanet/kaspad/server/rpc"
"github.com/kaspanet/kaspad/signal"
)
// Server is a wrapper for p2p server and rpc server
type Server struct {
rpcServer *rpc.Server
p2pServer *p2p.Server
startupTime int64
started, shutdown int32
}
// Start begins accepting connections from peers.
func (s *Server) Start() {
// Already started?
if atomic.AddInt32(&s.started, 1) != 1 {
return
}
log.Trace("Starting server")
// Server startup time. Used for the uptime command for uptime calculation.
s.startupTime = mstime.Now().UnixMilliseconds()
s.p2pServer.Start()
cfg := config.ActiveConfig()
if !cfg.DisableRPC {
s.rpcServer.Start()
}
}
// Stop gracefully shuts down the server by stopping and disconnecting all
// peers and the main listener.
func (s *Server) Stop() error {
// Make sure this only happens once.
if atomic.AddInt32(&s.shutdown, 1) != 1 {
log.Infof("Server is already in the process of shutting down")
return nil
}
log.Warnf("Server shutting down")
s.p2pServer.Stop()
// Shutdown the RPC server if it's not disabled.
if !config.ActiveConfig().DisableRPC {
s.rpcServer.Stop()
}
return nil
}
// NewServer returns a new kaspad server configured to listen on addr for the
// kaspa network type specified by dagParams. Use start to begin accepting
// connections from peers.
func NewServer(listenAddrs []string, dagParams *dagconfig.Params, interrupt <-chan struct{}) (*Server, error) {
s := &Server{}
var err error
notifyNewTransactions := func(txns []*mempool.TxDesc) {
// Notify both websocket and getblocktemplate long poll clients of all
// newly accepted transactions.
if s.rpcServer != nil {
s.rpcServer.NotifyNewTransactions(txns)
}
}
s.p2pServer, err = p2p.NewServer(listenAddrs, dagParams, interrupt, notifyNewTransactions)
if err != nil {
return nil, err
}
cfg := config.ActiveConfig()
// Create the mining policy and block template generator based on the
// configuration options.
policy := mining.Policy{
BlockMaxMass: cfg.BlockMaxMass,
}
blockTemplateGenerator := mining.NewBlkTmplGenerator(&policy,
s.p2pServer.DAGParams, s.p2pServer.TxMemPool, s.p2pServer.DAG, s.p2pServer.TimeSource, s.p2pServer.SigCache)
if !cfg.DisableRPC {
s.rpcServer, err = rpc.NewRPCServer(
s.startupTime,
s.p2pServer,
blockTemplateGenerator,
)
if err != nil {
return nil, err
}
// Signal process shutdown when the RPC server requests it.
spawn(func() {
<-s.rpcServer.RequestedProcessShutdown()
signal.ShutdownRequestChannel <- struct{}{}
})
}
return s, nil
}
// WaitForShutdown blocks until the main listener and peer handlers are stopped.
func (s *Server) WaitForShutdown() {
s.p2pServer.WaitForShutdown()
}


@ -6,16 +6,16 @@ package main
import (
"fmt"
"github.com/pkg/errors"
"os"
"path/filepath"
"time"
"github.com/pkg/errors"
"github.com/btcsuite/winsvc/eventlog"
"github.com/btcsuite/winsvc/mgr"
"github.com/btcsuite/winsvc/svc"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/server"
"github.com/kaspanet/kaspad/signal"
"github.com/kaspanet/kaspad/version"
)
@ -37,9 +37,9 @@ const (
// elog is used to send messages to the Windows event log.
var elog *eventlog.Log
// logServiceStartOfDay logs information about kaspad when the main server has
// logServiceStart logs information about kaspad when the main server has
// been started to the Windows event log.
func logServiceStartOfDay() {
func logServiceStart() {
var message string
message += fmt.Sprintf("Version %s\n", version.Version())
message += fmt.Sprintf("Configuration directory: %s\n", config.DefaultHomeDir)
@ -64,12 +64,12 @@ func (s *kaspadService) Execute(args []string, r <-chan svc.ChangeRequest, chang
// Start kaspadMain in a separate goroutine so the service can start
// quickly. Shutdown (along with a potential error) is reported via
// doneChan. serverChan is notified with the main server instance once
// it is started so it can be gracefully stopped.
// doneChan. startedChan is notified once kaspad is started so this can
// be properly logged
doneChan := make(chan error)
serverChan := make(chan *server.Server)
startedChan := make(chan struct{})
spawn(func() {
err := kaspadMain(serverChan)
err := kaspadMain(startedChan)
doneChan <- err
})
@ -96,8 +96,8 @@ loop:
"request #%d.", c))
}
case <-serverChan:
logServiceStartOfDay()
case <-startedChan:
logServiceStart()
case err := <-doneChan:
if err != nil {


@ -0,0 +1,82 @@
package network
import (
"net"
"runtime"
"strings"
"github.com/pkg/errors"
)
// simpleAddr implements the net.Addr interface with two struct fields
type simpleAddr struct {
net, addr string
}
// String returns the address.
//
// This is part of the net.Addr interface.
func (a simpleAddr) String() string {
return a.addr
}
// Network returns the network.
//
// This is part of the net.Addr interface.
func (a simpleAddr) Network() string {
return a.net
}
// Ensure simpleAddr implements the net.Addr interface.
var _ net.Addr = simpleAddr{}
// ParseListeners determines whether each listen address is IPv4 and IPv6 and
// returns a slice of appropriate net.Addrs to listen on with TCP. It also
// properly detects addresses which apply to "all interfaces" and adds the
// address as both IPv4 and IPv6.
func ParseListeners(addrs []string) ([]net.Addr, error) {
netAddrs := make([]net.Addr, 0, len(addrs)*2)
for _, addr := range addrs {
host, _, err := net.SplitHostPort(addr)
if err != nil {
// Shouldn't happen due to already being normalized.
return nil, err
}
// Empty host or host of * on plan9 is both IPv4 and IPv6.
if host == "" || (host == "*" && runtime.GOOS == "plan9") {
netAddrs = append(netAddrs, simpleAddr{net: "tcp4", addr: addr})
netAddrs = append(netAddrs, simpleAddr{net: "tcp6", addr: addr})
continue
}
// Strip IPv6 zone id if present since net.ParseIP does not
// handle it.
zoneIndex := strings.LastIndex(host, "%")
if zoneIndex > 0 {
host = host[:zoneIndex]
}
// Parse the IP.
ip := net.ParseIP(host)
if ip == nil {
hostAddrs, err := net.LookupHost(host)
if err != nil {
return nil, err
}
ip = net.ParseIP(hostAddrs[0])
if ip == nil {
return nil, errors.Errorf("Cannot resolve IP address for host '%s'", host)
}
}
// To4 returns nil when the IP is not an IPv4 address, so use
// this to determine the address type.
if ip.To4() == nil {
netAddrs = append(netAddrs, simpleAddr{net: "tcp6", addr: addr})
} else {
netAddrs = append(netAddrs, simpleAddr{net: "tcp4", addr: addr})
}
}
return netAddrs, nil
}
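ParseListeners moves out of server/p2p into the standalone util/network package so the RPC code no longer needs the p2p server just to parse listen addresses. A small usage sketch (the addresses are illustrative and assumed to be already normalized to host:port form):

addrs, err := network.ParseListeners([]string{":16110", "127.0.0.1:16111"})
if err != nil {
    return err
}
for _, addr := range addrs {
    // ":16110" yields both a tcp4 and a tcp6 entry, while the explicit
    // IPv4 address yields a single tcp4 entry.
    log.Infof("listening on %s/%s", addr.Network(), addr.String())
}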


@ -20,7 +20,7 @@ import (
// RegisterSubnetworkForTest is used to register a subnetwork on the DAG with the specified gas limit
func RegisterSubnetworkForTest(dag *blockdag.BlockDAG, params *dagconfig.Params, gasLimit uint64) (*subnetworkid.SubnetworkID, error) {
buildNextBlock := func(parentHashes []*daghash.Hash, txs []*wire.MsgTx) (*util.Block, error) {
msgBlock, err := mining.PrepareBlockForTest(dag, params, parentHashes, txs, false)
msgBlock, err := mining.PrepareBlockForTest(dag, parentHashes, txs, false)
if err != nil {
return nil, err
}


@ -192,7 +192,7 @@ func TestTxHashAndID(t *testing.T) {
return
}
payload := []byte{1, 2, 3}
txIns := []*TxIn{&TxIn{
txIns := []*TxIn{{
PreviousOutpoint: Outpoint{
Index: 0,
TxID: daghash.TxID{1, 2, 3},