Mirror of https://github.com/kaspanet/kaspad.git, synced 2025-07-08 13:52:32 +00:00

commit 9b832997f8
Merge remote-tracking branch 'origin/master' into nod-495-move-out-non-kaspad-apps
@@ -13,10 +13,7 @@ import (
 )

 // blockIndex provides facilities for keeping track of an in-memory index of the
-// block chain. Although the name block chain suggests a single chain of
-// blocks, it is actually a tree-shaped structure where any node can have
-// multiple children. However, there can only be one active branch which does
-// indeed form a chain from the tip all the way back to the genesis block.
+// block DAG.
 type blockIndex struct {
 	// The following fields are set when the instance is created and can't
 	// be changed afterwards, so there is no need to protect them with a
@@ -81,10 +81,9 @@ type blockNode struct {
 	// chainHeight is the number of hops you need to go down the selected parent chain in order to get to the genesis block.
 	chainHeight uint64

-	// Some fields from block headers to aid in best chain selection and
-	// reconstructing headers from memory. These must be treated as
-	// immutable and are intentionally ordered to avoid padding on 64-bit
-	// platforms.
+	// Some fields from block headers to aid in reconstructing headers
+	// from memory. These must be treated as immutable and are intentionally
+	// ordered to avoid padding on 64-bit platforms.
 	version int32
 	bits    uint32
 	nonce   uint64
@@ -109,12 +109,12 @@ func (dag *BlockDAG) TestSetCoinbaseMaturity(maturity uint64) {
 }

 // newTestDAG returns a DAG that is usable for syntetic tests. It is
-// important to note that this chain has no database associated with it, so
+// important to note that this DAG has no database associated with it, so
 // it is not usable with all functions and the tests must take care when making
 // use of it.
 func newTestDAG(params *dagconfig.Params) *BlockDAG {
 	// Create a genesis block node and block index index populated with it
-	// for use when creating the fake chain below.
+	// for use when creating the fake DAG below.
 	node := newBlockNode(&params.GenesisBlock.Header, newSet(), params.K)
 	index := newBlockIndex(nil, params)
 	index.AddNode(node)
@@ -45,7 +45,7 @@ type chainUpdates struct {

 // BlockDAG provides functions for working with the kaspa block DAG.
 // It includes functionality such as rejecting duplicate blocks, ensuring blocks
-// follow all rules, orphan handling, and best chain selection with reorganization.
+// follow all rules, and orphan handling.
 type BlockDAG struct {
 	// The following fields are set when the instance is created and can't
 	// be changed afterwards, so there is no need to protect them with a
@@ -93,7 +93,7 @@ type BlockDAG struct {
 	subnetworkID *subnetworkid.SubnetworkID

 	// These fields are related to handling of orphan blocks. They are
-	// protected by a combination of the chain lock and the orphan lock.
+	// protected by a combination of the DAG lock and the orphan lock.
 	orphanLock  sync.RWMutex
 	orphans     map[daghash.Hash]*orphanBlock
 	prevOrphans map[daghash.Hash][]*orphanBlock
@@ -128,7 +128,7 @@ type BlockDAG struct {
 	unknownVersionsWarned bool

 	// The notifications field stores a slice of callbacks to be executed on
-	// certain blockchain events.
+	// certain blockDAG events.
 	notificationsLock sync.RWMutex
 	notifications     []NotificationCallback

@@ -1251,8 +1251,8 @@ func (dag *BlockDAG) isCurrent() bool {
 	return dagTimestamp >= minus24Hours
 }

-// IsCurrent returns whether or not the chain believes it is current. Several
-// factors are used to guess, but the key factors that allow the chain to
+// IsCurrent returns whether or not the DAG believes it is current. Several
+// factors are used to guess, but the key factors that allow the DAG to
 // believe it is current are:
 // - Latest block has a timestamp newer than 24 hours ago
 //
@@ -1800,10 +1800,9 @@ func (dag *BlockDAG) SubnetworkID() *subnetworkid.SubnetworkID {
 }

 // IndexManager provides a generic interface that is called when blocks are
-// connected and disconnected to and from the tip of the main chain for the
-// purpose of supporting optional indexes.
+// connected to the DAG for the purpose of supporting optional indexes.
 type IndexManager interface {
-	// Init is invoked during chain initialize in order to allow the index
+	// Init is invoked during DAG initialize in order to allow the index
 	// manager to initialize itself and any indexes it is managing. The
 	// channel parameter specifies a channel the caller can close to signal
 	// that the process should be interrupted. It can be nil if that
@@ -1815,7 +1814,7 @@ type IndexManager interface {
 	ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *BlockDAG, acceptedTxsData MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData MultiBlockTxsAcceptanceData) error
 }

-// Config is a descriptor which specifies the blockchain instance configuration.
+// Config is a descriptor which specifies the blockDAG instance configuration.
 type Config struct {
 	// DB defines the database which houses the blocks and will be used to
 	// store all metadata created by this package such as the utxo set.
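The two hunks above narrow the IndexManager contract from chain connect/disconnect semantics to DAG-connect semantics. For orientation, a minimal no-op sketch of an implementation follows. It is illustrative only: the Init signature is taken from the indexers.Manager hunks later in this diff (which assert they satisfy blockdag.IndexManager), and the import paths are assumptions rather than part of the change.

package noopindex

import (
	"github.com/kaspanet/kaspad/blockdag" // assumed import paths
	"github.com/kaspanet/kaspad/database"
	"github.com/kaspanet/kaspad/util"
)

// noopIndexManager satisfies blockdag.IndexManager but records nothing.
type noopIndexManager struct{}

// Init is called once during DAG initialization; a real manager would catch
// its indexes up here, watching interrupt so a shutdown can abort the work.
func (m *noopIndexManager) Init(db database.DB, dag *blockdag.BlockDAG, interrupt <-chan struct{}) error {
	return nil
}

// ConnectBlock is called for every block added to the DAG; a real manager
// would index the block's transactions within dbTx.
func (m *noopIndexManager) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64,
	dag *blockdag.BlockDAG, acceptedTxsData blockdag.MultiBlockTxsAcceptanceData,
	virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
	return nil
}

// Compile-time check that the sketch matches the interface.
var _ blockdag.IndexManager = (*noopIndexManager)(nil)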
@@ -1837,7 +1836,7 @@ type Config struct {
 	DAGParams *dagconfig.Params

 	// TimeSource defines the median time source to use for things such as
-	// block processing and determining whether or not the chain is current.
+	// block processing and determining whether or not the DAG is current.
 	//
 	// The caller is expected to keep a reference to the time source as well
 	// and add time samples from other peers on the network so the local
@@ -1854,7 +1853,7 @@ type Config struct {
 	SigCache *txscript.SigCache

 	// IndexManager defines an index manager to use when initializing the
-	// chain and connecting and disconnecting blocks.
+	// DAG and connecting blocks.
 	//
 	// This field can be nil if the caller does not wish to make use of an
 	// index manager.
@@ -1907,7 +1906,7 @@ func New(config *Config) (*BlockDAG, error) {

 	dag.utxoDiffStore = newUTXODiffStore(&dag)

-	// Initialize the chain state from the passed database. When the db
+	// Initialize the DAG state from the passed database. When the db
 	// does not yet contain any DAG state, both it and the DAG state
 	// will be initialized to contain only the genesis block.
 	err := dag.initDAGState()
@@ -283,7 +283,7 @@ func TestCalcSequenceLock(t *testing.T) {
 	medianTime := node.RelativeAncestor(5).PastMedianTime(dag).Unix()

 	// The median time calculated from the PoV of the best block in the
-	// test chain. For unconfirmed inputs, this value will be used since
+	// test DAG. For unconfirmed inputs, this value will be used since
 	// the MTP will be calculated from the PoV of the yet-to-be-mined
 	// block.
 	nextMedianTime := node.PastMedianTime(dag).Unix()
@@ -689,7 +689,7 @@ func TestChainHeightToHashRange(t *testing.T) {
 // TestIntervalBlockHashes ensures that fetching block hashes at specified
 // intervals by end hash works as expected.
 func TestIntervalBlockHashes(t *testing.T) {
-	// Construct a synthetic block chain with a block index consisting of
+	// Construct a synthetic block DAG with a block index consisting of
 	// the following structure.
 	// genesis -> 1 -> 2 -> ... -> 15 -> 16 -> 17 -> 18
 	//                               \-> 16a -> 17a -> 18a (unvalidated)
@@ -139,8 +139,7 @@ func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error {
 // bits 1-x - height of the block that contains the unspent txout
 //
 // Example 1:
-// From tx in main blockchain:
-// Blk 1, b7c3332bc138e2c9429818f5fed500bcc1746544218772389054dc8047d7cd3f:0
+// b7c3332bc138e2c9429818f5fed500bcc1746544218772389054dc8047d7cd3f:0
 //
 // 03320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52
 // <><------------------------------------------------------------------>
@@ -154,8 +153,7 @@ func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error {
 // - 0x96...52: x-coordinate of the pubkey
 //
 // Example 2:
-// From tx in main blockchain:
-// Blk 113931, 4a16969aa4764dd7507fc1de7f0baa4850a246de90c45e59a3207f9a26b5036f:2
+// 4a16969aa4764dd7507fc1de7f0baa4850a246de90c45e59a3207f9a26b5036f:2
 //
 // 8cf316800900b8025be1b3efc63b0ad48e7f9f10e87544528d58
 // <----><------------------------------------------>
@@ -169,8 +167,7 @@ func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error {
 // - 0xb8...58: pubkey hash
 //
 // Example 3:
-// From tx in main blockchain:
-// Blk 338156, 1b02d1c8cfef60a189017b9a420c682cf4a0028175f2f563209e4ff61c8c3620:22
+// 1b02d1c8cfef60a189017b9a420c682cf4a0028175f2f563209e4ff61c8c3620:22
 //
 // a8a2588ba5b9e763011dd46a006572d820e448e12d2bbb38640bc718e6
 // <----><-------------------------------------------------->
@@ -432,7 +429,7 @@ func (dag *BlockDAG) initDAGState() error {

 	if !initialized {
 		// At this point the database has not already been initialized, so
-		// initialize both it and the chain state to the genesis block.
+		// initialize both it and the DAG state to the genesis block.
 		return dag.createDAGState()
 	}

@@ -792,7 +789,7 @@ func (dag *BlockDAG) BlockByHash(hash *daghash.Hash) (*util.Block, error) {
 	// Lookup the block hash in block index and ensure it is in the DAG
 	node := dag.index.LookupNode(hash)
 	if node == nil {
-		str := fmt.Sprintf("block %s is not in the main chain", hash)
+		str := fmt.Sprintf("block %s is not in the DAG", hash)
 		return nil, errNotInDAG(str)
 	}

@@ -46,8 +46,6 @@ func TestUtxoSerialization(t *testing.T) {
 		entry      *UTXOEntry
 		serialized []byte
 	}{
-		// From tx in main blockchain:
-		// b7c3332bc138e2c9429818f5fed500bcc1746544218772389054dc8047d7cd3f:0
 		{
 			name: "blue score 1, coinbase",
 			entry: &UTXOEntry{
@@ -58,8 +56,6 @@ func TestUtxoSerialization(t *testing.T) {
 			},
 			serialized: hexToBytes("03320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52"),
 		},
-		// From tx in main blockchain:
-		// 8131ffb0a2c945ecaf9b9063e59558784f9c3a74741ce6ae2a18d0571dac15bb:1
 		{
 			name: "blue score 100001, not coinbase",
 			entry: &UTXOEntry{
@@ -13,20 +13,20 @@ transactions, so it is extremely important that fully validating nodes agree on
 all rules.

 At a high level, this package provides support for inserting new blocks into
-the block chain according to the aforementioned rules. It includes
+the block DAG according to the aforementioned rules. It includes
 functionality such as rejecting duplicate blocks, ensuring blocks and
-transactions follow all rules, orphan handling, and best chain selection along
+transactions follow all rules, orphan handling, and DAG order along
 with reorganization.

 Since this package does not deal with other kaspa specifics such as network
 communication or wallets, it provides a notification system which gives the
 caller a high level of flexibility in how they want to react to certain events
 such as orphan blocks which need their parents requested and newly connected
-main chain blocks which might result in wallet updates.
+DAG blocks which might result in wallet updates.

 Kaspa DAG Processing Overview

-Before a block is allowed into the block chain, it must go through an intensive
+Before a block is allowed into the block DAG, it must go through an intensive
 series of validation rules. The following list serves as a general outline of
 those rules to provide some intuition into what is going on under the hood, but
 is by no means exhaustive:
@@ -38,21 +38,16 @@ is by no means exhaustive:
  - Save the most recent orphan blocks for a limited time in case their parent
    blocks become available
  - Stop processing if the block is an orphan as the rest of the processing
-   depends on the block's position within the block chain
+   depends on the block's position within the block DAG
  - Perform a series of more thorough checks that depend on the block's position
-   within the block chain such as verifying block difficulties adhere to
+   within the block DAG such as verifying block difficulties adhere to
    difficulty retarget rules, timestamps are after the median of the last
    several blocks, all transactions are finalized, and
    block versions are in line with the previous blocks
- - Determine how the block fits into the chain and perform different actions
-   accordingly in order to ensure any side chains which have higher difficulty
-   than the main chain become the new main chain
- - When a block is being connected to the main chain (either through
-   reorganization of a side chain to the main chain or just extending the
-   main chain), perform further checks on the block's transactions such as
-   verifying transaction duplicates, script complexity for the combination of
-   connected scripts, coinbase maturity, double spends, and connected
-   transaction values
+ - When a block is being connected to the DAG, perform further checks on the
+   block's transactions such as verifying transaction duplicates, script
+   complexity for the combination of connected scripts, coinbase maturity,
+   double spends, and connected transaction values
  - Run the transaction scripts to verify the spender is allowed to spend the
    coins
  - Insert the block into the block database
@@ -60,10 +55,10 @@ is by no means exhaustive:
 Errors

 Errors returned by this package are either the raw errors provided by underlying
-calls or of type blockchain.RuleError. This allows the caller to differentiate
+calls or of type blockdag.RuleError. This allows the caller to differentiate
 between unexpected errors, such as database errors, versus errors due to rule
 violations through type assertions. In addition, callers can programmatically
 determine the specific rule violation by examining the ErrorCode field of the
-type asserted blockchain.RuleError.
+type asserted blockdag.RuleError.
 */
 package blockdag
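The doc.go hunk above keeps the package's error convention while renaming the type: rule violations surface as blockdag.RuleError values whose ErrorCode field identifies the specific rule, and everything else is an unexpected error. A hedged sketch of the caller-side pattern follows; the error is assumed to come from block processing (that call is not shown in this diff), and the import path is illustrative. ErrInvalidTime and ErrTimeTooOld are two of the renamed codes in the next hunk.

package example

import (
	"log"

	"github.com/kaspanet/kaspad/blockdag" // assumed import path
)

// classifyBlockErr demonstrates the type-assertion pattern described in doc.go:
// blockdag.RuleError means the block broke a consensus rule, anything else is
// an unexpected error such as a database failure.
func classifyBlockErr(err error) {
	if err == nil {
		return
	}
	ruleErr, ok := err.(blockdag.RuleError)
	if !ok {
		log.Fatalf("unexpected non-rule error: %v", err)
	}
	switch ruleErr.ErrorCode {
	case blockdag.ErrInvalidTime:
		log.Printf("rejected: timestamp precision finer than one second: %v", ruleErr)
	case blockdag.ErrTimeTooOld:
		log.Printf("rejected: timestamp before the median of recent blocks: %v", ruleErr)
	default:
		log.Printf("rejected: rule violation %v: %v", ruleErr.ErrorCode, ruleErr)
	}
}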
@@ -47,12 +47,12 @@ const (
 	ErrBlockVersionTooOld

 	// ErrInvalidTime indicates the time in the passed block has a precision
-	// that is more than one second. The chain consensus rules require
+	// that is more than one second. The DAG consensus rules require
 	// timestamps to have a maximum precision of one second.
 	ErrInvalidTime

 	// ErrTimeTooOld indicates the time is either before the median time of
-	// the last several blocks per the chain consensus rules.
+	// the last several blocks per the DAG consensus rules.
 	ErrTimeTooOld

 	// ErrTimeTooNew indicates the time is too far in the future as compared
@@ -57,8 +57,8 @@ func isSupportedDbType(dbType string) bool {
 	return false
 }

-// DAGSetup is used to create a new db and chain instance with the genesis
-// block already inserted. In addition to the new chain instance, it returns
+// DAGSetup is used to create a new db and DAG instance with the genesis
+// block already inserted. In addition to the new DAG instance, it returns
 // a teardown function the caller should invoke when done testing to clean up.
 func DAGSetup(dbName string, params *dagconfig.Params) (*blockdag.BlockDAG, func(), error) {
 	if !isSupportedDbType(testDbType) {
@@ -109,12 +109,12 @@ func DAGSetup(dbName string, params *dagconfig.Params) (*blockdag.BlockDAG, func
 		}
 	}

-	// Copy the chain params to ensure any modifications the tests do to
+	// Copy the DAG params to ensure any modifications the tests do to
 	// the DAG parameters do not affect the global instance.
 	paramsCopy := *params

-	// Create the main chain instance.
-	chain, err := blockdag.New(&blockdag.Config{
+	// Create the DAG instance.
+	dag, err := blockdag.New(&blockdag.Config{
 		DB:         db,
 		DAGParams:  &paramsCopy,
 		TimeSource: blockdag.NewMedianTime(),
@@ -122,10 +122,10 @@ func DAGSetup(dbName string, params *dagconfig.Params) (*blockdag.BlockDAG, func
 	})
 	if err != nil {
 		teardown()
-		err := errors.Errorf("failed to create chain instance: %v", err)
+		err := errors.Errorf("failed to create DAG instance: %v", err)
 		return nil, nil, err
 	}
-	return chain, teardown, nil
+	return dag, teardown, nil
 }

 // TestFullBlocks ensures all tests generated by the fullblocktests package
@@ -140,7 +140,7 @@ func TestFullBlocks(t *testing.T) {
 		t.Fatalf("failed to generate tests: %v", err)
 	}

-	// Create a new database and chain instance to run tests against.
+	// Create a new database and DAG instance to run tests against.
 	dag, teardownFunc, err := DAGSetup("fullblocktest",
 		&dagconfig.RegressionNetParams)
 	if err != nil {
@@ -255,7 +255,7 @@ func TestFullBlocks(t *testing.T) {
 		if _, ok := err.(blockdag.RuleError); !ok {
 			t.Fatalf("block %q (hash %s, height %d) "+
 				"returned unexpected error type -- "+
-				"got %T, want blockchain.RuleError",
+				"got %T, want blockdag.RuleError",
 				item.Name, block.Hash(), blockHeight,
 				err)
 		}
@@ -274,7 +274,7 @@ func TestFullBlocks(t *testing.T) {
 		}
 	}

-	// testExpectedTip ensures the current tip of the blockchain is the
+	// testExpectedTip ensures the current tip of the blockDAG is the
 	// block specified in the provided test instance.
 	testExpectedTip := func(item fullblocktests.ExpectedTip) {
 		blockHeight := item.Height
@@ -68,9 +68,9 @@ var (
 )

 // -----------------------------------------------------------------------------
-// The address index maps addresses referenced in the blockchain to a list of
+// The address index maps addresses referenced in the blockDAG to a list of
 // all the transactions involving that address. Transactions are stored
-// according to their order of appearance in the blockchain. That is to say
+// according to their order of appearance in the blockDAG. That is to say
 // first by block height and then by offset inside the block. It is also
 // important to note that this implementation requires the transaction index
 // since it is needed in order to catch up old blocks due to the fact the spent
@@ -536,7 +536,7 @@ func addrToKey(addr util.Address) ([addrKeySize]byte, error) {
 // AddrIndex implements a transaction by address index. That is to say, it
 // supports querying all transactions that reference a given address because
 // they are either crediting or debiting the address. The returned transactions
-// are ordered according to their order of appearance in the blockchain. In
+// are ordered according to their order of appearance in the blockDAG. In
 // other words, first by block height and then by offset inside the block.
 //
 // In addition, support is provided for a memory-only index of unconfirmed
@@ -681,7 +681,7 @@ func (idx *AddrIndex) indexBlock(data writeIndexData, block *util.Block, dag *bl
 }

 // ConnectBlock is invoked by the index manager when a new block has been
-// connected to the main chain. This indexer adds a mapping for each address
+// connected to the DAG. This indexer adds a mapping for each address
 // the transactions in the block involve.
 //
 // This is part of the Indexer interface.
@@ -881,12 +881,12 @@ func (idx *AddrIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID
 }

 // NewAddrIndex returns a new instance of an indexer that is used to create a
-// mapping of all addresses in the blockchain to the respective transactions
+// mapping of all addresses in the blockDAG to the respective transactions
 // that involve them.
 //
 // It implements the Indexer interface which plugs into the IndexManager that in
-// turn is used by the blockchain package. This allows the index to be
-// seamlessly maintained along with the chain.
+// turn is used by the blockDAG package. This allows the index to be
+// seamlessly maintained along with the DAG.
 func NewAddrIndex(dagParams *dagconfig.Params) *AddrIndex {
 	return &AddrIndex{
 		dagParams: dagParams,
@@ -3,7 +3,7 @@
 // license that can be found in the LICENSE file.

 /*
-Package indexers implements optional block chain indexes.
+Package indexers implements optional block DAG indexes.
 */
 package indexers

@@ -20,14 +20,14 @@ var (
 )

 // Manager defines an index manager that manages multiple optional indexes and
-// implements the blockchain.IndexManager interface so it can be seamlessly
-// plugged into normal chain processing.
+// implements the blockdag.IndexManager interface so it can be seamlessly
+// plugged into normal DAG processing.
 type Manager struct {
 	db             database.DB
 	enabledIndexes []Indexer
 }

-// Ensure the Manager type implements the blockchain.IndexManager interface.
+// Ensure the Manager type implements the blockdag.IndexManager interface.
 var _ blockdag.IndexManager = (*Manager)(nil)

 // indexDropKey returns the key for an index which indicates it is in the
@@ -116,14 +116,14 @@ func (m *Manager) maybeCreateIndexes(dbTx database.Tx) error {
 	return nil
 }

-// Init initializes the enabled indexes. This is called during chain
+// Init initializes the enabled indexes. This is called during DAG
 // initialization and primarily consists of catching up all indexes to the
-// current best chain tip. This is necessary since each index can be disabled
+// current tips. This is necessary since each index can be disabled
 // and re-enabled at any time and attempting to catch-up indexes at the same
 // time new blocks are being downloaded would lead to an overall longer time to
 // catch up due to the I/O contention.
 //
-// This is part of the blockchain.IndexManager interface.
+// This is part of the blockdag.IndexManager interface.
 func (m *Manager) Init(db database.DB, blockDAG *blockdag.BlockDAG, interrupt <-chan struct{}) error {
 	// Nothing to do when no indexes are enabled.
 	if len(m.enabledIndexes) == 0 {
@@ -192,11 +192,11 @@ func (m *Manager) recoverIfNeeded() error {
 	})
 }

-// ConnectBlock must be invoked when a block is extending the main chain. It
+// ConnectBlock must be invoked when a block is added to the DAG. It
 // keeps track of the state of each index it is managing, performs some sanity
 // checks, and invokes each indexer.
 //
-// This is part of the blockchain.IndexManager interface.
+// This is part of the blockdag.IndexManager interface.
 func (m *Manager) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
 	txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {

@@ -231,8 +231,8 @@ func (m *Manager) updateIndexersWithCurrentBlockID(dbTx database.Tx, blockHash *

 // NewManager returns a new index manager with the provided indexes enabled.
 //
-// The manager returned satisfies the blockchain.IndexManager interface and thus
-// cleanly plugs into the normal blockchain processing path.
+// The manager returned satisfies the blockdag.IndexManager interface and thus
+// cleanly plugs into the normal blockdag processing path.
 func NewManager(enabledIndexes []Indexer) *Manager {
 	return &Manager{
 		enabledIndexes: enabledIndexes,
@@ -394,12 +394,12 @@ func dbFetchTxAcceptingBlock(dbTx database.Tx, txID *daghash.TxID, dag *blockdag
 }

 // NewTxIndex returns a new instance of an indexer that is used to create a
-// mapping of the hashes of all transactions in the blockchain to the respective
+// mapping of the hashes of all transactions in the blockDAG to the respective
 // block, location within the block, and size of the transaction.
 //
 // It implements the Indexer interface which plugs into the IndexManager that in
-// turn is used by the blockchain package. This allows the index to be
-// seamlessly maintained along with the chain.
+// turn is used by the blockdag package. This allows the index to be
+// seamlessly maintained along with the DAG.
 func NewTxIndex() *TxIndex {
 	return &TxIndex{}
 }
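Read together with the NewAddrIndex and Manager hunks earlier in this diff, the NewTxIndex hunk above completes the path by which optional indexes reach the DAG: concrete indexers are wrapped by indexers.NewManager, which satisfies blockdag.IndexManager and is handed to the DAG through Config.IndexManager. A sketch of that wiring follows; the import paths and the caller-supplied db and params are assumptions, and the comment about when Init and ConnectBlock run paraphrases the doc comments in this diff rather than code shown here.

package example

import (
	"github.com/kaspanet/kaspad/blockdag" // assumed import paths
	"github.com/kaspanet/kaspad/blockdag/indexers"
	"github.com/kaspanet/kaspad/dagconfig"
	"github.com/kaspanet/kaspad/database"
)

// newIndexedDAG wires the optional transaction and address indexes into a new
// BlockDAG instance. db and params are assumed to be prepared by the caller.
func newIndexedDAG(db database.DB, params *dagconfig.Params) (*blockdag.BlockDAG, error) {
	txIndex := indexers.NewTxIndex()
	addrIndex := indexers.NewAddrIndex(params)
	indexManager := indexers.NewManager([]indexers.Indexer{txIndex, addrIndex})

	// IndexManager may be left nil to disable optional indexes. When set, its
	// Init hook runs during DAG initialization and ConnectBlock runs for every
	// block subsequently added to the DAG.
	return blockdag.New(&blockdag.Config{
		DB:           db,
		DAGParams:    params,
		TimeSource:   blockdag.NewMedianTime(),
		IndexManager: indexManager,
	})
}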
@@ -207,7 +207,7 @@ func (m *medianTime) Offset() time.Duration {

 // NewMedianTime returns a new instance of concurrency-safe implementation of
 // the MedianTimeSource interface. The returned implementation contains the
-// rules necessary for proper time handling in the chain consensus rules and
+// rules necessary for proper time handling in the DAG consensus rules and
 // expects the time samples to be added from the timestamp field of the version
 // message received from remote peers that successfully connect and negotiate.
 func NewMedianTime() MedianTimeSource {
@@ -14,7 +14,7 @@ import (
 type NotificationType int

 // NotificationCallback is used for a caller to provide a callback for
-// notifications about various chain events.
+// notifications about various blockDAG events.
 type NotificationCallback func(*Notification)

 // Constants for the type of a notification message.
@@ -52,7 +52,7 @@ type Notification struct {
 	Data interface{}
 }

-// Subscribe to block chain notifications. Registers a callback to be executed
+// Subscribe to block DAG notifications. Registers a callback to be executed
 // when various events take place. See the documentation on Notification and
 // NotificationType for details on the types and contents of notifications.
 func (dag *BlockDAG) Subscribe(callback NotificationCallback) {
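The Subscribe hunk above is the entry point for the notification system that doc.go describes. A short sketch of registering a callback follows; only the Data field, NotificationCallback, and NotificationType appear in this diff, so the Type field name and the import path are assumptions.

package example

import (
	"log"

	"github.com/kaspanet/kaspad/blockdag" // assumed import path
)

// logDAGEvents registers a callback that logs every notification the DAG emits.
// Callbacks should return quickly; heavy work belongs on another goroutine.
func logDAGEvents(dag *blockdag.BlockDAG) {
	dag.Subscribe(func(n *blockdag.Notification) {
		log.Printf("blockDAG event: type=%v data=%T", n.Type, n.Data)
	})
}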
@ -18,9 +18,8 @@ type BehaviorFlags uint32
|
|||||||
|
|
||||||
const (
|
const (
|
||||||
// BFFastAdd may be set to indicate that several checks can be avoided
|
// BFFastAdd may be set to indicate that several checks can be avoided
|
||||||
// for the block since it is already known to fit into the chain due to
|
// for the block since it is already known to fit into the DAG due to
|
||||||
// already proving it correct links into the chain.
|
// already proving it correct links into the DAG.
|
||||||
// This is primarily used for headers-first mode.
|
|
||||||
BFFastAdd BehaviorFlags = 1 << iota
|
BFFastAdd BehaviorFlags = 1 << iota
|
||||||
|
|
||||||
// BFNoPoWCheck may be set to indicate the proof of work check which
|
// BFNoPoWCheck may be set to indicate the proof of work check which
|
||||||
@ -64,7 +63,7 @@ func (dag *BlockDAG) BlockExists(hash *daghash.Hash) bool {
|
|||||||
// The flags do not modify the behavior of this function directly, however they
|
// The flags do not modify the behavior of this function directly, however they
|
||||||
// are needed to pass along to maybeAcceptBlock.
|
// are needed to pass along to maybeAcceptBlock.
|
||||||
//
|
//
|
||||||
// This function MUST be called with the chain state lock held (for writes).
|
// This function MUST be called with the DAG state lock held (for writes).
|
||||||
func (dag *BlockDAG) processOrphans(hash *daghash.Hash, flags BehaviorFlags) error {
|
func (dag *BlockDAG) processOrphans(hash *daghash.Hash, flags BehaviorFlags) error {
|
||||||
// Start with processing at least the passed hash. Leave a little room
|
// Start with processing at least the passed hash. Leave a little room
|
||||||
// for additional orphan blocks that need to be processed without
|
// for additional orphan blocks that need to be processed without
|
||||||
@ -127,7 +126,7 @@ func (dag *BlockDAG) processOrphans(hash *daghash.Hash, flags BehaviorFlags) err
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ProcessBlock is the main workhorse for handling insertion of new blocks into
|
// ProcessBlock is the main workhorse for handling insertion of new blocks into
|
||||||
// the block chain. It includes functionality such as rejecting duplicate
|
// the block DAG. It includes functionality such as rejecting duplicate
|
||||||
// blocks, ensuring blocks follow all rules, orphan handling, and insertion into
|
// blocks, ensuring blocks follow all rules, orphan handling, and insertion into
|
||||||
// the block DAG.
|
// the block DAG.
|
||||||
//
|
//
|
||||||
|
@ -56,8 +56,8 @@ func FileExists(name string) bool {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// DAGSetup is used to create a new db and chain instance with the genesis
|
// DAGSetup is used to create a new db and DAG instance with the genesis
|
||||||
// block already inserted. In addition to the new chain instance, it returns
|
// block already inserted. In addition to the new DAG instance, it returns
|
||||||
// a teardown function the caller should invoke when done testing to clean up.
|
// a teardown function the caller should invoke when done testing to clean up.
|
||||||
func DAGSetup(dbName string, config Config) (*BlockDAG, func(), error) {
|
func DAGSetup(dbName string, config Config) (*BlockDAG, func(), error) {
|
||||||
if !isSupportedDbType(testDbType) {
|
if !isSupportedDbType(testDbType) {
|
||||||
|
@ -125,7 +125,7 @@ func newThresholdCaches(numCaches uint32) []thresholdStateCache {
|
|||||||
// AFTER the given node and deployment ID. The cache is used to ensure the
|
// AFTER the given node and deployment ID. The cache is used to ensure the
|
||||||
// threshold states for previous windows are only calculated once.
|
// threshold states for previous windows are only calculated once.
|
||||||
//
|
//
|
||||||
// This function MUST be called with the chain state lock held (for writes).
|
// This function MUST be called with the DAG state lock held (for writes).
|
||||||
func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdConditionChecker, cache *thresholdStateCache) (ThresholdState, error) {
|
func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdConditionChecker, cache *thresholdStateCache) (ThresholdState, error) {
|
||||||
// The threshold state for the window that contains the genesis block is
|
// The threshold state for the window that contains the genesis block is
|
||||||
// defined by definition.
|
// defined by definition.
|
||||||
@ -260,7 +260,7 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ThresholdState returns the current rule change threshold state of the given
|
// ThresholdState returns the current rule change threshold state of the given
|
||||||
// deployment ID for the block AFTER the end of the current best chain.
|
// deployment ID for the block AFTER the blueScore of the current DAG.
|
||||||
//
|
//
|
||||||
// This function is safe for concurrent access.
|
// This function is safe for concurrent access.
|
||||||
func (dag *BlockDAG) ThresholdState(deploymentID uint32) (ThresholdState, error) {
|
func (dag *BlockDAG) ThresholdState(deploymentID uint32) (ThresholdState, error) {
|
||||||
@ -295,21 +295,21 @@ func (dag *BlockDAG) IsDeploymentActive(deploymentID uint32) (bool, error) {
|
|||||||
// desired. In other words, the returned deployment state is for the block
|
// desired. In other words, the returned deployment state is for the block
|
||||||
// AFTER the passed node.
|
// AFTER the passed node.
|
||||||
//
|
//
|
||||||
// This function MUST be called with the chain state lock held (for writes).
|
// This function MUST be called with the DAG state lock held (for writes).
|
||||||
func (dag *BlockDAG) deploymentState(prevNode *blockNode, deploymentID uint32) (ThresholdState, error) {
|
func (dag *BlockDAG) deploymentState(prevNode *blockNode, deploymentID uint32) (ThresholdState, error) {
|
||||||
if deploymentID > uint32(len(dag.dagParams.Deployments)) {
|
if deploymentID > uint32(len(dag.dagParams.Deployments)) {
|
||||||
return ThresholdFailed, DeploymentError(deploymentID)
|
return ThresholdFailed, DeploymentError(deploymentID)
|
||||||
}
|
}
|
||||||
|
|
||||||
deployment := &dag.dagParams.Deployments[deploymentID]
|
deployment := &dag.dagParams.Deployments[deploymentID]
|
||||||
checker := deploymentChecker{deployment: deployment, chain: dag}
|
checker := deploymentChecker{deployment: deployment, dag: dag}
|
||||||
cache := &dag.deploymentCaches[deploymentID]
|
cache := &dag.deploymentCaches[deploymentID]
|
||||||
|
|
||||||
return dag.thresholdState(prevNode, checker, cache)
|
return dag.thresholdState(prevNode, checker, cache)
|
||||||
}
|
}
|
||||||
|
|
||||||
// initThresholdCaches initializes the threshold state caches for each warning
|
// initThresholdCaches initializes the threshold state caches for each warning
|
||||||
// bit and defined deployment and provides warnings if the chain is current per
|
// bit and defined deployment and provides warnings if the DAG is current per
|
||||||
// the warnUnknownVersions and warnUnknownRuleActivations functions.
|
// the warnUnknownVersions and warnUnknownRuleActivations functions.
|
||||||
func (dag *BlockDAG) initThresholdCaches() error {
|
func (dag *BlockDAG) initThresholdCaches() error {
|
||||||
// Initialize the warning and deployment caches by calculating the
|
// Initialize the warning and deployment caches by calculating the
|
||||||
@ -318,7 +318,7 @@ func (dag *BlockDAG) initThresholdCaches() error {
|
|||||||
// definition changes is done now.
|
// definition changes is done now.
|
||||||
prevNode := dag.selectedTip().selectedParent
|
prevNode := dag.selectedTip().selectedParent
|
||||||
for bit := uint32(0); bit < vbNumBits; bit++ {
|
for bit := uint32(0); bit < vbNumBits; bit++ {
|
||||||
checker := bitConditionChecker{bit: bit, chain: dag}
|
checker := bitConditionChecker{bit: bit, dag: dag}
|
||||||
cache := &dag.warningCaches[bit]
|
cache := &dag.warningCaches[bit]
|
||||||
_, err := dag.thresholdState(prevNode, checker, cache)
|
_, err := dag.thresholdState(prevNode, checker, cache)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -328,14 +328,14 @@ func (dag *BlockDAG) initThresholdCaches() error {
|
|||||||
for id := 0; id < len(dag.dagParams.Deployments); id++ {
|
for id := 0; id < len(dag.dagParams.Deployments); id++ {
|
||||||
deployment := &dag.dagParams.Deployments[id]
|
deployment := &dag.dagParams.Deployments[id]
|
||||||
cache := &dag.deploymentCaches[id]
|
cache := &dag.deploymentCaches[id]
|
||||||
checker := deploymentChecker{deployment: deployment, chain: dag}
|
checker := deploymentChecker{deployment: deployment, dag: dag}
|
||||||
_, err := dag.thresholdState(prevNode, checker, cache)
|
_, err := dag.thresholdState(prevNode, checker, cache)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// No warnings about unknown rules or versions until the chain is
|
// No warnings about unknown rules or versions until the DAG is
|
||||||
// current.
|
// current.
|
||||||
if dag.isCurrent() {
|
if dag.isCurrent() {
|
||||||
// Warn if a high enough percentage of the last blocks have
|
// Warn if a high enough percentage of the last blocks have
|
||||||
|
@ -69,7 +69,7 @@ func TestSequenceLocksActive(t *testing.T) {
|
|||||||
// TestCheckConnectBlockTemplate tests the CheckConnectBlockTemplate function to
|
// TestCheckConnectBlockTemplate tests the CheckConnectBlockTemplate function to
|
||||||
// ensure it fails.
|
// ensure it fails.
|
||||||
func TestCheckConnectBlockTemplate(t *testing.T) {
|
func TestCheckConnectBlockTemplate(t *testing.T) {
|
||||||
// Create a new database and chain instance to run tests against.
|
// Create a new database and DAG instance to run tests against.
|
||||||
dag, teardownFunc, err := DAGSetup("checkconnectblocktemplate", Config{
|
dag, teardownFunc, err := DAGSetup("checkconnectblocktemplate", Config{
|
||||||
DAGParams: &dagconfig.SimNetParams,
|
DAGParams: &dagconfig.SimNetParams,
|
||||||
})
|
})
|
||||||
@ -1033,7 +1033,7 @@ var Block100000 = wire.MsgBlock{
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
// BlockWithWrongTxOrder defines invalid block 100,000 of the block chain.
|
// BlockWithWrongTxOrder defines invalid block 100,000 of the block DAG.
|
||||||
var BlockWithWrongTxOrder = wire.MsgBlock{
|
var BlockWithWrongTxOrder = wire.MsgBlock{
|
||||||
Header: wire.BlockHeader{
|
Header: wire.BlockHeader{
|
||||||
Version: 1,
|
Version: 1,
|
||||||
|
@ -36,11 +36,11 @@ const (
|
|||||||
// bitConditionChecker provides a thresholdConditionChecker which can be used to
|
// bitConditionChecker provides a thresholdConditionChecker which can be used to
|
||||||
// test whether or not a specific bit is set when it's not supposed to be
|
// test whether or not a specific bit is set when it's not supposed to be
|
||||||
// according to the expected version based on the known deployments and the
|
// according to the expected version based on the known deployments and the
|
||||||
// current state of the chain. This is useful for detecting and warning about
|
// current state of the DAG. This is useful for detecting and warning about
|
||||||
// unknown rule activations.
|
// unknown rule activations.
|
||||||
type bitConditionChecker struct {
|
type bitConditionChecker struct {
|
||||||
bit uint32
|
bit uint32
|
||||||
chain *BlockDAG
|
dag *BlockDAG
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ensure the bitConditionChecker type implements the thresholdConditionChecker
|
// Ensure the bitConditionChecker type implements the thresholdConditionChecker
|
||||||
@ -73,30 +73,30 @@ func (c bitConditionChecker) EndTime() uint64 {
|
|||||||
// RuleChangeActivationThreshold is the number of blocks for which the condition
|
// RuleChangeActivationThreshold is the number of blocks for which the condition
|
||||||
// must be true in order to lock in a rule change.
|
// must be true in order to lock in a rule change.
|
||||||
//
|
//
|
||||||
// This implementation returns the value defined by the chain params the checker
|
// This implementation returns the value defined by the DAG params the checker
|
||||||
// is associated with.
|
// is associated with.
|
||||||
//
|
//
|
||||||
// This is part of the thresholdConditionChecker interface implementation.
|
// This is part of the thresholdConditionChecker interface implementation.
|
||||||
func (c bitConditionChecker) RuleChangeActivationThreshold() uint64 {
|
func (c bitConditionChecker) RuleChangeActivationThreshold() uint64 {
|
||||||
return c.chain.dagParams.RuleChangeActivationThreshold
|
return c.dag.dagParams.RuleChangeActivationThreshold
|
||||||
}
|
}
|
||||||
|
|
||||||
// MinerConfirmationWindow is the number of blocks in each threshold state
|
// MinerConfirmationWindow is the number of blocks in each threshold state
|
||||||
// retarget window.
|
// retarget window.
|
||||||
//
|
//
|
||||||
// This implementation returns the value defined by the chain params the checker
|
// This implementation returns the value defined by the DAG params the checker
|
||||||
// is associated with.
|
// is associated with.
|
||||||
//
|
//
|
||||||
// This is part of the thresholdConditionChecker interface implementation.
|
// This is part of the thresholdConditionChecker interface implementation.
|
||||||
func (c bitConditionChecker) MinerConfirmationWindow() uint64 {
|
func (c bitConditionChecker) MinerConfirmationWindow() uint64 {
|
||||||
return c.chain.dagParams.MinerConfirmationWindow
|
return c.dag.dagParams.MinerConfirmationWindow
|
||||||
}
|
}
|
||||||
|
|
||||||
// Condition returns true when the specific bit associated with the checker is
|
// Condition returns true when the specific bit associated with the checker is
|
||||||
// set and it's not supposed to be according to the expected version based on
|
// set and it's not supposed to be according to the expected version based on
|
||||||
// the known deployments and the current state of the chain.
|
// the known deployments and the current state of the DAG.
|
||||||
//
|
//
|
||||||
// This function MUST be called with the chain state lock held (for writes).
|
// This function MUST be called with the DAG state lock held (for writes).
|
||||||
//
|
//
|
||||||
// This is part of the thresholdConditionChecker interface implementation.
|
// This is part of the thresholdConditionChecker interface implementation.
|
||||||
func (c bitConditionChecker) Condition(node *blockNode) (bool, error) {
|
func (c bitConditionChecker) Condition(node *blockNode) (bool, error) {
|
||||||
@ -109,7 +109,7 @@ func (c bitConditionChecker) Condition(node *blockNode) (bool, error) {
|
|||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
expectedVersion, err := c.chain.calcNextBlockVersion(node.selectedParent)
|
expectedVersion, err := c.dag.calcNextBlockVersion(node.selectedParent)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
@ -121,7 +121,7 @@ func (c bitConditionChecker) Condition(node *blockNode) (bool, error) {
|
|||||||
// and activating consensus rule changes.
|
// and activating consensus rule changes.
|
||||||
type deploymentChecker struct {
|
type deploymentChecker struct {
|
||||||
deployment *dagconfig.ConsensusDeployment
|
deployment *dagconfig.ConsensusDeployment
|
||||||
chain *BlockDAG
|
dag *BlockDAG
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ensure the deploymentChecker type implements the thresholdConditionChecker
|
// Ensure the deploymentChecker type implements the thresholdConditionChecker
|
||||||
@ -154,23 +154,23 @@ func (c deploymentChecker) EndTime() uint64 {
|
|||||||
// RuleChangeActivationThreshold is the number of blocks for which the condition
|
// RuleChangeActivationThreshold is the number of blocks for which the condition
|
||||||
// must be true in order to lock in a rule change.
|
// must be true in order to lock in a rule change.
|
||||||
//
|
//
|
||||||
// This implementation returns the value defined by the chain params the checker
|
// This implementation returns the value defined by the DAG params the checker
|
||||||
// is associated with.
|
// is associated with.
|
||||||
//
|
//
|
||||||
// This is part of the thresholdConditionChecker interface implementation.
|
// This is part of the thresholdConditionChecker interface implementation.
|
||||||
func (c deploymentChecker) RuleChangeActivationThreshold() uint64 {
|
func (c deploymentChecker) RuleChangeActivationThreshold() uint64 {
|
||||||
return c.chain.dagParams.RuleChangeActivationThreshold
|
return c.dag.dagParams.RuleChangeActivationThreshold
|
||||||
}
|
}
|
||||||
|
|
||||||
// MinerConfirmationWindow is the number of blocks in each threshold state
|
// MinerConfirmationWindow is the number of blocks in each threshold state
|
||||||
// retarget window.
|
// retarget window.
|
||||||
//
|
//
|
||||||
// This implementation returns the value defined by the chain params the checker
|
// This implementation returns the value defined by the DAG params the checker
|
||||||
// is associated with.
|
// is associated with.
|
||||||
//
|
//
|
||||||
// This is part of the thresholdConditionChecker interface implementation.
|
// This is part of the thresholdConditionChecker interface implementation.
|
||||||
func (c deploymentChecker) MinerConfirmationWindow() uint64 {
|
func (c deploymentChecker) MinerConfirmationWindow() uint64 {
|
||||||
return c.chain.dagParams.MinerConfirmationWindow
|
return c.dag.dagParams.MinerConfirmationWindow
|
||||||
}
|
}
|
||||||
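Before the Condition hunks below, it may help to see the interface that both bitConditionChecker and deploymentChecker satisfy. The following is a minimal, self-contained sketch rather than the kaspad source: the method set is inferred from the accessors and hunk headers in this diff (BeginTime and EndTime appear in the @ -154 hunk header), and the blockNode stand-in carries only the fields the sketch needs.

package sketch

// blockNode is a stand-in for the real blockNode type so that this sketch
// compiles on its own; only fields referenced by the surrounding hunks are kept.
type blockNode struct {
    version        int32
    selectedParent *blockNode
}

// thresholdConditionChecker sketches the interface that bitConditionChecker
// and deploymentChecker implement. Treat the exact method set as an
// assumption inferred from this diff, not as the authoritative definition.
type thresholdConditionChecker interface {
    BeginTime() uint64
    EndTime() uint64
    RuleChangeActivationThreshold() uint64
    MinerConfirmationWindow() uint64
    Condition(node *blockNode) (bool, error)
}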
|
|
||||||
// Condition returns true when the specific bit defined by the deployment
|
// Condition returns true when the specific bit defined by the deployment
|
||||||
@ -189,10 +189,10 @@ func (c deploymentChecker) Condition(node *blockNode) (bool, error) {
|
|||||||
// rule change deployments.
|
// rule change deployments.
|
||||||
//
|
//
|
||||||
// This function differs from the exported CalcNextBlockVersion in that the
|
// This function differs from the exported CalcNextBlockVersion in that the
|
||||||
// exported version uses the current best chain as the previous block node
|
// exported version uses the selected tip as the previous block node
|
||||||
// while this function accepts any block node.
|
// while this function accepts any block node.
|
||||||
//
|
//
|
||||||
// This function MUST be called with the chain state lock held (for writes).
|
// This function MUST be called with the DAG state lock held (for writes).
|
||||||
func (dag *BlockDAG) calcNextBlockVersion(prevNode *blockNode) (int32, error) {
|
func (dag *BlockDAG) calcNextBlockVersion(prevNode *blockNode) (int32, error) {
|
||||||
// Set the appropriate bits for each actively defined rule deployment
|
// Set the appropriate bits for each actively defined rule deployment
|
||||||
// that is either in the process of being voted on, or locked in for the
|
// that is either in the process of being voted on, or locked in for the
|
||||||
@ -201,7 +201,7 @@ func (dag *BlockDAG) calcNextBlockVersion(prevNode *blockNode) (int32, error) {
|
|||||||
for id := 0; id < len(dag.dagParams.Deployments); id++ {
|
for id := 0; id < len(dag.dagParams.Deployments); id++ {
|
||||||
deployment := &dag.dagParams.Deployments[id]
|
deployment := &dag.dagParams.Deployments[id]
|
||||||
cache := &dag.deploymentCaches[id]
|
cache := &dag.deploymentCaches[id]
|
||||||
checker := deploymentChecker{deployment: deployment, chain: dag}
|
checker := deploymentChecker{deployment: deployment, dag: dag}
|
||||||
state, err := dag.thresholdState(prevNode, checker, cache)
|
state, err := dag.thresholdState(prevNode, checker, cache)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
@ -214,7 +214,7 @@ func (dag *BlockDAG) calcNextBlockVersion(prevNode *blockNode) (int32, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// CalcNextBlockVersion calculates the expected version of the block after the
|
// CalcNextBlockVersion calculates the expected version of the block after the
|
||||||
// end of the current best chain based on the state of started and locked in
|
// end of the current selected tip based on the state of started and locked in
|
||||||
// rule change deployments.
|
// rule change deployments.
|
||||||
//
|
//
|
||||||
// This function is safe for concurrent access.
|
// This function is safe for concurrent access.
|
||||||
@ -228,12 +228,12 @@ func (dag *BlockDAG) CalcNextBlockVersion() (int32, error) {
|
|||||||
// when new rules have been activated and every block for those about to be
|
// when new rules have been activated and every block for those about to be
|
||||||
// activated.
|
// activated.
|
||||||
//
|
//
|
||||||
// This function MUST be called with the chain state lock held (for writes)
|
// This function MUST be called with the DAG state lock held (for writes)
|
||||||
func (dag *BlockDAG) warnUnknownRuleActivations(node *blockNode) error {
|
func (dag *BlockDAG) warnUnknownRuleActivations(node *blockNode) error {
|
||||||
// Warn if any unknown new rules are either about to activate or have
|
// Warn if any unknown new rules are either about to activate or have
|
||||||
// already been activated.
|
// already been activated.
|
||||||
for bit := uint32(0); bit < vbNumBits; bit++ {
|
for bit := uint32(0); bit < vbNumBits; bit++ {
|
||||||
checker := bitConditionChecker{bit: bit, chain: dag}
|
checker := bitConditionChecker{bit: bit, dag: dag}
|
||||||
cache := &dag.warningCaches[bit]
|
cache := &dag.warningCaches[bit]
|
||||||
state, err := dag.thresholdState(node.selectedParent, checker, cache)
|
state, err := dag.thresholdState(node.selectedParent, checker, cache)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -262,7 +262,7 @@ func (dag *BlockDAG) warnUnknownRuleActivations(node *blockNode) error {
|
|||||||
// warnUnknownVersions logs a warning if a high enough percentage of the last
|
// warnUnknownVersions logs a warning if a high enough percentage of the last
|
||||||
// blocks have unexpected versions.
|
// blocks have unexpected versions.
|
||||||
//
|
//
|
||||||
// This function MUST be called with the chain state lock held (for writes)
|
// This function MUST be called with the DAG state lock held (for writes)
|
||||||
func (dag *BlockDAG) warnUnknownVersions(node *blockNode) error {
|
func (dag *BlockDAG) warnUnknownVersions(node *blockNode) error {
|
||||||
// Nothing to do if already warned.
|
// Nothing to do if already warned.
|
||||||
if dag.unknownVersionsWarned {
|
if dag.unknownVersionsWarned {
|
||||||
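As a companion to the calcNextBlockVersion hunks above, here is a hedged sketch of the underlying idea: the next block version starts from a version-bits marker and ORs in the bit of every deployment that is being voted on or locked in. The vbTopBits constant, the state-lookup callback, and the type names are assumptions made for illustration only.

package sketch

// thresholdState and deployment are minimal stand-ins for the real
// ThresholdState and dagconfig.ConsensusDeployment types.
type thresholdState int

const (
    thresholdStarted thresholdState = iota
    thresholdLockedIn
)

type deployment struct {
    BitNumber uint8
}

// nextBlockVersion ORs in the bit of every deployment that is currently in
// the started or locked-in state, mirroring the loop in calcNextBlockVersion.
func nextBlockVersion(deployments []deployment, stateOf func(deployment) thresholdState) int32 {
    // Assumed BIP 9-style top-bits marker; not a value taken from the sources.
    const vbTopBits = 0x20000000
    expectedVersion := uint32(vbTopBits)
    for _, d := range deployments {
        if s := stateOf(d); s == thresholdStarted || s == thresholdLockedIn {
            expectedVersion |= uint32(1) << d.BitNumber
        }
    }
    return int32(expectedVersion)
}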
|
@ -41,7 +41,7 @@ func ActiveConfig() *ConfigFlags {
|
|||||||
// See loadConfig for details on the configuration load process.
|
// See loadConfig for details on the configuration load process.
|
||||||
type ConfigFlags struct {
|
type ConfigFlags struct {
|
||||||
DataDir string `short:"b" long:"datadir" description:"Location of the kaspad data directory"`
|
DataDir string `short:"b" long:"datadir" description:"Location of the kaspad data directory"`
|
||||||
DbType string `long:"dbtype" description:"Database backend to use for the Block Chain"`
|
DbType string `long:"dbtype" description:"Database backend to use for the Block DAG"`
|
||||||
InFile string `short:"i" long:"infile" description:"File containing the block(s)"`
|
InFile string `short:"i" long:"infile" description:"File containing the block(s)"`
|
||||||
TxIndex bool `long:"txindex" description:"Build a full hash-based transaction index which makes all transactions available via the getrawtransaction RPC"`
|
TxIndex bool `long:"txindex" description:"Build a full hash-based transaction index which makes all transactions available via the getrawtransaction RPC"`
|
||||||
AddrIndex bool `long:"addrindex" description:"Build a full address-based transaction index which makes the searchrawtransactions RPC available"`
|
AddrIndex bool `long:"addrindex" description:"Build a full address-based transaction index which makes the searchrawtransactions RPC available"`
|
||||||
|
@ -115,7 +115,7 @@ func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ensure the block follows all of the chain rules.
|
// Ensure the block follows all of the DAG rules.
|
||||||
isOrphan, delay, err := bi.dag.ProcessBlock(block,
|
isOrphan, delay, err := bi.dag.ProcessBlock(block,
|
||||||
blockdag.BFFastAdd)
|
blockdag.BFFastAdd)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -45,7 +45,7 @@ func ActiveConfig() *ConfigFlags {
|
|||||||
// their one-line usage.
|
// their one-line usage.
|
||||||
func listCommands() {
|
func listCommands() {
|
||||||
const (
|
const (
|
||||||
categoryChain uint8 = iota
|
categoryDAG uint8 = iota
|
||||||
numCategories
|
numCategories
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -73,13 +73,13 @@ func listCommands() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Categorize the command based on the usage flags.
|
// Categorize the command based on the usage flags.
|
||||||
category := categoryChain
|
category := categoryDAG
|
||||||
categorized[category] = append(categorized[category], usage)
|
categorized[category] = append(categorized[category], usage)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Display the command according to their categories.
|
// Display the command according to their categories.
|
||||||
categoryTitles := make([]string, numCategories)
|
categoryTitles := make([]string, numCategories)
|
||||||
categoryTitles[categoryChain] = "Chain Server Commands:"
|
categoryTitles[categoryDAG] = "DAG Server Commands:"
|
||||||
for category := uint8(0); category < numCategories; category++ {
|
for category := uint8(0); category < numCategories; category++ {
|
||||||
fmt.Println(categoryTitles[category])
|
fmt.Println(categoryTitles[category])
|
||||||
for _, usage := range categorized[category] {
|
for _, usage := range categorized[category] {
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
// Package dagconfig defines chain configuration parameters.
|
// Package dagconfig defines DAG configuration parameters.
|
||||||
//
|
//
|
||||||
// In addition to the main Kaspa network, which is intended for the transfer
|
// In addition to the main Kaspa network, which is intended for the transfer
|
||||||
// of monetary value, there also exists two currently active standard networks:
|
// of monetary value, there also exists two currently active standard networks:
|
||||||
@ -7,7 +7,7 @@
|
|||||||
// handle errors where input intended for one network is used on an application
|
// handle errors where input intended for one network is used on an application
|
||||||
// instance running on a different network.
|
// instance running on a different network.
|
||||||
//
|
//
|
||||||
// For library packages, dagconfig provides the ability to lookup chain
|
// For library packages, dagconfig provides the ability to lookup DAG
|
||||||
// parameters and encoding magics when passed a *Params. Older APIs not updated
|
// parameters and encoding magics when passed a *Params. Older APIs not updated
|
||||||
// to the new convention of passing a *Params may lookup the parameters for a
|
// to the new convention of passing a *Params may lookup the parameters for a
|
||||||
// wire.KaspaNet using ParamsForNet, but be aware that this usage is
|
// wire.KaspaNet using ParamsForNet, but be aware that this usage is
|
||||||
@ -32,21 +32,21 @@
|
|||||||
// var testnet = flag.Bool("testnet", false, "operate on the testnet Kaspa network")
|
// var testnet = flag.Bool("testnet", false, "operate on the testnet Kaspa network")
|
||||||
//
|
//
|
||||||
// // By default (without -testnet), use mainnet.
|
// // By default (without -testnet), use mainnet.
|
||||||
// var chainParams = &dagconfig.MainNetParams
|
// var dagParams = &dagconfig.MainNetParams
|
||||||
//
|
//
|
||||||
// func main() {
|
// func main() {
|
||||||
// flag.Parse()
|
// flag.Parse()
|
||||||
//
|
//
|
||||||
// // Modify active network parameters if operating on testnet.
|
// // Modify active network parameters if operating on testnet.
|
||||||
// if *testnet {
|
// if *testnet {
|
||||||
// chainParams = &dagconfig.TestNetParams
|
// dagParams = &dagconfig.TestNetParams
|
||||||
// }
|
// }
|
||||||
//
|
//
|
||||||
// // later...
|
// // later...
|
||||||
//
|
//
|
||||||
// // Create and print new payment address, specific to the active network.
|
// // Create and print new payment address, specific to the active network.
|
||||||
// pubKeyHash := make([]byte, 20)
|
// pubKeyHash := make([]byte, 20)
|
||||||
// addr, err := util.NewAddressPubKeyHash(pubKeyHash, chainParams)
|
// addr, err := util.NewAddressPubKeyHash(pubKeyHash, dagParams)
|
||||||
// if err != nil {
|
// if err != nil {
|
||||||
// log.Fatal(err)
|
// log.Fatal(err)
|
||||||
// }
|
// }
|
||||||
|
@ -73,7 +73,7 @@ var genesisBlock = wire.MsgBlock{
|
|||||||
Transactions: []*wire.MsgTx{genesisCoinbaseTx},
|
Transactions: []*wire.MsgTx{genesisCoinbaseTx},
|
||||||
}
|
}
|
||||||
|
|
||||||
// regTestGenesisHash is the hash of the first block in the block chain for the
|
// regTestGenesisHash is the hash of the first block in the block DAG for the
|
||||||
// regression test network (genesis block).
|
// regression test network (genesis block).
|
||||||
var regTestGenesisHash = genesisHash
|
var regTestGenesisHash = genesisHash
|
||||||
|
|
||||||
@ -82,11 +82,11 @@ var regTestGenesisHash = genesisHash
|
|||||||
// the main network.
|
// the main network.
|
||||||
var regTestGenesisMerkleRoot = genesisMerkleRoot
|
var regTestGenesisMerkleRoot = genesisMerkleRoot
|
||||||
|
|
||||||
// regTestGenesisBlock defines the genesis block of the block chain which serves
|
// regTestGenesisBlock defines the genesis block of the block DAG which serves
|
||||||
// as the public transaction ledger for the regression test network.
|
// as the public transaction ledger for the regression test network.
|
||||||
var regTestGenesisBlock = genesisBlock
|
var regTestGenesisBlock = genesisBlock
|
||||||
|
|
||||||
// testNetGenesisHash is the hash of the first block in the block chain for the
|
// testNetGenesisHash is the hash of the first block in the block DAG for the
|
||||||
// test network.
|
// test network.
|
||||||
var testNetGenesisHash = genesisHash
|
var testNetGenesisHash = genesisHash
|
||||||
|
|
||||||
@ -95,11 +95,11 @@ var testNetGenesisHash = genesisHash
|
|||||||
// network.
|
// network.
|
||||||
var testNetGenesisMerkleRoot = genesisMerkleRoot
|
var testNetGenesisMerkleRoot = genesisMerkleRoot
|
||||||
|
|
||||||
// testNetGenesisBlock defines the genesis block of the block chain which
|
// testNetGenesisBlock defines the genesis block of the block DAG which
|
||||||
// serves as the public transaction ledger for the test network.
|
// serves as the public transaction ledger for the test network.
|
||||||
var testNetGenesisBlock = genesisBlock
|
var testNetGenesisBlock = genesisBlock
|
||||||
|
|
||||||
// simNetGenesisHash is the hash of the first block in the block chain for the
|
// simNetGenesisHash is the hash of the first block in the block DAG for the
|
||||||
// simulation test network.
|
// simulation test network.
|
||||||
var simNetGenesisHash = genesisHash
|
var simNetGenesisHash = genesisHash
|
||||||
|
|
||||||
@ -108,7 +108,7 @@ var simNetGenesisHash = genesisHash
|
|||||||
// the main network.
|
// the main network.
|
||||||
var simNetGenesisMerkleRoot = genesisMerkleRoot
|
var simNetGenesisMerkleRoot = genesisMerkleRoot
|
||||||
|
|
||||||
// simNetGenesisBlock defines the genesis block of the block chain which serves
|
// simNetGenesisBlock defines the genesis block of the block DAG which serves
|
||||||
// as the public transaction ledger for the simulation test network.
|
// as the public transaction ledger for the simulation test network.
|
||||||
var simNetGenesisBlock = genesisBlock
|
var simNetGenesisBlock = genesisBlock
|
||||||
|
|
||||||
@ -129,7 +129,7 @@ var devNetGenesisHash = daghash.Hash([daghash.HashSize]byte{
|
|||||||
// for the development network.
|
// for the development network.
|
||||||
var devNetGenesisMerkleRoot = genesisMerkleRoot
|
var devNetGenesisMerkleRoot = genesisMerkleRoot
|
||||||
|
|
||||||
// devNetGenesisBlock defines the genesis block of the block chain which serves as the
|
// devNetGenesisBlock defines the genesis block of the block DAG which serves as the
|
||||||
// public transaction ledger for the development network.
|
// public transaction ledger for the development network.
|
||||||
var devNetGenesisBlock = wire.MsgBlock{
|
var devNetGenesisBlock = wire.MsgBlock{
|
||||||
Header: wire.BlockHeader{
|
Header: wire.BlockHeader{
|
||||||
|
@ -17,7 +17,7 @@ import (
|
|||||||
"github.com/kaspanet/kaspad/wire"
|
"github.com/kaspanet/kaspad/wire"
|
||||||
)
|
)
|
||||||
|
|
||||||
// These variables are the chain proof-of-work limit parameters for each default
|
// These variables are the DAG proof-of-work limit parameters for each default
|
||||||
// network.
|
// network.
|
||||||
var (
|
var (
|
||||||
// bigOne is 1 represented as a big.Int. It is defined here to avoid
|
// bigOne is 1 represented as a big.Int. It is defined here to avoid
|
||||||
@ -103,7 +103,7 @@ type Params struct {
|
|||||||
// as one method to discover peers.
|
// as one method to discover peers.
|
||||||
DNSSeeds []string
|
DNSSeeds []string
|
||||||
|
|
||||||
// GenesisBlock defines the first block of the chain.
|
// GenesisBlock defines the first block of the DAG.
|
||||||
GenesisBlock *wire.MsgBlock
|
GenesisBlock *wire.MsgBlock
|
||||||
|
|
||||||
// GenesisHash is the starting block hash.
|
// GenesisHash is the starting block hash.
|
||||||
@ -414,7 +414,7 @@ var DevNetParams = Params{
|
|||||||
Net: wire.DevNet,
|
Net: wire.DevNet,
|
||||||
RPCPort: "18334",
|
RPCPort: "18334",
|
||||||
DefaultPort: "18333",
|
DefaultPort: "18333",
|
||||||
DNSSeeds: []string{"devnet-dnsseed.daglabs.com"},
|
DNSSeeds: []string{},
|
||||||
|
|
||||||
// DAG parameters
|
// DAG parameters
|
||||||
GenesisBlock: &devNetGenesisBlock,
|
GenesisBlock: &devNetGenesisBlock,
|
||||||
|
@ -31,7 +31,7 @@ var (
|
|||||||
// config defines the global configuration options.
|
// config defines the global configuration options.
|
||||||
type config struct {
|
type config struct {
|
||||||
DataDir string `short:"b" long:"datadir" description:"Location of the kaspad data directory"`
|
DataDir string `short:"b" long:"datadir" description:"Location of the kaspad data directory"`
|
||||||
DbType string `long:"dbtype" description:"Database backend to use for the Block Chain"`
|
DbType string `long:"dbtype" description:"Database backend to use for the Block DAG"`
|
||||||
TestNet bool `long:"testnet" description:"Use the test network"`
|
TestNet bool `long:"testnet" description:"Use the test network"`
|
||||||
RegressionTest bool `long:"regtest" description:"Use the regression test network"`
|
RegressionTest bool `long:"regtest" description:"Use the regression test network"`
|
||||||
SimNet bool `long:"simnet" description:"Use the simulation test network"`
|
SimNet bool `long:"simnet" description:"Use the simulation test network"`
|
||||||
|
@ -100,7 +100,7 @@ func (bi *blockImporter) readBlock() ([]byte, error) {
|
|||||||
// are skipped and orphan blocks are considered errors. Returns whether the
|
// are skipped and orphan blocks are considered errors. Returns whether the
|
||||||
// block was imported along with any potential errors.
|
// block was imported along with any potential errors.
|
||||||
//
|
//
|
||||||
// NOTE: This is not a safe import as it does not verify chain rules.
|
// NOTE: This is not a safe import as it does not verify DAG rules.
|
||||||
func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
|
func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
|
||||||
// Deserialize the block which includes checks for malformed blocks.
|
// Deserialize the block which includes checks for malformed blocks.
|
||||||
block, err := util.NewBlockFromBytes(serializedBlock)
|
block, err := util.NewBlockFromBytes(serializedBlock)
|
||||||
@ -139,11 +139,11 @@ func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
|
|||||||
if !exists {
|
if !exists {
|
||||||
return false, errors.Errorf("import file contains block "+
|
return false, errors.Errorf("import file contains block "+
|
||||||
"%s which does not link to the available "+
|
"%s which does not link to the available "+
|
||||||
"block chain", parentHash)
|
"block DAG", parentHash)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Put the blocks into the database with no checking of chain rules.
|
// Put the blocks into the database with no checking of DAG rules.
|
||||||
err = bi.db.Update(func(dbTx database.Tx) error {
|
err = bi.db.Update(func(dbTx database.Tx) error {
|
||||||
return dbTx.StoreBlock(block)
|
return dbTx.StoreBlock(block)
|
||||||
})
|
})
|
||||||
|
@ -81,7 +81,7 @@ func realMain() error {
|
|||||||
"Insecurely import bulk block data from bootstrap.dat",
|
"Insecurely import bulk block data from bootstrap.dat",
|
||||||
"Insecurely import bulk block data from bootstrap.dat. "+
|
"Insecurely import bulk block data from bootstrap.dat. "+
|
||||||
"WARNING: This is NOT secure because it does NOT "+
|
"WARNING: This is NOT secure because it does NOT "+
|
||||||
"verify chain rules. It is only provided for testing "+
|
"verify DAG rules. It is only provided for testing "+
|
||||||
"purposes.", &importCfg)
|
"purposes.", &importCfg)
|
||||||
parser.AddCommand("loadheaders",
|
parser.AddCommand("loadheaders",
|
||||||
"Time how long to load headers for all blocks in the database",
|
"Time how long to load headers for all blocks in the database",
|
||||||
|
@ -7,8 +7,6 @@ Package database provides a block and metadata storage database.
|
|||||||
|
|
||||||
Overview
|
Overview
|
||||||
|
|
||||||
As of Feb 2016, there are over 400,000 blocks in the Bitcoin block chain and
|
|
||||||
and over 112 million transactions (which turns out to be over 60GB of data).
|
|
||||||
This package provides a database layer to store and retrieve this data in a
|
This package provides a database layer to store and retrieve this data in a
|
||||||
simple and efficient manner.
|
simple and efficient manner.
|
||||||
|
|
||||||
|
@ -38,7 +38,7 @@ var (
|
|||||||
blockDataNet = wire.MainNet
|
blockDataNet = wire.MainNet
|
||||||
|
|
||||||
// blockDataFile is the path to a file containing the first 256 blocks
|
// blockDataFile is the path to a file containing the first 256 blocks
|
||||||
// of the block chain.
|
// of the block DAG.
|
||||||
blockDataFile = filepath.Join("..", "testdata", "blocks1-256.bz2")
|
blockDataFile = filepath.Join("..", "testdata", "blocks1-256.bz2")
|
||||||
|
|
||||||
// errSubTestFail is used to signal that a sub test returned false.
|
// errSubTestFail is used to signal that a sub test returned false.
|
||||||
|
@ -31,7 +31,7 @@ var (
|
|||||||
blockDataNet = wire.MainNet
|
blockDataNet = wire.MainNet
|
||||||
|
|
||||||
// blockDataFile is the path to a file containing the first 256 blocks
|
// blockDataFile is the path to a file containing the first 256 blocks
|
||||||
// of the block chain.
|
// of the block DAG.
|
||||||
blockDataFile = filepath.Join("..", "testdata", "blocks1-256.bz2")
|
blockDataFile = filepath.Join("..", "testdata", "blocks1-256.bz2")
|
||||||
|
|
||||||
// errSubTestFail is used to signal that a sub test returned false.
|
// errSubTestFail is used to signal that a sub test returned false.
|
||||||
|
2
doc.go
@ -75,7 +75,7 @@ Application Options:
|
|||||||
--simnet Use the simulation test network
|
--simnet Use the simulation test network
|
||||||
--uacomment= Comment to add to the user agent --
|
--uacomment= Comment to add to the user agent --
|
||||||
See BIP 14 for more information.
|
See BIP 14 for more information.
|
||||||
--dbtype= Database backend to use for the Block Chain (ffldb)
|
--dbtype= Database backend to use for the Block DAG (ffldb)
|
||||||
--profile= Enable HTTP profiling on given port -- NOTE port
|
--profile= Enable HTTP profiling on given port -- NOTE port
|
||||||
must be between 1024 and 65536
|
must be between 1024 and 65536
|
||||||
--cpuprofile= Write CPU profile to the specified file
|
--cpuprofile= Write CPU profile to the specified file
|
||||||
|
@ -39,7 +39,7 @@ be an exhaustive list.
|
|||||||
- Maintain a pool of fully validated transactions
|
- Maintain a pool of fully validated transactions
|
||||||
- Reject non-fully-spent duplicate transactions
|
- Reject non-fully-spent duplicate transactions
|
||||||
- Reject coinbase transactions
|
- Reject coinbase transactions
|
||||||
- Reject double spends (both from the chain and other transactions in pool)
|
- Reject double spends (both from the DAG and other transactions in pool)
|
||||||
- Reject invalid transactions according to the network consensus rules
|
- Reject invalid transactions according to the network consensus rules
|
||||||
- Full script execution and validation with signature cache support
|
- Full script execution and validation with signature cache support
|
||||||
- Individual transaction query support
|
- Individual transaction query support
|
||||||
@ -68,9 +68,9 @@ Errors
|
|||||||
|
|
||||||
Errors returned by this package are either the raw errors provided by underlying
|
Errors returned by this package are either the raw errors provided by underlying
|
||||||
calls or of type mempool.RuleError. Since there are two classes of rules
|
calls or of type mempool.RuleError. Since there are two classes of rules
|
||||||
(mempool acceptance rules and blockchain (consensus) acceptance rules), the
|
(mempool acceptance rules and blockDAG (consensus) acceptance rules), the
|
||||||
mempool.RuleError type contains a single Err field which will, in turn, either
|
mempool.RuleError type contains a single Err field which will, in turn, either
|
||||||
be a mempool.TxRuleError or a blockchain.RuleError. The first indicates a
|
be a mempool.TxRuleError or a blockdag.RuleError. The first indicates a
|
||||||
violation of mempool acceptance rules while the latter indicates a violation of
|
violation of mempool acceptance rules while the latter indicates a violation of
|
||||||
consensus acceptance rules. This allows the caller to easily differentiate
|
consensus acceptance rules. This allows the caller to easily differentiate
|
||||||
between unexpected errors, such as database errors, versus errors due to rule
|
between unexpected errors, such as database errors, versus errors due to rule
|
||||||
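The two error classes described in the Errors paragraphs above can be told apart with a simple type switch. The sketch below uses stand-in types (TxRuleError for mempool.TxRuleError, DAGRuleError for blockdag.RuleError) so it compiles on its own; it illustrates the pattern the doc text describes, not the package's actual error-handling code.

package sketch

// TxRuleError and DAGRuleError stand in for mempool.TxRuleError and
// blockdag.RuleError; only the names matter for this sketch.
type TxRuleError struct{ msg string }

func (e TxRuleError) Error() string { return e.msg }

type DAGRuleError struct{ msg string }

func (e DAGRuleError) Error() string { return e.msg }

// RuleError mirrors the shape described above: a single wrapped Err field.
type RuleError struct{ Err error }

func (e RuleError) Error() string { return e.Err.Error() }

// classify shows how a caller can separate mempool policy violations from
// consensus violations and from unexpected errors.
func classify(err error) string {
    ruleErr, ok := err.(RuleError)
    if !ok {
        return "unexpected error (for example a database failure)"
    }
    switch ruleErr.Err.(type) {
    case TxRuleError:
        return "mempool acceptance rule violation"
    case DAGRuleError:
        return "consensus (DAG) rule violation"
    default:
        return "unknown rule violation"
    }
}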
|
@ -14,7 +14,7 @@ import (
|
|||||||
// rules. The caller can use type assertions to determine if a failure was
|
// rules. The caller can use type assertions to determine if a failure was
|
||||||
// specifically due to a rule violation and use the Err field to access the
|
// specifically due to a rule violation and use the Err field to access the
|
||||||
// underlying error, which will be either a TxRuleError or a
|
// underlying error, which will be either a TxRuleError or a
|
||||||
// blockchain.RuleError.
|
// blockdag.RuleError.
|
||||||
type RuleError struct {
|
type RuleError struct {
|
||||||
Err error
|
Err error
|
||||||
}
|
}
|
||||||
@ -69,7 +69,7 @@ func extractRejectCode(err error) (wire.RejectCode, bool) {
|
|||||||
|
|
||||||
switch err := err.(type) {
|
switch err := err.(type) {
|
||||||
case blockdag.RuleError:
|
case blockdag.RuleError:
|
||||||
// Convert the chain error to a reject code.
|
// Convert the DAG error to a reject code.
|
||||||
var code wire.RejectCode
|
var code wire.RejectCode
|
||||||
switch err.ErrorCode {
|
switch err.ErrorCode {
|
||||||
// Rejected due to duplicate.
|
// Rejected due to duplicate.
|
||||||
|
@ -58,13 +58,13 @@ type Config struct {
|
|||||||
// associated with.
|
// associated with.
|
||||||
DAGParams *dagconfig.Params
|
DAGParams *dagconfig.Params
|
||||||
|
|
||||||
// DAGChainHeight defines the function to use to access the block height of
|
// DAGChainHeight defines the function to use to access the chain
|
||||||
// the current best chain.
|
// height of the DAG.
|
||||||
DAGChainHeight func() uint64
|
DAGChainHeight func() uint64
|
||||||
|
|
||||||
// MedianTimePast defines the function to use in order to access the
|
// MedianTimePast defines the function to use in order to access the
|
||||||
// median time past calculated from the point-of-view of the current
|
// median time past calculated from the point-of-view of the current
|
||||||
// chain tip within the best chain.
|
// selected tip.
|
||||||
MedianTimePast func() time.Time
|
MedianTimePast func() time.Time
|
||||||
|
|
||||||
// CalcSequenceLockNoLock defines the function to use in order to generate
|
// CalcSequenceLockNoLock defines the function to use in order to generate
|
||||||
@ -661,7 +661,7 @@ func (mp *TxPool) RemoveTransactions(txs []*util.Tx) error {
|
|||||||
// RemoveDoubleSpends removes all transactions which spend outputs spent by the
|
// RemoveDoubleSpends removes all transactions which spend outputs spent by the
|
||||||
// passed transaction from the memory pool. Removing those transactions then
|
// passed transaction from the memory pool. Removing those transactions then
|
||||||
// leads to removing all transactions which rely on them, recursively. This is
|
// leads to removing all transactions which rely on them, recursively. This is
|
||||||
// necessary when a block is connected to the main chain because the block may
|
// necessary when a block is connected to the DAG because the block may
|
||||||
// contain transactions which were previously unknown to the memory pool.
|
// contain transactions which were previously unknown to the memory pool.
|
||||||
//
|
//
|
||||||
// This function is safe for concurrent access.
|
// This function is safe for concurrent access.
|
||||||
@ -874,9 +874,9 @@ func (mp *TxPool) maybeAcceptTransaction(tx *util.Tx, isNew, rejectDupOrphans bo
|
|||||||
// transactions already in the pool as that would ultimately result in a
|
// transactions already in the pool as that would ultimately result in a
|
||||||
// double spend. This check is intended to be quick and therefore only
|
// double spend. This check is intended to be quick and therefore only
|
||||||
// detects double spends within the transaction pool itself. The
|
// detects double spends within the transaction pool itself. The
|
||||||
// transaction could still be double spending coins from the main chain
|
// transaction could still be double spending coins from the DAG
|
||||||
// at this point. There is a more in-depth check that happens later
|
// at this point. There is a more in-depth check that happens later
|
||||||
// after fetching the referenced transaction inputs from the main chain
|
// after fetching the referenced transaction inputs from the DAG
|
||||||
// which examines the actual spend data and prevents double spends.
|
// which examines the actual spend data and prevents double spends.
|
||||||
err = mp.checkPoolDoubleSpend(tx)
|
err = mp.checkPoolDoubleSpend(tx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -945,7 +945,7 @@ func (mp *TxPool) maybeAcceptTransaction(tx *util.Tx, isNew, rejectDupOrphans bo
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Perform several checks on the transaction inputs using the invariant
|
// Perform several checks on the transaction inputs using the invariant
|
||||||
// rules in blockchain for what transactions are allowed into blocks.
|
// rules in blockDAG for what transactions are allowed into blocks.
|
||||||
// Also returns the fees associated with the transaction which will be
|
// Also returns the fees associated with the transaction which will be
|
||||||
// used later.
|
// used later.
|
||||||
txFee, err := blockdag.CheckTransactionInputsAndCalulateFee(tx, nextBlockBlueScore,
|
txFee, err := blockdag.CheckTransactionInputsAndCalulateFee(tx, nextBlockBlueScore,
|
||||||
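The quick in-pool double-spend check mentioned in the maybeAcceptTransaction hunks above boils down to a map keyed by spent outpoint. The sketch below is a simplified, hypothetical version of that structure; the names (outpoint, tx, pool) and the string transaction IDs are illustrative only, and the deeper check against the DAG's UTXO set is deliberately not shown.

package sketch

// outpoint and tx are minimal stand-ins for wire.Outpoint and util.Tx.
type outpoint struct {
    txID  string
    index uint32
}

type tx struct {
    id  string
    ins []outpoint
}

// pool records, per spent outpoint, the pool transaction that spends it,
// which is all the quick in-pool double-spend check needs.
type pool struct {
    outpoints map[outpoint]*tx
}

// hasPoolDoubleSpend mirrors the quick check: report true if any input of t
// is already spent by another transaction in the pool. The more expensive
// check against the DAG happens later and is not shown here.
func (p *pool) hasPoolDoubleSpend(t *tx) bool {
    for _, in := range t.ins {
        if _, spent := p.outpoints[in]; spent {
            return true
        }
    }
    return false
}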
|
@ -30,34 +30,34 @@ import (
|
|||||||
"github.com/kaspanet/kaspad/wire"
|
"github.com/kaspanet/kaspad/wire"
|
||||||
)
|
)
|
||||||
|
|
||||||
// fakeChain is used by the pool harness to provide generated test utxos and
|
// fakeDAG is used by the pool harness to provide generated test utxos and
|
||||||
// a current faked chain height to the pool callbacks. This, in turn, allows
|
// a current faked blueScore to the pool callbacks. This, in turn, allows
|
||||||
// transactions to appear as though they are spending completely valid utxos.
|
// transactions to appear as though they are spending completely valid utxos.
|
||||||
type fakeChain struct {
|
type fakeDAG struct {
|
||||||
sync.RWMutex
|
sync.RWMutex
|
||||||
currentHeight uint64
|
currentBlueScore uint64
|
||||||
medianTimePast time.Time
|
medianTimePast time.Time
|
||||||
}
|
}
|
||||||
|
|
||||||
// BestHeight returns the current height associated with the fake chain
|
// BlueScore returns the current blue score associated with the fake DAG
|
||||||
// instance.
|
// instance.
|
||||||
func (s *fakeChain) BestHeight() uint64 {
|
func (s *fakeDAG) BlueScore() uint64 {
|
||||||
s.RLock()
|
s.RLock()
|
||||||
height := s.currentHeight
|
blueScore := s.currentBlueScore
|
||||||
s.RUnlock()
|
s.RUnlock()
|
||||||
return height
|
return blueScore
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetHeight sets the current height associated with the fake chain instance.
|
// SetBlueScore sets the current blueScore associated with the fake DAG instance.
|
||||||
func (s *fakeChain) SetHeight(height uint64) {
|
func (s *fakeDAG) SetBlueScore(blueScore uint64) {
|
||||||
s.Lock()
|
s.Lock()
|
||||||
s.currentHeight = height
|
s.currentBlueScore = blueScore
|
||||||
s.Unlock()
|
s.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
// MedianTimePast returns the current median time past associated with the fake
|
// MedianTimePast returns the current median time past associated with the fake
|
||||||
// chain instance.
|
// DAG instance.
|
||||||
func (s *fakeChain) MedianTimePast() time.Time {
|
func (s *fakeDAG) MedianTimePast() time.Time {
|
||||||
s.RLock()
|
s.RLock()
|
||||||
mtp := s.medianTimePast
|
mtp := s.medianTimePast
|
||||||
s.RUnlock()
|
s.RUnlock()
|
||||||
@ -65,8 +65,8 @@ func (s *fakeChain) MedianTimePast() time.Time {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// SetMedianTimePast sets the current median time past associated with the fake
|
// SetMedianTimePast sets the current median time past associated with the fake
|
||||||
// chain instance.
|
// DAG instance.
|
||||||
func (s *fakeChain) SetMedianTimePast(mtp time.Time) {
|
func (s *fakeDAG) SetMedianTimePast(mtp time.Time) {
|
||||||
s.Lock()
|
s.Lock()
|
||||||
s.medianTimePast = mtp
|
s.medianTimePast = mtp
|
||||||
s.Unlock()
|
s.Unlock()
|
||||||
@ -99,14 +99,14 @@ func txOutToSpendableOutpoint(tx *util.Tx, outputNum uint32) spendableOutpoint {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// poolHarness provides a harness that includes functionality for creating and
|
// poolHarness provides a harness that includes functionality for creating and
|
||||||
// signing transactions as well as a fake chain that provides utxos for use in
|
// signing transactions as well as a fake DAG that provides utxos for use in
|
||||||
// generating valid transactions.
|
// generating valid transactions.
|
||||||
type poolHarness struct {
|
type poolHarness struct {
|
||||||
signatureScript []byte
|
signatureScript []byte
|
||||||
payScript []byte
|
payScript []byte
|
||||||
dagParams *dagconfig.Params
|
dagParams *dagconfig.Params
|
||||||
|
|
||||||
chain *fakeChain
|
dag *fakeDAG
|
||||||
txPool *TxPool
|
txPool *TxPool
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -303,8 +303,8 @@ func (tc *testContext) mineTransactions(transactions []*util.Tx, numberOfBlocks
|
|||||||
}
|
}
|
||||||
|
|
||||||
// newPoolHarness returns a new instance of a pool harness initialized with a
|
// newPoolHarness returns a new instance of a pool harness initialized with a
|
||||||
// fake chain and a TxPool bound to it that is configured with a policy suitable
|
// fake DAG and a TxPool bound to it that is configured with a policy suitable
|
||||||
// for testing. Also, the fake chain is populated with the returned spendable
|
// for testing. Also, the fake DAG is populated with the returned spendable
|
||||||
// outputs so the caller can easily create new valid transactions which build
|
// outputs so the caller can easily create new valid transactions which build
|
||||||
// off of it.
|
// off of it.
|
||||||
func newPoolHarness(t *testing.T, dagParams *dagconfig.Params, numOutputs uint32, dbName string) (*testContext, []spendableOutpoint, func(), error) {
|
func newPoolHarness(t *testing.T, dagParams *dagconfig.Params, numOutputs uint32, dbName string) (*testContext, []spendableOutpoint, func(), error) {
|
||||||
@ -316,7 +316,7 @@ func newPoolHarness(t *testing.T, dagParams *dagconfig.Params, numOutputs uint32
|
|||||||
params := *dagParams
|
params := *dagParams
|
||||||
params.BlockCoinbaseMaturity = 0
|
params.BlockCoinbaseMaturity = 0
|
||||||
|
|
||||||
// Create a new database and chain instance to run tests against.
|
// Create a new database and DAG instance to run tests against.
|
||||||
dag, teardownFunc, err := blockdag.DAGSetup(dbName, blockdag.Config{
|
dag, teardownFunc, err := blockdag.DAGSetup(dbName, blockdag.Config{
|
||||||
DAGParams: ¶ms,
|
DAGParams: ¶ms,
|
||||||
})
|
})
|
||||||
@ -334,14 +334,14 @@ func newPoolHarness(t *testing.T, dagParams *dagconfig.Params, numOutputs uint32
|
|||||||
return nil, nil, nil, errors.Errorf("Failed to build harness signature script: %s", err)
|
return nil, nil, nil, errors.Errorf("Failed to build harness signature script: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create a new fake chain and harness bound to it.
|
// Create a new fake DAG and harness bound to it.
|
||||||
chain := &fakeChain{}
|
fDAG := &fakeDAG{}
|
||||||
harness := &poolHarness{
|
harness := &poolHarness{
|
||||||
signatureScript: signatureScript,
|
signatureScript: signatureScript,
|
||||||
payScript: scriptPubKey,
|
payScript: scriptPubKey,
|
||||||
dagParams: ¶ms,
|
dagParams: ¶ms,
|
||||||
|
|
||||||
chain: chain,
|
dag: fDAG,
|
||||||
txPool: New(&Config{
|
txPool: New(&Config{
|
||||||
DAG: dag,
|
DAG: dag,
|
||||||
Policy: Policy{
|
Policy: Policy{
|
||||||
@ -351,8 +351,8 @@ func newPoolHarness(t *testing.T, dagParams *dagconfig.Params, numOutputs uint32
|
|||||||
MaxTxVersion: 1,
|
MaxTxVersion: 1,
|
||||||
},
|
},
|
||||||
DAGParams: ¶ms,
|
DAGParams: ¶ms,
|
||||||
DAGChainHeight: chain.BestHeight,
|
DAGChainHeight: fDAG.BlueScore,
|
||||||
MedianTimePast: chain.MedianTimePast,
|
MedianTimePast: fDAG.MedianTimePast,
|
||||||
CalcSequenceLockNoLock: calcSequenceLock,
|
CalcSequenceLockNoLock: calcSequenceLock,
|
||||||
SigCache: nil,
|
SigCache: nil,
|
||||||
AddrIndex: nil,
|
AddrIndex: nil,
|
||||||
@ -363,13 +363,13 @@ func newPoolHarness(t *testing.T, dagParams *dagconfig.Params, numOutputs uint32
|
|||||||
|
|
||||||
// Mine numOutputs blocks to get numOutputs coinbase outpoints
|
// Mine numOutputs blocks to get numOutputs coinbase outpoints
|
||||||
outpoints := tc.mineTransactions(nil, uint64(numOutputs))
|
outpoints := tc.mineTransactions(nil, uint64(numOutputs))
|
||||||
curHeight := harness.chain.BestHeight()
|
curHeight := harness.dag.BlueScore()
|
||||||
if params.BlockCoinbaseMaturity != 0 {
|
if params.BlockCoinbaseMaturity != 0 {
|
||||||
harness.chain.SetHeight(params.BlockCoinbaseMaturity + curHeight)
|
harness.dag.SetBlueScore(params.BlockCoinbaseMaturity + curHeight)
|
||||||
} else {
|
} else {
|
||||||
harness.chain.SetHeight(curHeight + 1)
|
harness.dag.SetBlueScore(curHeight + 1)
|
||||||
}
|
}
|
||||||
harness.chain.SetMedianTimePast(time.Now())
|
harness.dag.SetMedianTimePast(time.Now())
|
||||||
|
|
||||||
return tc, outpoints, teardownFunc, nil
|
return tc, outpoints, teardownFunc, nil
|
||||||
}
|
}
|
||||||
@ -544,7 +544,7 @@ func TestProcessTransaction(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
//Checks that a coinbase transaction cannot be added to the mempool
|
//Checks that a coinbase transaction cannot be added to the mempool
|
||||||
curHeight := harness.chain.BestHeight()
|
curHeight := harness.dag.BlueScore()
|
||||||
coinbase, err := harness.CreateCoinbaseTx(curHeight+1, 1)
|
coinbase, err := harness.CreateCoinbaseTx(curHeight+1, 1)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("CreateCoinbaseTx: %v", err)
|
t.Errorf("CreateCoinbaseTx: %v", err)
|
||||||
|
@ -77,11 +77,10 @@ type Config struct {
|
|||||||
ShouldMineOnGenesis func() bool
|
ShouldMineOnGenesis func() bool
|
||||||
|
|
||||||
// IsCurrent defines the function to use to obtain whether or not the
|
// IsCurrent defines the function to use to obtain whether or not the
|
||||||
// block chain is current. This is used by the automatic persistent
|
// block DAG is current. This is used by the automatic persistent
|
||||||
// mining routine to determine whether or not it should attempt mining.
|
// mining routine to determine whether or not it should attempt mining.
|
||||||
// This is useful because there is no point in mining if the chain is
|
// This is useful because there is no point in mining if the DAG is
|
||||||
// not current since any solved blocks would be on a side chain and and
|
// not current since any solved blocks would end up red anyways.
|
||||||
// up orphaned anyways.
|
|
||||||
IsCurrent func() bool
|
IsCurrent func() bool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -105,7 +105,7 @@ func MinimumMedianTime(dagMedianTime time.Time) time.Time {
|
|||||||
|
|
||||||
// medianAdjustedTime returns the current time adjusted to ensure it is at least
|
// medianAdjustedTime returns the current time adjusted to ensure it is at least
|
||||||
// one second after the median timestamp of the last several blocks per the
|
// one second after the median timestamp of the last several blocks per the
|
||||||
// chain consensus rules.
|
// DAG consensus rules.
|
||||||
func medianAdjustedTime(dagMedianTime time.Time, timeSource blockdag.MedianTimeSource) time.Time {
|
func medianAdjustedTime(dagMedianTime time.Time, timeSource blockdag.MedianTimeSource) time.Time {
|
||||||
// The timestamp for the block must not be before the median timestamp
|
// The timestamp for the block must not be before the median timestamp
|
||||||
// of the last several blocks. Thus, choose the maximum between the
|
// of the last several blocks. Thus, choose the maximum between the
|
||||||
@ -125,7 +125,7 @@ func medianAdjustedTime(dagMedianTime time.Time, timeSource blockdag.MedianTimeS
|
|||||||
// BlkTmplGenerator provides a type that can be used to generate block templates
|
// BlkTmplGenerator provides a type that can be used to generate block templates
|
||||||
// based on a given mining policy and source of transactions to choose from.
|
// based on a given mining policy and source of transactions to choose from.
|
||||||
// It also houses additional state required in order to ensure the templates
|
// It also houses additional state required in order to ensure the templates
|
||||||
// are built on top of the current best chain and adhere to the consensus rules.
|
// are built on top of the current DAG and adhere to the consensus rules.
|
||||||
type BlkTmplGenerator struct {
|
type BlkTmplGenerator struct {
|
||||||
policy *Policy
|
policy *Policy
|
||||||
dagParams *dagconfig.Params
|
dagParams *dagconfig.Params
|
||||||
@ -139,7 +139,7 @@ type BlkTmplGenerator struct {
|
|||||||
// policy using transactions from the provided transaction source.
|
// policy using transactions from the provided transaction source.
|
||||||
//
|
//
|
||||||
// The additional state-related fields are required in order to ensure the
|
// The additional state-related fields are required in order to ensure the
|
||||||
// templates are built on top of the current best chain and adhere to the
|
// templates are built on top of the current DAG and adhere to the
|
||||||
// consensus rules.
|
// consensus rules.
|
||||||
func NewBlkTmplGenerator(policy *Policy, params *dagconfig.Params,
|
func NewBlkTmplGenerator(policy *Policy, params *dagconfig.Params,
|
||||||
txSource TxSource, dag *blockdag.BlockDAG,
|
txSource TxSource, dag *blockdag.BlockDAG,
|
||||||
@ -174,7 +174,7 @@ func NewBlkTmplGenerator(policy *Policy, params *dagconfig.Params,
|
|||||||
// policy settings are all taken into account.
|
// policy settings are all taken into account.
|
||||||
//
|
//
|
||||||
// Transactions which only spend outputs from other transactions already in the
|
// Transactions which only spend outputs from other transactions already in the
|
||||||
// block chain are immediately added to a priority queue which either
|
// block DAG are immediately added to a priority queue which either
|
||||||
// prioritizes based on the priority (then fee per kilobyte) or the fee per
|
// prioritizes based on the priority (then fee per kilobyte) or the fee per
|
||||||
// kilobyte (then priority) depending on whether or not the BlockPrioritySize
|
// kilobyte (then priority) depending on whether or not the BlockPrioritySize
|
||||||
// policy setting allots space for high-priority transactions. Transactions
|
// policy setting allots space for high-priority transactions. Transactions
|
||||||
@ -229,7 +229,7 @@ func (g *BlkTmplGenerator) NewBlockTemplate(payToAddress util.Address) (*BlockTe
|
|||||||
|
|
||||||
// Calculate the required difficulty for the block. The timestamp
|
// Calculate the required difficulty for the block. The timestamp
|
||||||
// is potentially adjusted to ensure it comes after the median time of
|
// is potentially adjusted to ensure it comes after the median time of
|
||||||
// the last several blocks per the chain consensus rules.
|
// the last several blocks per the DAG consensus rules.
|
||||||
ts := medianAdjustedTime(g.dag.CalcPastMedianTime(), g.timeSource)
|
ts := medianAdjustedTime(g.dag.CalcPastMedianTime(), g.timeSource)
|
||||||
requiredDifficulty := g.dag.NextRequiredDifficulty(ts)
|
requiredDifficulty := g.dag.NextRequiredDifficulty(ts)
|
||||||
|
|
||||||
@ -326,13 +326,13 @@ func (g *BlkTmplGenerator) buildUTXOCommitment(transactions []*wire.MsgTx) (*dag
|
|||||||
|
|
||||||
// UpdateBlockTime updates the timestamp in the header of the passed block to
|
// UpdateBlockTime updates the timestamp in the header of the passed block to
|
||||||
// the current time while taking into account the median time of the last
|
// the current time while taking into account the median time of the last
|
||||||
// several blocks to ensure the new time is after that time per the chain
|
// several blocks to ensure the new time is after that time per the DAG
|
||||||
// consensus rules. Finally, it will update the target difficulty if needed
|
// consensus rules. Finally, it will update the target difficulty if needed
|
||||||
// based on the new time for the test networks since their target difficulty can
|
// based on the new time for the test networks since their target difficulty can
|
||||||
// change based upon time.
|
// change based upon time.
|
||||||
func (g *BlkTmplGenerator) UpdateBlockTime(msgBlock *wire.MsgBlock) error {
|
func (g *BlkTmplGenerator) UpdateBlockTime(msgBlock *wire.MsgBlock) error {
|
||||||
// The new timestamp is potentially adjusted to ensure it comes after
|
// The new timestamp is potentially adjusted to ensure it comes after
|
||||||
// the median time of the last several blocks per the chain consensus
|
// the median time of the last several blocks per the DAG consensus
|
||||||
// rules.
|
// rules.
|
||||||
dagMedianTime := g.dag.CalcPastMedianTime()
|
dagMedianTime := g.dag.CalcPastMedianTime()
|
||||||
newTime := medianAdjustedTime(dagMedianTime, g.timeSource)
|
newTime := medianAdjustedTime(dagMedianTime, g.timeSource)
|
||||||
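The timestamp rule restated in the medianAdjustedTime and UpdateBlockTime hunks above reduces to taking the later of "now" and one second past the DAG's past-median time. The sketch below is an illustration under that reading; the one-second offset follows the surrounding comments, and the real code's MedianTimeSource plumbing is omitted.

package sketch

import "time"

// medianAdjusted returns the later of now and (dagMedianTime + 1s), so a new
// block's timestamp is always at least one second after the median time of
// the last several blocks.
func medianAdjusted(now, dagMedianTime time.Time) time.Time {
    minTime := dagMedianTime.Add(time.Second)
    if now.Before(minTime) {
        return minTime
    }
    return now
}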
|
@ -14,7 +14,7 @@ import (
|
|||||||
|
|
||||||
// blockProgressLogger provides periodic logging for other services in order
|
// blockProgressLogger provides periodic logging for other services in order
|
||||||
// to show users progress of certain "actions" involving some or all current
|
// to show users progress of certain "actions" involving some or all current
|
||||||
// blocks. Ex: syncing to best chain, indexing all blocks, etc.
|
// blocks. Ex: syncing, indexing all blocks, etc.
|
||||||
type blockProgressLogger struct {
|
type blockProgressLogger struct {
|
||||||
receivedLogBlocks int64
|
receivedLogBlocks int64
|
||||||
receivedLogTx int64
|
receivedLogTx int64
|
||||||
|
@ -5,9 +5,9 @@
|
|||||||
/*
|
/*
|
||||||
Package netsync implements a concurrency safe block syncing protocol. The
|
Package netsync implements a concurrency safe block syncing protocol. The
|
||||||
SyncManager communicates with connected peers to perform an initial block
|
SyncManager communicates with connected peers to perform an initial block
|
||||||
download, keep the chain and unconfirmed transaction pool in sync, and announce
|
download, keep the DAG and unconfirmed transaction pool in sync, and announce
|
||||||
new blocks connected to the chain. Currently the sync manager selects a single
|
new blocks connected to the DAG. Currently the sync manager selects a single
|
||||||
sync peer that it downloads all blocks from until it is up to date with the
|
sync peer that it downloads all blocks from until it is up to date with the
|
||||||
longest chain the sync peer is aware of.
|
selected tip of the sync peer.
|
||||||
*/
|
*/
|
||||||
package netsync
|
package netsync
|
||||||
|
@ -28,6 +28,6 @@ type Config struct {
|
|||||||
PeerNotifier PeerNotifier
|
PeerNotifier PeerNotifier
|
||||||
DAG *blockdag.BlockDAG
|
DAG *blockdag.BlockDAG
|
||||||
TxMemPool *mempool.TxPool
|
TxMemPool *mempool.TxPool
|
||||||
ChainParams *dagconfig.Params
|
DAGParams *dagconfig.Params
|
||||||
MaxPeers int
|
MaxPeers int
|
||||||
}
|
}
|
||||||
|
@ -6,11 +6,6 @@ package netsync
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/pkg/errors"
|
|
||||||
"net"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
|
|
||||||
"github.com/kaspanet/kaspad/blockdag"
|
"github.com/kaspanet/kaspad/blockdag"
|
||||||
"github.com/kaspanet/kaspad/dagconfig"
|
"github.com/kaspanet/kaspad/dagconfig"
|
||||||
"github.com/kaspanet/kaspad/database"
|
"github.com/kaspanet/kaspad/database"
|
||||||
@ -19,6 +14,10 @@ import (
|
|||||||
"github.com/kaspanet/kaspad/util"
|
"github.com/kaspanet/kaspad/util"
|
||||||
"github.com/kaspanet/kaspad/util/daghash"
|
"github.com/kaspanet/kaspad/util/daghash"
|
||||||
"github.com/kaspanet/kaspad/wire"
|
"github.com/kaspanet/kaspad/wire"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"net"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@ -92,7 +91,7 @@ type processBlockResponse struct {
|
|||||||
// for requested a block is processed. Note this call differs from blockMsg
|
// for requested a block is processed. Note this call differs from blockMsg
|
||||||
// above in that blockMsg is intended for blocks that came from peers and have
|
// above in that blockMsg is intended for blocks that came from peers and have
|
||||||
// extra handling whereas this message essentially is just a concurrent safe
|
// extra handling whereas this message essentially is just a concurrent safe
|
||||||
// way to call ProcessBlock on the internal block chain instance.
|
// way to call ProcessBlock on the internal block DAG instance.
|
||||||
type processBlockMsg struct {
|
type processBlockMsg struct {
|
||||||
block *util.Block
|
block *util.Block
|
||||||
flags blockdag.BehaviorFlags
|
flags blockdag.BehaviorFlags
|
||||||
@ -132,7 +131,7 @@ type peerSyncState struct {
|
|||||||
// SyncManager is used to communicate block related messages with peers. The
|
// SyncManager is used to communicate block related messages with peers. The
|
||||||
// SyncManager is started by executing Start() in a goroutine. Once started,
|
// SyncManager is started by executing Start() in a goroutine. Once started,
|
||||||
// it selects peers to sync from and starts the initial block download. Once the
|
// it selects peers to sync from and starts the initial block download. Once the
|
||||||
// chain is in sync, the SyncManager handles incoming block and header
|
// DAG is in sync, the SyncManager handles incoming block and header
|
||||||
// notifications and relays announcements of new blocks to peers.
|
// notifications and relays announcements of new blocks to peers.
|
||||||
type SyncManager struct {
|
type SyncManager struct {
|
||||||
peerNotifier PeerNotifier
|
peerNotifier PeerNotifier
|
||||||
@ -155,7 +154,7 @@ type SyncManager struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// startSync will choose the best peer among the available candidate peers to
|
// startSync will choose the best peer among the available candidate peers to
|
||||||
// download/sync the blockchain from. When syncing is already running, it
|
// download/sync the blockDAG from. When syncing is already running, it
|
||||||
// simply returns. It also examines the candidates for any which are no longer
|
// simply returns. It also examines the candidates for any which are no longer
|
||||||
// candidates and removes them as needed.
|
// candidates and removes them as needed.
|
||||||
func (sm *SyncManager) startSync() {
|
func (sm *SyncManager) startSync() {
|
||||||
@ -339,7 +338,7 @@ func (sm *SyncManager) handleTxMsg(tmsg *txMsg) {
|
|||||||
acceptedTxs, err := sm.txMemPool.ProcessTransaction(tmsg.tx,
|
acceptedTxs, err := sm.txMemPool.ProcessTransaction(tmsg.tx,
|
||||||
true, mempool.Tag(peer.ID()))
|
true, mempool.Tag(peer.ID()))
|
||||||
|
|
||||||
// Remove transaction from request maps. Either the mempool/chain
|
// Remove transaction from request maps. Either the mempool/DAG
|
||||||
// already knows about it and as such we shouldn't have any more
|
// already knows about it and as such we shouldn't have any more
|
||||||
// instances of trying to fetch it, or we failed to insert and thus
|
// instances of trying to fetch it, or we failed to insert and thus
|
||||||
// we'll retry next time we get an inv.
|
// we'll retry next time we get an inv.
|
||||||
@ -421,7 +420,7 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) {
|
|||||||
// The regression test intentionally sends some blocks twice
|
// The regression test intentionally sends some blocks twice
|
||||||
// to test duplicate block insertion fails. Don't disconnect
|
// to test duplicate block insertion fails. Don't disconnect
|
||||||
// the peer or ignore the block when we're in regression test
|
// the peer or ignore the block when we're in regression test
|
||||||
// mode in this case so the chain code is actually fed the
|
// mode in this case so the DAG code is actually fed the
|
||||||
// duplicate blocks.
|
// duplicate blocks.
|
||||||
if sm.dagParams != &dagconfig.RegressionNetParams {
|
if sm.dagParams != &dagconfig.RegressionNetParams {
|
||||||
log.Warnf("Got unrequested block %s from %s -- "+
|
log.Warnf("Got unrequested block %s from %s -- "+
|
||||||
@ -549,8 +548,8 @@ func (state *peerSyncState) addInvToRequestQueue(iv *wire.InvVect) {
|
|||||||
// haveInventory returns whether or not the inventory represented by the passed
|
// haveInventory returns whether or not the inventory represented by the passed
|
||||||
// inventory vector is known. This includes checking all of the various places
|
// inventory vector is known. This includes checking all of the various places
|
||||||
// inventory can be when it is in different states such as blocks that are part
|
// inventory can be when it is in different states such as blocks that are part
|
||||||
// of the main chain, on a side chain, in the orphan pool, and transactions that
|
// of the DAG, in the orphan pool, and transactions that are in the memory pool
|
||||||
// are in the memory pool (either the main pool or orphan pool).
|
// (either the main pool or orphan pool).
|
||||||
func (sm *SyncManager) haveInventory(invVect *wire.InvVect) (bool, error) {
|
func (sm *SyncManager) haveInventory(invVect *wire.InvVect) (bool, error) {
|
||||||
switch invVect.Type {
|
switch invVect.Type {
|
||||||
case wire.InvTypeSyncBlock:
|
case wire.InvTypeSyncBlock:
|
||||||
@ -567,7 +566,7 @@ func (sm *SyncManager) haveInventory(invVect *wire.InvVect) (bool, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Check if the transaction exists from the point of view of the
|
// Check if the transaction exists from the point of view of the
|
||||||
// end of the main chain. Note that this is only a best effort
|
// DAG's virtual block. Note that this is only a best effort
|
||||||
// since it is expensive to check existence of every output and
|
// since it is expensive to check existence of every output and
|
||||||
// the only purpose of this check is to avoid downloading
|
// the only purpose of this check is to avoid downloading
|
||||||
// already known transactions. Only the first two outputs are
|
// already known transactions. Only the first two outputs are
|
||||||
@@ -617,7 +616,7 @@ func (sm *SyncManager) handleInvMsg(imsg *invMsg) {

 // Request the advertised inventory if we don't already have it. Also,
 // request parent blocks of orphans if we receive one we already have.
-// Finally, attempt to detect potential stalls due to long side chains
+// Finally, attempt to detect potential stalls due to big orphan DAGs
 // we already have and request more blocks to prevent them.
 for i, iv := range invVects {
 // Ignore unsupported inventory types.

@@ -1087,7 +1086,7 @@ func New(config *Config) (*SyncManager, error) {
 peerNotifier: config.PeerNotifier,
 dag: config.DAG,
 txMemPool: config.TxMemPool,
-dagParams: config.ChainParams,
+dagParams: config.DAGParams,
 rejectedTxns: make(map[daghash.TxID]struct{}),
 requestedTxns: make(map[daghash.TxID]struct{}),
 requestedBlocks: make(map[daghash.Hash]struct{}),
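For callers, the practical effect of this rename is just the field name used when wiring up the sync manager. A minimal sketch, limited to the fields visible in this change set and assuming the package is imported as netsync (s and maxPeers come from the surrounding server code):

// Sketch only: construct the sync manager config with the renamed DAGParams field.
syncManager, err := netsync.New(&netsync.Config{
	PeerNotifier: &s,
	DAG:          s.DAG,
	TxMemPool:    s.TxMemPool,
	DAGParams:    s.DAGParams, // was ChainParams before this change
	MaxPeers:     maxPeers,
})
if err != nil {
	return nil, err
}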
@@ -95,15 +95,6 @@ commands.
 The automatic reconnection can be disabled by setting the DisableAutoReconnect
 flag to true in the connection config when creating the client.

-Minor RPC Server Differences and Chain/Wallet Separation
-
-Some of the commands are extensions specific to a particular RPC server. For
-example, the DebugLevel call is an extension only provided by kaspad.
-Therefore if you call one of these commands against
-an RPC server that doesn't provide them, you will get an unimplemented error
-from the server. An effort has been made to call out which commmands are
-extensions in their documentation.
-
 Errors

 There are 3 categories of errors that will be returned throughout this package:

@@ -1126,10 +1126,6 @@ type ConnConfig struct {
 // ConnectionTimeout is the time it'll take for to try to connect
 // to the RPC server before the connection times out.
 ConnectionTimeout time.Duration
-
-// EnableBCInfoHacks is an option provided to enable compatibility hacks
-// when connecting to blockchain.info RPC server
-EnableBCInfoHacks bool
 }

 // newHTTPClient returns a new http client that is configured according to the

@@ -496,7 +496,7 @@ func (c *Client) GetHeadersAsync(startHash, stopHash *daghash.Hash) FutureGetHea
 }

 // GetHeaders mimics the wire protocol getheaders and headers messages by
-// returning all headers on the main chain after the first known block in the
+// returning all headers in the DAG after the first known block in the
 // locators, up until a block hash matches stopHash.
 func (c *Client) GetHeaders(startHash, stopHash *daghash.Hash) ([]wire.BlockHeader, error) {
 return c.GetHeadersAsync(startHash, stopHash).Receive()
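A hedged usage sketch of the signature shown above, assuming an already-connected *rpcclient.Client named client; the start hash is a placeholder value, and a zero stop hash is used here to mean "no explicit stop":

startHash, err := daghash.NewHashFromStr(
	"000000000000000000000000000000000000000000000000000000000000beef") // placeholder
if err != nil {
	return err
}
headers, err := client.GetHeaders(startHash, &daghash.Hash{}) // zero stop hash
if err != nil {
	return err
}
fmt.Printf("received %d headers after %s\n", len(headers), startHash)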
@@ -385,50 +385,6 @@ func parseRelevantTxAcceptedParams(params []json.RawMessage) (transaction []byte
 return parseHexParam(params[0])
 }

-// parseChainTxNtfnParams parses out the transaction and optional details about
-// the block it's mined in from the parameters of recvtx and redeemingtx
-// notifications.
-func parseChainTxNtfnParams(params []json.RawMessage) (*util.Tx,
-*rpcmodel.BlockDetails, error) {
-
-if len(params) == 0 || len(params) > 2 {
-return nil, nil, wrongNumParams(len(params))
-}
-
-// Unmarshal first parameter as a string.
-var txHex string
-err := json.Unmarshal(params[0], &txHex)
-if err != nil {
-return nil, nil, err
-}
-
-// If present, unmarshal second optional parameter as the block details
-// JSON object.
-var block *rpcmodel.BlockDetails
-if len(params) > 1 {
-err = json.Unmarshal(params[1], &block)
-if err != nil {
-return nil, nil, err
-}
-}
-
-// Hex decode and deserialize the transaction.
-serializedTx, err := hex.DecodeString(txHex)
-if err != nil {
-return nil, nil, err
-}
-var msgTx wire.MsgTx
-err = msgTx.Deserialize(bytes.NewReader(serializedTx))
-if err != nil {
-return nil, nil, err
-}
-
-// TODO: Change recvtx and redeemingtx callback signatures to use
-// nicer types for details about the block (block hash as a
-// daghash.Hash, block time as a time.Time, etc.).
-return util.NewTx(&msgTx), block, nil
-}
-
 // parseTxAcceptedNtfnParams parses out the transaction hash and total amount
 // from the parameters of a txaccepted notification.
 func parseTxAcceptedNtfnParams(params []json.RawMessage) (*daghash.Hash,

@@ -522,10 +478,10 @@ func (c *Client) NotifyBlocksAsync() FutureNotifyBlocksResult {
 }

 // NotifyBlocks registers the client to receive notifications when blocks are
-// connected and disconnected from the main chain. The notifications are
-// delivered to the notification handlers associated with the client. Calling
-// this function has no effect if there are no notification handlers and will
-// result in an error if the client is configured to run in HTTP POST mode.
+// connected to the DAG. The notifications are delivered to the notification
+// handlers associated with the client. Calling this function has no effect
+// if there are no notification handlers and will result in an error if the
+// client is configured to run in HTTP POST mode.
 //
 // The notifications delivered as a result of this call will be via OnBlockAdded
 func (c *Client) NotifyBlocks() error {
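A minimal sketch of enabling these notifications; the OnBlockAdded handler itself must be registered in the client's notification handlers when the client is created, and since its exact callback signature is not shown in this diff it is left out here:

if err := client.NotifyBlocks(); err != nil {
	return fmt.Errorf("failed to register for block-added notifications: %s", err)
}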
@@ -318,7 +318,7 @@ func (c *Client) SearchRawTransactionsAsync(address util.Address, skip, count in

 // SearchRawTransactions returns transactions that involve the passed address.
 //
-// NOTE: Chain servers do not typically provide this capability unless it has
+// NOTE: RPC servers do not typically provide this capability unless it has
 // specifically been enabled.
 //
 // See SearchRawTransactionsVerbose to retrieve a list of data structures with

@@ -372,7 +372,7 @@ func (c *Client) SearchRawTransactionsVerboseAsync(address util.Address, skip,
 // SearchRawTransactionsVerbose returns a list of data structures that describe
 // transactions which involve the passed address.
 //
-// NOTE: Chain servers do not typically provide this capability unless it has
+// NOTE: RPC servers do not typically provide this capability unless it has
 // specifically been enabled.
 //
 // See SearchRawTransactions to retrieve a list of raw transactions instead.

@@ -4,8 +4,8 @@
 ; Data settings
 ; ------------------------------------------------------------------------------

-; The directory to store data such as the block chain and peer addresses. The
-; block chain takes several GB, so this location must have a lot of free space.
+; The directory to store data such as the block DAG and peer addresses. The
+; block DAG takes several GB, so this location must have a lot of free space.
 ; The default is ~/.kaspad/data on POSIX OSes, $LOCALAPPDATA/Kaspad/data on Windows,
 ; ~/Library/Application Support/Kaspad/data on Mac OS, and $home/kaspad/data on
 ; Plan9. Environment variables are expanded so they may be used. NOTE: Windows

@@ -142,7 +142,7 @@ type outboundPeerConnectedMsg struct {
 // updatePeerHeightsMsg is a message sent from the blockmanager to the server
 // after a new block has been accepted. The purpose of the message is to update
 // the heights of peers that were known to announce the block before we
-// connected it to the main chain or recognized it as an orphan. With these
+// connected it to the DAG or recognized it as an orphan. With these
 // updates, peer heights will be kept up to date, allowing for fresh data when
 // selecting sync peer candidacy.
 type updatePeerHeightsMsg struct {

@@ -1660,7 +1660,7 @@ func NewServer(listenAddrs []string, db database.DB, dagParams *dagconfig.Params
 indexManager = indexers.NewManager(indexes)
 }

-// Create a new block chain instance with the appropriate configuration.
+// Create a new block DAG instance with the appropriate configuration.
 var err error
 s.DAG, err = blockdag.New(&blockdag.Config{
 DB: s.db,

@@ -1700,7 +1700,7 @@ func NewServer(listenAddrs []string, db database.DB, dagParams *dagconfig.Params
 PeerNotifier: &s,
 DAG: s.DAG,
 TxMemPool: s.TxMemPool,
-ChainParams: s.DAGParams,
+DAGParams: s.DAGParams,
 MaxPeers: maxPeers,
 })
 if err != nil {

@@ -1822,7 +1822,7 @@ func initListeners(amgr *addrmgr.AddrManager, listenAddrs []string, services wir
 if len(config.ActiveConfig().ExternalIPs) != 0 {
 defaultPort, err := strconv.ParseUint(config.ActiveConfig().NetParams().DefaultPort, 10, 16)
 if err != nil {
-srvrLog.Errorf("Can not parse default port %s for active chain: %s",
+srvrLog.Errorf("Can not parse default port %s for active DAG: %s",
 config.ActiveConfig().NetParams().DefaultPort, err)
 return nil, nil, err
 }

@@ -92,7 +92,7 @@ func createVinList(mtx *wire.MsgTx) []rpcmodel.Vin {

 // createVoutList returns a slice of JSON objects for the outputs of the passed
 // transaction.
-func createVoutList(mtx *wire.MsgTx, chainParams *dagconfig.Params, filterAddrMap map[string]struct{}) []rpcmodel.Vout {
+func createVoutList(mtx *wire.MsgTx, dagParams *dagconfig.Params, filterAddrMap map[string]struct{}) []rpcmodel.Vout {
 voutList := make([]rpcmodel.Vout, 0, len(mtx.TxOut))
 for i, v := range mtx.TxOut {
 // The disassembled string will contain [error] inline if the

@@ -103,7 +103,7 @@ func createVoutList(mtx *wire.MsgTx, chainParams *dagconfig.Params, filterAddrMa
 // couldn't parse and there is no additional information about
 // it anyways.
 scriptClass, addr, _ := txscript.ExtractScriptPubKeyAddress(
-v.ScriptPubKey, chainParams)
+v.ScriptPubKey, dagParams)

 // Encode the addresses while checking if the address passes the
 // filter when needed.

@@ -13,7 +13,7 @@ import (
 func handleGetBlockHeader(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
 c := cmd.(*rpcmodel.GetBlockHeaderCmd)

-// Fetch the header from chain.
+// Fetch the header from DAG.
 hash, err := daghash.NewHashFromStr(c.Hash)
 if err != nil {
 return nil, rpcDecodeHexError(c.Hash)

@@ -213,7 +213,7 @@ func handleGetBlockTemplateRequest(s *Server, request *rpcmodel.TemplateRequest,
 // is not sent until the caller should stop working on the previous block
 // template in favor of the new one. In particular, this is the case when the
 // old block template is no longer valid due to a solution already being found
-// and added to the block chain, or new transactions have shown up and some time
+// and added to the block DAG, or new transactions have shown up and some time
 // has passed without finding a solution.
 func handleGetBlockTemplateLongPoll(s *Server, longPollID string, useCoinbaseValue bool, closeChan <-chan struct{}) (interface{}, error) {
 state := s.gbtWorkState

@@ -353,16 +353,16 @@ func handleGetBlockTemplateProposal(s *Server, request *rpcmodel.TemplateRequest
 }

 log.Infof("Rejected block proposal: %s", err)
-return chainErrToGBTErrString(err), nil
+return dagErrToGBTErrString(err), nil
 }

 return nil, nil
 }

-// chainErrToGBTErrString converts an error returned from kaspa to a string
+// dagErrToGBTErrString converts an error returned from kaspa to a string
 // which matches the reasons and format described in BIP0022 for rejection
 // reasons.
-func chainErrToGBTErrString(err error) string {
+func dagErrToGBTErrString(err error) string {
 // When the passed error is not a RuleError, just return a generic
 // rejected string with the error text.
 ruleErr, ok := err.(blockdag.RuleError)
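The shape of the renamed helper, reduced to the generic fallback that is visible in this hunk; the per-code mapping to specific BIP0022 reason strings is elided, and the ErrorCode field name is an assumption carried over from the btcd lineage rather than something shown here:

func toGBTErrString(err error) string {
	// Not a DAG rule violation: return the generic rejected string with the
	// error text, exactly as the comment above describes.
	ruleErr, ok := err.(blockdag.RuleError)
	if !ok {
		return "rejected: " + err.Error()
	}
	// A full implementation switches on ruleErr.ErrorCode and returns the
	// BIP0022 reason string for that specific rule violation.
	_ = ruleErr
	return "rejected"
}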
@ -447,9 +447,8 @@ func chainErrToGBTErrString(err error) string {
|
|||||||
//
|
//
|
||||||
// This function MUST be called with the state locked.
|
// This function MUST be called with the state locked.
|
||||||
func (state *gbtWorkState) notifyLongPollers(tipHashes []*daghash.Hash, lastGenerated time.Time) {
|
func (state *gbtWorkState) notifyLongPollers(tipHashes []*daghash.Hash, lastGenerated time.Time) {
|
||||||
// Notify anything that is waiting for a block template update from a
|
// Notify anything that is waiting for a block template update from
|
||||||
// hash which is not the hash of the tip of the best chain since their
|
// hashes which are not the current tip hashes.
|
||||||
// work is now invalid.
|
|
||||||
tipHashesStr := daghash.JoinHashesStrings(tipHashes, "")
|
tipHashesStr := daghash.JoinHashesStrings(tipHashes, "")
|
||||||
for hashesStr, channels := range state.notifyMap {
|
for hashesStr, channels := range state.notifyMap {
|
||||||
if hashesStr != tipHashesStr {
|
if hashesStr != tipHashesStr {
|
||||||
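A self-contained sketch of the bookkeeping this hunk touches: long-poll waiters are keyed by the concatenation of the tip hashes their template was built on, and any key that no longer matches the current tips is woken up because its template is stale. Names here are illustrative, not kaspad's.

func notifyStaleLongPollers(notifyMap map[string][]chan struct{}, currentTipsKey string) {
	for tipsKey, waiters := range notifyMap {
		if tipsKey == currentTipsKey {
			// These waiters built their template on the current tips, so
			// their work is still valid; leave them waiting.
			continue
		}
		for _, c := range waiters {
			close(c) // wake the long-poll request so it fetches a fresh template
		}
		delete(notifyMap, tipsKey)
	}
}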
@@ -617,7 +616,7 @@ func (state *gbtWorkState) updateBlockTemplate(s *Server, useCoinbaseValue bool)
 util.CompactToBig(msgBlock.Header.Bits))

 // Get the minimum allowed timestamp for the block based on the
-// median timestamp of the last several blocks per the chain
+// median timestamp of the last several blocks per the DAG
 // consensus rules.
 minTimestamp := mining.MinimumMedianTime(s.cfg.DAG.CalcPastMedianTime())

@@ -677,7 +676,7 @@ func (state *gbtWorkState) updateBlockTemplate(s *Server, useCoinbaseValue bool)

 // Update the time of the block template to the current time
 // while accounting for the median time of the past several
-// blocks per the chain consensus rules.
+// blocks per the DAG consensus rules.
 generator.UpdateBlockTime(msgBlock)
 msgBlock.Header.Nonce = 0

@@ -37,7 +37,7 @@ func handleGetRawTransaction(s *Server, cmd interface{}, closeChan <-chan struct
 return nil, &rpcmodel.RPCError{
 Code: rpcmodel.ErrRPCNoTxInfo,
 Message: "The transaction index must be " +
-"enabled to query the blockchain " +
+"enabled to query the blockDAG " +
 "(specify --txindex)",
 }
 }

@@ -90,7 +90,7 @@ func handleGetRawTransaction(s *Server, cmd interface{}, closeChan <-chan struct
 var blkHeader *wire.BlockHeader
 var blkHashStr string
 if blkHash != nil {
-// Fetch the header from chain.
+// Fetch the header from DAG.
 header, err := s.cfg.DAG.HeaderByHash(blkHash)
 if err != nil {
 context := "Failed to fetch block header"

@@ -70,7 +70,7 @@ func handleGetTxOut(s *Server, cmd interface{}, closeChan <-chan struct{}) (inte

 // To match the behavior of the reference client, return nil
 // (JSON null) if the transaction output is spent by another
-// transaction already in the main chain. Mined transactions
+// transaction already in the DAG. Mined transactions
 // that are spent by a mempool transaction are not affected by
 // this.
 if entry == nil {

@@ -250,7 +250,7 @@ func handleSearchRawTransactions(s *Server, cmd interface{}, closeChan <-chan st
 var blkHeader *wire.BlockHeader
 var blkHashStr string
 if blkHash := rtx.blkHash; blkHash != nil {
-// Fetch the header from chain.
+// Fetch the header from DAG.
 header, err := s.cfg.DAG.HeaderByHash(blkHash)
 if err != nil {
 return nil, &rpcmodel.RPCError{

@@ -288,7 +288,7 @@ func handleSearchRawTransactions(s *Server, cmd interface{}, closeChan <-chan st

 // createVinListPrevOut returns a slice of JSON objects for the inputs of the
 // passed transaction.
-func createVinListPrevOut(s *Server, mtx *wire.MsgTx, chainParams *dagconfig.Params, vinExtra bool, filterAddrMap map[string]struct{}) ([]rpcmodel.VinPrevOut, error) {
+func createVinListPrevOut(s *Server, mtx *wire.MsgTx, dagParams *dagconfig.Params, vinExtra bool, filterAddrMap map[string]struct{}) ([]rpcmodel.VinPrevOut, error) {
 // Use a dynamically sized list to accommodate the address filter.
 vinList := make([]rpcmodel.VinPrevOut, 0, len(mtx.TxIn))

@@ -345,7 +345,7 @@ func createVinListPrevOut(s *Server, mtx *wire.MsgTx, chainParams *dagconfig.Par
 // couldn't parse and there is no additional information about
 // it anyways.
 _, addr, _ := txscript.ExtractScriptPubKeyAddress(
-originTxOut.ScriptPubKey, chainParams)
+originTxOut.ScriptPubKey, dagParams)

 var encodedAddr *string
 if addr != nil {

@@ -234,7 +234,7 @@ type rpcSyncMgr struct {
 // Ensure rpcSyncMgr implements the rpcserverSyncManager interface.
 var _ rpcserverSyncManager = (*rpcSyncMgr)(nil)

-// IsCurrent returns whether or not the sync manager believes the chain is
+// IsCurrent returns whether or not the sync manager believes the DAG is
 // current as compared to the rest of the network.
 //
 // This function is safe for concurrent access and is part of the

@@ -163,7 +163,7 @@ func handleUnimplemented(s *Server, cmd interface{}, closeChan <-chan struct{})
 return nil, ErrRPCUnimplemented
 }

-// Server provides a concurrent safe RPC server to a chain server.
+// Server provides a concurrent safe RPC server to a kaspa node.
 type Server struct {
 started int32
 shutdown int32

@@ -726,7 +726,7 @@ type rpcserverConnManager interface {
 // The interface contract requires that all of these methods are safe for
 // concurrent access.
 type rpcserverSyncManager interface {
-// IsCurrent returns whether or not the sync manager believes the chain
+// IsCurrent returns whether or not the sync manager believes the DAG
 // is current as compared to the rest of the network.
 IsCurrent() bool

@@ -770,7 +770,7 @@ type rpcserverConfig struct {
 SyncMgr rpcserverSyncManager

 // These fields allow the RPC server to interface with the local block
-// chain data and state.
+// DAG data and state.
 TimeSource blockdag.MedianTimeSource
 DAG *blockdag.BlockDAG
 DAGParams *dagconfig.Params

@@ -194,15 +194,15 @@ var helpDescsEnUS = map[string]string{

 // GetBlockDAGInfoResult help.
 "getBlockDagInfoResult-dag": "The name of the DAG the daemon is on (testnet, mainnet, etc)",
-"getBlockDagInfoResult-blocks": "The number of blocks in the best known chain",
-"getBlockDagInfoResult-headers": "The number of headers that we've gathered for in the best known chain",
+"getBlockDagInfoResult-blocks": "The number of blocks in the DAG",
+"getBlockDagInfoResult-headers": "The number of headers that we've gathered for in the DAG",
 "getBlockDagInfoResult-tipHashes": "The block hashes for the tips in the DAG",
-"getBlockDagInfoResult-difficulty": "The current chain difficulty",
-"getBlockDagInfoResult-medianTime": "The median time from the PoV of the best block in the chain",
+"getBlockDagInfoResult-difficulty": "The current DAG difficulty",
+"getBlockDagInfoResult-medianTime": "The median time from the PoV of the selected tip in the DAG",
 "getBlockDagInfoResult-utxoCommitment": "Commitment to the dag's UTXOSet",
-"getBlockDagInfoResult-verificationProgress": "An estimate for how much of the best chain we've verified",
+"getBlockDagInfoResult-verificationProgress": "An estimate for how much of the DAG we've verified",
 "getBlockDagInfoResult-pruned": "A bool that indicates if the node is pruned or not",
-"getBlockDagInfoResult-pruneHeight": "The lowest block retained in the current pruned chain",
+"getBlockDagInfoResult-pruneHeight": "The lowest block retained in the current pruned DAG",
 "getBlockDagInfoResult-dagWork": "The total cumulative work in the DAG",
 "getBlockDagInfoResult-softForks": "The status of the super-majority soft-forks",
 "getBlockDagInfoResult-bip9SoftForks": "JSON object describing active BIP0009 deployments",

@@ -257,7 +257,7 @@ var helpDescsEnUS = map[string]string{
 "getBlockVerboseResult-confirmations": "The number of confirmations",
 "getBlockVerboseResult-size": "The size of the block",
 "getBlockVerboseResult-mass": "The mass of the block",
-"getBlockVerboseResult-height": "The height of the block in the block chain",
+"getBlockVerboseResult-height": "The height of the block in the block DAG",
 "getBlockVerboseResult-version": "The block version",
 "getBlockVerboseResult-versionHex": "The block version in hexadecimal",
 "getBlockVerboseResult-hashMerkleRoot": "Merkle tree reference to hash of all transactions for the block",

@@ -275,7 +275,7 @@ var helpDescsEnUS = map[string]string{
 "getBlockVerboseResult-nextHashes": "The hashes of the next blocks (only if there are any)",

 // GetBlockCountCmd help.
-"getBlockCount--synopsis": "Returns the number of blocks in the longest block chain.",
+"getBlockCount--synopsis": "Returns the number of blocks in the block DAG.",
 "getBlockCount--result0": "The current block count",

 // GetBlockHeaderCmd help.

@@ -289,7 +289,7 @@ var helpDescsEnUS = map[string]string{
 // GetBlockHeaderVerboseResult help.
 "getBlockHeaderVerboseResult-hash": "The hash of the block (same as provided)",
 "getBlockHeaderVerboseResult-confirmations": "The number of confirmations",
-"getBlockHeaderVerboseResult-height": "The height of the block in the block chain",
+"getBlockHeaderVerboseResult-height": "The height of the block in the block DAG",
 "getBlockHeaderVerboseResult-version": "The block version",
 "getBlockHeaderVerboseResult-versionHex": "The block version in hexadecimal",
 "getBlockHeaderVerboseResult-hashMerkleRoot": "Merkle tree reference to hash of all transactions for the block",

@@ -593,10 +593,10 @@ var helpDescsEnUS = map[string]string{
 "sessionResult-sessionId": "The unique session ID for a client's websocket connection.",

 // NotifyBlocksCmd help.
-"notifyBlocks--synopsis": "Request notifications for whenever a block is connected or disconnected from the main (best) chain.",
+"notifyBlocks--synopsis": "Request notifications for whenever a block is connected to the DAG.",

 // StopNotifyBlocksCmd help.
-"stopNotifyBlocks--synopsis": "Cancel registered notifications for whenever a block is connected or disconnected from the main (best) chain.",
+"stopNotifyBlocks--synopsis": "Cancel registered notifications for whenever a block is connected to the DAG.",

 // NotifyChainChangesCmd help.
 "notifyChainChanges--synopsis": "Request notifications for whenever the selected parent chain changes.",

@@ -1149,7 +1149,7 @@ func opcodeCheckLockTimeVerify(op *parsedOpcode, vm *Engine) error {
 // The lock time feature can also be disabled, thereby bypassing
 // OP_CHECKLOCKTIMEVERIFY, if every transaction input has been finalized by
 // setting its sequence to the maximum value (wire.MaxTxInSequenceNum). This
-// condition would result in the transaction being allowed into the blockchain
+// condition would result in the transaction being allowed into the blockDAG
 // making the opcode ineffective.
 //
 // This condition is prevented by enforcing that the input being used by
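The guard that comment describes boils down to refusing to treat the lock time as satisfiable when the spending input has been finalized. A hedged fragment, where txIn is the input being validated and the plain error stands in for the engine's own script error type:

if txIn.Sequence == wire.MaxTxInSequenceNum {
	// A finalized input would disable the lock-time check entirely, which
	// would let the transaction into the block DAG and defeat the opcode.
	return errors.New("transaction input is finalized")
}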
@@ -55,12 +55,12 @@ func SignatureScript(tx *wire.MsgTx, idx int, script []byte, hashType SigHashTyp
 return NewScriptBuilder().AddData(sig).AddData(pkData).Script()
 }

-func sign(chainParams *dagconfig.Params, tx *wire.MsgTx, idx int,
+func sign(dagParams *dagconfig.Params, tx *wire.MsgTx, idx int,
 script []byte, hashType SigHashType, kdb KeyDB, sdb ScriptDB) ([]byte,
 ScriptClass, util.Address, error) {

 class, address, err := ExtractScriptPubKeyAddress(script,
-chainParams)
+dagParams)
 if err != nil {
 return nil, NonStandardTy, nil, err
 }

@@ -98,7 +98,7 @@ func sign(chainParams *dagconfig.Params, tx *wire.MsgTx, idx int,
 // The return value is the best effort merging of the two scripts. Calling this
 // function with addresses, class and nrequired that do not match scriptPubKey is
 // an error and results in undefined behaviour.
-func mergeScripts(chainParams *dagconfig.Params, tx *wire.MsgTx, idx int,
+func mergeScripts(dagParams *dagconfig.Params, tx *wire.MsgTx, idx int,
 class ScriptClass, sigScript, prevScript []byte) ([]byte, error) {

 // TODO: the scripthash and multisig paths here are overly

@@ -124,14 +124,14 @@ func mergeScripts(chainParams *dagconfig.Params, tx *wire.MsgTx, idx int,

 // We already know this information somewhere up the stack.
 class, _, _ :=
-ExtractScriptPubKeyAddress(script, chainParams)
+ExtractScriptPubKeyAddress(script, dagParams)

 // regenerate scripts.
 sigScript, _ := unparseScript(sigPops)
 prevScript, _ := unparseScript(prevPops)

 // Merge
-mergedScript, err := mergeScripts(chainParams, tx, idx, class, sigScript, prevScript)
+mergedScript, err := mergeScripts(dagParams, tx, idx, class, sigScript, prevScript)
 if err != nil {
 return nil, err
 }

@@ -192,11 +192,11 @@ func (sc ScriptClosure) GetScript(address util.Address) ([]byte, error) {
 // getScript. If previousScript is provided then the results in previousScript
 // will be merged in a type-dependent manner with the newly generated.
 // signature script.
-func SignTxOutput(chainParams *dagconfig.Params, tx *wire.MsgTx, idx int,
+func SignTxOutput(dagParams *dagconfig.Params, tx *wire.MsgTx, idx int,
 scriptPubKey []byte, hashType SigHashType, kdb KeyDB, sdb ScriptDB,
 previousScript []byte) ([]byte, error) {

-sigScript, class, _, err := sign(chainParams, tx,
+sigScript, class, _, err := sign(dagParams, tx,
 idx, scriptPubKey, hashType, kdb, sdb)
 if err != nil {
 return nil, err

@@ -204,7 +204,7 @@ func SignTxOutput(chainParams *dagconfig.Params, tx *wire.MsgTx, idx int,

 if class == ScriptHashTy {
 // TODO keep the sub addressed and pass down to merge.
-realSigScript, _, _, err := sign(chainParams, tx, idx,
+realSigScript, _, _, err := sign(dagParams, tx, idx,
 sigScript, hashType, kdb, sdb)
 if err != nil {
 return nil, err

@@ -220,5 +220,5 @@ func SignTxOutput(chainParams *dagconfig.Params, tx *wire.MsgTx, idx int,
 }

 // Merge scripts. with any previous data, if any.
-return mergeScripts(chainParams, tx, idx, class, sigScript, previousScript)
+return mergeScripts(dagParams, tx, idx, class, sigScript, previousScript)
 }
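A hedged usage sketch of SignTxOutput after the parameter rename. KeyClosure is assumed to exist as the KeyDB adapter matching the ScriptClosure type visible in this diff, and lookupKey/lookupScript are hypothetical callbacks supplied by the caller:

sigScript, err := txscript.SignTxOutput(
	&dagconfig.MainNetParams,             // dagParams (previously chainParams)
	redeemTx, 0,                          // transaction being signed and input index
	prevScriptPubKey,                     // the script of the output being spent
	txscript.SigHashAll,
	txscript.KeyClosure(lookupKey),       // assumed KeyDB adapter
	txscript.ScriptClosure(lookupScript), // ScriptDB adapter seen above
	nil,                                  // no previous signature script to merge
)
if err != nil {
	return err
}
redeemTx.TxIn[0].SignatureScript = sigScript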
@@ -869,7 +869,7 @@ var sigScriptTests = []tstSigScript{
 // Test the sigscript generation for valid and invalid inputs, all
 // hashTypes, and with and without compression. This test creates
 // sigscripts to spend fake coinbase inputs, as sigscripts cannot be
-// created for the MsgTxs in txTests, since they come from the blockchain
+// created for the MsgTxs in txTests, since they come from the blockDAG
 // and we don't have the private keys.
 func TestSignatureScript(t *testing.T) {
 t.Parallel()

@@ -275,7 +275,7 @@ func PushedData(script []byte) ([][]byte, error) {
 // ExtractScriptPubKeyAddress returns the type of script and its addresses.
 // Note that it only works for 'standard' transaction script types. Any data such
 // as public keys which are invalid will return a nil address.
-func ExtractScriptPubKeyAddress(scriptPubKey []byte, chainParams *dagconfig.Params) (ScriptClass, util.Address, error) {
+func ExtractScriptPubKeyAddress(scriptPubKey []byte, dagParams *dagconfig.Params) (ScriptClass, util.Address, error) {
 // No valid address if the script doesn't parse.
 pops, err := parseScript(scriptPubKey)
 if err != nil {

@@ -290,7 +290,7 @@ func ExtractScriptPubKeyAddress(scriptPubKey []byte, chainParams *dagconfig.Para
 // Therefore the pubkey hash is the 3rd item on the stack.
 // If the pubkey hash is invalid for some reason, return a nil address.
 addr, err := util.NewAddressPubKeyHash(pops[2].data,
-chainParams.Prefix)
+dagParams.Prefix)
 if err != nil {
 return scriptClass, nil, nil
 }

@@ -302,7 +302,7 @@ func ExtractScriptPubKeyAddress(scriptPubKey []byte, chainParams *dagconfig.Para
 // Therefore the script hash is the 2nd item on the stack.
 // If the script hash ss invalid for some reason, return a nil address.
 addr, err := util.NewAddressScriptHashFromHash(pops[1].data,
-chainParams.Prefix)
+dagParams.Prefix)
 if err != nil {
 return scriptClass, nil, nil
 }
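Hedged usage of the renamed function, assuming scriptPubKey holds a standard script and that dagconfig.MainNetParams (referenced elsewhere in this change set) is the active network:

class, addr, err := txscript.ExtractScriptPubKeyAddress(scriptPubKey, &dagconfig.MainNetParams)
if err != nil {
	return err // the script did not parse at all
}
if addr == nil {
	// Standard class but no recoverable address (e.g. an invalid pubkey hash).
	return nil
}
fmt.Printf("script class %v pays to %s\n", class, addr.EncodeAddress())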
@@ -210,8 +210,6 @@ func TestCalcScriptInfo(t *testing.T) {
 },
 },
 {
-// from 567a53d1ce19ce3d07711885168484439965501536d0d0294c5d46d46c10e53b
-// from the blockchain.
 name: "p2sh nonstandard script",
 sigScript: "1 81 DATA_8 2DUP EQUAL NOT VERIFY ABS " +
 "SWAP ABS EQUAL",

@@ -19,7 +19,7 @@ var (
 // ErrUnknownAddressType describes an error where an address can not
 // decoded as a specific address type due to the string encoding
 // begining with an identifier byte unknown to any standard or
-// registered (via chaincfg.Register) network.
+// registered (via dagconfig.Register) network.
 ErrUnknownAddressType = errors.New("unknown address type")
 )

@@ -301,7 +301,7 @@ func TestBlockErrors(t *testing.T) {
 }
 }

-// Block100000 defines block 100,000 of the block chain. It is used to
+// Block100000 defines block 100,000 of the block DAG. It is used to
 // test Block operations.
 var Block100000 = wire.MsgBlock{
 Header: wire.BlockHeader{

@@ -126,12 +126,11 @@ func BigToCompact(n *big.Int) uint32 {
 // the difficulty for generating a block by decreasing the value which the
 // generated hash must be less than. This difficulty target is stored in each
 // block header using a compact representation as described in the documentation
-// for CompactToBig. The main chain is selected by choosing the chain that has
-// the most proof of work (highest difficulty). Since a lower target difficulty
-// value equates to higher actual difficulty, the work value which will be
-// accumulated must be the inverse of the difficulty. Also, in order to avoid
-// potential division by zero and really small floating point numbers, the
-// result adds 1 to the denominator and multiplies the numerator by 2^256.
+// for CompactToBig. Since a lower target difficulty value equates to higher
+// actual difficulty, the work value which will be accumulated must be the
+// inverse of the difficulty. Also, in order to avoid potential division by
+// zero and really small floating point numbers, the result adds 1 to the
+// denominator and multiplies the numerator by 2^256.
 func CalcWork(bits uint32) *big.Int {
 // Return a work value of zero if the passed difficulty bits represent
 // a negative number. Note this should not happen in practice with valid
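The formula that comment states can be written out directly with math/big. This restates CalcWork's arithmetic for a ready-expanded target rather than compact bits:

// work = 2^256 / (target + 1). Adding 1 to the denominator avoids a division
// by zero for a zero target, as the comment above explains.
func workForTarget(target *big.Int) *big.Int {
	numerator := new(big.Int).Lsh(big.NewInt(1), 256) // 2^256
	denominator := new(big.Int).Add(target, big.NewInt(1))
	return new(big.Int).Div(numerator, denominator)
}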
@@ -13,7 +13,7 @@ import (
 "testing"
 )

-// mainNetGenesisHash is the hash of the first block in the block chain for the
+// mainNetGenesisHash is the hash of the first block in the block DAG for the
 // main network (genesis block).
 var mainNetGenesisHash = Hash([HashSize]byte{
 0xdc, 0x5f, 0x5b, 0x5b, 0x1d, 0xc2, 0xa7, 0x25,

@@ -35,7 +35,7 @@ To decode/encode an address:
 addrString := "04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962" +
 "e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d57" +
 "8a4c702b6bf11d5f"
-defaultNet := &chaincfg.MainNetParams
+defaultNet := &dagconfig.MainNetParams
 addr, err := util.DecodeAddress(addrString, defaultNet)
 if err != nil {
 fmt.Println(err)

@@ -80,7 +80,6 @@ func ExampleAmount_unitConversions() {
 // which represent the target difficulty to a big integer and display it using
 // the typical hex notation.
 func ExampleCompactToBig() {
-// Convert the bits from block 300000 in the main block chain.
 bits := uint32(419465580)
 targetDifficulty := util.CompactToBig(bits)

@@ -94,8 +93,8 @@ func ExampleCompactToBig() {
 // This example demonstrates how to convert a target difficulty into the compact
 // "bits" in a block header which represent that target difficulty .
 func ExampleBigToCompact() {
-// Convert the target difficulty from block 300000 in the main block
-// chain to compact form.
+// Convert the target difficulty from block 300000 in the bitcoin
+// main chain to compact form.
 t := "0000000000000000896c00000000000000000000000000000000000000000000"
 targetDifficulty, success := new(big.Int).SetString(t, 16)
 if !success {

@@ -311,8 +311,6 @@ func BenchmarkDeserializeTxSmall(b *testing.B) {
 // BenchmarkDeserializeTxLarge performs a benchmark on how long it takes to
 // deserialize a very large transaction.
 func BenchmarkDeserializeTxLarge(b *testing.B) {
-// tx bb41a757f405890fb0f5856228e23b715702d714d59bf2b1feb70d8b2b4e3e08
-// from the main block chain.
 fi, err := os.Open("testdata/megatx.bin.bz2")
 if err != nil {
 b.Fatalf("Failed to read transaction data: %v", err)

@@ -15,7 +15,7 @@ import (
 "github.com/kaspanet/kaspad/util/daghash"
 )

-// mainNetGenesisHash is the hash of the first block in the block chain for the
+// mainNetGenesisHash is the hash of the first block in the block DAG for the
 // main network (genesis block).
 var mainNetGenesisHash = &daghash.Hash{
 0xdc, 0x5f, 0x5b, 0x5b, 0x1d, 0xc2, 0xa7, 0x25,

@@ -24,7 +24,7 @@ var mainNetGenesisHash = &daghash.Hash{
 0x8c, 0xfd, 0x9f, 0x69, 0xdd, 0xcf, 0xbb, 0x63,
 }

-// simNetGenesisHash is the hash of the first block in the block chain for the
+// simNetGenesisHash is the hash of the first block in the block DAG for the
 // simulation test network.
 var simNetGenesisHash = &daghash.Hash{
 0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a,

@@ -483,7 +483,7 @@ func TestBlockSerializeSize(t *testing.T) {
 // Block with no transactions.
 {noTxBlock, 186},

-// First block in the mainnet block chain.
+// First block in the mainnet block DAG.
 {&blockOne, len(blockOneBytes)},
 }

@@ -499,7 +499,7 @@ func TestBlockSerializeSize(t *testing.T) {
 }
 }

-// blockOne is the first block in the mainnet block chain.
+// blockOne is the first block in the mainnet block DAG.
 var blockOne = MsgBlock{
 Header: BlockHeader{
 Version: 1,

@@ -12,8 +12,8 @@ import (
 const MaxBlockLocatorsPerMsg = 500

 // MsgBlockLocator implements the Message interface and represents a kaspa
-// locator message. It is used to find the highest known chain block with
-// a peer that is syncing with you.
+// locator message. It is used to find the blockLocator of a peer that is
+// syncing with you.
 type MsgBlockLocator struct {
 BlockLocatorHashes []*daghash.Hash
 }
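For orientation, a locator is simply a compact list of hashes the sender already has, ordered from its tip back toward genesis, so the receiving peer can identify the most recent block both sides share. The sketch below uses the classic bitcoin-style spacing (dense for the latest ten entries, then doubling steps); whether kaspad builds its locators with exactly this spacing is not shown in this diff, only the 500-hash cap is.

// hashesTipToGenesis is assumed to be ordered from the sender's tip (index 0)
// back to genesis (last index). Illustrative only.
func buildLocator(hashesTipToGenesis []string) []string {
	if len(hashesTipToGenesis) == 0 {
		return nil
	}
	locator := make([]string, 0, 32)
	step := 1
	for i := 0; i < len(hashesTipToGenesis) && len(locator) < 500; i += step {
		locator = append(locator, hashesTipToGenesis[i])
		if len(locator) > 10 {
			step *= 2 // space out older entries exponentially
		}
	}
	// Always end with genesis so a common block is guaranteed.
	genesis := hashesTipToGenesis[len(hashesTipToGenesis)-1]
	if locator[len(locator)-1] != genesis {
		locator = append(locator, genesis)
	}
	return locator
}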
@@ -318,7 +318,7 @@ func TestMerkleBlockOverflowErrors(t *testing.T) {
 }
 }

-// merkleBlockOne is a merkle block created from block one of the block chain
+// merkleBlockOne is a merkle block created from block one of the block DAG
 // where the first transaction matches.
 var merkleBlockOne = MsgMerkleBlock{
 Header: BlockHeader{

@@ -349,7 +349,7 @@ var merkleBlockOne = MsgMerkleBlock{
 }

 // merkleBlockOneBytes is the serialized bytes for a merkle block created from
-// block one of the block chain where the first transaction matches.
+// block one of the block DAG where the first transaction matches.
 var merkleBlockOneBytes = []byte{
 0x01, 0x00, 0x00, 0x00, // Version 1
 0x02, // NumParentBlocks