Mirror of https://github.com/kaspanet/kaspad.git, synced 2025-03-30 15:08:33 +00:00
[NOD-1518] Fix genesis block insertion and integration tests (#1013)
* Revert "[NOD-1500] Delete integration tests" This reverts commit fcb57a206690a884fa6afb69d5d493282954a8bf.
* [NOD-1518] hashserialization -> consenusserialization
* [NOD-1518] Fix add genesis to virtual
* [NOD-1518] Fix a bug in SerializeCoinbasePayload.
* [NOD-1518] Fix a loop error and make pastMedianTime behave correctly everywhere on genesis.
* [NOD-1518] Fix another bug and an infinite loop.
* [NOD-1518] Fix uninitialized slice.
* [NOD-1518] Fix bad should-commit checks and another infinite loop.
* [NOD-1518] Fix nil serialization.
* [NOD-1518] Rename blockHash to currentBlockHash.
* [NOD-1518] Move the check whether stagedVirtualUTXOSet != nil to the top of commitVirtualUTXODiff.
* [NOD-1518] Simplify utxoDiffStore.Commit.
* [NOD-1518] Unextract resolveBlockStatusAndCheckFinality.
* [NOD-1518] Move no-transactions logic into CalculateIDMerkleRoot.
* [NOD-1518] Remove redundant is-staged check.
* [NOD-1518] Fix merge errors.
* [NOD-1518] Don't write anything if utxoDiffChild is nil.
* [NOD-1518] Stage virtualAcceptanceData and virtualMultiset.
* [NOD-1518] Fix bugs in getBlockTemplate and submitBlock.
* [NOD-1518] Fix bad validation order in validateHeaderInContext.
* [NOD-1518] Fix bug in Next().
* [NOD-1518] Fix nil dereference of subnetworks in AddressCache.
* [NOD-1518] Fix multisetStore.Get returning a pointer to a multiset that is changed in place.
* [NOD-1518] Break on genesis in countSubtrees.
* [NOD-1518] Fix createBlockLocator.
* [NOD-1518] Fix MsgTxToDomainTransaction.
* [NOD-1518] Set MaxTxVersion to 1.
* [NOD-1518] Fix missing error handling, bug in MsgTxToDomainTransaction, and bad subnetwork equality check.
* [NOD-1518] Fix bug in hasUTXOByOutpointFromStagedVirtualUTXODiff.
* [NOD-1518] Remove irrelevant comments.
* [NOD-1518] Generate transactions with sufficient fee in tx_relay_test.
* [NOD-1518] Fix broken RPC handlers.
* [NOD-1518] Fix merge errors.
* [NOD-1518] Fix bad exists check in restorePastUTXO and missing genesis check in CalculatePastUTXOAndAcceptanceData.
* [NOD-1518] Add a comment.
* [NOD-1518] Use a regular mutex instead of a read-write mutex in consensus to avoid dealing with sneaky not-actually-read functions.
* [NOD-1518] Fix a deadlock in GetVirtualSelectedParent.
* [NOD-1518] Fix missing handler registration for CmdHeader.
* [NOD-1518] Fix processHeader calling OnNewBlock and LogBlock. Also fix conversion errors in IBDRootUTXOSetAndBlock.
* [NOD-1518] Fix bad Command() in MsgIBDRootUTXOSetAndBlock.
* [NOD-1518] Fix bad SyncStateMissingUTXOSet logic in resolveSyncState.
* [NOD-1518] Rename mode to syncState.
* [NOD-1518] Fix headers-only blocks coming in after the consensus thinks it's synced.
* [NOD-1518] Fix selectedChildIterator.Next not ignoring virtual, infinite loop in HashSet.Length().
* [NOD-1518] Fix not-properly wrapped IBD blocks.
* [NOD-1518] Fix bad conversion in RequestIBDBlocks.
* [NOD-1518] Fix bad string for CmdRequestHeaders.
* [NOD-1518] Fix bad string for CmdDoneHeaders.
* [NOD-1518] Fix bad Command() for MsgIBDRootNotFound.
* [NOD-1518] Fix bad areHeaderTipsSyncedMaxTimeDifference value.
* [NOD-1518] Add missing string for CmdRequestIBDBlocks.
* [NOD-1518] Fix bad check for SyncStateMissingBlockBodies.
* [NOD-1518] Fix bad timeout durations in tests.
* [NOD-1518] Fix IBD blocks not calling OnNewBlock.
* [NOD-1518] Change when IBD finishes.
* [NOD-1518] Properly clone utxoDiffChild.
* [NOD-1518] Fix merge errors.
* [NOD-1518] Move call to LogBlock to into OnNewBlock.
* [NOD-1518] Return "not implemented" in unimplemented RPC handlers.
* [NOD-1518] Extract cloning of hashes to a method over DomainHash.
* [NOD-1518] Use isHeaderOnlyBlock.
* [NOD-1518] Use constants.TransactionVersion.
* [NOD-1518] Break immediately if we reached the virtual in SelectedChildIterator.
* [NOD-1518] Don't stage nil utxoDiffChild.
* [NOD-1518] Properly check the genesis hash in CalculatePastUTXOAndAcceptanceData.
* [NOD-1518] Explain why we break on current == nil in countSubtrees.
* [NOD-1518] Add a comment explaining why we check against StatusValid in resolveSyncState.

Co-authored-by: Mike Zak <feanorr@gmail.com>
Co-authored-by: Ori Newman <orinewman1@gmail.com>
This commit is contained in:
parent 7a7821e1c8
commit eef5e3768c
@@ -114,6 +114,12 @@ func MsgTxToDomainTransaction(msgTx *MsgTx) *externalapi.DomainTransaction {
for _, txOut := range msgTx.TxOut {
transactionOutputs = append(transactionOutputs, txOutToDomainTransactionOutput(txOut))
}

payload := make([]byte, 0)
if msgTx.Payload != nil {
payload = msgTx.Payload
}

return &externalapi.DomainTransaction{
Version: msgTx.Version,
Inputs: transactionInputs,

@@ -122,7 +128,7 @@ func MsgTxToDomainTransaction(msgTx *MsgTx) *externalapi.DomainTransaction {
SubnetworkID: msgTx.SubnetworkID,
Gas: msgTx.Gas,
PayloadHash: msgTx.PayloadHash,
Payload: msgTx.Payload,
Payload: payload,
}
}
@@ -116,7 +116,7 @@ var ProtocolMessageCommandToString = map[MessageCommand]string{
CmdVerAck: "VerAck",
CmdRequestAddresses: "RequestAddresses",
CmdAddresses: "Addresses",
CmdRequestHeaders: "RequestBlocks",
CmdRequestHeaders: "RequestHeaders",
CmdBlock: "Block",
CmdTx: "Tx",
CmdPing: "Ping",

@@ -130,13 +130,14 @@ var ProtocolMessageCommandToString = map[MessageCommand]string{
CmdInvTransaction: "InvTransaction",
CmdRequestTransactions: "RequestTransactions",
CmdIBDBlock: "IBDBlock",
CmdDoneHeaders: "DoneIBDBlocks",
CmdDoneHeaders: "DoneHeaders",
CmdTransactionNotFound: "TransactionNotFound",
CmdReject: "Reject",
CmdHeader: "Header",
CmdRequestNextHeaders: "RequestNextHeaders",
CmdRequestIBDRootUTXOSetAndBlock: "RequestPruningUTXOSetAndBlock",
CmdIBDRootUTXOSetAndBlock: "IBDRootUTXOSetAndBlock",
CmdRequestIBDBlocks: "RequestIBDBlocks",
CmdIBDRootNotFound: "IBDRootNotFound",
}
@@ -12,7 +12,7 @@ type MsgIBDRootNotFound struct {
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgIBDRootNotFound) Command() MessageCommand {
return CmdDoneHeaders
return CmdIBDRootNotFound
}

// NewMsgIBDRootNotFound returns a new kaspa IBDRootNotFound message that conforms to the

@@ -11,7 +11,7 @@ type MsgIBDRootUTXOSetAndBlock struct {
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgIBDRootUTXOSetAndBlock) Command() MessageCommand {
return CmdRequestIBDRootUTXOSetAndBlock
return CmdIBDRootUTXOSetAndBlock
}

// NewMsgIBDRootUTXOSetAndBlock returns a new MsgIBDRootUTXOSetAndBlock.
@@ -1,6 +1,7 @@
package flowcontext

import (
"github.com/kaspanet/kaspad/app/protocol/blocklogger"
"sync/atomic"

"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

@@ -14,6 +15,11 @@ import (
// relays newly unorphaned transactions and possibly rebroadcast
// manually added transactions when not in IBD.
func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock) error {
err := blocklogger.LogBlock(block)
if err != nil {
return err
}

f.Domain().MiningManager().HandleNewBlockTransactions(block.Transactions)

if f.onBlockAddedToDAGHandler != nil {
@@ -2,7 +2,6 @@ package blockrelay

import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/blocklogger"
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"

@@ -93,7 +92,6 @@ func (flow *handleRelayInvsFlow) start() error {
}

func (flow *handleRelayInvsFlow) readInv() (*appmessage.MsgInvRelayBlock, error) {

if len(flow.invsQueue) > 0 {
var inv *appmessage.MsgInvRelayBlock
inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:]

@@ -180,9 +178,7 @@ func (flow *handleRelayInvsFlow) requestBlocks(requestQueue *hashesQueueSet) err
// readMsgBlock returns the next msgBlock in msgChan, and populates invsQueue with any inv messages that meanwhile arrive.
//
// Note: this function assumes msgChan can contain only appmessage.MsgInvRelayBlock and appmessage.MsgBlock messages.
func (flow *handleRelayInvsFlow) readMsgBlock() (
msgBlock *appmessage.MsgBlock, err error) {

func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock, err error) {
for {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {

@@ -244,10 +240,6 @@ func (flow *handleRelayInvsFlow) processAndRelayBlock(requestQueue *hashesQueueS
return protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
}

err = blocklogger.LogBlock(block)
if err != nil {
return err
}
err = flow.Broadcast(appmessage.NewMsgInvBlock(blockHash))
if err != nil {
return err
@@ -40,7 +40,9 @@ func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute

// TODO (Partial nodes): Convert block to partial block if needed

err = outgoingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(block))
blockMessage := appmessage.DomainBlockToMsgBlock(block)
ibdBlockMessage := appmessage.NewMsgIBDBlock(blockMessage)
err = outgoingRoute.Enqueue(ibdBlockMessage)
if err != nil {
return err
}

@@ -115,8 +115,8 @@ func (flow *handleRequestBlocksFlow) buildMsgBlockHeaders(lowHash *externalapi.D
}

func (flow *handleRequestBlocksFlow) sendHeaders(headers []*appmessage.MsgBlockHeader) error {
for _, msgIBDBlock := range headers {
err := flow.outgoingRoute.Enqueue(msgIBDBlock)
for _, msgBlockHeader := range headers {
err := flow.outgoingRoute.Enqueue(msgBlockHeader)
if err != nil {
return err
}
@@ -2,7 +2,6 @@ package ibd

import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/blocklogger"
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"

@@ -153,6 +152,10 @@ func (flow *handleIBDFlow) syncMissingBlockBodies() error {
if err != nil {
return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash)
}
err = flow.OnNewBlock(block)
if err != nil {
return err
}
}
}

@@ -348,13 +351,5 @@ func (flow *handleIBDFlow) processHeader(msgBlockHeader *appmessage.MsgBlockHead

return protocolerrors.Wrapf(true, err, "got invalid block %s during IBD", blockHash)
}
err = flow.OnNewBlock(block)
if err != nil {
return err
}
err = blocklogger.LogBlock(block)
if err != nil {
return err
}
return nil
}
@@ -180,7 +180,8 @@ func (m *Manager) registerIBDFlows(router *routerpkg.Router, isStopping *uint32,

return []*flow{
m.registerFlow("HandleIBD", router, []appmessage.MessageCommand{appmessage.CmdBlockLocator, appmessage.CmdIBDBlock,
appmessage.CmdDoneHeaders, appmessage.CmdIBDRootNotFound, appmessage.CmdIBDRootUTXOSetAndBlock}, isStopping, errChan,
appmessage.CmdDoneHeaders, appmessage.CmdIBDRootNotFound, appmessage.CmdIBDRootUTXOSetAndBlock, appmessage.CmdHeader},
isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return ibd.HandleIBD(m.context, incomingRoute, outgoingRoute, peer)
},

@@ -32,7 +32,7 @@ var handlers = map[appmessage.MessageCommand]handler{
appmessage.CmdResolveFinalityConflictRequestMessage: rpchandlers.HandleResolveFinalityConflict,
appmessage.CmdNotifyFinalityConflictsRequestMessage: rpchandlers.HandleNotifyFinalityConflicts,
appmessage.CmdGetMempoolEntriesRequestMessage: rpchandlers.HandleGetMempoolEntries,
appmessage.CmdShutDownRequestMessage: rpchandlers.HandleGetMempoolEntries,
appmessage.CmdShutDownRequestMessage: rpchandlers.HandleShutDown,
appmessage.CmdGetHeadersRequestMessage: rpchandlers.HandleGetHeaders,
}
@@ -8,6 +8,7 @@ import (

// HandleGetBlockCount handles the respectively named RPC command
func HandleGetBlockCount(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
response := appmessage.NewGetBlockCountResponseMessage(0) // TODO
response := &appmessage.GetBlockCountResponseMessage{}
response.Error = appmessage.RPCErrorf("not implemented")
return response, nil
}

@@ -27,7 +27,11 @@ func HandleGetBlockTemplate(context *rpccontext.Context, _ *router.Router, reque

coinbaseData := &externalapi.DomainCoinbaseData{ScriptPublicKey: scriptPublicKey}

templateBlock := context.Domain.MiningManager().GetBlockTemplate(coinbaseData)
templateBlock, err := context.Domain.MiningManager().GetBlockTemplate(coinbaseData)
if err != nil {
return nil, err
}
msgBlock := appmessage.DomainBlockToMsgBlock(templateBlock)

return appmessage.DomainBlockToMsgBlock(templateBlock), nil
return appmessage.NewGetBlockTemplateResponseMessage(msgBlock), nil
}
@@ -14,6 +14,7 @@ const (

// HandleGetBlocks handles the respectively named RPC command
func HandleGetBlocks(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {

return nil, nil
response := &appmessage.GetBlocksResponseMessage{}
response.Error = appmessage.RPCErrorf("not implemented")
return response, nil
}

@@ -14,5 +14,7 @@ const (

// HandleGetChainFromBlock handles the respectively named RPC command
func HandleGetChainFromBlock(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
return nil, nil
response := &appmessage.GetChainFromBlockResponseMessage{}
response.Error = appmessage.RPCErrorf("not implemented")
return response, nil
}

@@ -8,5 +8,7 @@ import (

// HandleGetHeaders handles the respectively named RPC command
func HandleGetHeaders(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
return nil, nil
response := &appmessage.GetHeadersResponseMessage{}
response.Error = appmessage.RPCErrorf("not implemented")
return response, nil
}

@@ -8,5 +8,7 @@ import (

// HandleGetMempoolEntries handles the respectively named RPC command
func HandleGetMempoolEntries(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
return nil, nil
response := &appmessage.GetMempoolEntriesResponseMessage{}
response.Error = appmessage.RPCErrorf("not implemented")
return response, nil
}

@@ -8,5 +8,5 @@ import (

// HandleGetMempoolEntry handles the respectively named RPC command
func HandleGetMempoolEntry(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
return nil, nil
return &appmessage.GetMempoolEntryResponseMessage{}, nil
}

@@ -8,5 +8,7 @@ import (

// HandleGetSubnetwork handles the respectively named RPC command
func HandleGetSubnetwork(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
return nil, nil
response := &appmessage.GetSubnetworkResponseMessage{}
response.Error = appmessage.RPCErrorf("not implemented")
return response, nil
}

@@ -8,5 +8,7 @@ import (

// HandleResolveFinalityConflict handles the respectively named RPC command
func HandleResolveFinalityConflict(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
return nil, nil
response := &appmessage.ResolveFinalityConflictResponseMessage{}
response.Error = appmessage.RPCErrorf("not implemented")
return response, nil
}
@@ -10,7 +10,7 @@ import (
)

type consensus struct {
lock *sync.RWMutex
lock *sync.Mutex
databaseContext model.DBReader

blockProcessor model.BlockProcessor

@@ -48,8 +48,8 @@ type consensus struct {
func (s *consensus) BuildBlock(coinbaseData *externalapi.DomainCoinbaseData,
transactions []*externalapi.DomainTransaction) (*externalapi.DomainBlock, error) {

s.lock.RLock()
defer s.lock.RUnlock()
s.lock.Lock()
defer s.lock.Unlock()

return s.blockBuilder.BuildBlock(coinbaseData, transactions)
}

@@ -66,8 +66,8 @@ func (s *consensus) ValidateAndInsertBlock(block *externalapi.DomainBlock) error
// ValidateTransactionAndPopulateWithConsensusData validates the given transaction
// and populates it with any missing consensus data
func (s *consensus) ValidateTransactionAndPopulateWithConsensusData(transaction *externalapi.DomainTransaction) error {
s.lock.RLock()
defer s.lock.RUnlock()
s.lock.Lock()
defer s.lock.Unlock()

err := s.transactionValidator.ValidateTransactionInIsolation(transaction)
if err != nil {

@@ -79,11 +79,7 @@ func (s *consensus) ValidateTransactionAndPopulateWithConsensusData(transaction
return err
}

virtualGHOSTDAGData, err := s.ghostdagDataStore.Get(s.databaseContext, model.VirtualBlockHash)
if err != nil {
return err
}
virtualSelectedParentMedianTime, err := s.pastMedianTimeManager.PastMedianTime(virtualGHOSTDAGData.SelectedParent)
virtualSelectedParentMedianTime, err := s.pastMedianTimeManager.PastMedianTime(model.VirtualBlockHash)
if err != nil {
return err
}

@@ -93,22 +89,22 @@ func (s *consensus) ValidateTransactionAndPopulateWithConsensusData(transaction
}

func (s *consensus) GetBlock(blockHash *externalapi.DomainHash) (*externalapi.DomainBlock, error) {
s.lock.RLock()
defer s.lock.RUnlock()
s.lock.Lock()
defer s.lock.Unlock()

return s.blockStore.Block(s.databaseContext, blockHash)
}

func (s *consensus) GetBlockHeader(blockHash *externalapi.DomainHash) (*externalapi.DomainBlockHeader, error) {
s.lock.RLock()
defer s.lock.RUnlock()
s.lock.Lock()
defer s.lock.Unlock()

return s.blockHeaderStore.BlockHeader(s.databaseContext, blockHash)
}

func (s *consensus) GetBlockInfo(blockHash *externalapi.DomainHash) (*externalapi.BlockInfo, error) {
s.lock.RLock()
defer s.lock.RUnlock()
s.lock.Lock()
defer s.lock.Unlock()

blockInfo := &externalapi.BlockInfo{}

@@ -137,22 +133,22 @@ func (s *consensus) GetBlockInfo(blockHash *externalapi.DomainHash) (*externalap
}

func (s *consensus) GetHashesBetween(lowHash, highHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
s.lock.RLock()
defer s.lock.RUnlock()
s.lock.Lock()
defer s.lock.Unlock()

return s.syncManager.GetHashesBetween(lowHash, highHash)
}

func (s *consensus) GetMissingBlockBodyHashes(highHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
s.lock.RLock()
defer s.lock.RUnlock()
s.lock.Lock()
defer s.lock.Unlock()

return s.syncManager.GetMissingBlockBodyHashes(highHash)
}

func (s *consensus) GetPruningPointUTXOSet(expectedPruningPointHash *externalapi.DomainHash) ([]byte, error) {
s.lock.RLock()
defer s.lock.RUnlock()
s.lock.Lock()
defer s.lock.Unlock()

pruningPointHash, err := s.pruningStore.PruningPoint(s.databaseContext)
if err != nil {

@@ -173,40 +169,40 @@ func (s *consensus) GetPruningPointUTXOSet(expectedPruningPointHash *externalapi
}

func (s *consensus) SetPruningPointUTXOSet(serializedUTXOSet []byte) error {
s.lock.RLock()
defer s.lock.RUnlock()
s.lock.Lock()
defer s.lock.Unlock()

return s.consensusStateManager.SetPruningPointUTXOSet(serializedUTXOSet)
}

func (s *consensus) GetVirtualSelectedParent() (*externalapi.DomainBlock, error) {
s.lock.RLock()
defer s.lock.RUnlock()
s.lock.Lock()
defer s.lock.Unlock()

virtualGHOSTDAGData, err := s.ghostdagDataStore.Get(s.databaseContext, model.VirtualBlockHash)
if err != nil {
return nil, err
}
return s.GetBlock(virtualGHOSTDAGData.SelectedParent)
return s.blockStore.Block(s.databaseContext, virtualGHOSTDAGData.SelectedParent)
}

func (s *consensus) CreateBlockLocator(lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
s.lock.RLock()
defer s.lock.RUnlock()
s.lock.Lock()
defer s.lock.Unlock()

return s.syncManager.CreateBlockLocator(lowHash, highHash)
}

func (s *consensus) FindNextBlockLocatorBoundaries(blockLocator externalapi.BlockLocator) (lowHash, highHash *externalapi.DomainHash, err error) {
s.lock.RLock()
defer s.lock.RUnlock()
s.lock.Lock()
defer s.lock.Unlock()

return s.syncManager.FindNextBlockLocatorBoundaries(blockLocator)
}

func (s *consensus) GetSyncInfo() (*externalapi.SyncInfo, error) {
s.lock.RLock()
defer s.lock.RUnlock()
s.lock.Lock()
defer s.lock.Unlock()

return s.syncManager.GetSyncInfo()
}
@@ -28,10 +28,6 @@ func (c *consensusStateStore) StageVirtualUTXODiff(virtualUTXODiff *model.UTXODi
}

func (c *consensusStateStore) commitVirtualUTXODiff(dbTx model.DBTransaction) error {
if c.stagedVirtualUTXOSet != nil {
return errors.New("cannot commit virtual UTXO diff while virtual UTXO set is staged")
}

if c.stagedVirtualUTXODiff == nil {
return nil
}

@@ -66,8 +62,8 @@ func (c *consensusStateStore) commitVirtualUTXODiff(dbTx model.DBTransaction) er
}

func (c *consensusStateStore) commitVirtualUTXOSet(dbTx model.DBTransaction) error {
if c.stagedVirtualUTXODiff != nil {
return errors.New("cannot commit virtual UTXO set while virtual UTXO diff is staged")
if c.stagedVirtualUTXOSet == nil {
return nil
}

for outpoint, utxoEntry := range c.stagedVirtualUTXOSet {

@@ -143,11 +139,14 @@ func (c *consensusStateStore) HasUTXOByOutpoint(dbContext model.DBReader, outpoi

func (c *consensusStateStore) hasUTXOByOutpointFromStagedVirtualUTXODiff(dbContext model.DBReader,
outpoint *externalapi.DomainOutpoint) (bool, error) {
if _, ok := c.stagedVirtualUTXODiff.ToRemove[*outpoint]; ok {
return false, nil
}
if _, ok := c.stagedVirtualUTXODiff.ToAdd[*outpoint]; ok {
return true, nil

if c.stagedVirtualUTXODiff != nil {
if _, ok := c.stagedVirtualUTXODiff.ToRemove[*outpoint]; ok {
return false, nil
}
if _, ok := c.stagedVirtualUTXODiff.ToAdd[*outpoint]; ok {
return true, nil
}
}

key, err := utxoKey(outpoint)
@@ -65,7 +65,7 @@ func (ms *multisetStore) Commit(dbTx model.DBTransaction) error {
// Get gets the multiset associated with the given blockHash
func (ms *multisetStore) Get(dbContext model.DBReader, blockHash *externalapi.DomainHash) (model.Multiset, error) {
if multiset, ok := ms.staging[*blockHash]; ok {
return multiset, nil
return multiset.Clone()
}

multisetBytes, err := dbContext.Get(ms.hashAsKey(blockHash))

@@ -29,13 +29,16 @@ func New() model.UTXODiffStore {

// Stage stages the given utxoDiff for the given blockHash
func (uds *utxoDiffStore) Stage(blockHash *externalapi.DomainHash, utxoDiff *model.UTXODiff, utxoDiffChild *externalapi.DomainHash) error {
clone, err := uds.cloneUTXODiff(utxoDiff)
utxoDiffClone, err := uds.cloneUTXODiff(utxoDiff)
if err != nil {
return err
}
uds.utxoDiffStaging[*blockHash] = utxoDiffClone

uds.utxoDiffStaging[*blockHash] = clone
uds.utxoDiffChildStaging[*blockHash] = &*utxoDiffChild
if utxoDiffChild != nil {
utxoDiffChildClone := uds.cloneUTXODiffChild(utxoDiffChild)
uds.utxoDiffChildStaging[*blockHash] = utxoDiffChildClone
}
return nil
}

@@ -74,7 +77,6 @@ func (uds *utxoDiffStore) Commit(dbTx model.DBTransaction) error {
if err != nil {
return err
}

err = dbTx.Put(uds.utxoDiffHashAsKey(&hash), utxoDiffChildBytes)
if err != nil {
return err

@@ -196,3 +198,7 @@ func (uds *utxoDiffStore) cloneUTXODiff(diff *model.UTXODiff) (*model.UTXODiff,

return uds.deserializeUTXODiff(serialized)
}

func (uds *utxoDiffStore) cloneUTXODiffChild(diffChild *externalapi.DomainHash) *externalapi.DomainHash {
return diffChild.Clone()
}
@@ -98,7 +98,8 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
dagParams.TimestampDeviationTolerance,
dbManager,
dagTraversalManager,
blockHeaderStore)
blockHeaderStore,
ghostdagDataStore)
transactionValidator := transactionvalidator.New(dagParams.BlockCoinbaseMaturity,
dagParams.EnableNonNativeSubnetworks,
dbManager,

@@ -153,6 +154,7 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
dbManager,
dagParams.FinalityDepth(),
dagParams.PruningDepth(),
genesisHash,
ghostdagManager,
dagTopologyManager,
dagTraversalManager,

@@ -250,7 +252,7 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
headerTipsStore)

c := &consensus{
lock: &sync.RWMutex{},
lock: &sync.Mutex{},
databaseContext: dbManager,

blockProcessor: blockProcessor,
@@ -17,6 +17,14 @@ func (hash DomainHash) String() string {
return hex.EncodeToString(hash[:])
}

// Clone clones the hash
func (hash *DomainHash) Clone() *DomainHash {
if hash == nil {
return nil
}
return &*hash
}

// DomainHashesToStrings returns a slice of strings representing the hashes in the given slice of hashes
func DomainHashesToStrings(hashes []*DomainHash) []string {
strings := make([]string, len(hashes))

@@ -19,6 +19,8 @@ func (s SyncState) String() string {
switch s {
case SyncStateRelay:
return "SyncStateRelay"
case SyncStateMissingGenesis:
return "SyncStateMissingGenesis"
case SyncStateHeadersFirst:
return "SyncStateHeadersFirst"
case SyncStateMissingUTXOSet:

@@ -8,4 +8,5 @@ type Multiset interface {
Remove(data []byte)
Hash() *externalapi.DomainHash
Serialize() []byte
Clone() (Multiset, error)
}
@@ -95,7 +95,7 @@ func (bb *blockBuilder) newBlockCoinbaseTransaction(
return bb.coinbaseManager.ExpectedCoinbaseTransaction(model.VirtualBlockHash, coinbaseData)
}

func (bb blockBuilder) buildHeader(transactions []*externalapi.DomainTransaction) (*externalapi.DomainBlockHeader, error) {
func (bb *blockBuilder) buildHeader(transactions []*externalapi.DomainTransaction) (*externalapi.DomainBlockHeader, error) {
parentHashes, err := bb.newBlockParentHashes()
if err != nil {
return nil, err

@@ -104,7 +104,7 @@ func (bb blockBuilder) buildHeader(transactions []*externalapi.DomainTransaction
if err != nil {
return nil, err
}
timeInMilliseconds, err := bb.newBlockTime(virtualGHOSTDAGData)
timeInMilliseconds, err := bb.newBlockTime()
if err != nil {
return nil, err
}

@@ -133,7 +133,7 @@ func (bb blockBuilder) buildHeader(transactions []*externalapi.DomainTransaction
}, nil
}

func (bb blockBuilder) newBlockParentHashes() ([]*externalapi.DomainHash, error) {
func (bb *blockBuilder) newBlockParentHashes() ([]*externalapi.DomainHash, error) {
virtualBlockRelations, err := bb.blockRelationStore.BlockRelation(bb.databaseContext, model.VirtualBlockHash)
if err != nil {
return nil, err

@@ -142,7 +142,7 @@ func (bb blockBuilder) newBlockParentHashes() ([]*externalapi.DomainHash, error)
return virtualBlockRelations.Parents, nil
}

func (bb blockBuilder) newBlockTime(virtualGHOSTDAGData *model.BlockGHOSTDAGData) (int64, error) {
func (bb *blockBuilder) newBlockTime() (int64, error) {
// The timestamp for the block must not be before the median timestamp
// of the last several blocks. Thus, choose the maximum between the
// current time and one second after the past median time. The current

@@ -150,7 +150,7 @@ func (bb blockBuilder) newBlockTime(virtualGHOSTDAGData *model.BlockGHOSTDAGData
// block timestamp does not supported a precision greater than one
// millisecond.
newTimestamp := mstime.Now().UnixMilliseconds() + 1
minTimestamp, err := bb.pastMedianTimeManager.PastMedianTime(virtualGHOSTDAGData.SelectedParent)
minTimestamp, err := bb.pastMedianTimeManager.PastMedianTime(model.VirtualBlockHash)
if err != nil {
return 0, err
}

@@ -160,7 +160,7 @@ func (bb blockBuilder) newBlockTime(virtualGHOSTDAGData *model.BlockGHOSTDAGData
return newTimestamp, nil
}

func (bb blockBuilder) newBlockDifficulty(virtualGHOSTDAGData *model.BlockGHOSTDAGData) (uint32, error) {
func (bb *blockBuilder) newBlockDifficulty(virtualGHOSTDAGData *model.BlockGHOSTDAGData) (uint32, error) {
virtualGHOSTDAGData, err := bb.ghostdagDataStore.Get(bb.databaseContext, model.VirtualBlockHash)
if err != nil {
return 0, err

@@ -168,11 +168,11 @@ func (bb blockBuilder) newBlockDifficulty(virtualGHOSTD
return bb.difficultyManager.RequiredDifficulty(virtualGHOSTDAGData.SelectedParent)
}

func (bb blockBuilder) newBlockHashMerkleRoot(transactions []*externalapi.DomainTransaction) *externalapi.DomainHash {
func (bb *blockBuilder) newBlockHashMerkleRoot(transactions []*externalapi.DomainTransaction) *externalapi.DomainHash {
return merkle.CalculateHashMerkleRoot(transactions)
}

func (bb blockBuilder) newBlockAcceptedIDMerkleRoot() (*externalapi.DomainHash, error) {
func (bb *blockBuilder) newBlockAcceptedIDMerkleRoot() (*externalapi.DomainHash, error) {
newBlockAcceptanceData, err := bb.acceptanceDataStore.Get(bb.databaseContext, model.VirtualBlockHash)
if err != nil {
return nil, err

@@ -181,7 +181,7 @@ func (bb blockBuilder) newBlockAcceptedIDMerkleRoot() (*externalapi.DomainHash,
return bb.calculateAcceptedIDMerkleRoot(newBlockAcceptanceData)
}

func (bb blockBuilder) calculateAcceptedIDMerkleRoot(acceptanceData model.AcceptanceData) (*externalapi.DomainHash, error) {
func (bb *blockBuilder) calculateAcceptedIDMerkleRoot(acceptanceData model.AcceptanceData) (*externalapi.DomainHash, error) {
var acceptedTransactions []*externalapi.DomainTransaction
for _, blockAcceptanceData := range acceptanceData {
for _, transactionAcceptance := range blockAcceptanceData.TransactionAcceptanceData {

@@ -200,7 +200,7 @@ func (bb blockBuilder) calculateAcceptedIDMerkleRoot(acceptanceData model.Accept
return merkle.CalculateIDMerkleRoot(acceptedTransactions), nil
}

func (bb blockBuilder) newBlockUTXOCommitment() (*externalapi.DomainHash, error) {
func (bb *blockBuilder) newBlockUTXOCommitment() (*externalapi.DomainHash, error) {
newBlockMultiset, err := bb.multisetStore.Get(bb.databaseContext, model.VirtualBlockHash)
if err != nil {
return nil, err

@@ -36,7 +36,7 @@ func (bb testBlockBuilder) buildHeaderWithParents(parentHashes []*externalapi.Do
if err != nil {
return nil, err
}
timeInMilliseconds, err := bb.newBlockTime(ghostdagData)
timeInMilliseconds, err := bb.newBlockTime()
if err != nil {
return nil, err
}
@@ -8,16 +8,20 @@ import (
)

func (bp *blockProcessor) validateAndInsertBlock(block *externalapi.DomainBlock) error {
mode, err := bp.syncManager.GetSyncInfo()
syncInfo, err := bp.syncManager.GetSyncInfo()
if err != nil {
return err
}

if isHeaderOnlyBlock(block) && syncInfo.State != externalapi.SyncStateRelay {
syncInfo.State = externalapi.SyncStateHeadersFirst
}

hash := consensusserialization.HeaderHash(block.Header)
if mode.State == externalapi.SyncStateMissingUTXOSet {
if syncInfo.State == externalapi.SyncStateMissingUTXOSet {
if isHeaderOnlyBlock(block) {
// Allow processing headers while in state SyncStateMissingUTXOSet
mode.State = externalapi.SyncStateHeadersFirst
syncInfo.State = externalapi.SyncStateHeadersFirst
} else {
headerTipsPruningPoint, err := bp.consensusStateManager.HeaderTipsPruningPoint()
if err != nil {

@@ -26,24 +30,38 @@ func (bp *blockProcessor) validateAndInsertBlock(block *externalapi.DomainBlock)

if *hash != *headerTipsPruningPoint {
return errors.Errorf("cannot insert blocks other than the header pruning point "+
"while in %s mode", mode.State)
"while in %s mode", syncInfo.State)
}

mode.State = externalapi.SyncStateMissingBlockBodies
syncInfo.State = externalapi.SyncStateMissingBlockBodies
}
}

if mode.State == externalapi.SyncStateHeadersFirst && !isHeaderOnlyBlock(block) {
mode.State = externalapi.SyncStateRelay
if syncInfo.State == externalapi.SyncStateHeadersFirst && !isHeaderOnlyBlock(block) {
syncInfo.State = externalapi.SyncStateRelay
log.Warnf("block %s contains transactions while validating in header only mode", hash)
}

err = bp.checkBlockStatus(hash, mode)
if syncInfo.State == externalapi.SyncStateMissingBlockBodies {
headerTips, err := bp.headerTipsStore.Tips(bp.databaseContext)
if err != nil {
return err
}
selectedHeaderTip, err := bp.ghostdagManager.ChooseSelectedParent(headerTips...)
if err != nil {
return err
}
if *selectedHeaderTip == *hash {
syncInfo.State = externalapi.SyncStateRelay
}
}

err = bp.checkBlockStatus(hash, syncInfo)
if err != nil {
return err
}

err = bp.validateBlock(block, mode)
err = bp.validateBlock(block, syncInfo)
if err != nil {
bp.discardAllChanges()
return err

@@ -55,9 +73,9 @@ func (bp *blockProcessor) validateAndInsertBlock(block *externalapi.DomainBlock)
}

if !hasHeader {
if mode.State == externalapi.SyncStateMissingBlockBodies {
if syncInfo.State == externalapi.SyncStateMissingBlockBodies {
return errors.Wrapf(ruleerrors.ErrMissingBlockHeaderInIBD, "no block header is stored for block %s. "+
"Every block we get during %s mode should have a pre-stored header", mode.State, hash)
"Every block we get during %s mode should have a pre-stored header", syncInfo.State, hash)
}
err = bp.reachabilityManager.AddBlock(hash)
if err != nil {

@@ -65,7 +83,7 @@ func (bp *blockProcessor) validateAndInsertBlock(block *externalapi.DomainBlock)
}
}

if mode.State == externalapi.SyncStateHeadersFirst {
if syncInfo.State == externalapi.SyncStateHeadersFirst {
bp.blockStatusStore.Stage(hash, externalapi.StatusHeaderOnly)
} else {
bp.blockStatusStore.Stage(hash, externalapi.StatusUTXOPendingVerification)

@@ -92,12 +110,12 @@ func (bp *blockProcessor) validateAndInsertBlock(block *externalapi.DomainBlock)
}
}

if mode.State == externalapi.SyncStateHeadersFirst || mode.State == externalapi.SyncStateMissingGenesis {
if syncInfo.State == externalapi.SyncStateHeadersFirst {
err = bp.headerTipsManager.AddHeaderTip(hash)
if err != nil {
return err
}
} else if mode.State == externalapi.SyncStateRelay {
} else if syncInfo.State == externalapi.SyncStateRelay || syncInfo.State == externalapi.SyncStateMissingGenesis {
// Attempt to add the block to the virtual
err = bp.consensusStateManager.AddBlockToVirtual(hash)
if err != nil {

@@ -114,14 +132,14 @@ func (bp *blockProcessor) validateAndInsertBlock(block *externalapi.DomainBlock)
}
}

if mode.State != externalapi.SyncStateMissingGenesis {
if syncInfo.State != externalapi.SyncStateMissingGenesis {
err = bp.updateReachabilityReindexRoot(oldHeadersSelectedTip)
if err != nil {
return err
}
}

if mode.State == externalapi.SyncStateRelay {
if syncInfo.State == externalapi.SyncStateRelay {
// Trigger pruning, which will check if the pruning point changed and delete the data if it did.
err = bp.pruningManager.FindNextPruningPoint()
if err != nil {

@@ -133,7 +151,6 @@ func (bp *blockProcessor) validateAndInsertBlock(block *externalapi.DomainBlock)
}

func (bp *blockProcessor) updateReachabilityReindexRoot(oldHeadersSelectedTip *externalapi.DomainHash) error {

headersSelectedTip, err := bp.headerTipsManager.SelectedTip()
if err != nil {
return err
@@ -30,8 +30,7 @@ func (v *blockValidator) checkBlockTransactionsFinalized(blockHash *externalapi.

// If it's not genesis
if len(block.Header.ParentHashes) != 0 {

blockTime, err = v.pastMedianTimeManager.PastMedianTime(ghostdagData.SelectedParent)
blockTime, err = v.pastMedianTimeManager.PastMedianTime(blockHash)
if err != nil {
return err
}

@@ -21,11 +21,6 @@ func (v *blockValidator) ValidateHeaderInContext(blockHash *externalapi.DomainHa
return err
}

err = v.validateMedianTime(header)
if err != nil {
return err
}

isHeadersOnlyBlock, err := v.isHeadersOnlyBlock(blockHash)
if err != nil {
return err

@@ -38,6 +33,11 @@ func (v *blockValidator) ValidateHeaderInContext(blockHash *externalapi.DomainHa
}
}

err = v.validateMedianTime(header)
if err != nil {
return err
}

err = v.checkMergeSizeLimit(blockHash)
if err != nil {
return err

@@ -99,15 +99,10 @@ func (v *blockValidator) validateMedianTime(header *externalapi.DomainBlockHeade
return nil
}

hash := consensusserialization.HeaderHash(header)
ghostdagData, err := v.ghostdagDataStore.Get(v.databaseContext, hash)
if err != nil {
return err
}

// Ensure the timestamp for the block header is not before the
// median time of the last several blocks (medianTimeBlocks).
pastMedianTime, err := v.pastMedianTimeManager.PastMedianTime(ghostdagData.SelectedParent)
hash := consensusserialization.HeaderHash(header)
pastMedianTime, err := v.pastMedianTimeManager.PastMedianTime(hash)
if err != nil {
return err
}
@@ -14,17 +14,20 @@ func (csm *consensusStateManager) AddBlockToVirtual(blockHash *externalapi.Domai
return err
}

if isNextVirtualSelectedParent {
blockStatus, err := csm.resolveBlockStatus(blockHash)
if !isNextVirtualSelectedParent {
return nil
}

blockStatus, err := csm.resolveBlockStatus(blockHash)
if err != nil {
return err
}

if blockStatus == externalapi.StatusValid {
err = csm.checkFinalityViolation(blockHash)
if err != nil {
return err
}
if blockStatus == externalapi.StatusValid {
err = csm.checkFinalityViolation(blockHash)
if err != nil {
return err
}
}
}

newTips, err := csm.addTip(blockHash)

@@ -41,6 +44,10 @@ func (csm *consensusStateManager) AddBlockToVirtual(blockHash *externalapi.Domai
}

func (csm *consensusStateManager) isNextVirtualSelectedParent(blockHash *externalapi.DomainHash) (bool, error) {
if *blockHash == *csm.genesisHash {
return true, nil
}

virtualGhostdagData, err := csm.ghostdagDataStore.Get(csm.databaseContext, model.VirtualBlockHash)
if err != nil {
return false, err
@@ -2,8 +2,8 @@ package consensusstatemanager

import (
"errors"

"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"github.com/kaspanet/kaspad/domain/consensus/utils/multiset"

"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

@@ -15,21 +15,23 @@ import (
func (csm *consensusStateManager) CalculatePastUTXOAndAcceptanceData(blockHash *externalapi.DomainHash) (
*model.UTXODiff, model.AcceptanceData, model.Multiset, error) {

// The genesis block has an empty UTXO diff, empty acceptance data, and a blank multiset
if *blockHash == *csm.genesisHash {
return &model.UTXODiff{}, model.AcceptanceData{}, multiset.New(), nil
}

blockGHOSTDAGData, err := csm.ghostdagDataStore.Get(csm.databaseContext, blockHash)
if err != nil {
return nil, nil, nil, err
}

selectedParentPastUTXO, err := csm.restorePastUTXO(blockGHOSTDAGData.SelectedParent)
if err != nil {
return nil, nil, nil, err
}

acceptanceData, utxoDiff, err := csm.applyBlueBlocks(blockHash, selectedParentPastUTXO, blockGHOSTDAGData)
if err != nil {
return nil, nil, nil, err
}

multiset, err := csm.calculateMultiset(acceptanceData, blockGHOSTDAGData)
if err != nil {
return nil, nil, nil, err

@@ -44,13 +46,21 @@ func (csm *consensusStateManager) restorePastUTXO(blockHash *externalapi.DomainH
// collect the UTXO diffs
var utxoDiffs []*model.UTXODiff
nextBlockHash := blockHash
for nextBlockHash != nil {
for {
utxoDiff, err := csm.utxoDiffStore.UTXODiff(csm.databaseContext, nextBlockHash)
if err != nil {
return nil, err
}
utxoDiffs = append(utxoDiffs, utxoDiff)

exists, err := csm.utxoDiffStore.HasUTXODiffChild(csm.databaseContext, nextBlockHash)
if err != nil {
return nil, err
}
if !exists {
break
}

nextBlockHash, err = csm.utxoDiffStore.UTXODiffChild(csm.databaseContext, nextBlockHash)
if err != nil {
return nil, err

@@ -78,7 +88,7 @@ func (csm *consensusStateManager) applyBlueBlocks(blockHash *externalapi.DomainH
return nil, nil, err
}

selectedParentMedianTime, err := csm.pastMedianTimeManager.PastMedianTime(ghostdagData.SelectedParent)
selectedParentMedianTime, err := csm.pastMedianTimeManager.PastMedianTime(blockHash)
if err != nil {
return nil, nil, err
}

@@ -88,8 +98,8 @@ func (csm *consensusStateManager) applyBlueBlocks(blockHash *externalapi.DomainH
accumulatedMass := uint64(0)

for i, blueBlock := range blueBlocks {
blockAccepanceData := &model.BlockAcceptanceData{
TransactionAcceptanceData: []*model.TransactionAcceptanceData{},
blockAcceptanceData := &model.BlockAcceptanceData{
TransactionAcceptanceData: make([]*model.TransactionAcceptanceData, len(blueBlock.Transactions)),
}
isSelectedParent := i == 0

@@ -103,13 +113,13 @@ func (csm *consensusStateManager) applyBlueBlocks(blockHash *externalapi.DomainH
return nil, nil, err
}

blockAccepanceData.TransactionAcceptanceData[j] = &model.TransactionAcceptanceData{
blockAcceptanceData.TransactionAcceptanceData[j] = &model.TransactionAcceptanceData{
Transaction: transaction,
Fee: fee,
IsAccepted: isAccepted,
}
}
multiblockAcceptanceData[i] = blockAccepanceData
multiblockAcceptanceData[i] = blockAcceptanceData
}

return multiblockAcceptanceData, accumulatedUTXODiff, nil

@@ -226,7 +236,7 @@ func newUTXOSetIterator(collection model.UTXOCollection) *utxoSetIterator {

func (u utxoSetIterator) Next() bool {
u.index++
return u.index != len(u.pairs)
return u.index < len(u.pairs)
}

func (u utxoSetIterator) Get() (outpoint *externalapi.DomainOutpoint, utxoEntry *externalapi.UTXOEntry, err error) {
@@ -2,12 +2,14 @@ package consensusstatemanager

import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

// consensusStateManager manages the node's consensus state
type consensusStateManager struct {
finalityDepth uint64
pruningDepth uint64
genesisHash *externalapi.DomainHash
databaseContext model.DBManager

ghostdagManager model.GHOSTDAGManager

@@ -39,6 +41,7 @@ func New(
databaseContext model.DBManager,
finalityDepth uint64,
pruningDepth uint64,
genesisHash *externalapi.DomainHash,
ghostdagManager model.GHOSTDAGManager,
dagTopologyManager model.DAGTopologyManager,
dagTraversalManager model.DAGTraversalManager,

@@ -63,6 +66,7 @@ func New(
csm := &consensusStateManager{
finalityDepth: finalityDepth,
pruningDepth: pruningDepth,
genesisHash: genesisHash,
databaseContext: databaseContext,

ghostdagManager: ghostdagManager,

@@ -24,8 +24,13 @@ func (csm *consensusStateManager) checkFinalityViolation(
func (csm *consensusStateManager) virtualFinalityPoint(virtualGHOSTDAGData *model.BlockGHOSTDAGData) (
*externalapi.DomainHash, error) {

blueScore := virtualGHOSTDAGData.BlueScore - csm.finalityDepth
if virtualGHOSTDAGData.BlueScore < csm.finalityDepth {
blueScore = 0
}

return csm.dagTraversalManager.HighestChainBlockBelowBlueScore(
model.VirtualBlockHash, virtualGHOSTDAGData.BlueScore-csm.finalityDepth)
model.VirtualBlockHash, blueScore)
}

func (csm *consensusStateManager) isViolatingFinality(
@@ -11,6 +11,10 @@ import (
func (csm *consensusStateManager) calculateMultiset(
acceptanceData model.AcceptanceData, blockGHOSTDAGData *model.BlockGHOSTDAGData) (model.Multiset, error) {

if blockGHOSTDAGData.SelectedParent == nil {
return multiset.New(), nil
}

ms, err := csm.multisetStore.Get(csm.databaseContext, blockGHOSTDAGData.SelectedParent)
if err != nil {
return nil, err

@@ -31,7 +31,7 @@ func (csm *consensusStateManager) pickVirtualParents(tips []*externalapi.DomainH

mergeSetSize := 1 // starts counting from 1 because selectedParent is already in the mergeSet

for len(selectedVirtualParents) < constants.MaxBlockParents {
for candidatesHeap.Len() > 0 && len(selectedVirtualParents) < constants.MaxBlockParents {
candidate := candidatesHeap.Pop()
mergeSetIncrease, err := csm.mergeSetIncrease(candidate, selectedVirtualParents)
if err != nil {

@@ -16,7 +16,7 @@ func (csm *consensusStateManager) resolveBlockStatus(blockHash *externalapi.Doma
}

// resolve the unverified blocks' statuses in opposite order
for i := len(unverifiedBlocks); i >= 0; i++ {
for i := len(unverifiedBlocks) - 1; i >= 0; i-- {
unverifiedBlockHash := unverifiedBlocks[i]

var blockStatus externalapi.BlockStatus

@@ -47,6 +47,10 @@ func (csm *consensusStateManager) getUnverifiedChainBlocksAndSelectedParentStatu
return nil, 0, err
}

if ghostdagData.SelectedParent == nil {
return unverifiedBlocks, externalapi.StatusValid, nil
}

selectedParentStatus, err := csm.blockStatusStore.Get(csm.databaseContext, ghostdagData.SelectedParent)
if err != nil {
return nil, 0, err
@@ -116,7 +116,7 @@ type protoUTXOSetIterator struct {

func (p protoUTXOSetIterator) Next() bool {
p.index++
return p.index != len(p.utxoSet.Utxos)
return p.index < len(p.utxoSet.Utxos)
}

func (p protoUTXOSetIterator) Get() (outpoint *externalapi.DomainOutpoint, utxoEntry *externalapi.UTXOEntry, err error) {

@@ -23,10 +23,12 @@ func (csm *consensusStateManager) updateVirtual(newBlockHash *externalapi.Domain
return err
}

virtualUTXODiff, _, _, err := csm.CalculatePastUTXOAndAcceptanceData(model.VirtualBlockHash)
virtualUTXODiff, virtualAcceptanceData, virtualMultiset, err := csm.CalculatePastUTXOAndAcceptanceData(model.VirtualBlockHash)
if err != nil {
return err
}
csm.acceptanceDataStore.Stage(model.VirtualBlockHash, virtualAcceptanceData)
csm.multisetStore.Stage(model.VirtualBlockHash, virtualMultiset)

err = csm.consensusStateStore.StageVirtualUTXODiff(virtualUTXODiff)
if err != nil {

@@ -43,11 +43,7 @@ func (csm *consensusStateManager) verifyAndBuildUTXO(block *externalapi.DomainBl
func (csm *consensusStateManager) validateBlockTransactionsAgainstPastUTXO(block *externalapi.DomainBlock,
blockHash *externalapi.DomainHash, pastUTXODiff *model.UTXODiff, err error) error {

ghostdagData, err := csm.ghostdagDataStore.Get(csm.databaseContext, blockHash)
if err != nil {
return err
}
selectedParentMedianTime, err := csm.pastMedianTimeManager.PastMedianTime(ghostdagData.SelectedParent)
selectedParentMedianTime, err := csm.pastMedianTimeManager.PastMedianTime(blockHash)
if err != nil {
return err
}
@@ -68,7 +68,7 @@ func (dtm *dagTraversalManager) SelectedParentIterator(highHash *externalapi.Dom
// blueScore in the block with the given highHash's selected
// parent chain
func (dtm *dagTraversalManager) HighestChainBlockBelowBlueScore(highHash *externalapi.DomainHash, blueScore uint64) (*externalapi.DomainHash, error) {
blockHash := highHash
currentBlockHash := highHash
chainBlock, err := dtm.ghostdagDataStore.Get(dtm.databaseContext, highHash)
if err != nil {
return nil, err

@@ -82,15 +82,15 @@ func (dtm *dagTraversalManager) HighestChainBlockBelowBlueScore(highHash *extern
// If we used `BlockIterator` we'd need to do more calls to `ghostdagDataStore` so we can get the blueScore
for chainBlock.BlueScore >= requiredBlueScore {
if chainBlock.SelectedParent == nil { // genesis
return blockHash, nil
return currentBlockHash, nil
}
blockHash = chainBlock.SelectedParent
chainBlock, err = dtm.ghostdagDataStore.Get(dtm.databaseContext, highHash)
currentBlockHash = chainBlock.SelectedParent
chainBlock, err = dtm.ghostdagDataStore.Get(dtm.databaseContext, currentBlockHash)
if err != nil {
return nil, err
}
}
return blockHash, nil
return currentBlockHash, nil
}

func (dtm *dagTraversalManager) LowestChainBlockAboveOrEqualToBlueScore(highHash *externalapi.DomainHash, blueScore uint64) (*externalapi.DomainHash, error) {

@@ -20,6 +20,10 @@ func (s selectedChildIterator) Next() bool {
}

for _, child := range children {
if *child == *model.VirtualBlockHash {
break
}

isChildInSelectedParentChainOfHighHash, err := s.dagTopologyManager.IsInSelectedParentChainOf(child, s.highHash)
if err != nil {
panic(err)
@@ -16,25 +16,47 @@ type pastMedianTimeManager struct {

dagTraversalManager model.DAGTraversalManager

blockHeaderStore model.BlockHeaderStore
blockHeaderStore model.BlockHeaderStore
ghostdagDataStore model.GHOSTDAGDataStore
}

// New instantiates a new PastMedianTimeManager
func New(timestampDeviationTolerance uint64,
databaseContext model.DBReader,
dagTraversalManager model.DAGTraversalManager,
blockHeaderStore model.BlockHeaderStore) model.PastMedianTimeManager {
blockHeaderStore model.BlockHeaderStore,
ghostdagDataStore model.GHOSTDAGDataStore) model.PastMedianTimeManager {

return &pastMedianTimeManager{
timestampDeviationTolerance: timestampDeviationTolerance,
databaseContext: databaseContext,
dagTraversalManager: dagTraversalManager,
blockHeaderStore: blockHeaderStore,

dagTraversalManager: dagTraversalManager,

blockHeaderStore: blockHeaderStore,
ghostdagDataStore: ghostdagDataStore,
}
}

// PastMedianTime returns the past median time for some block
func (pmtm *pastMedianTimeManager) PastMedianTime(blockHash *externalapi.DomainHash) (int64, error) {
window, err := pmtm.dagTraversalManager.BlueWindow(blockHash, 2*pmtm.timestampDeviationTolerance-1)
blockGHOSTDAGData, err := pmtm.ghostdagDataStore.Get(pmtm.databaseContext, blockHash)
if err != nil {
return 0, err
}
selectedParentHash := blockGHOSTDAGData.SelectedParent

// Genesis block
if selectedParentHash == nil {
header, err := pmtm.blockHeaderStore.BlockHeader(pmtm.databaseContext, blockHash)
if err != nil {
return 0, err
}

return header.TimeInMilliseconds, nil
}

window, err := pmtm.dagTraversalManager.BlueWindow(selectedParentHash, 2*pmtm.timestampDeviationTolerance-1)
if err != nil {
return 0, err
}
@@ -322,6 +322,13 @@ func (rt *reachabilityManager) countSubtrees(node *externalapi.DomainHash, subTr
return err
}

// If the current is now nil, it means that the previous
// `current` was the genesis block -- the only block that
// does not have parents
if current == nil {
break
}

calculatedChildrenCount[*current]++

currentChildren, err := rt.children(current)

@@ -36,7 +36,7 @@ func (sm *syncManager) createBlockLocator(lowHash, highHash *externalapi.DomainH

// Nothing more to add once the low node has been added.
if currentBlockBlueScore <= lowBlockBlueScore {
if currentHash != lowHash {
if *currentHash != *lowHash {
return nil, errors.Errorf("highHash and lowHash are " +
"not in the same selected parent chain.")
}

@@ -46,7 +46,7 @@ func (sm *syncManager) createBlockLocator(lowHash, highHash *externalapi.DomainH
// Calculate blueScore of previous node to include ensuring the
// final node is lowNode.
nextBlueScore := currentBlockBlueScore - step
if nextBlueScore < lowBlockGHOSTDAGData.BlueScore {
if currentBlockBlueScore < step {
nextBlueScore = lowBlockGHOSTDAGData.BlueScore
}
@ -9,7 +9,7 @@ import (
// areHeaderTipsSyncedMaxTimeDifference is the number of blocks from
// the header virtual selected parent (estimated by timestamps) for
// kaspad to be considered not synced
const areHeaderTipsSyncedMaxTimeDifference = 300
const areHeaderTipsSyncedMaxTimeDifference = 300_000 // 5 minutes

func (sm *syncManager) syncInfo() (*externalapi.SyncInfo, error) {
syncState, err := sm.resolveSyncState()
@ -36,7 +36,6 @@ func (sm *syncManager) resolveSyncState() (externalapi.SyncState, error) {
if err != nil {
return 0, err
}

if !hasTips {
return externalapi.SyncStateMissingGenesis, nil
}
@ -45,7 +44,6 @@ func (sm *syncManager) resolveSyncState() (externalapi.SyncState, error) {
if err != nil {
return 0, err
}

isSynced, err := sm.areHeaderTipsSynced(headerVirtualSelectedParentHash)
if err != nil {
return 0, err
@ -54,11 +52,21 @@ func (sm *syncManager) resolveSyncState() (externalapi.SyncState, error) {
return externalapi.SyncStateHeadersFirst, nil
}

headerVirtualSelectedParentBlockStatus, err := sm.blockStatusStore.Get(sm.databaseContext, headerVirtualSelectedParentHash)
// Once the header tips are synced, check the status of
// the pruning point from the point of view of the header
// tips. We check it against StatusValid (rather than
// StatusHeaderOnly) because once we do receive the
// UTXO set of said pruning point, the state is explicitly
// set to StatusValid.
headerTipsPruningPoint, err := sm.consensusStateManager.HeaderTipsPruningPoint()
if err != nil {
return 0, err
}
if headerVirtualSelectedParentBlockStatus != externalapi.StatusValid {
headerTipsPruningPointStatus, err := sm.blockStatusStore.Get(sm.databaseContext, headerTipsPruningPoint)
if err != nil {
return 0, err
}
if headerTipsPruningPointStatus != externalapi.StatusValid {
return externalapi.SyncStateMissingUTXOSet, nil
}
@ -182,7 +182,7 @@ func (v *transactionValidator) checkTransactionSubnetwork(tx *externalapi.Domain
// If we are a partial node, only transactions on built in subnetworks
// or our own subnetwork may have a payload
isLocalNodeFull := subnetworkID == nil
shouldTxBeFull := subnetworks.IsBuiltIn(tx.SubnetworkID) || tx.SubnetworkID == *subnetworkID
shouldTxBeFull := subnetworks.IsBuiltIn(tx.SubnetworkID) || subnetworks.IsEqual(&tx.SubnetworkID, subnetworkID)
if !isLocalNodeFull && !shouldTxBeFull && len(tx.Payload) > 0 {
return errors.Wrapf(ruleerrors.ErrInvalidPayload,
"transaction that was expected to be partial has a payload "+
@ -13,7 +13,7 @@ import (
var byteOrder = binary.LittleEndian

const uint64Len = 8
const scriptPubKeyLengthLength = 1
const lengthOfscriptPubKeyLength = 1

// SerializeCoinbasePayload builds the coinbase payload based on the provided scriptPubKey and extra data.
func SerializeCoinbasePayload(blueScore uint64, coinbaseData *externalapi.DomainCoinbaseData) ([]byte, error) {
@ -23,14 +23,14 @@ func SerializeCoinbasePayload(blueScore uint64, coinbaseData *externalapi.Domain
"longer than the max allowed length of %d", constants.CoinbasePayloadScriptPublicKeyMaxLength)
}

payload := make([]byte, uint64Len+scriptPubKeyLengthLength+scriptPubKeyLength+len(coinbaseData.ExtraData))
payload := make([]byte, uint64Len+lengthOfscriptPubKeyLength+scriptPubKeyLength+len(coinbaseData.ExtraData))
byteOrder.PutUint64(payload[:uint64Len], blueScore)
if len(coinbaseData.ScriptPublicKey) > math.MaxUint8 {
return nil, errors.Errorf("script public key is bigger than %d", math.MaxUint8)
}
payload[uint64Len] = uint8(len(coinbaseData.ScriptPublicKey))
copy(payload[uint64Len+scriptPubKeyLengthLength:], coinbaseData.ScriptPublicKey)
copy(payload[uint64Len+scriptPubKeyLengthLength+scriptPubKeyLength:], coinbaseData.ExtraData)
copy(payload[uint64Len+lengthOfscriptPubKeyLength:], coinbaseData.ScriptPublicKey)
copy(payload[uint64Len+lengthOfscriptPubKeyLength+scriptPubKeyLength:], coinbaseData.ExtraData)
return payload, nil
}

@ -38,7 +38,7 @@ func SerializeCoinbasePayload(blueScore uint64, coinbaseData *externalapi.Domain
func ExtractCoinbaseDataAndBlueScore(coinbaseTx *externalapi.DomainTransaction) (blueScore uint64,
coinbaseData *externalapi.DomainCoinbaseData, err error) {

minLength := uint64Len + scriptPubKeyLengthLength
minLength := uint64Len + lengthOfscriptPubKeyLength
if len(coinbaseTx.Payload) < minLength {
return 0, nil, errors.Wrapf(ruleerrors.ErrBadCoinbasePayloadLen,
"coinbase payload is less than the minimum length of %d", minLength)
@ -58,7 +58,7 @@ func ExtractCoinbaseDataAndBlueScore(coinbaseTx *externalapi.DomainTransaction)
}

return blueScore, &externalapi.DomainCoinbaseData{
ScriptPublicKey: coinbaseTx.Payload[uint64Len+scriptPubKeyLengthLength : uint64Len+scriptPubKeyLengthLength+scriptPubKeyLength],
ExtraData: coinbaseTx.Payload[uint64Len+scriptPubKeyLengthLength+scriptPubKeyLengthLength:],
ScriptPublicKey: coinbaseTx.Payload[uint64Len+lengthOfscriptPubKeyLength : uint64Len+lengthOfscriptPubKeyLength+scriptPubKeyLength],
ExtraData: coinbaseTx.Payload[uint64Len+lengthOfscriptPubKeyLength+scriptPubKeyLength:],
}, nil
}
@ -87,5 +87,5 @@ func (hs HashSet) ToSlice() []*externalapi.DomainHash {

// Length returns the length of this HashSet
func (hs HashSet) Length() int {
return hs.Length()
return len(hs)
}

@ -56,6 +56,10 @@ func CalculateHashMerkleRoot(transactions []*externalapi.DomainTransaction) *ext
// CalculateIDMerkleRoot calculates the merkle root of a tree consisted of the given transaction IDs.
// See `merkleRoot` for more info.
func CalculateIDMerkleRoot(transactions []*externalapi.DomainTransaction) *externalapi.DomainHash {
if len(transactions) == 0 {
return &externalapi.DomainHash{}
}

txIDs := make([]*externalapi.DomainHash, len(transactions))
for i, tx := range transactions {
txIDs[i] = (*externalapi.DomainHash)(consensusserialization.TransactionID(tx))
@ -31,7 +31,11 @@ func (m multiset) Hash() *externalapi.DomainHash {
}

func (m multiset) Serialize() []byte {
return m.Serialize()
return m.ms.Serialize()[:]
}

func (m multiset) Clone() (model.Multiset, error) {
return FromBytes(m.Serialize())
}

// FromBytes deserializes the given bytes slice and returns a multiset.
@ -129,19 +129,19 @@ var simnetGenesisCoinbaseTx = transactionhelper.NewSubnetworkTransaction(1,
// simnetGenesisHash is the hash of the first block in the block DAG for
// the simnet (genesis block).
var simnetGenesisHash = externalapi.DomainHash{
0xad, 0x47, 0xba, 0xd5, 0x6e, 0x1e, 0x62, 0x99,
0x43, 0x81, 0xd2, 0xaf, 0xda, 0x1d, 0xe6, 0xda,
0x0b, 0x50, 0xcb, 0x76, 0x8e, 0x5d, 0x9e, 0x41,
0x20, 0x98, 0x28, 0xb1, 0x7e, 0x88, 0xb9, 0xb5,
0x50, 0x01, 0x7e, 0x84, 0x55, 0xc0, 0xab, 0x9c,
0xca, 0xf5, 0xc1, 0x5d, 0xbe, 0x57, 0x0a, 0x80,
0x1f, 0x93, 0x00, 0x34, 0xe6, 0xee, 0xc2, 0xee,
0xff, 0x57, 0xc1, 0x66, 0x2a, 0x63, 0x4b, 0x23,
}

// simnetGenesisMerkleRoot is the hash of the first transaction in the genesis block
// for the development network.
var simnetGenesisMerkleRoot = externalapi.DomainHash{
0x47, 0x52, 0xc7, 0x23, 0x70, 0x4d, 0x89, 0x17,
0xbd, 0x44, 0x26, 0xfa, 0x82, 0x7e, 0x1b, 0xa9,
0xc6, 0x46, 0x1a, 0x37, 0x5a, 0x73, 0x88, 0x09,
0xe8, 0x17, 0xff, 0xb1, 0xdb, 0x1a, 0xb3, 0x3f,
0x79, 0x77, 0x9c, 0xad, 0x8d, 0x5a, 0x37, 0x57,
0x75, 0x8b, 0x2f, 0xa5, 0x82, 0x47, 0x2f, 0xb6,
0xbe, 0x24, 0x5f, 0xcb, 0x21, 0x68, 0x21, 0x44,
0x45, 0x39, 0x44, 0xaf, 0xab, 0x9f, 0x0f, 0xc1,
}

// simnetGenesisBlock defines the genesis block of the block DAG which serves as the
@ -155,7 +155,7 @@ var simnetGenesisBlock = externalapi.DomainBlock{
UTXOCommitment: externalapi.DomainHash{},
TimeInMilliseconds: 0x173001df3d5,
Bits: 0x207fffff,
Nonce: 0x0,
Nonce: 1,
},
Transactions: []*externalapi.DomainTransaction{simnetGenesisCoinbaseTx},
}
@ -104,7 +104,7 @@ func New(consensus consensusexternalapi.Consensus, mempool miningmanagerapi.Memp
// | <= policy.BlockMinSize) | |
// ----------------------------------- --

func (btb *blockTemplateBuilder) GetBlockTemplate(coinbaseData *consensusexternalapi.DomainCoinbaseData) *consensusexternalapi.DomainBlock {
func (btb *blockTemplateBuilder) GetBlockTemplate(coinbaseData *consensusexternalapi.DomainCoinbaseData) (*consensusexternalapi.DomainBlock, error) {
mempoolTransactions := btb.mempool.Transactions()
candidateTxs := make([]*candidateTx, 0, len(mempoolTransactions))
for _, tx := range mempoolTransactions {
@ -141,15 +141,16 @@ func (btb *blockTemplateBuilder) GetBlockTemplate(coinbaseData *consensusexterna
btb.mempool.RemoveTransactions(invalidTxs)
// We can call this recursively without worry because this should almost never happen
return btb.GetBlockTemplate(coinbaseData)
} else if err != nil {
log.Errorf("GetBlockTemplate: Failed building block: %s", err)
return nil
}

if err != nil {
return nil, err
}

log.Debugf("Created new block template (%d transactions, %d in fees, %d mass, target difficulty %064x)",
len(blk.Transactions), blockTxs.totalFees, blockTxs.totalMass, util.CompactToBig(blk.Header.Bits))

return blk
return blk, nil
}

// calcTxValue calculates a value to be used in transaction selection.
@ -7,6 +7,7 @@ package mempool
import (
"container/list"
"fmt"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"sync"
"time"

@ -90,7 +91,7 @@ type mempool struct {
// transactions until they are mined into a block.
func New(consensus consensusexternalapi.Consensus) miningmanagermodel.Mempool {
policy := policy{
MaxTxVersion: 0,
MaxTxVersion: constants.TransactionVersion,
AcceptNonStd: false,
MaxOrphanTxs: 5,
MaxOrphanTxSize: 100000,
@ -614,8 +615,11 @@ func (mp *mempool) maybeAcceptTransaction(tx *consensusexternalapi.DomainTransac
// This will populate the missing UTXOEntries.
err = mp.consensus.ValidateTransactionAndPopulateWithConsensusData(tx)
missingOutpoints := ruleerrors.ErrMissingTxOut{}
if errors.As(err, &missingOutpoints) {
return missingOutpoints.MissingOutpoints, nil, nil
if err != nil {
if errors.As(err, &missingOutpoints) {
return missingOutpoints.MissingOutpoints, nil, nil
}
return nil, nil, err
}

// Don't allow transactions with non-standard inputs if the network
@ -636,22 +640,7 @@ func (mp *mempool) maybeAcceptTransaction(tx *consensusexternalapi.DomainTransac
}
}

//// NOTE: if you modify this code to accept non-standard transactions,
//// you should add code here to check that the transaction does a
//// reasonable number of ECDSA signature verifications.
//

// Don't allow transactions with fees too low to get into a mined block.
//
// Most miners allow a free transaction area in blocks they mine to go
// alongside the area used for high-priority transactions as well as
// transactions with fees. A transaction size of up to 1000 bytes is
// considered safe to go into this section. Further, the minimum fee
// calculated below on its own would encourage several small
// transactions to avoid fees rather than one single larger transaction
// which is more desirable. Therefore, as long as the size of the
// transaction does not exceeed 1000 less than the reserved space for
// high-priority transactions, don't require a fee for it.
// Don't allow transactions with fees too low to get into a mined block
serializedSize := int64(estimatedsize.TransactionEstimatedSerializedSize(tx))
minFee := uint64(calcMinRequiredTxRelayFee(serializedSize,
mp.policy.MinRelayTxFee))
@ -55,10 +55,9 @@ const (
// pool and relayed.
func calcMinRequiredTxRelayFee(serializedSize int64, minRelayTxFee util.Amount) int64 {
// Calculate the minimum fee for a transaction to be allowed into the
// mempool and relayed by scaling the base fee (which is the minimum
// free transaction relay fee). minTxRelayFee is in sompi/kB so
// multiply by serializedSize (which is in bytes) and divide by 1000 to
// get minimum sompis.
// mempool and relayed by scaling the base fee. minTxRelayFee is in
// sompi/kB so multiply by serializedSize (which is in bytes) and
// divide by 1000 to get minimum sompis.
minFee := (serializedSize * int64(minRelayTxFee)) / 1000

if minFee == 0 && minRelayTxFee > 0 {
@ -8,7 +8,7 @@ import (
// MiningManager creates block templates for mining as well as maintaining
// known transactions that have no yet been added to any block
type MiningManager interface {
GetBlockTemplate(coinbaseData *consensusexternalapi.DomainCoinbaseData) *consensusexternalapi.DomainBlock
GetBlockTemplate(coinbaseData *consensusexternalapi.DomainCoinbaseData) (*consensusexternalapi.DomainBlock, error)
GetTransaction(transactionID *consensusexternalapi.DomainTransactionID) (*consensusexternalapi.DomainTransaction, bool)
HandleNewBlockTransactions(txs []*consensusexternalapi.DomainTransaction)
ValidateAndInsertTransaction(transaction *consensusexternalapi.DomainTransaction, allowOrphan bool) error
@ -20,7 +20,7 @@ type miningManager struct {
}

// GetBlockTemplate creates a block template for a miner to consume
func (mm *miningManager) GetBlockTemplate(coinbaseData *consensusexternalapi.DomainCoinbaseData) *consensusexternalapi.DomainBlock {
func (mm *miningManager) GetBlockTemplate(coinbaseData *consensusexternalapi.DomainCoinbaseData) (*consensusexternalapi.DomainBlock, error) {
return mm.blockTemplateBuilder.GetBlockTemplate(coinbaseData)
}

@ -6,5 +6,5 @@ import (

// BlockTemplateBuilder builds block templates for miners to consume
type BlockTemplateBuilder interface {
GetBlockTemplate(coinbaseData *consensusexternalapi.DomainCoinbaseData) *consensusexternalapi.DomainBlock
GetBlockTemplate(coinbaseData *consensusexternalapi.DomainCoinbaseData) (*consensusexternalapi.DomainBlock, error)
}
@ -14,6 +14,8 @@ func (x *KaspadMessage_IbdRootUTXOSetAndBlock) toAppMessage() (appmessage.Messag
}

func (x *KaspadMessage_IbdRootUTXOSetAndBlock) fromAppMessage(msgIBDRootUTXOSetAndBlock *appmessage.MsgIBDRootUTXOSetAndBlock) error {
x.IbdRootUTXOSetAndBlock = &IBDRootUTXOSetAndBlockMessage{}
x.IbdRootUTXOSetAndBlock.UtxoSet = msgIBDRootUTXOSetAndBlock.UTXOSet
x.IbdRootUTXOSetAndBlock.Block = &BlockMessage{}
return x.IbdRootUTXOSetAndBlock.Block.fromAppMessage(msgIBDRootUTXOSetAndBlock.Block)
}

@ -14,7 +14,7 @@ func (x *KaspadMessage_RequestIBDBlocks) toAppMessage() (appmessage.Message, err
if err != nil {
return nil, err
}
return &appmessage.MsgRequestRelayBlocks{Hashes: hashes}, nil
return &appmessage.MsgRequestIBDBlocks{Hashes: hashes}, nil
}

func (x *KaspadMessage_RequestIBDBlocks) fromAppMessage(msgRequestIBDBlocks *appmessage.MsgRequestIBDBlocks) error {

@ -13,6 +13,8 @@ func (x *KaspadMessage_RequestIBDRootUTXOSetAndBlock) toAppMessage() (appmessage

func (x *KaspadMessage_RequestIBDRootUTXOSetAndBlock) fromAppMessage(
msgRequestIBDRootUTXOSetAndBlock *appmessage.MsgRequestIBDRootUTXOSetAndBlock) error {

x.RequestIBDRootUTXOSetAndBlock = &RequestIBDRootUTXOSetAndBlockMessage{}
x.RequestIBDRootUTXOSetAndBlock.IbdRoot = domainHashToProto(msgRequestIBDRootUTXOSetAndBlock.IBDRoot)
return nil
}

@ -14,8 +14,7 @@ func (x *KaspadMessage_SubmitBlockRequest) toAppMessage() (appmessage.Message, e
}

func (x *KaspadMessage_SubmitBlockRequest) fromAppMessage(message *appmessage.SubmitBlockRequestMessage) error {

x.SubmitBlockRequest = &SubmitBlockRequestMessage{}
x.SubmitBlockRequest = &SubmitBlockRequestMessage{Block: &BlockMessage{}}
return x.SubmitBlockRequest.Block.fromAppMessage(message.Block)
}

@ -12,9 +12,9 @@ func (c *RPCClient) GetConnectedPeerInfo() (*appmessage.GetConnectedPeerInfoResp
if err != nil {
return nil, err
}
getMempoolEntryResponse := response.(*appmessage.GetConnectedPeerInfoResponseMessage)
if getMempoolEntryResponse.Error != nil {
return nil, c.convertRPCError(getMempoolEntryResponse.Error)
getConnectedPeerInfoResponse := response.(*appmessage.GetConnectedPeerInfoResponseMessage)
if getConnectedPeerInfoResponse.Error != nil {
return nil, c.convertRPCError(getConnectedPeerInfoResponse.Error)
}
return getMempoolEntryResponse, nil
return getConnectedPeerInfoResponse, nil
}
61 testing/integration/64_incoming_connections_test.go Normal file
@ -0,0 +1,61 @@
package integration

import (
"fmt"
"sync"
"testing"
"time"

"github.com/kaspanet/kaspad/util/locks"

"github.com/kaspanet/kaspad/app/appmessage"
)

func Test64IncomingConnections(t *testing.T) {
// Much more than 64 hosts creates a risk of running out of available file descriptors for leveldb
const numBullies = 64
harnessesParams := make([]*harnessParams, numBullies+1)
for i := 0; i < numBullies+1; i++ {
harnessesParams[i] = &harnessParams{
p2pAddress: fmt.Sprintf("127.0.0.1:%d", 12345+i),
rpcAddress: fmt.Sprintf("127.0.0.1:%d", 22345+i),
miningAddress: miningAddress1,
miningAddressPrivateKey: miningAddress1PrivateKey,
}
}

appHarnesses, teardown := setupHarnesses(t, harnessesParams)
defer teardown()

victim, bullies := appHarnesses[0], appHarnesses[1:]

for _, bully := range bullies {
connect(t, victim, bully)
}

blockAddedWG := sync.WaitGroup{}
blockAddedWG.Add(numBullies)
for _, bully := range bullies {
blockAdded := false
onBlockAdded := func(_ *appmessage.BlockAddedNotificationMessage) {
if blockAdded {
t.Fatalf("Single bully reported block added twice")
}
blockAdded = true
blockAddedWG.Done()
}

err := bully.rpcClient.RegisterForBlockAddedNotifications(onBlockAdded)
if err != nil {
t.Fatalf("Error from RegisterForBlockAddedNotifications: %+v", err)
}
}

_ = mineNextBlock(t, victim)

select {
case <-time.After(defaultTimeout):
t.Fatalf("Timeout waiting for block added notification from the bullies")
case <-locks.ReceiveFromChanWhenDone(func() { blockAddedWG.Wait() }):
}
}
33 testing/integration/address_exchange_test.go Normal file
@ -0,0 +1,33 @@
package integration

import (
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"testing"
)

func TestAddressExchange(t *testing.T) {
appHarness1, appHarness2, appHarness3, teardown := standardSetup(t)
defer teardown()

testAddress := "1.2.3.4:6789"
err := addressmanager.AddAddressByIP(appHarness1.app.AddressManager(), testAddress, nil)
if err != nil {
t.Fatalf("Error adding address to addressManager: %+v", err)
}

connect(t, appHarness1, appHarness2)
connect(t, appHarness2, appHarness3)

peerAddresses, err := appHarness3.rpcClient.GetPeerAddresses()
if err != nil {
t.Fatalf("Error getting peer addresses: %+v", err)
}

for _, peerAddress := range peerAddresses.Addresses {
if peerAddress.Addr == testAddress {
return
}
}

t.Errorf("Didn't find testAddress in list of addresses of appHarness3")
}
55 testing/integration/basic_sync_test.go Normal file
@ -0,0 +1,55 @@
package integration

import (
"testing"
"time"

"github.com/kaspanet/kaspad/domain/consensus/utils/consensusserialization"

"github.com/kaspanet/kaspad/app/appmessage"
)

func TestIntegrationBasicSync(t *testing.T) {
appHarness1, appHarness2, appHarness3, teardown := standardSetup(t)
defer teardown()

// Connect nodes in chain: 1 <--> 2 <--> 3
// So that node 3 doesn't directly get blocks from node 1
connect(t, appHarness1, appHarness2)
connect(t, appHarness2, appHarness3)

app2OnBlockAddedChan := make(chan *appmessage.MsgBlockHeader)
setOnBlockAddedHandler(t, appHarness2, func(notification *appmessage.BlockAddedNotificationMessage) {
app2OnBlockAddedChan <- &notification.Block.Header
})

app3OnBlockAddedChan := make(chan *appmessage.MsgBlockHeader)
setOnBlockAddedHandler(t, appHarness3, func(notification *appmessage.BlockAddedNotificationMessage) {
app3OnBlockAddedChan <- &notification.Block.Header
})

block := mineNextBlock(t, appHarness1)

var header *appmessage.MsgBlockHeader
select {
case header = <-app2OnBlockAddedChan:
case <-time.After(defaultTimeout):
t.Fatalf("Timeout waiting for block added notification on node directly connected to miner")
}

blockHash := consensusserialization.BlockHash(block)
if *header.BlockHash() != *blockHash {
t.Errorf("Expected block with hash '%s', but got '%s'", blockHash, header.BlockHash())
}

select {
case header = <-app3OnBlockAddedChan:
case <-time.After(defaultTimeout):
t.Fatalf("Timeout waiting for block added notification on node indirectly connected to miner")
}

blockHash = consensusserialization.BlockHash(block)
if *header.BlockHash() != *blockHash {
t.Errorf("Expected block with hash '%s', but got '%s'", blockHash, header.BlockHash())
}
}
59 testing/integration/config_test.go Normal file
@ -0,0 +1,59 @@
package integration

import (
"io/ioutil"
"testing"
"time"

"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/infrastructure/config"
)

const (
p2pAddress1 = "127.0.0.1:54321"
p2pAddress2 = "127.0.0.1:54322"
p2pAddress3 = "127.0.0.1:54323"

rpcAddress1 = "127.0.0.1:12345"
rpcAddress2 = "127.0.0.1:12346"
rpcAddress3 = "127.0.0.1:12347"

miningAddress1 = "kaspasim:qzmdkk8ay8sgvp8cnwts8gtdylz9j7572slwdh85qv"
miningAddress1PrivateKey = "be9e9884f03e687166479e22d21b064db7903d69b5a46878aae66521c01a6094"

miningAddress2 = "kaspasim:qze20hwkc4lzq37jt0hrym5emlsxxs8j3qyf3y4ghs"
miningAddress2PrivateKey = "98bd8d8e1f7078abefd017839f83edd0e3c8226ed4989e4d7a8bceb5935de193"

miningAddress3 = "kaspasim:qretklduvhg5h2aj7jd8w4heq7pvtkpv9q6w4sqfen"
miningAddress3PrivateKey = "eb0af684f2cdbb4ed2d85fbfe0b7f40654a7777fb2c47f142ffb5543b594d1e4"

defaultTimeout = 10 * time.Second
)

func setConfig(t *testing.T, harness *appHarness) {
harness.config = commonConfig()
harness.config.DataDir = randomDirectory(t)
harness.config.Listeners = []string{harness.p2pAddress}
harness.config.RPCListeners = []string{harness.rpcAddress}
}

func commonConfig() *config.Config {
commonConfig := config.DefaultConfig()

*commonConfig.ActiveNetParams = dagconfig.SimnetParams // Copy so that we can make changes safely
commonConfig.ActiveNetParams.BlockCoinbaseMaturity = 10
commonConfig.TargetOutboundPeers = 0
commonConfig.DisableDNSSeed = true
commonConfig.Simnet = true

return commonConfig
}

func randomDirectory(t *testing.T) string {
dir, err := ioutil.TempDir("", "integration-test")
if err != nil {
t.Fatalf("Error creating temporary directory for test: %+v", err)
}

return dir
}
70 testing/integration/connect_test.go Normal file
@ -0,0 +1,70 @@
package integration

import (
"testing"
"time"
)

func connect(t *testing.T, incoming, outgoing *appHarness) {
err := outgoing.rpcClient.AddPeer(incoming.p2pAddress, false)
if err != nil {
t.Fatalf("Error connecting the nodes")
}

onConnectedChan := make(chan struct{})
abortConnectionChan := make(chan struct{})
defer close(abortConnectionChan)

spawn("integration.connect-Wait for connection", func() {
ticker := time.NewTicker(10 * time.Millisecond)
defer ticker.Stop()

for range ticker.C {
if isConnected(t, incoming, outgoing) {
close(onConnectedChan)
return
}

select {
case <-abortConnectionChan:
return
default:
}
}
})

select {
case <-onConnectedChan:
case <-time.After(defaultTimeout):
t.Fatalf("Timed out waiting for the apps to connect")
}
}
func isConnected(t *testing.T, appHarness1, appHarness2 *appHarness) bool {
connectedPeerInfo1, err := appHarness1.rpcClient.GetConnectedPeerInfo()
if err != nil {
t.Fatalf("Error getting connected peer info for app1: %+v", err)
}
connectedPeerInfo2, err := appHarness2.rpcClient.GetConnectedPeerInfo()
if err != nil {
t.Fatalf("Error getting connected peer info for app2: %+v", err)
}

var incomingConnected, outgoingConnected bool
app1ID, app2ID := appHarness1.app.P2PNodeID().String(), appHarness2.app.P2PNodeID().String()

for _, connectedPeer := range connectedPeerInfo1.Infos {
if connectedPeer.ID == app2ID {
incomingConnected = true
break
}
}

for _, connectedPeer := range connectedPeerInfo2.Infos {
if connectedPeer.ID == app1ID {
outgoingConnected = true
break
}
}

return incomingConnected && outgoingConnected
}
51 testing/integration/ibd_test.go Normal file
@ -0,0 +1,51 @@
package integration

import (
"sync"
"testing"
"time"

"github.com/kaspanet/kaspad/util/locks"

"github.com/kaspanet/kaspad/app/appmessage"
)

func TestIBD(t *testing.T) {
const numBlocks = 100

syncer, syncee, _, teardown := standardSetup(t)
defer teardown()

for i := 0; i < numBlocks; i++ {
mineNextBlock(t, syncer)
}

blockAddedWG := sync.WaitGroup{}
blockAddedWG.Add(numBlocks)
receivedBlocks := 0
setOnBlockAddedHandler(t, syncee, func(_ *appmessage.BlockAddedNotificationMessage) {
receivedBlocks++
blockAddedWG.Done()
})

connect(t, syncer, syncee)

select {
case <-time.After(defaultTimeout):
t.Fatalf("Timeout waiting for IBD to finish. Received %d blocks out of %d", receivedBlocks, numBlocks)
case <-locks.ReceiveFromChanWhenDone(func() { blockAddedWG.Wait() }):
}

tip1Hash, err := syncer.rpcClient.GetSelectedTipHash()
if err != nil {
t.Fatalf("Error getting tip for syncer")
}
tip2Hash, err := syncee.rpcClient.GetSelectedTipHash()
if err != nil {
t.Fatalf("Error getting tip for syncee")
}

if tip1Hash.SelectedTipHash != tip2Hash.SelectedTipHash {
t.Errorf("Tips of syncer: '%s' and syncee '%s' are not equal", tip1Hash.SelectedTipHash, tip2Hash.SelectedTipHash)
}
}
14 testing/integration/log_test.go Normal file
@ -0,0 +1,14 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Copyright (c) 2017 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package integration

import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)

var log, _ = logger.Get(logger.SubsystemTags.KASD)
var spawn = panics.GoroutineWrapperFunc(log)
14 testing/integration/main_test.go Normal file
@ -0,0 +1,14 @@
package integration

import (
"os"
"testing"

"github.com/kaspanet/kaspad/infrastructure/logger"
)

func TestMain(m *testing.M) {
logger.SetLogLevels("debug")

os.Exit(m.Run())
}
46 testing/integration/mining_test.go Normal file
@ -0,0 +1,46 @@
package integration

import (
"math/rand"
"testing"

"github.com/kaspanet/kaspad/app/appmessage"

"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensusserialization"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"

"github.com/kaspanet/kaspad/util"
)

func solveBlock(block *externalapi.DomainBlock) *externalapi.DomainBlock {
targetDifficulty := util.CompactToBig(block.Header.Bits)
initialNonce := rand.Uint64()
for i := initialNonce; i != initialNonce-1; i++ {
block.Header.Nonce = i
hash := consensusserialization.BlockHash(block)
if hashes.ToBig(hash).Cmp(targetDifficulty) <= 0 {
return block
}
}

panic("Failed to solve block! This should never happen")
}

func mineNextBlock(t *testing.T, harness *appHarness) *externalapi.DomainBlock {
blockTemplate, err := harness.rpcClient.GetBlockTemplate(harness.miningAddress)
if err != nil {
t.Fatalf("Error getting block template: %+v", err)
}

block := appmessage.MsgBlockToDomainBlock(blockTemplate.MsgBlock)

solveBlock(block)

err = harness.rpcClient.SubmitBlock(block)
if err != nil {
t.Fatalf("Error submitting block: %s", err)
}

return block
}
14 testing/integration/notifications_test.go Normal file
@ -0,0 +1,14 @@
package integration

import (
"testing"

"github.com/kaspanet/kaspad/app/appmessage"
)

func setOnBlockAddedHandler(t *testing.T, harness *appHarness, handler func(notification *appmessage.BlockAddedNotificationMessage)) {
err := harness.rpcClient.RegisterForBlockAddedNotifications(handler)
if err != nil {
t.Fatalf("Error from RegisterForBlockAddedNotifications: %s", err)
}
}
24 testing/integration/rpc_test.go Normal file
@ -0,0 +1,24 @@
package integration

import (
"github.com/kaspanet/kaspad/infrastructure/network/rpcclient"
"time"
)

const rpcTimeout = 1 * time.Second

type testRPCClient struct {
*rpcclient.RPCClient
}

func newTestRPCClient(rpcAddress string) (*testRPCClient, error) {
rpcClient, err := rpcclient.NewRPCClient(rpcAddress)
if err != nil {
return nil, err
}
rpcClient.SetTimeout(rpcTimeout)

return &testRPCClient{
RPCClient: rpcClient,
}, nil
}
131 testing/integration/setup_test.go Normal file
@ -0,0 +1,131 @@
package integration

import (
"path/filepath"
"testing"

"github.com/kaspanet/kaspad/infrastructure/db/database/ldb"

"github.com/kaspanet/kaspad/infrastructure/db/database"

"github.com/kaspanet/kaspad/app"
"github.com/kaspanet/kaspad/infrastructure/config"
)

type appHarness struct {
app *app.ComponentManager
rpcClient *testRPCClient
p2pAddress string
rpcAddress string
miningAddress string
miningAddressPrivateKey string
config *config.Config
database database.Database
}

type harnessParams struct {
p2pAddress string
rpcAddress string
miningAddress string
miningAddressPrivateKey string
}

// setupHarness creates a single appHarness with given parameters
func setupHarness(t *testing.T, params *harnessParams) (harness *appHarness, teardownFunc func()) {
harness = &appHarness{
p2pAddress: params.p2pAddress,
rpcAddress: params.rpcAddress,
miningAddress: params.miningAddress,
miningAddressPrivateKey: params.miningAddressPrivateKey,
}

setConfig(t, harness)
setDatabaseContext(t, harness)
setApp(t, harness)
harness.app.Start()
setRPCClient(t, harness)

return harness, func() {
teardownHarness(t, harness)
}
}

// setupHarnesses creates multiple appHarnesses, according to number of parameters passed
func setupHarnesses(t *testing.T, harnessesParams []*harnessParams) (harnesses []*appHarness, teardownFunc func()) {
var teardowns []func()
for _, params := range harnessesParams {
harness, teardownFunc := setupHarness(t, params)
harnesses = append(harnesses, harness)
teardowns = append(teardowns, teardownFunc)
}

return harnesses, func() {
for _, teardownFunc := range teardowns {
teardownFunc()
}
}
}

// standardSetup creates a standard setup of 3 appHarnesses that should work for most tests
func standardSetup(t *testing.T) (appHarness1, appHarness2, appHarness3 *appHarness, teardownFunc func()) {
harnesses, teardown := setupHarnesses(t, []*harnessParams{
{
p2pAddress: p2pAddress1,
rpcAddress: rpcAddress1,
miningAddress: miningAddress1,
miningAddressPrivateKey: miningAddress1PrivateKey,
},
{
p2pAddress: p2pAddress2,
rpcAddress: rpcAddress2,
miningAddress: miningAddress2,
miningAddressPrivateKey: miningAddress2PrivateKey,
}, {
p2pAddress: p2pAddress3,
rpcAddress: rpcAddress3,
miningAddress: miningAddress3,
miningAddressPrivateKey: miningAddress3PrivateKey,
},
})

return harnesses[0], harnesses[1], harnesses[2], teardown
}

func setRPCClient(t *testing.T, harness *appHarness) {
var err error
harness.rpcClient, err = newTestRPCClient(harness.rpcAddress)
if err != nil {
t.Fatalf("Error getting RPC client %+v", err)
}
}

func teardownHarness(t *testing.T, harness *appHarness) {
harness.rpcClient.Close()
harness.app.Stop()

err := harness.database.Close()
if err != nil {
t.Errorf("Error closing database context: %+v", err)
}
}

func setApp(t *testing.T, harness *appHarness) {
var err error
harness.app, err = app.NewComponentManager(harness.config, harness.database, make(chan struct{}))
if err != nil {
t.Fatalf("Error creating app: %+v", err)
}
}

func setDatabaseContext(t *testing.T, harness *appHarness) {
var err error
harness.database, err = openDB(harness.config)
if err != nil {
t.Fatalf("Error openning database: %+v", err)
}
}

func openDB(cfg *config.Config) (database.Database, error) {
dbPath := filepath.Join(cfg.DataDir, "db")
return ldb.NewLevelDB(dbPath)
}
125 testing/integration/tx_relay_test.go Normal file
@ -0,0 +1,125 @@
package integration

import (
"encoding/hex"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"strings"
"testing"
"time"

"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensusserialization"

"github.com/kaspanet/kaspad/domain/consensus/utils/transactionhelper"

"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
"github.com/kaspanet/kaspad/util"
)

func TestTxRelay(t *testing.T) {
payer, mediator, payee, teardown := standardSetup(t)
defer teardown()

// Connect nodes in chain: payer <--> mediator <--> payee
// So that payee doesn't directly get transactions from payer
connect(t, payer, mediator)
connect(t, mediator, payee)

payeeBlockAddedChan := make(chan *appmessage.MsgBlockHeader)
setOnBlockAddedHandler(t, payee, func(notification *appmessage.BlockAddedNotificationMessage) {
payeeBlockAddedChan <- &notification.Block.Header
})
// skip the first block because it's paying to genesis script
mineNextBlock(t, payer)
waitForPayeeToReceiveBlock(t, payeeBlockAddedChan)
// use the second block to get money to pay with
secondBlock := mineNextBlock(t, payer)
waitForPayeeToReceiveBlock(t, payeeBlockAddedChan)

// Mine BlockCoinbaseMaturity more blocks for our money to mature
for i := uint64(0); i < payer.config.ActiveNetParams.BlockCoinbaseMaturity; i++ {
mineNextBlock(t, payer)
waitForPayeeToReceiveBlock(t, payeeBlockAddedChan)
}

tx := generateTx(t, secondBlock.Transactions[transactionhelper.CoinbaseTransactionIndex], payer, payee)
response, err := payer.rpcClient.SubmitTransaction(tx)
if err != nil {
t.Fatalf("Error submitting transaction: %+v", err)
}
txID := response.TxID

txAddedToMempoolChan := make(chan struct{})

spawn("TestTxRelay-WaitForTransactionPropagation", func() {
ticker := time.NewTicker(10 * time.Millisecond)
defer ticker.Stop()

for range ticker.C {
_, err := payee.rpcClient.GetMempoolEntry(txID)
if err != nil {
if strings.Contains(err.Error(), "transaction is not in the pool") {
continue
}

t.Fatalf("Error getting mempool entry: %+v", err)
}
close(txAddedToMempoolChan)
return
}
})

select {
case <-txAddedToMempoolChan:
case <-time.After(defaultTimeout):
t.Fatalf("Timeout waiting for transaction to be accepted into mempool")
}
}

func waitForPayeeToReceiveBlock(t *testing.T, payeeBlockAddedChan chan *appmessage.MsgBlockHeader) {
select {
case <-payeeBlockAddedChan:
case <-time.After(defaultTimeout):
t.Fatalf("Timeout waiting for block added")
}
}

func generateTx(t *testing.T, firstBlockCoinbase *externalapi.DomainTransaction, payer, payee *appHarness) *appmessage.MsgTx {
txIns := make([]*appmessage.TxIn, 1)
txIns[0] = appmessage.NewTxIn(appmessage.NewOutpoint(consensusserialization.TransactionID(firstBlockCoinbase), 0), []byte{})

payeeAddress, err := util.DecodeAddress(payee.miningAddress, util.Bech32PrefixKaspaSim)
if err != nil {
t.Fatalf("Error decoding payeeAddress: %+v", err)
}
toScript, err := txscript.PayToAddrScript(payeeAddress)
if err != nil {
t.Fatalf("Error generating script: %+v", err)
}

txOuts := []*appmessage.TxOut{appmessage.NewTxOut(firstBlockCoinbase.Outputs[0].Value-1000, toScript)}

fromScript := firstBlockCoinbase.Outputs[0].ScriptPublicKey

tx := appmessage.NewNativeMsgTx(constants.TransactionVersion, txIns, txOuts)

privateKeyBytes, err := hex.DecodeString(payer.miningAddressPrivateKey)
if err != nil {
t.Fatalf("Error decoding private key: %+v", err)
}
privateKey, err := secp256k1.DeserializePrivateKeyFromSlice(privateKeyBytes)
if err != nil {
t.Fatalf("Error deserializing private key: %+v", err)
}

signatureScript, err := txscript.SignatureScript(appmessage.MsgTxToDomainTransaction(tx), 0,
fromScript, txscript.SigHashAll, privateKey, true)
if err != nil {
t.Fatalf("Error signing transaction: %+v", err)
}
tx.TxIn[0].SignatureScript = signatureScript

return tx
}