mirror of
https://github.com/kaspanet/kaspad.git
synced 2026-02-21 19:22:53 +00:00
Compare commits
17 Commits
v0.6.11-de
...
v0.7.1-dev
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
22237a4a8d | ||
|
|
6ab8ada9ff | ||
|
|
9a756939d8 | ||
|
|
aea3baf897 | ||
|
|
f8d0f7f67a | ||
|
|
fed34273a1 | ||
|
|
34a1b30006 | ||
|
|
798abf2103 | ||
|
|
75e539f4d2 | ||
|
|
946e65d1c6 | ||
|
|
b8e36eacfd | ||
|
|
cd49c1dac7 | ||
|
|
86411a5ca5 | ||
|
|
1186cad9ca | ||
|
|
e66de86a82 | ||
|
|
1e08bfca9c | ||
|
|
64f5b96295 |
52
app/app.go
52
app/app.go
@@ -21,7 +21,6 @@ import (
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/connmanager"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/dnsseed"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/panics"
|
||||
)
|
||||
|
||||
@@ -84,13 +83,13 @@ func (a *App) Stop() {
|
||||
// New returns a new App instance configured to listen on addr for the
|
||||
// kaspa network type specified by dagParams. Use start to begin accepting
|
||||
// connections from peers.
|
||||
func New(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, interrupt <-chan struct{}) (*App, error) {
|
||||
func New(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, interrupt chan<- struct{}) (*App, error) {
|
||||
indexManager, acceptanceIndex := setupIndexes(cfg)
|
||||
|
||||
sigCache := txscript.NewSigCache(cfg.SigCacheMaxSize)
|
||||
|
||||
// Create a new block DAG instance with the appropriate configuration.
|
||||
dag, err := setupDAG(cfg, databaseContext, interrupt, sigCache, indexManager)
|
||||
dag, err := setupDAG(cfg, databaseContext, sigCache, indexManager)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -113,7 +112,7 @@ func New(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, interrup
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rpcManager := setupRPC(cfg, txMempool, dag, sigCache, netAdapter, protocolManager, connectionManager, addressManager, acceptanceIndex)
|
||||
rpcManager := setupRPC(cfg, txMempool, dag, sigCache, netAdapter, protocolManager, connectionManager, addressManager, acceptanceIndex, interrupt)
|
||||
|
||||
return &App{
|
||||
cfg: cfg,
|
||||
@@ -123,6 +122,7 @@ func New(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, interrup
|
||||
netAdapter: netAdapter,
|
||||
addressManager: addressManager,
|
||||
}, nil
|
||||
|
||||
}
|
||||
|
||||
func setupRPC(
|
||||
@@ -134,10 +134,12 @@ func setupRPC(
|
||||
protocolManager *protocol.Manager,
|
||||
connectionManager *connmanager.ConnectionManager,
|
||||
addressManager *addressmanager.AddressManager,
|
||||
acceptanceIndex *indexers.AcceptanceIndex) *rpc.Manager {
|
||||
acceptanceIndex *indexers.AcceptanceIndex,
|
||||
shutDownChan chan<- struct{},
|
||||
) *rpc.Manager {
|
||||
|
||||
blockTemplateGenerator := mining.NewBlkTmplGenerator(&mining.Policy{BlockMaxMass: cfg.BlockMaxMass}, txMempool, dag, sigCache)
|
||||
rpcManager := rpc.NewManager(cfg, netAdapter, dag, protocolManager, connectionManager, blockTemplateGenerator, txMempool, addressManager, acceptanceIndex)
|
||||
rpcManager := rpc.NewManager(cfg, netAdapter, dag, protocolManager, connectionManager, blockTemplateGenerator, txMempool, addressManager, acceptanceIndex, shutDownChan)
|
||||
protocolManager.SetOnBlockAddedToDAGHandler(rpcManager.NotifyBlockAddedToDAG)
|
||||
protocolManager.SetOnTransactionAddedToMempoolHandler(rpcManager.NotifyTransactionAddedToMempool)
|
||||
dag.Subscribe(func(notification *blockdag.Notification) {
|
||||
@@ -152,13 +154,29 @@ func setupRPC(
|
||||
func handleBlockDAGNotifications(notification *blockdag.Notification,
|
||||
acceptanceIndex *indexers.AcceptanceIndex, rpcManager *rpc.Manager) error {
|
||||
|
||||
if notification.Type == blockdag.NTChainChanged && acceptanceIndex != nil {
|
||||
switch notification.Type {
|
||||
case blockdag.NTChainChanged:
|
||||
if acceptanceIndex == nil {
|
||||
return nil
|
||||
}
|
||||
chainChangedNotificationData := notification.Data.(*blockdag.ChainChangedNotificationData)
|
||||
err := rpcManager.NotifyChainChanged(chainChangedNotificationData.RemovedChainBlockHashes,
|
||||
chainChangedNotificationData.AddedChainBlockHashes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case blockdag.NTFinalityConflict:
|
||||
finalityConflictNotificationData := notification.Data.(*blockdag.FinalityConflictNotificationData)
|
||||
err := rpcManager.NotifyFinalityConflict(finalityConflictNotificationData.ViolatingBlockHash.String())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case blockdag.NTFinalityConflictResolved:
|
||||
finalityConflictResolvedNotificationData := notification.Data.(*blockdag.FinalityConflictResolvedNotificationData)
|
||||
err := rpcManager.NotifyFinalityConflictResolved(finalityConflictResolvedNotificationData.FinalityBlockHash.String())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -181,17 +199,17 @@ func (a *App) maybeSeedFromDNS() {
|
||||
})
|
||||
}
|
||||
}
|
||||
func setupDAG(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, interrupt <-chan struct{},
|
||||
func setupDAG(cfg *config.Config, databaseContext *dbaccess.DatabaseContext,
|
||||
sigCache *txscript.SigCache, indexManager blockdag.IndexManager) (*blockdag.BlockDAG, error) {
|
||||
|
||||
dag, err := blockdag.New(&blockdag.Config{
|
||||
Interrupt: interrupt,
|
||||
DatabaseContext: databaseContext,
|
||||
DAGParams: cfg.NetParams(),
|
||||
TimeSource: blockdag.NewTimeSource(),
|
||||
SigCache: sigCache,
|
||||
IndexManager: indexManager,
|
||||
SubnetworkID: cfg.SubnetworkID,
|
||||
DatabaseContext: databaseContext,
|
||||
DAGParams: cfg.NetParams(),
|
||||
TimeSource: blockdag.NewTimeSource(),
|
||||
SigCache: sigCache,
|
||||
IndexManager: indexManager,
|
||||
SubnetworkID: cfg.SubnetworkID,
|
||||
MaxUTXOCacheSize: cfg.MaxUTXOCacheSize,
|
||||
})
|
||||
return dag, err
|
||||
}
|
||||
@@ -223,9 +241,7 @@ func setupMempool(cfg *config.Config, dag *blockdag.BlockDAG, sigCache *txscript
|
||||
MinRelayTxFee: cfg.MinRelayTxFee,
|
||||
MaxTxVersion: 1,
|
||||
},
|
||||
CalcSequenceLockNoLock: func(tx *util.Tx, utxoSet blockdag.UTXOSet) (*blockdag.SequenceLock, error) {
|
||||
return dag.CalcSequenceLockNoLock(tx, utxoSet)
|
||||
},
|
||||
CalcTxSequenceLockFromReferencedUTXOEntries: dag.CalcTxSequenceLockFromReferencedUTXOEntries,
|
||||
SigCache: sigCache,
|
||||
DAG: dag,
|
||||
}
|
||||
|
||||
@@ -375,30 +375,31 @@ func ReadVarInt(r io.Reader) (uint64, error) {
|
||||
// on its value.
|
||||
func WriteVarInt(w io.Writer, val uint64) error {
|
||||
if val < 0xfd {
|
||||
return binaryserializer.PutUint8(w, uint8(val))
|
||||
_, err := w.Write([]byte{uint8(val)})
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
if val <= math.MaxUint16 {
|
||||
err := binaryserializer.PutUint8(w, 0xfd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return binaryserializer.PutUint16(w, littleEndian, uint16(val))
|
||||
var buf [3]byte
|
||||
buf[0] = 0xfd
|
||||
littleEndian.PutUint16(buf[1:], uint16(val))
|
||||
_, err := w.Write(buf[:])
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
if val <= math.MaxUint32 {
|
||||
err := binaryserializer.PutUint8(w, 0xfe)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return binaryserializer.PutUint32(w, littleEndian, uint32(val))
|
||||
var buf [5]byte
|
||||
buf[0] = 0xfe
|
||||
littleEndian.PutUint32(buf[1:], uint32(val))
|
||||
_, err := w.Write(buf[:])
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
err := binaryserializer.PutUint8(w, 0xff)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return binaryserializer.PutUint64(w, littleEndian, val)
|
||||
var buf [9]byte
|
||||
buf[0] = 0xff
|
||||
littleEndian.PutUint64(buf[1:], val)
|
||||
_, err := w.Write(buf[:])
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
// VarIntSerializeSize returns the number of bytes it would take to serialize
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
|
||||
// MaxMessagePayload is the maximum bytes a message can be regardless of other
|
||||
// individual limits imposed by messages themselves.
|
||||
const MaxMessagePayload = (1024 * 1024 * 32) // 32MB
|
||||
const MaxMessagePayload = 1024 * 1024 * 32 // 32MB
|
||||
|
||||
// MessageCommand is a number in the header of a message that represents its type.
|
||||
type MessageCommand uint32
|
||||
@@ -91,6 +91,16 @@ const (
|
||||
CmdGetBlockCountResponseMessage
|
||||
CmdGetBlockDAGInfoRequestMessage
|
||||
CmdGetBlockDAGInfoResponseMessage
|
||||
CmdResolveFinalityConflictRequestMessage
|
||||
CmdResolveFinalityConflictResponseMessage
|
||||
CmdNotifyFinalityConflictsRequestMessage
|
||||
CmdNotifyFinalityConflictsResponseMessage
|
||||
CmdFinalityConflictNotificationMessage
|
||||
CmdFinalityConflictResolvedNotificationMessage
|
||||
CmdGetMempoolEntriesRequestMessage
|
||||
CmdGetMempoolEntriesResponseMessage
|
||||
CmdShutDownRequestMessage
|
||||
CmdShutDownResponseMessage
|
||||
)
|
||||
|
||||
// ProtocolMessageCommandToString maps all MessageCommands to their string representation
|
||||
@@ -121,43 +131,51 @@ var ProtocolMessageCommandToString = map[MessageCommand]string{
|
||||
|
||||
// RPCMessageCommandToString maps all MessageCommands to their string representation
|
||||
var RPCMessageCommandToString = map[MessageCommand]string{
|
||||
CmdGetCurrentNetworkRequestMessage: "GetCurrentNetworkRequest",
|
||||
CmdGetCurrentNetworkResponseMessage: "GetCurrentNetworkResponse",
|
||||
CmdSubmitBlockRequestMessage: "SubmitBlockRequest",
|
||||
CmdSubmitBlockResponseMessage: "SubmitBlockResponse",
|
||||
CmdGetBlockTemplateRequestMessage: "GetBlockTemplateRequest",
|
||||
CmdGetBlockTemplateResponseMessage: "GetBlockTemplateResponse",
|
||||
CmdGetBlockTemplateTransactionMessage: "CmdGetBlockTemplateTransaction",
|
||||
CmdNotifyBlockAddedRequestMessage: "NotifyBlockAddedRequest",
|
||||
CmdNotifyBlockAddedResponseMessage: "NotifyBlockAddedResponse",
|
||||
CmdBlockAddedNotificationMessage: "BlockAddedNotification",
|
||||
CmdGetPeerAddressesRequestMessage: "GetPeerAddressesRequest",
|
||||
CmdGetPeerAddressesResponseMessage: "GetPeerAddressesResponse",
|
||||
CmdGetSelectedTipHashRequestMessage: "GetSelectedTipHashRequest",
|
||||
CmdGetSelectedTipHashResponseMessage: "GetSelectedTipHashResponse",
|
||||
CmdGetMempoolEntryRequestMessage: "GetMempoolEntryRequest",
|
||||
CmdGetMempoolEntryResponseMessage: "GetMempoolEntryResponse",
|
||||
CmdGetConnectedPeerInfoRequestMessage: "GetConnectedPeerInfoRequest",
|
||||
CmdGetConnectedPeerInfoResponseMessage: "GetConnectedPeerInfoResponse",
|
||||
CmdAddPeerRequestMessage: "AddPeerRequest",
|
||||
CmdAddPeerResponseMessage: "AddPeerResponse",
|
||||
CmdSubmitTransactionRequestMessage: "SubmitTransactionRequest",
|
||||
CmdSubmitTransactionResponseMessage: "SubmitTransactionResponse",
|
||||
CmdNotifyChainChangedRequestMessage: "NotifyChainChangedRequest",
|
||||
CmdNotifyChainChangedResponseMessage: "NotifyChainChangedResponse",
|
||||
CmdChainChangedNotificationMessage: "ChainChangedNotification",
|
||||
CmdGetBlockRequestMessage: "GetBlockRequest",
|
||||
CmdGetBlockResponseMessage: "GetBlockResponse",
|
||||
CmdGetSubnetworkRequestMessage: "GetSubnetworkRequest",
|
||||
CmdGetSubnetworkResponseMessage: "GetSubnetworkResponse",
|
||||
CmdGetChainFromBlockRequestMessage: "GetChainFromBlockRequest",
|
||||
CmdGetChainFromBlockResponseMessage: "GetChainFromBlockResponse",
|
||||
CmdGetBlocksRequestMessage: "GetBlocksRequest",
|
||||
CmdGetBlocksResponseMessage: "GetBlocksResponse",
|
||||
CmdGetBlockCountRequestMessage: "GetBlockCountRequest",
|
||||
CmdGetBlockCountResponseMessage: "GetBlockCountResponse",
|
||||
CmdGetBlockDAGInfoRequestMessage: "GetBlockDAGInfoRequest",
|
||||
CmdGetBlockDAGInfoResponseMessage: "GetBlockDAGInfoResponse",
|
||||
CmdGetCurrentNetworkRequestMessage: "GetCurrentNetworkRequest",
|
||||
CmdGetCurrentNetworkResponseMessage: "GetCurrentNetworkResponse",
|
||||
CmdSubmitBlockRequestMessage: "SubmitBlockRequest",
|
||||
CmdSubmitBlockResponseMessage: "SubmitBlockResponse",
|
||||
CmdGetBlockTemplateRequestMessage: "GetBlockTemplateRequest",
|
||||
CmdGetBlockTemplateResponseMessage: "GetBlockTemplateResponse",
|
||||
CmdGetBlockTemplateTransactionMessage: "CmdGetBlockTemplateTransaction",
|
||||
CmdNotifyBlockAddedRequestMessage: "NotifyBlockAddedRequest",
|
||||
CmdNotifyBlockAddedResponseMessage: "NotifyBlockAddedResponse",
|
||||
CmdBlockAddedNotificationMessage: "BlockAddedNotification",
|
||||
CmdGetPeerAddressesRequestMessage: "GetPeerAddressesRequest",
|
||||
CmdGetPeerAddressesResponseMessage: "GetPeerAddressesResponse",
|
||||
CmdGetSelectedTipHashRequestMessage: "GetSelectedTipHashRequest",
|
||||
CmdGetSelectedTipHashResponseMessage: "GetSelectedTipHashResponse",
|
||||
CmdGetMempoolEntryRequestMessage: "GetMempoolEntryRequest",
|
||||
CmdGetMempoolEntryResponseMessage: "GetMempoolEntryResponse",
|
||||
CmdGetConnectedPeerInfoRequestMessage: "GetConnectedPeerInfoRequest",
|
||||
CmdGetConnectedPeerInfoResponseMessage: "GetConnectedPeerInfoResponse",
|
||||
CmdAddPeerRequestMessage: "AddPeerRequest",
|
||||
CmdAddPeerResponseMessage: "AddPeerResponse",
|
||||
CmdSubmitTransactionRequestMessage: "SubmitTransactionRequest",
|
||||
CmdSubmitTransactionResponseMessage: "SubmitTransactionResponse",
|
||||
CmdNotifyChainChangedRequestMessage: "NotifyChainChangedRequest",
|
||||
CmdNotifyChainChangedResponseMessage: "NotifyChainChangedResponse",
|
||||
CmdChainChangedNotificationMessage: "ChainChangedNotification",
|
||||
CmdGetBlockRequestMessage: "GetBlockRequest",
|
||||
CmdGetBlockResponseMessage: "GetBlockResponse",
|
||||
CmdGetSubnetworkRequestMessage: "GetSubnetworkRequest",
|
||||
CmdGetSubnetworkResponseMessage: "GetSubnetworkResponse",
|
||||
CmdGetChainFromBlockRequestMessage: "GetChainFromBlockRequest",
|
||||
CmdGetChainFromBlockResponseMessage: "GetChainFromBlockResponse",
|
||||
CmdGetBlocksRequestMessage: "GetBlocksRequest",
|
||||
CmdGetBlocksResponseMessage: "GetBlocksResponse",
|
||||
CmdGetBlockCountRequestMessage: "GetBlockCountRequest",
|
||||
CmdGetBlockCountResponseMessage: "GetBlockCountResponse",
|
||||
CmdGetBlockDAGInfoRequestMessage: "GetBlockDAGInfoRequest",
|
||||
CmdGetBlockDAGInfoResponseMessage: "GetBlockDAGInfoResponse",
|
||||
CmdResolveFinalityConflictRequestMessage: "ResolveFinalityConflictRequest",
|
||||
CmdResolveFinalityConflictResponseMessage: "ResolveFinalityConflictResponse",
|
||||
CmdNotifyFinalityConflictsRequestMessage: "NotifyFinalityConflictsRequest",
|
||||
CmdNotifyFinalityConflictsResponseMessage: "NotifyFinalityConflictsResponse",
|
||||
CmdFinalityConflictNotificationMessage: "FinalityConflictNotification",
|
||||
CmdFinalityConflictResolvedNotificationMessage: "FinalityConflictResolvedNotification",
|
||||
CmdGetMempoolEntriesRequestMessage: "GetMempoolEntriesRequestMessage",
|
||||
CmdGetMempoolEntriesResponseMessage: "GetMempoolEntriesResponseMessage",
|
||||
}
|
||||
|
||||
// Message is an interface that describes a kaspa message. A type that
|
||||
|
||||
@@ -6,10 +6,11 @@ package appmessage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
"github.com/pkg/errors"
|
||||
"io"
|
||||
"math"
|
||||
)
|
||||
|
||||
// BaseBlockHeaderPayload is the base number of bytes a block header can be,
|
||||
@@ -60,7 +61,11 @@ type BlockHeader struct {
|
||||
|
||||
// NumParentBlocks return the number of entries in ParentHashes
|
||||
func (h *BlockHeader) NumParentBlocks() byte {
|
||||
return byte(len(h.ParentHashes))
|
||||
numParents := len(h.ParentHashes)
|
||||
if numParents > math.MaxUint8 {
|
||||
panic(errors.Errorf("number of parents is %d, which is more than one byte can fit", numParents))
|
||||
}
|
||||
return byte(numParents)
|
||||
}
|
||||
|
||||
// BlockHash computes the block identifier hash for the given block header.
|
||||
|
||||
@@ -21,15 +21,18 @@ import (
|
||||
// backing array multiple times.
|
||||
const defaultTransactionAlloc = 2048
|
||||
|
||||
// MaxMassPerBlock is the maximum total transaction mass a block may have.
|
||||
const MaxMassPerBlock = 10000000
|
||||
// MaxMassAcceptedByBlock is the maximum total transaction mass a block may accept.
|
||||
const MaxMassAcceptedByBlock = 10000000
|
||||
|
||||
// MaxMassPerTx is the maximum total mass a transaction may have.
|
||||
const MaxMassPerTx = MaxMassPerBlock / 2
|
||||
const MaxMassPerTx = MaxMassAcceptedByBlock / 2
|
||||
|
||||
// MaxTxPerBlock is the maximum number of transactions that could
|
||||
// possibly fit into a block.
|
||||
const MaxTxPerBlock = (MaxMassPerBlock / minTxPayload) + 1
|
||||
const MaxTxPerBlock = (MaxMassAcceptedByBlock / minTxPayload) + 1
|
||||
|
||||
// MaxBlockParents is the maximum allowed number of parents for block.
|
||||
const MaxBlockParents = 10
|
||||
|
||||
// TxLoc holds locator data for the offset and length of where a transaction is
|
||||
// located within a MsgBlock data buffer.
|
||||
|
||||
@@ -20,11 +20,12 @@ func NewGetBlockDAGInfoRequestMessage() *GetBlockDAGInfoRequestMessage {
|
||||
// its respective RPC message
|
||||
type GetBlockDAGInfoResponseMessage struct {
|
||||
baseMessage
|
||||
NetworkName string
|
||||
BlockCount uint64
|
||||
TipHashes []string
|
||||
Difficulty float64
|
||||
PastMedianTime int64
|
||||
NetworkName string
|
||||
BlockCount uint64
|
||||
TipHashes []string
|
||||
VirtualParentHashes []string
|
||||
Difficulty float64
|
||||
PastMedianTime int64
|
||||
|
||||
Error *RPCError
|
||||
}
|
||||
|
||||
38
app/appmessage/rpc_get_mempool_entries.go
Normal file
38
app/appmessage/rpc_get_mempool_entries.go
Normal file
@@ -0,0 +1,38 @@
|
||||
package appmessage
|
||||
|
||||
// GetMempoolEntriesRequestMessage is an appmessage corresponding to
|
||||
// its respective RPC message
|
||||
type GetMempoolEntriesRequestMessage struct {
|
||||
baseMessage
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
func (msg *GetMempoolEntriesRequestMessage) Command() MessageCommand {
|
||||
return CmdGetMempoolEntriesRequestMessage
|
||||
}
|
||||
|
||||
// NewGetMempoolEntriesRequestMessage returns a instance of the message
|
||||
func NewGetMempoolEntriesRequestMessage() *GetMempoolEntriesRequestMessage {
|
||||
return &GetMempoolEntriesRequestMessage{}
|
||||
}
|
||||
|
||||
// GetMempoolEntriesResponseMessage is an appmessage corresponding to
|
||||
// its respective RPC message
|
||||
type GetMempoolEntriesResponseMessage struct {
|
||||
baseMessage
|
||||
Entries []*MempoolEntry
|
||||
|
||||
Error *RPCError
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
func (msg *GetMempoolEntriesResponseMessage) Command() MessageCommand {
|
||||
return CmdGetMempoolEntriesResponseMessage
|
||||
}
|
||||
|
||||
// NewGetMempoolEntriesResponseMessage returns a instance of the message
|
||||
func NewGetMempoolEntriesResponseMessage(entries []*MempoolEntry) *GetMempoolEntriesResponseMessage {
|
||||
return &GetMempoolEntriesResponseMessage{
|
||||
Entries: entries,
|
||||
}
|
||||
}
|
||||
@@ -21,15 +21,28 @@ func NewGetMempoolEntryRequestMessage(txID string) *GetMempoolEntryRequestMessag
|
||||
// its respective RPC message
|
||||
type GetMempoolEntryResponseMessage struct {
|
||||
baseMessage
|
||||
Entry *MempoolEntry
|
||||
|
||||
Error *RPCError
|
||||
}
|
||||
|
||||
// MempoolEntry represents a transaction in the mempool.
|
||||
type MempoolEntry struct {
|
||||
Fee uint64
|
||||
TransactionVerboseData *TransactionVerboseData
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
func (msg *GetMempoolEntryResponseMessage) Command() MessageCommand {
|
||||
return CmdGetMempoolEntryResponseMessage
|
||||
}
|
||||
|
||||
// NewGetMempoolEntryResponseMessage returns a instance of the message
|
||||
func NewGetMempoolEntryResponseMessage() *GetMempoolEntryResponseMessage {
|
||||
return &GetMempoolEntryResponseMessage{}
|
||||
func NewGetMempoolEntryResponseMessage(fee uint64, transactionVerboseData *TransactionVerboseData) *GetMempoolEntryResponseMessage {
|
||||
return &GetMempoolEntryResponseMessage{
|
||||
Entry: &MempoolEntry{
|
||||
Fee: fee,
|
||||
TransactionVerboseData: transactionVerboseData,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
72
app/appmessage/rpc_notify_finality_conflicts.go
Normal file
72
app/appmessage/rpc_notify_finality_conflicts.go
Normal file
@@ -0,0 +1,72 @@
|
||||
package appmessage
|
||||
|
||||
// NotifyFinalityConflictsRequestMessage is an appmessage corresponding to
|
||||
// its respective RPC message
|
||||
type NotifyFinalityConflictsRequestMessage struct {
|
||||
baseMessage
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
func (msg *NotifyFinalityConflictsRequestMessage) Command() MessageCommand {
|
||||
return CmdNotifyFinalityConflictsRequestMessage
|
||||
}
|
||||
|
||||
// NewNotifyFinalityConflictsRequestMessage returns a instance of the message
|
||||
func NewNotifyFinalityConflictsRequestMessage() *NotifyFinalityConflictsRequestMessage {
|
||||
return &NotifyFinalityConflictsRequestMessage{}
|
||||
}
|
||||
|
||||
// NotifyFinalityConflictsResponseMessage is an appmessage corresponding to
|
||||
// its respective RPC message
|
||||
type NotifyFinalityConflictsResponseMessage struct {
|
||||
baseMessage
|
||||
Error *RPCError
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
func (msg *NotifyFinalityConflictsResponseMessage) Command() MessageCommand {
|
||||
return CmdNotifyFinalityConflictsResponseMessage
|
||||
}
|
||||
|
||||
// NewNotifyFinalityConflictsResponseMessage returns a instance of the message
|
||||
func NewNotifyFinalityConflictsResponseMessage() *NotifyFinalityConflictsResponseMessage {
|
||||
return &NotifyFinalityConflictsResponseMessage{}
|
||||
}
|
||||
|
||||
// FinalityConflictNotificationMessage is an appmessage corresponding to
|
||||
// its respective RPC message
|
||||
type FinalityConflictNotificationMessage struct {
|
||||
baseMessage
|
||||
ViolatingBlockHash string
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
func (msg *FinalityConflictNotificationMessage) Command() MessageCommand {
|
||||
return CmdFinalityConflictNotificationMessage
|
||||
}
|
||||
|
||||
// NewFinalityConflictNotificationMessage returns a instance of the message
|
||||
func NewFinalityConflictNotificationMessage(violatingBlockHash string) *FinalityConflictNotificationMessage {
|
||||
return &FinalityConflictNotificationMessage{
|
||||
ViolatingBlockHash: violatingBlockHash,
|
||||
}
|
||||
}
|
||||
|
||||
// FinalityConflictResolvedNotificationMessage is an appmessage corresponding to
|
||||
// its respective RPC message
|
||||
type FinalityConflictResolvedNotificationMessage struct {
|
||||
baseMessage
|
||||
FinalityBlockHash string
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
func (msg *FinalityConflictResolvedNotificationMessage) Command() MessageCommand {
|
||||
return CmdFinalityConflictResolvedNotificationMessage
|
||||
}
|
||||
|
||||
// NewFinalityConflictResolvedNotificationMessage returns a instance of the message
|
||||
func NewFinalityConflictResolvedNotificationMessage(finalityBlockHash string) *FinalityConflictResolvedNotificationMessage {
|
||||
return &FinalityConflictResolvedNotificationMessage{
|
||||
FinalityBlockHash: finalityBlockHash,
|
||||
}
|
||||
}
|
||||
37
app/appmessage/rpc_resolve_finality_conflict.go
Normal file
37
app/appmessage/rpc_resolve_finality_conflict.go
Normal file
@@ -0,0 +1,37 @@
|
||||
package appmessage
|
||||
|
||||
// ResolveFinalityConflictRequestMessage is an appmessage corresponding to
|
||||
// its respective RPC message
|
||||
type ResolveFinalityConflictRequestMessage struct {
|
||||
baseMessage
|
||||
FinalityBlockHash string
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
func (msg *ResolveFinalityConflictRequestMessage) Command() MessageCommand {
|
||||
return CmdResolveFinalityConflictRequestMessage
|
||||
}
|
||||
|
||||
// NewResolveFinalityConflictRequestMessage returns a instance of the message
|
||||
func NewResolveFinalityConflictRequestMessage(finalityBlockHash string) *ResolveFinalityConflictRequestMessage {
|
||||
return &ResolveFinalityConflictRequestMessage{
|
||||
FinalityBlockHash: finalityBlockHash,
|
||||
}
|
||||
}
|
||||
|
||||
// ResolveFinalityConflictResponseMessage is an appmessage corresponding to
|
||||
// its respective RPC message
|
||||
type ResolveFinalityConflictResponseMessage struct {
|
||||
baseMessage
|
||||
Error *RPCError
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
func (msg *ResolveFinalityConflictResponseMessage) Command() MessageCommand {
|
||||
return CmdResolveFinalityConflictResponseMessage
|
||||
}
|
||||
|
||||
// NewResolveFinalityConflictResponseMessage returns a instance of the message
|
||||
func NewResolveFinalityConflictResponseMessage() *ResolveFinalityConflictResponseMessage {
|
||||
return &ResolveFinalityConflictResponseMessage{}
|
||||
}
|
||||
34
app/appmessage/rpc_shut_down.go
Normal file
34
app/appmessage/rpc_shut_down.go
Normal file
@@ -0,0 +1,34 @@
|
||||
package appmessage
|
||||
|
||||
// ShutDownRequestMessage is an appmessage corresponding to
|
||||
// its respective RPC message
|
||||
type ShutDownRequestMessage struct {
|
||||
baseMessage
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
func (msg *ShutDownRequestMessage) Command() MessageCommand {
|
||||
return CmdShutDownRequestMessage
|
||||
}
|
||||
|
||||
// NewShutDownRequestMessage returns a instance of the message
|
||||
func NewShutDownRequestMessage() *ShutDownRequestMessage {
|
||||
return &ShutDownRequestMessage{}
|
||||
}
|
||||
|
||||
// ShutDownResponseMessage is an appmessage corresponding to
|
||||
// its respective RPC message
|
||||
type ShutDownResponseMessage struct {
|
||||
baseMessage
|
||||
Error *RPCError
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
func (msg *ShutDownResponseMessage) Command() MessageCommand {
|
||||
return CmdShutDownResponseMessage
|
||||
}
|
||||
|
||||
// NewShutDownResponseMessage returns a instance of the message
|
||||
func NewShutDownResponseMessage() *ShutDownResponseMessage {
|
||||
return &ShutDownResponseMessage{}
|
||||
}
|
||||
@@ -19,7 +19,10 @@ func (f *FlowContext) OnNewBlock(block *util.Block) error {
|
||||
return err
|
||||
}
|
||||
if f.onBlockAddedToDAGHandler != nil {
|
||||
f.onBlockAddedToDAGHandler(block)
|
||||
err := f.onBlockAddedToDAGHandler(block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return f.broadcastTransactionsAfterBlockAdded(block, transactionsAcceptedToMempool)
|
||||
|
||||
@@ -20,7 +20,7 @@ import (
|
||||
|
||||
// OnBlockAddedToDAGHandler is a handler function that's triggered
|
||||
// when a block is added to the DAG
|
||||
type OnBlockAddedToDAGHandler func(block *util.Block)
|
||||
type OnBlockAddedToDAGHandler func(block *util.Block) error
|
||||
|
||||
// OnTransactionAddedToMempoolHandler is a handler function that's triggered
|
||||
// when a transaction is added to the mempool
|
||||
|
||||
@@ -61,11 +61,6 @@ func (flow *handleIBDFlow) runIBD() error {
|
||||
}
|
||||
|
||||
log.Debugf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)
|
||||
if flow.DAG().IsKnownFinalizedBlock(highestSharedBlockHash) {
|
||||
return protocolerrors.Errorf(false, "cannot initiate "+
|
||||
"IBD with peer %s because the highest shared chain block (%s) is "+
|
||||
"below the finality point", flow.peer, highestSharedBlockHash)
|
||||
}
|
||||
|
||||
return flow.downloadBlocks(highestSharedBlockHash, peerSelectedTipHash)
|
||||
}
|
||||
|
||||
@@ -31,7 +31,8 @@ func NewManager(
|
||||
blockTemplateGenerator *mining.BlkTmplGenerator,
|
||||
mempool *mempool.TxPool,
|
||||
addressManager *addressmanager.AddressManager,
|
||||
acceptanceIndex *indexers.AcceptanceIndex) *Manager {
|
||||
acceptanceIndex *indexers.AcceptanceIndex,
|
||||
shutDownChan chan<- struct{}) *Manager {
|
||||
|
||||
manager := Manager{
|
||||
context: rpccontext.NewContext(
|
||||
@@ -44,6 +45,7 @@ func NewManager(
|
||||
mempool,
|
||||
addressManager,
|
||||
acceptanceIndex,
|
||||
shutDownChan,
|
||||
),
|
||||
}
|
||||
netAdapter.SetRPCRouterInitializer(manager.routerInitializer)
|
||||
@@ -52,11 +54,11 @@ func NewManager(
|
||||
}
|
||||
|
||||
// NotifyBlockAddedToDAG notifies the manager that a block has been added to the DAG
|
||||
func (m *Manager) NotifyBlockAddedToDAG(block *util.Block) {
|
||||
func (m *Manager) NotifyBlockAddedToDAG(block *util.Block) error {
|
||||
m.context.BlockTemplateState.NotifyBlockAdded(block)
|
||||
|
||||
notification := appmessage.NewBlockAddedNotificationMessage(block.MsgBlock())
|
||||
m.context.NotificationManager.NotifyBlockAdded(notification)
|
||||
return m.context.NotificationManager.NotifyBlockAdded(notification)
|
||||
}
|
||||
|
||||
// NotifyChainChanged notifies the manager that the DAG's selected parent chain has changed
|
||||
@@ -70,8 +72,19 @@ func (m *Manager) NotifyChainChanged(removedChainBlockHashes []*daghash.Hash, ad
|
||||
removedChainBlockHashStrings[i] = removedChainBlockHash.String()
|
||||
}
|
||||
notification := appmessage.NewChainChangedNotificationMessage(removedChainBlockHashStrings, addedChainBlocks)
|
||||
m.context.NotificationManager.NotifyChainChanged(notification)
|
||||
return nil
|
||||
return m.context.NotificationManager.NotifyChainChanged(notification)
|
||||
}
|
||||
|
||||
// NotifyFinalityConflict notifies the manager that there's a finality conflict in the DAG
|
||||
func (m *Manager) NotifyFinalityConflict(violatingBlockHash string) error {
|
||||
notification := appmessage.NewFinalityConflictNotificationMessage(violatingBlockHash)
|
||||
return m.context.NotificationManager.NotifyFinalityConflict(notification)
|
||||
}
|
||||
|
||||
// NotifyFinalityConflictResolved notifies the manager that a finality conflict in the DAG has been resolved
|
||||
func (m *Manager) NotifyFinalityConflictResolved(finalityBlockHash string) error {
|
||||
notification := appmessage.NewFinalityConflictResolvedNotificationMessage(finalityBlockHash)
|
||||
return m.context.NotificationManager.NotifyFinalityConflictResolved(notification)
|
||||
}
|
||||
|
||||
// NotifyTransactionAddedToMempool notifies the manager that a transaction has been added to the mempool
|
||||
|
||||
@@ -12,23 +12,27 @@ import (
|
||||
type handler func(context *rpccontext.Context, router *router.Router, request appmessage.Message) (appmessage.Message, error)
|
||||
|
||||
var handlers = map[appmessage.MessageCommand]handler{
|
||||
appmessage.CmdGetCurrentNetworkRequestMessage: rpchandlers.HandleGetCurrentNetwork,
|
||||
appmessage.CmdSubmitBlockRequestMessage: rpchandlers.HandleSubmitBlock,
|
||||
appmessage.CmdGetBlockTemplateRequestMessage: rpchandlers.HandleGetBlockTemplate,
|
||||
appmessage.CmdNotifyBlockAddedRequestMessage: rpchandlers.HandleNotifyBlockAdded,
|
||||
appmessage.CmdGetPeerAddressesRequestMessage: rpchandlers.HandleGetPeerAddresses,
|
||||
appmessage.CmdGetSelectedTipHashRequestMessage: rpchandlers.HandleGetSelectedTipHash,
|
||||
appmessage.CmdGetMempoolEntryRequestMessage: rpchandlers.HandleGetMempoolEntry,
|
||||
appmessage.CmdGetConnectedPeerInfoRequestMessage: rpchandlers.HandleGetConnectedPeerInfo,
|
||||
appmessage.CmdAddPeerRequestMessage: rpchandlers.HandleAddPeer,
|
||||
appmessage.CmdSubmitTransactionRequestMessage: rpchandlers.HandleSubmitTransaction,
|
||||
appmessage.CmdNotifyChainChangedRequestMessage: rpchandlers.HandleNotifyChainChanged,
|
||||
appmessage.CmdGetBlockRequestMessage: rpchandlers.HandleGetBlock,
|
||||
appmessage.CmdGetSubnetworkRequestMessage: rpchandlers.HandleGetSubnetwork,
|
||||
appmessage.CmdGetChainFromBlockRequestMessage: rpchandlers.HandleGetChainFromBlock,
|
||||
appmessage.CmdGetBlocksRequestMessage: rpchandlers.HandleGetBlocks,
|
||||
appmessage.CmdGetBlockCountRequestMessage: rpchandlers.HandleGetBlockCount,
|
||||
appmessage.CmdGetBlockDAGInfoRequestMessage: rpchandlers.HandleGetBlockDAGInfo,
|
||||
appmessage.CmdGetCurrentNetworkRequestMessage: rpchandlers.HandleGetCurrentNetwork,
|
||||
appmessage.CmdSubmitBlockRequestMessage: rpchandlers.HandleSubmitBlock,
|
||||
appmessage.CmdGetBlockTemplateRequestMessage: rpchandlers.HandleGetBlockTemplate,
|
||||
appmessage.CmdNotifyBlockAddedRequestMessage: rpchandlers.HandleNotifyBlockAdded,
|
||||
appmessage.CmdGetPeerAddressesRequestMessage: rpchandlers.HandleGetPeerAddresses,
|
||||
appmessage.CmdGetSelectedTipHashRequestMessage: rpchandlers.HandleGetSelectedTipHash,
|
||||
appmessage.CmdGetMempoolEntryRequestMessage: rpchandlers.HandleGetMempoolEntry,
|
||||
appmessage.CmdGetConnectedPeerInfoRequestMessage: rpchandlers.HandleGetConnectedPeerInfo,
|
||||
appmessage.CmdAddPeerRequestMessage: rpchandlers.HandleAddPeer,
|
||||
appmessage.CmdSubmitTransactionRequestMessage: rpchandlers.HandleSubmitTransaction,
|
||||
appmessage.CmdNotifyChainChangedRequestMessage: rpchandlers.HandleNotifyChainChanged,
|
||||
appmessage.CmdGetBlockRequestMessage: rpchandlers.HandleGetBlock,
|
||||
appmessage.CmdGetSubnetworkRequestMessage: rpchandlers.HandleGetSubnetwork,
|
||||
appmessage.CmdGetChainFromBlockRequestMessage: rpchandlers.HandleGetChainFromBlock,
|
||||
appmessage.CmdGetBlocksRequestMessage: rpchandlers.HandleGetBlocks,
|
||||
appmessage.CmdGetBlockCountRequestMessage: rpchandlers.HandleGetBlockCount,
|
||||
appmessage.CmdGetBlockDAGInfoRequestMessage: rpchandlers.HandleGetBlockDAGInfo,
|
||||
appmessage.CmdResolveFinalityConflictRequestMessage: rpchandlers.HandleResolveFinalityConflict,
|
||||
appmessage.CmdNotifyFinalityConflictsRequestMessage: rpchandlers.HandleNotifyFinalityConflicts,
|
||||
appmessage.CmdGetMempoolEntriesRequestMessage: rpchandlers.HandleGetMempoolEntries,
|
||||
appmessage.CmdShutDownRequestMessage: rpchandlers.HandleShutDown,
|
||||
}
|
||||
|
||||
func (m *Manager) routerInitializer(router *router.Router, netConnection *netadapter.NetConnection) {
|
||||
@@ -40,16 +44,12 @@ func (m *Manager) routerInitializer(router *router.Router, netConnection *netada
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
spawn("routerInitializer-handleIncomingMessages", func() {
|
||||
err := m.handleIncomingMessages(router, incomingRoute)
|
||||
m.handleError(err, netConnection)
|
||||
})
|
||||
m.context.NotificationManager.AddListener(router)
|
||||
|
||||
notificationListener := m.context.NotificationManager.AddListener(router)
|
||||
spawn("routerInitializer-handleOutgoingNotifications", func() {
|
||||
spawn("routerInitializer-handleIncomingMessages", func() {
|
||||
defer m.context.NotificationManager.RemoveListener(router)
|
||||
|
||||
err := m.handleOutgoingNotifications(notificationListener)
|
||||
err := m.handleIncomingMessages(router, incomingRoute)
|
||||
m.handleError(err, netConnection)
|
||||
})
|
||||
}
|
||||
@@ -76,15 +76,6 @@ func (m *Manager) handleIncomingMessages(router *router.Router, incomingRoute *r
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Manager) handleOutgoingNotifications(notificationListener *rpccontext.NotificationListener) error {
|
||||
for {
|
||||
err := notificationListener.ProcessNextNotification()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Manager) handleError(err error, netConnection *netadapter.NetConnection) {
|
||||
if errors.Is(err, router.ErrTimeout) {
|
||||
log.Warnf("Got timeout from %s. Disconnecting...", netConnection)
|
||||
|
||||
@@ -47,13 +47,13 @@ type BlockTemplateState struct {
|
||||
|
||||
context *Context
|
||||
|
||||
lastTxUpdate mstime.Time
|
||||
lastGenerated mstime.Time
|
||||
tipHashes []*daghash.Hash
|
||||
minTimestamp mstime.Time
|
||||
template *mining.BlockTemplate
|
||||
notifyMap map[string]map[int64]chan struct{}
|
||||
payAddress util.Address
|
||||
lastTxUpdate mstime.Time
|
||||
lastGenerated mstime.Time
|
||||
virtualParentHashes []*daghash.Hash
|
||||
minTimestamp mstime.Time
|
||||
template *mining.BlockTemplate
|
||||
notifyMap map[string]map[int64]chan struct{}
|
||||
payAddress util.Address
|
||||
}
|
||||
|
||||
// NewBlockTemplateState returns a new instance of a BlockTemplateState with all internal
|
||||
@@ -79,10 +79,10 @@ func (bt *BlockTemplateState) Update(payAddress util.Address) error {
|
||||
// generated.
|
||||
var msgBlock *appmessage.MsgBlock
|
||||
var targetDifficulty string
|
||||
tipHashes := bt.context.DAG.TipHashes()
|
||||
virtualParentHashes := bt.context.DAG.VirtualParentHashes()
|
||||
template := bt.template
|
||||
if template == nil || bt.tipHashes == nil ||
|
||||
!daghash.AreEqual(bt.tipHashes, tipHashes) ||
|
||||
if template == nil || bt.virtualParentHashes == nil ||
|
||||
!daghash.AreEqual(bt.virtualParentHashes, virtualParentHashes) ||
|
||||
bt.payAddress.String() != payAddress.String() ||
|
||||
(bt.lastTxUpdate != lastTxUpdate &&
|
||||
mstime.Now().After(bt.lastGenerated.Add(time.Second*
|
||||
@@ -91,7 +91,7 @@ func (bt *BlockTemplateState) Update(payAddress util.Address) error {
|
||||
// Reset the previous best hash the block template was generated
|
||||
// against so any errors below cause the next invocation to try
|
||||
// again.
|
||||
bt.tipHashes = nil
|
||||
bt.virtualParentHashes = nil
|
||||
|
||||
// Create a new block template that has a coinbase which anyone
|
||||
// can redeem. This is only acceptable because the returned
|
||||
@@ -122,7 +122,7 @@ func (bt *BlockTemplateState) Update(payAddress util.Address) error {
|
||||
bt.template = template
|
||||
bt.lastGenerated = mstime.Now()
|
||||
bt.lastTxUpdate = lastTxUpdate
|
||||
bt.tipHashes = tipHashes
|
||||
bt.virtualParentHashes = virtualParentHashes
|
||||
bt.minTimestamp = minTimestamp
|
||||
bt.payAddress = payAddress
|
||||
|
||||
@@ -133,7 +133,7 @@ func (bt *BlockTemplateState) Update(payAddress util.Address) error {
|
||||
|
||||
// Notify any clients that are long polling about the new
|
||||
// template.
|
||||
bt.notifyLongPollers(tipHashes, lastTxUpdate)
|
||||
bt.notifyLongPollers(virtualParentHashes, lastTxUpdate)
|
||||
} else {
|
||||
// At this point, there is a saved block template and another
|
||||
// request for a template was made, but either the available
|
||||
@@ -234,7 +234,7 @@ func (bt *BlockTemplateState) Response() (*appmessage.GetBlockTemplateResponseMe
|
||||
// Including MinTime -> time/decrement
|
||||
// Omitting CoinbaseTxn -> coinbase, generation
|
||||
targetDifficulty := fmt.Sprintf("%064x", util.CompactToBig(header.Bits))
|
||||
longPollID := bt.encodeLongPollID(bt.tipHashes, bt.payAddress, bt.lastGenerated)
|
||||
longPollID := bt.encodeLongPollID(bt.virtualParentHashes, bt.payAddress, bt.lastGenerated)
|
||||
|
||||
// Check whether this node is synced with the rest of of the
|
||||
// network. There's almost never a good reason to mine on top
|
||||
@@ -250,7 +250,7 @@ func (bt *BlockTemplateState) Response() (*appmessage.GetBlockTemplateResponseMe
|
||||
Bits: strconv.FormatInt(int64(header.Bits), 16),
|
||||
CurrentTime: header.Timestamp.UnixMilliseconds(),
|
||||
ParentHashes: daghash.Strings(header.ParentHashes),
|
||||
MassLimit: appmessage.MaxMassPerBlock,
|
||||
MassLimit: appmessage.MaxMassAcceptedByBlock,
|
||||
Transactions: transactions,
|
||||
HashMerkleRoot: header.HashMerkleRoot.String(),
|
||||
AcceptedIDMerkleRoot: header.AcceptedIDMerkleRoot.String(),
|
||||
@@ -273,12 +273,12 @@ func (bt *BlockTemplateState) Response() (*appmessage.GetBlockTemplateResponseMe
|
||||
// notified when block templates are stale.
|
||||
//
|
||||
// This function MUST be called with the state locked.
|
||||
func (bt *BlockTemplateState) notifyLongPollers(tipHashes []*daghash.Hash, lastGenerated mstime.Time) {
|
||||
func (bt *BlockTemplateState) notifyLongPollers(parentHashes []*daghash.Hash, lastGenerated mstime.Time) {
|
||||
// Notify anything that is waiting for a block template update from
|
||||
// hashes which are not the current tip hashes.
|
||||
tipHashesStr := daghash.JoinHashesStrings(tipHashes, "")
|
||||
// hashes which are not the current parent hashes.
|
||||
parentHashesStr := daghash.JoinHashesStrings(parentHashes, "")
|
||||
for hashesStr, channels := range bt.notifyMap {
|
||||
if hashesStr != tipHashesStr {
|
||||
if hashesStr != parentHashesStr {
|
||||
for _, c := range channels {
|
||||
close(c)
|
||||
}
|
||||
@@ -294,7 +294,7 @@ func (bt *BlockTemplateState) notifyLongPollers(tipHashes []*daghash.Hash, lastG
|
||||
|
||||
// Return now if there is nothing registered for updates to the current
|
||||
// best block hash.
|
||||
channels, ok := bt.notifyMap[tipHashesStr]
|
||||
channels, ok := bt.notifyMap[parentHashesStr]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
@@ -313,7 +313,7 @@ func (bt *BlockTemplateState) notifyLongPollers(tipHashes []*daghash.Hash, lastG
|
||||
// Remove the entry altogether if there are no more registered
|
||||
// channels.
|
||||
if len(channels) == 0 {
|
||||
delete(bt.notifyMap, tipHashesStr)
|
||||
delete(bt.notifyMap, parentHashesStr)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -341,14 +341,14 @@ func (bt *BlockTemplateState) NotifyMempoolTx() {
|
||||
|
||||
// No need to notify anything if no block templates have been generated
|
||||
// yet.
|
||||
if bt.tipHashes == nil || bt.lastGenerated.IsZero() {
|
||||
if bt.virtualParentHashes == nil || bt.lastGenerated.IsZero() {
|
||||
return
|
||||
}
|
||||
|
||||
if mstime.Now().After(bt.lastGenerated.Add(time.Second *
|
||||
blockTemplateRegenerateSeconds)) {
|
||||
|
||||
bt.notifyLongPollers(bt.tipHashes, lastUpdated)
|
||||
bt.notifyLongPollers(bt.virtualParentHashes, lastUpdated)
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -412,14 +412,14 @@ func (bt *BlockTemplateState) BlockTemplateOrLongPollChan(longPollID string,
|
||||
// without requiring a different channel for each client.
|
||||
//
|
||||
// This function MUST be called with the state locked.
|
||||
func (bt *BlockTemplateState) templateUpdateChan(tipHashes []*daghash.Hash, lastGenerated int64) chan struct{} {
|
||||
tipHashesStr := daghash.JoinHashesStrings(tipHashes, "")
|
||||
func (bt *BlockTemplateState) templateUpdateChan(parentHashes []*daghash.Hash, lastGenerated int64) chan struct{} {
|
||||
parentHashesStr := daghash.JoinHashesStrings(parentHashes, "")
|
||||
// Either get the current list of channels waiting for updates about
|
||||
// changes to block template for the parent hashes or create a new one.
|
||||
channels, ok := bt.notifyMap[tipHashesStr]
|
||||
channels, ok := bt.notifyMap[parentHashesStr]
|
||||
if !ok {
|
||||
m := make(map[int64]chan struct{})
|
||||
bt.notifyMap[tipHashesStr] = m
|
||||
bt.notifyMap[parentHashesStr] = m
|
||||
channels = m
|
||||
}
|
||||
|
||||
|
||||
@@ -23,14 +23,14 @@ type Context struct {
|
||||
Mempool *mempool.TxPool
|
||||
AddressManager *addressmanager.AddressManager
|
||||
AcceptanceIndex *indexers.AcceptanceIndex
|
||||
ShutDownChan chan<- struct{}
|
||||
|
||||
BlockTemplateState *BlockTemplateState
|
||||
NotificationManager *NotificationManager
|
||||
}
|
||||
|
||||
// NewContext creates a new RPC context
|
||||
func NewContext(
|
||||
cfg *config.Config,
|
||||
func NewContext(cfg *config.Config,
|
||||
netAdapter *netadapter.NetAdapter,
|
||||
dag *blockdag.BlockDAG,
|
||||
protocolManager *protocol.Manager,
|
||||
@@ -38,7 +38,9 @@ func NewContext(
|
||||
blockTemplateGenerator *mining.BlkTmplGenerator,
|
||||
mempool *mempool.TxPool,
|
||||
addressManager *addressmanager.AddressManager,
|
||||
acceptanceIndex *indexers.AcceptanceIndex) *Context {
|
||||
acceptanceIndex *indexers.AcceptanceIndex,
|
||||
shutDownChan chan<- struct{}) *Context {
|
||||
|
||||
context := &Context{
|
||||
Config: cfg,
|
||||
NetAdapter: netAdapter,
|
||||
@@ -49,6 +51,7 @@ func NewContext(
|
||||
Mempool: mempool,
|
||||
AddressManager: addressManager,
|
||||
AcceptanceIndex: acceptanceIndex,
|
||||
ShutDownChan: shutDownChan,
|
||||
}
|
||||
context.BlockTemplateState = NewBlockTemplateState(context)
|
||||
context.NotificationManager = NewNotificationManager()
|
||||
|
||||
@@ -2,7 +2,7 @@ package rpccontext
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"github.com/pkg/errors"
|
||||
"sync"
|
||||
)
|
||||
@@ -10,58 +10,43 @@ import (
|
||||
// NotificationManager manages notifications for the RPC
|
||||
type NotificationManager struct {
|
||||
sync.RWMutex
|
||||
listeners map[*router.Router]*NotificationListener
|
||||
listeners map[*routerpkg.Router]*NotificationListener
|
||||
}
|
||||
|
||||
// OnBlockAddedListener is a listener function for when a block is added to the DAG
|
||||
type OnBlockAddedListener func(notification *appmessage.BlockAddedNotificationMessage) error
|
||||
|
||||
// OnChainChangedListener is a listener function for when the DAG's selected parent chain changes
|
||||
type OnChainChangedListener func(notification *appmessage.ChainChangedNotificationMessage) error
|
||||
|
||||
// NotificationListener represents a registered RPC notification listener
|
||||
type NotificationListener struct {
|
||||
onBlockAddedListener OnBlockAddedListener
|
||||
onBlockAddedNotificationChan chan *appmessage.BlockAddedNotificationMessage
|
||||
onChainChangedListener OnChainChangedListener
|
||||
onChainChangedNotificationChan chan *appmessage.ChainChangedNotificationMessage
|
||||
|
||||
closeChan chan struct{}
|
||||
propagateBlockAddedNotifications bool
|
||||
propagateChainChangedNotifications bool
|
||||
propagateFinalityConflictNotifications bool
|
||||
propagateFinalityConflictResolvedNotifications bool
|
||||
}
|
||||
|
||||
// NewNotificationManager creates a new NotificationManager
|
||||
func NewNotificationManager() *NotificationManager {
|
||||
return &NotificationManager{
|
||||
listeners: make(map[*router.Router]*NotificationListener),
|
||||
listeners: make(map[*routerpkg.Router]*NotificationListener),
|
||||
}
|
||||
}
|
||||
|
||||
// AddListener registers a listener with the given router
|
||||
func (nm *NotificationManager) AddListener(router *router.Router) *NotificationListener {
|
||||
func (nm *NotificationManager) AddListener(router *routerpkg.Router) {
|
||||
nm.Lock()
|
||||
defer nm.Unlock()
|
||||
|
||||
listener := newNotificationListener()
|
||||
nm.listeners[router] = listener
|
||||
return listener
|
||||
}
|
||||
|
||||
// RemoveListener unregisters the given router
|
||||
func (nm *NotificationManager) RemoveListener(router *router.Router) {
|
||||
func (nm *NotificationManager) RemoveListener(router *routerpkg.Router) {
|
||||
nm.Lock()
|
||||
defer nm.Unlock()
|
||||
|
||||
listener, ok := nm.listeners[router]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
listener.close()
|
||||
|
||||
delete(nm.listeners, router)
|
||||
}
|
||||
|
||||
// Listener retrieves the listener registered with the given router
|
||||
func (nm *NotificationManager) Listener(router *router.Router) (*NotificationListener, error) {
|
||||
func (nm *NotificationManager) Listener(router *routerpkg.Router) (*NotificationListener, error) {
|
||||
nm.RLock()
|
||||
defer nm.RUnlock()
|
||||
|
||||
@@ -73,67 +58,98 @@ func (nm *NotificationManager) Listener(router *router.Router) (*NotificationLis
|
||||
}
|
||||
|
||||
// NotifyBlockAdded notifies the notification manager that a block has been added to the DAG
|
||||
func (nm *NotificationManager) NotifyBlockAdded(notification *appmessage.BlockAddedNotificationMessage) {
|
||||
func (nm *NotificationManager) NotifyBlockAdded(notification *appmessage.BlockAddedNotificationMessage) error {
|
||||
nm.RLock()
|
||||
defer nm.RUnlock()
|
||||
|
||||
for _, listener := range nm.listeners {
|
||||
if listener.onBlockAddedListener != nil {
|
||||
select {
|
||||
case listener.onBlockAddedNotificationChan <- notification:
|
||||
case <-listener.closeChan:
|
||||
continue
|
||||
for router, listener := range nm.listeners {
|
||||
if listener.propagateBlockAddedNotifications {
|
||||
err := router.OutgoingRoute().Enqueue(notification)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NotifyChainChanged notifies the notification manager that the DAG's selected parent chain has changed
|
||||
func (nm *NotificationManager) NotifyChainChanged(message *appmessage.ChainChangedNotificationMessage) {
|
||||
func (nm *NotificationManager) NotifyChainChanged(notification *appmessage.ChainChangedNotificationMessage) error {
|
||||
nm.RLock()
|
||||
defer nm.RUnlock()
|
||||
|
||||
for _, listener := range nm.listeners {
|
||||
if listener.onChainChangedListener != nil {
|
||||
select {
|
||||
case listener.onChainChangedNotificationChan <- message:
|
||||
case <-listener.closeChan:
|
||||
continue
|
||||
for router, listener := range nm.listeners {
|
||||
if listener.propagateChainChangedNotifications {
|
||||
err := router.OutgoingRoute().Enqueue(notification)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NotifyFinalityConflict notifies the notification manager that there's a finality conflict in the DAG
|
||||
func (nm *NotificationManager) NotifyFinalityConflict(notification *appmessage.FinalityConflictNotificationMessage) error {
|
||||
nm.RLock()
|
||||
defer nm.RUnlock()
|
||||
|
||||
for router, listener := range nm.listeners {
|
||||
if listener.propagateFinalityConflictNotifications {
|
||||
err := router.OutgoingRoute().Enqueue(notification)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NotifyFinalityConflictResolved notifies the notification manager that a finality conflict in the DAG has been resolved
|
||||
func (nm *NotificationManager) NotifyFinalityConflictResolved(notification *appmessage.FinalityConflictResolvedNotificationMessage) error {
|
||||
nm.RLock()
|
||||
defer nm.RUnlock()
|
||||
|
||||
for router, listener := range nm.listeners {
|
||||
if listener.propagateFinalityConflictResolvedNotifications {
|
||||
err := router.OutgoingRoute().Enqueue(notification)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func newNotificationListener() *NotificationListener {
|
||||
return &NotificationListener{
|
||||
onBlockAddedNotificationChan: make(chan *appmessage.BlockAddedNotificationMessage),
|
||||
onChainChangedNotificationChan: make(chan *appmessage.ChainChangedNotificationMessage),
|
||||
closeChan: make(chan struct{}, 1),
|
||||
propagateBlockAddedNotifications: false,
|
||||
propagateChainChangedNotifications: false,
|
||||
propagateFinalityConflictNotifications: false,
|
||||
propagateFinalityConflictResolvedNotifications: false,
|
||||
}
|
||||
}
|
||||
|
||||
// SetOnBlockAddedListener sets the onBlockAddedListener handler for this listener
|
||||
func (nl *NotificationListener) SetOnBlockAddedListener(onBlockAddedListener OnBlockAddedListener) {
|
||||
nl.onBlockAddedListener = onBlockAddedListener
|
||||
// PropagateBlockAddedNotifications instructs the listener to send block added notifications
|
||||
// to the remote listener
|
||||
func (nl *NotificationListener) PropagateBlockAddedNotifications() {
|
||||
nl.propagateBlockAddedNotifications = true
|
||||
}
|
||||
|
||||
// SetOnChainChangedListener sets the onChainChangedListener handler for this listener
|
||||
func (nl *NotificationListener) SetOnChainChangedListener(onChainChangedListener OnChainChangedListener) {
|
||||
nl.onChainChangedListener = onChainChangedListener
|
||||
// PropagateChainChangedNotifications instructs the listener to send chain changed notifications
|
||||
// to the remote listener
|
||||
func (nl *NotificationListener) PropagateChainChangedNotifications() {
|
||||
nl.propagateChainChangedNotifications = true
|
||||
}
|
||||
|
||||
// ProcessNextNotification waits until a notification arrives and processes it
|
||||
func (nl *NotificationListener) ProcessNextNotification() error {
|
||||
select {
|
||||
case block := <-nl.onBlockAddedNotificationChan:
|
||||
return nl.onBlockAddedListener(block)
|
||||
case notification := <-nl.onChainChangedNotificationChan:
|
||||
return nl.onChainChangedListener(notification)
|
||||
case <-nl.closeChan:
|
||||
return nil
|
||||
}
|
||||
// PropagateFinalityConflictNotifications instructs the listener to send finality conflict notifications
|
||||
// to the remote listener
|
||||
func (nl *NotificationListener) PropagateFinalityConflictNotifications() {
|
||||
nl.propagateFinalityConflictNotifications = true
|
||||
}
|
||||
|
||||
func (nl *NotificationListener) close() {
|
||||
nl.closeChan <- struct{}{}
|
||||
// PropagateFinalityConflictResolvedNotifications instructs the listener to send finality conflict resolved notifications
|
||||
// to the remote listener
|
||||
func (nl *NotificationListener) PropagateFinalityConflictResolvedNotifications() {
|
||||
nl.propagateFinalityConflictResolvedNotifications = true
|
||||
}
|
||||
|
||||
@@ -88,7 +88,7 @@ func (ctx *Context) BuildBlockVerboseData(block *util.Block, includeTransactionV
|
||||
transactions := block.Transactions()
|
||||
transactionVerboseData := make([]*appmessage.TransactionVerboseData, len(transactions))
|
||||
for i, tx := range transactions {
|
||||
data, err := ctx.buildTransactionVerboseData(tx.MsgTx(), tx.ID().String(),
|
||||
data, err := ctx.BuildTransactionVerboseData(tx.MsgTx(), tx.ID().String(),
|
||||
&blockHeader, hash.String(), nil, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -120,7 +120,9 @@ func (ctx *Context) GetDifficultyRatio(bits uint32, params *dagconfig.Params) fl
|
||||
return diff
|
||||
}
|
||||
|
||||
func (ctx *Context) buildTransactionVerboseData(mtx *appmessage.MsgTx,
|
||||
// BuildTransactionVerboseData builds a TransactionVerboseData from
|
||||
// the given parameters
|
||||
func (ctx *Context) BuildTransactionVerboseData(mtx *appmessage.MsgTx,
|
||||
txID string, blockHeader *appmessage.BlockHeader, blockHash string,
|
||||
acceptingBlock *daghash.Hash, isInMempool bool) (*appmessage.TransactionVerboseData, error) {
|
||||
|
||||
|
||||
@@ -16,6 +16,7 @@ func HandleGetBlockDAGInfo(context *rpccontext.Context, _ *router.Router, _ appm
|
||||
response.NetworkName = params.Name
|
||||
response.BlockCount = dag.BlockCount()
|
||||
response.TipHashes = daghash.Strings(dag.TipHashes())
|
||||
response.VirtualParentHashes = daghash.Strings(dag.VirtualParentHashes())
|
||||
response.Difficulty = context.GetDifficultyRatio(dag.CurrentBits(), params)
|
||||
response.PastMedianTime = dag.CalcPastMedianTime().UnixMilliseconds()
|
||||
return response, nil
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
const (
|
||||
// maxBlocksInGetBlocksResponse is the max amount of blocks that are
|
||||
// allowed in a GetBlocksResult.
|
||||
maxBlocksInGetBlocksResponse = 1000
|
||||
maxBlocksInGetBlocksResponse = 100
|
||||
)
|
||||
|
||||
// HandleGetBlocks handles the respectively named RPC command
|
||||
|
||||
25
app/rpc/rpchandlers/get_mempool_entries.go
Normal file
25
app/rpc/rpchandlers/get_mempool_entries.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package rpchandlers
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
)
|
||||
|
||||
// HandleGetMempoolEntries handles the respectively named RPC command
|
||||
func HandleGetMempoolEntries(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
|
||||
txDescs := context.Mempool.TxDescs()
|
||||
entries := make([]*appmessage.MempoolEntry, len(txDescs))
|
||||
for i, txDesc := range txDescs {
|
||||
transactionVerboseData, err := context.BuildTransactionVerboseData(txDesc.Tx.MsgTx(), txDesc.Tx.ID().String(),
|
||||
nil, "", nil, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entries[i] = &appmessage.MempoolEntry{
|
||||
Fee: txDesc.Fee,
|
||||
TransactionVerboseData: transactionVerboseData,
|
||||
}
|
||||
}
|
||||
return appmessage.NewGetMempoolEntriesResponseMessage(entries), nil
|
||||
}
|
||||
@@ -17,13 +17,19 @@ func HandleGetMempoolEntry(context *rpccontext.Context, _ *router.Router, reques
|
||||
return errorMessage, nil
|
||||
}
|
||||
|
||||
_, ok := context.Mempool.FetchTxDesc(txID)
|
||||
txDesc, ok := context.Mempool.FetchTxDesc(txID)
|
||||
if !ok {
|
||||
errorMessage := &appmessage.GetMempoolEntryResponseMessage{}
|
||||
errorMessage.Error = appmessage.RPCErrorf("transaction is not in the pool")
|
||||
return errorMessage, nil
|
||||
}
|
||||
|
||||
response := appmessage.NewGetMempoolEntryResponseMessage()
|
||||
transactionVerboseData, err := context.BuildTransactionVerboseData(txDesc.Tx.MsgTx(), txID.String(),
|
||||
nil, "", nil, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
response := appmessage.NewGetMempoolEntryResponseMessage(txDesc.Fee, transactionVerboseData)
|
||||
return response, nil
|
||||
}
|
||||
|
||||
@@ -2,6 +2,8 @@ package rpchandlers
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||
"github.com/kaspanet/kaspad/util/panics"
|
||||
)
|
||||
|
||||
var log, _ = logger.Get(logger.SubsystemTags.RPCS)
|
||||
var spawn = panics.GoroutineWrapperFunc(log)
|
||||
|
||||
@@ -12,9 +12,7 @@ func HandleNotifyBlockAdded(context *rpccontext.Context, router *router.Router,
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
listener.SetOnBlockAddedListener(func(notification *appmessage.BlockAddedNotificationMessage) error {
|
||||
return router.OutgoingRoute().Enqueue(notification)
|
||||
})
|
||||
listener.PropagateBlockAddedNotifications()
|
||||
|
||||
response := appmessage.NewNotifyBlockAddedResponseMessage()
|
||||
return response, nil
|
||||
|
||||
@@ -18,9 +18,7 @@ func HandleNotifyChainChanged(context *rpccontext.Context, router *router.Router
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
listener.SetOnChainChangedListener(func(message *appmessage.ChainChangedNotificationMessage) error {
|
||||
return router.OutgoingRoute().Enqueue(message)
|
||||
})
|
||||
listener.PropagateChainChangedNotifications()
|
||||
|
||||
response := appmessage.NewNotifyChainChangedResponseMessage()
|
||||
return response, nil
|
||||
|
||||
20
app/rpc/rpchandlers/notify_finality_conflicts.go
Normal file
20
app/rpc/rpchandlers/notify_finality_conflicts.go
Normal file
@@ -0,0 +1,20 @@
|
||||
package rpchandlers
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
)
|
||||
|
||||
// HandleNotifyFinalityConflicts handles the respectively named RPC command
|
||||
func HandleNotifyFinalityConflicts(context *rpccontext.Context, router *router.Router, _ appmessage.Message) (appmessage.Message, error) {
|
||||
listener, err := context.NotificationManager.Listener(router)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
listener.PropagateFinalityConflictNotifications()
|
||||
listener.PropagateFinalityConflictResolvedNotifications()
|
||||
|
||||
response := appmessage.NewNotifyFinalityConflictsResponseMessage()
|
||||
return response, nil
|
||||
}
|
||||
30
app/rpc/rpchandlers/resolve_finality_conflict.go
Normal file
30
app/rpc/rpchandlers/resolve_finality_conflict.go
Normal file
@@ -0,0 +1,30 @@
|
||||
package rpchandlers
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
)
|
||||
|
||||
// HandleResolveFinalityConflict handles the respectively named RPC command
|
||||
func HandleResolveFinalityConflict(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
|
||||
ResolveFinalityConflictRequest := request.(*appmessage.ResolveFinalityConflictRequestMessage)
|
||||
|
||||
finalityBlockHash, err := daghash.NewHashFromStr(ResolveFinalityConflictRequest.FinalityBlockHash)
|
||||
if err != nil {
|
||||
errorMessage := &appmessage.ResolveFinalityConflictResponseMessage{}
|
||||
errorMessage.Error = appmessage.RPCErrorf("Could not parse finalityBlockHash: %s", err)
|
||||
return errorMessage, nil
|
||||
}
|
||||
|
||||
err = context.DAG.ResolveFinalityConflict(finalityBlockHash)
|
||||
if err != nil {
|
||||
errorMessage := &appmessage.ResolveFinalityConflictResponseMessage{}
|
||||
errorMessage.Error = appmessage.RPCErrorf("Could not resolve finality conflict: %s", err)
|
||||
return errorMessage, nil
|
||||
}
|
||||
|
||||
response := appmessage.NewResolveFinalityConflictResponseMessage()
|
||||
return response, nil
|
||||
}
|
||||
25
app/rpc/rpchandlers/shut_down.go
Normal file
25
app/rpc/rpchandlers/shut_down.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package rpchandlers
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
)
|
||||
|
||||
const pauseBeforeShutDown = time.Second
|
||||
|
||||
// HandleShutDown handles the respectively named RPC command
|
||||
func HandleShutDown(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
|
||||
log.Warn("ShutDown RPC called.")
|
||||
|
||||
// Wait a second before shutting down, to allow time to return the response to the caller
|
||||
spawn("HandleShutDown-pauseAndShutDown", func() {
|
||||
<-time.After(pauseBeforeShutDown)
|
||||
close(context.ShutDownChan)
|
||||
})
|
||||
|
||||
response := appmessage.NewShutDownResponseMessage()
|
||||
return response, nil
|
||||
}
|
||||
@@ -1,18 +1,19 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/jessevdk/go-flags"
|
||||
"github.com/kaspanet/kaspad/infrastructure/config"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/jessevdk/go-flags"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultRPCServer = "localhost"
|
||||
defaultRPCServer = "localhost"
|
||||
defaultTimeout uint64 = 30
|
||||
)
|
||||
|
||||
type configFlags struct {
|
||||
RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
|
||||
Timeout uint64 `short:"t" long:"timeout" description:"Timeout for the request (in seconds)"`
|
||||
RequestJSON string `description:"The request in JSON format"`
|
||||
config.NetworkFlags
|
||||
}
|
||||
@@ -20,6 +21,7 @@ type configFlags struct {
|
||||
func parseConfig() (*configFlags, error) {
|
||||
cfg := &configFlags{
|
||||
RPCServer: defaultRPCServer,
|
||||
Timeout: defaultTimeout,
|
||||
}
|
||||
parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag)
|
||||
args, err := parser.Parse()
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/rpcclient/grpcclient"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
func main() {
|
||||
@@ -12,19 +13,36 @@ func main() {
|
||||
printErrorAndExit(fmt.Sprintf("error parsing command-line arguments: %s", err))
|
||||
}
|
||||
|
||||
client, err := grpcclient.Connect(cfg.RPCServer)
|
||||
rpcAddress, err := cfg.NetParams().NormalizeRPCServerAddress(cfg.RPCServer)
|
||||
if err != nil {
|
||||
printErrorAndExit(fmt.Sprintf("error parsing RPC server address: %s", err))
|
||||
}
|
||||
client, err := grpcclient.Connect(rpcAddress)
|
||||
if err != nil {
|
||||
printErrorAndExit(fmt.Sprintf("error connecting to the RPC server: %s", err))
|
||||
}
|
||||
defer client.Disconnect()
|
||||
|
||||
requestString := cfg.RequestJSON
|
||||
responseString, err := client.PostJSON(requestString)
|
||||
if err != nil {
|
||||
printErrorAndExit(fmt.Sprintf("error posting the request to the RPC server: %s", err))
|
||||
}
|
||||
var responseString string
|
||||
done := make(chan struct{})
|
||||
|
||||
fmt.Println(responseString)
|
||||
go func() {
|
||||
requestString := cfg.RequestJSON
|
||||
var err error
|
||||
responseString, err = client.PostJSON(requestString)
|
||||
if err != nil {
|
||||
printErrorAndExit(fmt.Sprintf("error posting the request to the RPC server: %s", err))
|
||||
}
|
||||
done <- struct{}{}
|
||||
}()
|
||||
|
||||
timeout := time.Duration(cfg.Timeout) * time.Second
|
||||
select {
|
||||
case <-done:
|
||||
fmt.Println(responseString)
|
||||
case <-time.After(timeout):
|
||||
printErrorAndExit(fmt.Sprintf("timeout of %s has been exceeded", timeout))
|
||||
}
|
||||
}
|
||||
|
||||
func printErrorAndExit(message string) {
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/kaspanet/go-secp256k1"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
// if it was accepted or not by some block
|
||||
type TxAcceptanceData struct {
|
||||
Tx *util.Tx
|
||||
Fee uint64
|
||||
IsAccepted bool
|
||||
}
|
||||
|
||||
@@ -40,7 +41,7 @@ func (data MultiBlockTxsAcceptanceData) FindAcceptanceData(blockHash *daghash.Ha
|
||||
//
|
||||
// This function MUST be called with the DAG read-lock held
|
||||
func (dag *BlockDAG) TxsAcceptedByVirtual() (MultiBlockTxsAcceptanceData, error) {
|
||||
_, _, txsAcceptanceData, err := dag.pastUTXO(&dag.virtual.blockNode)
|
||||
_, _, txsAcceptanceData, err := dag.pastUTXO(dag.virtual.blockNode)
|
||||
return txsAcceptanceData, err
|
||||
}
|
||||
|
||||
@@ -60,57 +61,45 @@ func (dag *BlockDAG) meldVirtualUTXO(newVirtualUTXODiffSet *DiffUTXOSet) error {
|
||||
return newVirtualUTXODiffSet.meldToBase()
|
||||
}
|
||||
|
||||
// checkDoubleSpendsWithBlockPast checks that each block transaction
|
||||
// has a corresponding UTXO in the block pastUTXO.
|
||||
func checkDoubleSpendsWithBlockPast(pastUTXO UTXOSet, blockTransactions []*util.Tx) error {
|
||||
for _, tx := range blockTransactions {
|
||||
if tx.IsCoinBase() {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, txIn := range tx.MsgTx().TxIn {
|
||||
if _, ok := pastUTXO.Get(txIn.PreviousOutpoint); !ok {
|
||||
return ruleError(ErrMissingTxOut, fmt.Sprintf("missing transaction "+
|
||||
"output %s in the utxo set", txIn.PreviousOutpoint))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
type utxoVerificationOutput struct {
|
||||
newBlockPastUTXO UTXOSet
|
||||
txsAcceptanceData MultiBlockTxsAcceptanceData
|
||||
newBlockMultiset *secp256k1.MultiSet
|
||||
}
|
||||
|
||||
// verifyAndBuildUTXO verifies all transactions in the given block and builds its UTXO
|
||||
// to save extra traversals it returns the transactions acceptance data, the compactFeeData
|
||||
// to save extra traversals it returns the transactions acceptance data
|
||||
// for the new block and its multiset.
|
||||
func (node *blockNode) verifyAndBuildUTXO(dag *BlockDAG, transactions []*util.Tx, fastAdd bool) (
|
||||
newBlockUTXO UTXOSet, txsAcceptanceData MultiBlockTxsAcceptanceData, newBlockFeeData compactFeeData, multiset *secp256k1.MultiSet, err error) {
|
||||
|
||||
pastUTXO, selectedParentPastUTXO, txsAcceptanceData, err := dag.pastUTXO(node)
|
||||
func (node *blockNode) verifyAndBuildUTXO(transactions []*util.Tx) (*utxoVerificationOutput, error) {
|
||||
pastUTXO, selectedParentPastUTXO, txsAcceptanceData, err := node.dag.pastUTXO(node)
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = node.validateAcceptedIDMerkleRoot(dag, txsAcceptanceData)
|
||||
err = node.validateAcceptedIDMerkleRoot(node.dag, txsAcceptanceData)
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
feeData, err := dag.checkConnectToPastUTXO(node, pastUTXO, transactions, fastAdd)
|
||||
err = node.dag.checkConnectBlockToPastUTXO(node, pastUTXO, transactions)
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
multiset, err = node.calcMultiset(dag, txsAcceptanceData, selectedParentPastUTXO)
|
||||
multiset, err := node.calcMultiset(txsAcceptanceData, selectedParentPastUTXO)
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = node.validateUTXOCommitment(multiset)
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return pastUTXO, txsAcceptanceData, feeData, multiset, nil
|
||||
return &utxoVerificationOutput{
|
||||
newBlockPastUTXO: pastUTXO,
|
||||
txsAcceptanceData: txsAcceptanceData,
|
||||
newBlockMultiset: multiset}, nil
|
||||
}
|
||||
|
||||
func genesisPastUTXO(virtual *virtualBlock) UTXOSet {
|
||||
@@ -118,7 +107,7 @@ func genesisPastUTXO(virtual *virtualBlock) UTXOSet {
|
||||
// set by creating a diff UTXO set with the virtual UTXO
|
||||
// set, and adding all of its entries in toRemove
|
||||
diff := NewUTXODiff()
|
||||
for outpoint, entry := range virtual.utxoSet.utxoCollection {
|
||||
for outpoint, entry := range virtual.utxoSet.utxoCache {
|
||||
diff.toRemove[outpoint] = entry
|
||||
}
|
||||
genesisPastUTXO := UTXOSet(NewDiffUTXOSet(virtual.utxoSet, diff))
|
||||
@@ -134,6 +123,11 @@ func (node *blockNode) applyBlueBlocks(selectedParentPastUTXO UTXOSet, blueBlock
|
||||
pastUTXO = selectedParentPastUTXO.(*DiffUTXOSet).cloneWithoutBase()
|
||||
multiBlockTxsAcceptanceData = make(MultiBlockTxsAcceptanceData, len(blueBlocks))
|
||||
|
||||
// We obtain the median time of the selected parent block (unless it's genesis block)
|
||||
// in order to determine if transactions in the current block are final.
|
||||
selectedParentMedianTime := node.selectedParentMedianTime()
|
||||
accumulatedMass := uint64(0)
|
||||
|
||||
// Add blueBlocks to multiBlockTxsAcceptanceData in topological order. This
|
||||
// is so that anyone who iterates over it would process blocks (and transactions)
|
||||
// in their order of appearance in the DAG.
|
||||
@@ -146,20 +140,20 @@ func (node *blockNode) applyBlueBlocks(selectedParentPastUTXO UTXOSet, blueBlock
|
||||
}
|
||||
isSelectedParent := i == 0
|
||||
|
||||
for j, tx := range blueBlock.Transactions() {
|
||||
for j, tx := range transactions {
|
||||
var isAccepted bool
|
||||
var txFee uint64
|
||||
|
||||
// Coinbase transaction outputs are added to the UTXO
|
||||
// only if they are in the selected parent chain.
|
||||
if !isSelectedParent && tx.IsCoinBase() {
|
||||
isAccepted = false
|
||||
} else {
|
||||
isAccepted, err = pastUTXO.AddTx(tx.MsgTx(), node.blueScore)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
isAccepted, txFee, accumulatedMass, err =
|
||||
node.maybeAcceptTx(tx, isSelectedParent, pastUTXO, accumulatedMass, selectedParentMedianTime)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
blockTxsAcceptanceData.TxAcceptanceData[j] = TxAcceptanceData{Tx: tx, IsAccepted: isAccepted}
|
||||
|
||||
blockTxsAcceptanceData.TxAcceptanceData[j] = TxAcceptanceData{
|
||||
Tx: tx,
|
||||
Fee: txFee,
|
||||
IsAccepted: isAccepted}
|
||||
}
|
||||
multiBlockTxsAcceptanceData[i] = blockTxsAcceptanceData
|
||||
}
|
||||
@@ -167,6 +161,48 @@ func (node *blockNode) applyBlueBlocks(selectedParentPastUTXO UTXOSet, blueBlock
|
||||
return pastUTXO, multiBlockTxsAcceptanceData, nil
|
||||
}
|
||||
|
||||
func (node *blockNode) maybeAcceptTx(tx *util.Tx, isSelectedParent bool, pastUTXO UTXOSet,
|
||||
accumulatedMassBefore uint64, selectedParentMedianTime mstime.Time) (
|
||||
isAccepted bool, txFee uint64, accumulatedMassAfter uint64, err error) {
|
||||
|
||||
accumulatedMass := accumulatedMassBefore
|
||||
|
||||
// Coinbase transaction outputs are added to the UTXO-set only if they are in the selected parent chain.
|
||||
if tx.IsCoinBase() {
|
||||
if !isSelectedParent {
|
||||
return false, 0, 0, nil
|
||||
}
|
||||
txMass := CalcTxMass(tx, nil)
|
||||
accumulatedMass += txMass
|
||||
|
||||
_, err = pastUTXO.AddTx(tx.MsgTx(), node.blueScore)
|
||||
if err != nil {
|
||||
return false, 0, 0, err
|
||||
}
|
||||
|
||||
return true, 0, accumulatedMass, nil
|
||||
}
|
||||
|
||||
txFee, accumulatedMassAfter, err = node.dag.checkConnectTransactionToPastUTXO(
|
||||
node, tx, pastUTXO, accumulatedMassBefore, selectedParentMedianTime)
|
||||
if err != nil {
|
||||
if !errors.As(err, &(RuleError{})) {
|
||||
return false, 0, 0, err
|
||||
}
|
||||
|
||||
isAccepted = false
|
||||
} else {
|
||||
isAccepted = true
|
||||
accumulatedMass = accumulatedMassAfter
|
||||
|
||||
_, err = pastUTXO.AddTx(tx.MsgTx(), node.blueScore)
|
||||
if err != nil {
|
||||
return false, 0, 0, err
|
||||
}
|
||||
}
|
||||
return isAccepted, txFee, accumulatedMass, nil
|
||||
}
|
||||
|
||||
// pastUTXO returns the UTXO of a given block's past
|
||||
// To save traversals over the blue blocks, it also returns the transaction acceptance data for
|
||||
// all blue blocks
|
||||
@@ -234,18 +270,18 @@ func (dag *BlockDAG) restorePastUTXO(node *blockNode) (UTXOSet, error) {
|
||||
return NewDiffUTXOSet(dag.virtual.utxoSet, accumulatedDiff), nil
|
||||
}
|
||||
|
||||
// updateTipsUTXO builds and applies new diff UTXOs for all the DAG's tips
|
||||
func updateTipsUTXO(dag *BlockDAG, virtualUTXO UTXOSet) error {
|
||||
for tip := range dag.virtual.parents {
|
||||
tipPastUTXO, err := dag.restorePastUTXO(tip)
|
||||
// updateValidTipsUTXO builds and applies new diff UTXOs for all the DAG's valid tips
|
||||
func updateValidTipsUTXO(dag *BlockDAG, virtualUTXO UTXOSet) error {
|
||||
for validTip := range dag.validTips {
|
||||
validTipPastUTXO, err := dag.restorePastUTXO(validTip)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
diff, err := virtualUTXO.diffFrom(tipPastUTXO)
|
||||
diff, err := virtualUTXO.diffFrom(validTipPastUTXO)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dag.utxoDiffStore.setBlockDiff(tip, diff)
|
||||
err = dag.utxoDiffStore.setBlockDiff(validTip, diff)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -254,26 +290,13 @@ func updateTipsUTXO(dag *BlockDAG, virtualUTXO UTXOSet) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateParents adds this block to the children sets of its parents
|
||||
// and updates the diff of any parent whose DiffChild is this block
|
||||
func (node *blockNode) updateParents(dag *BlockDAG, newBlockUTXO UTXOSet) error {
|
||||
node.updateParentsChildren()
|
||||
return node.updateParentsDiffs(dag, newBlockUTXO)
|
||||
}
|
||||
|
||||
// updateParentsDiffs updates the diff of any parent whose DiffChild is this block
|
||||
func (node *blockNode) updateParentsDiffs(dag *BlockDAG, newBlockUTXO UTXOSet) error {
|
||||
virtualDiffFromNewBlock, err := dag.virtual.utxoSet.diffFrom(newBlockUTXO)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dag.utxoDiffStore.setBlockDiff(node, virtualDiffFromNewBlock)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
func (node *blockNode) updateParentsDiffs(dag *BlockDAG, newBlockPastUTXO UTXOSet) error {
|
||||
for parent := range node.parents {
|
||||
if node.dag.index.BlockNodeStatus(parent) == statusUTXOPendingVerification {
|
||||
continue
|
||||
}
|
||||
|
||||
diffChild, err := dag.utxoDiffStore.diffChildByNode(parent)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -287,7 +310,7 @@ func (node *blockNode) updateParentsDiffs(dag *BlockDAG, newBlockUTXO UTXOSet) e
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
diff, err := newBlockUTXO.diffFrom(parentPastUTXO)
|
||||
diff, err := newBlockPastUTXO.diffFrom(parentPastUTXO)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -300,3 +323,41 @@ func (node *blockNode) updateParentsDiffs(dag *BlockDAG, newBlockUTXO UTXOSet) e
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (node *blockNode) updateDiffAndDiffChild(newBlockPastUTXO UTXOSet) error {
|
||||
var diffChild *blockNode
|
||||
for child := range node.children {
|
||||
if node.dag.index.BlockNodeStatus(child) == statusValid {
|
||||
diffChild = child
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// If there's no diffChild, then virtual is the de-facto diffChild
|
||||
var diffChildUTXOSet UTXOSet = node.dag.virtual.utxoSet
|
||||
if diffChild != nil {
|
||||
var err error
|
||||
diffChildUTXOSet, err = node.dag.restorePastUTXO(diffChild)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
diffFromDiffChild, err := diffChildUTXOSet.diffFrom(newBlockPastUTXO)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = node.dag.utxoDiffStore.setBlockDiff(node, diffFromDiffChild)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if diffChild != nil {
|
||||
err = node.dag.utxoDiffStore.setBlockDiffChild(node, diffChild)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -6,9 +6,11 @@ package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/kaspanet/kaspad/infrastructure/db/dbaccess"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"sync"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/dagconfig"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
@@ -59,6 +61,18 @@ func (bi *blockIndex) LookupNode(hash *daghash.Hash) (*blockNode, bool) {
|
||||
return node, ok
|
||||
}
|
||||
|
||||
func (bi *blockIndex) LookupNodes(hashes []*daghash.Hash) ([]*blockNode, error) {
|
||||
blocks := make([]*blockNode, 0, len(hashes))
|
||||
for _, hash := range hashes {
|
||||
node, ok := bi.LookupNode(hash)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("Couldn't find block with hash %s", hash)
|
||||
}
|
||||
blocks = append(blocks, node)
|
||||
}
|
||||
return blocks, nil
|
||||
}
|
||||
|
||||
// AddNode adds the provided node to the block index and marks it as dirty.
|
||||
// Duplicate entries are not checked so it is up to caller to avoid adding them.
|
||||
//
|
||||
@@ -78,36 +92,23 @@ func (bi *blockIndex) addNode(node *blockNode) {
|
||||
bi.index[*node.hash] = node
|
||||
}
|
||||
|
||||
// NodeStatus provides concurrent-safe access to the status field of a node.
|
||||
// BlockNodeStatus provides concurrent-safe access to the status field of a node.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (bi *blockIndex) NodeStatus(node *blockNode) blockStatus {
|
||||
func (bi *blockIndex) BlockNodeStatus(node *blockNode) blockStatus {
|
||||
bi.RLock()
|
||||
defer bi.RUnlock()
|
||||
status := node.status
|
||||
return status
|
||||
}
|
||||
|
||||
// SetStatusFlags flips the provided status flags on the block node to on,
|
||||
// regardless of whether they were on or off previously. This does not unset any
|
||||
// flags currently on.
|
||||
// SetBlockNodeStatus changes the status of a blockNode
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (bi *blockIndex) SetStatusFlags(node *blockNode, flags blockStatus) {
|
||||
func (bi *blockIndex) SetBlockNodeStatus(node *blockNode, newStatus blockStatus) {
|
||||
bi.Lock()
|
||||
defer bi.Unlock()
|
||||
node.status |= flags
|
||||
bi.dirty[node] = struct{}{}
|
||||
}
|
||||
|
||||
// UnsetStatusFlags flips the provided status flags on the block node to off,
|
||||
// regardless of whether they were on or off previously.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (bi *blockIndex) UnsetStatusFlags(node *blockNode, flags blockStatus) {
|
||||
bi.Lock()
|
||||
defer bi.Unlock()
|
||||
node.status &^= flags
|
||||
node.status = newStatus
|
||||
bi.dirty[node] = struct{}{}
|
||||
}
|
||||
|
||||
@@ -165,7 +166,7 @@ func lookupParentNodes(block *util.Block, dag *BlockDAG) (blockSet, error) {
|
||||
if !ok {
|
||||
str := fmt.Sprintf("parent block %s is unknown", parentHash)
|
||||
return nil, ruleError(ErrParentBlockUnknown, str)
|
||||
} else if dag.index.NodeStatus(node).KnownInvalid() {
|
||||
} else if dag.index.BlockNodeStatus(node).KnownInvalid() {
|
||||
str := fmt.Sprintf("parent block %s is known to be invalid", parentHash)
|
||||
return nil, ruleError(ErrInvalidAncestorBlock, str)
|
||||
}
|
||||
|
||||
@@ -169,9 +169,15 @@ func (dag *BlockDAG) antiPastBetween(lowHash, highHash *daghash.Hash, maxEntries
|
||||
continue
|
||||
}
|
||||
visited.add(current)
|
||||
isCurrentAncestorOfLowNode, err := dag.isInPast(current, lowNode)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
var isCurrentAncestorOfLowNode bool
|
||||
if current == lowNode {
|
||||
isCurrentAncestorOfLowNode = false
|
||||
} else {
|
||||
var err error
|
||||
isCurrentAncestorOfLowNode, err = dag.isInPast(current, lowNode)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if isCurrentAncestorOfLowNode {
|
||||
continue
|
||||
|
||||
@@ -16,12 +16,12 @@ import (
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
)
|
||||
|
||||
// blockStatus is a bit field representing the validation state of the block.
|
||||
// blockStatus is representing the validation state of the block.
|
||||
type blockStatus byte
|
||||
|
||||
const (
|
||||
// statusDataStored indicates that the block's payload is stored on disk.
|
||||
statusDataStored blockStatus = 1 << iota
|
||||
statusDataStored blockStatus = iota
|
||||
|
||||
// statusValid indicates that the block has been fully validated.
|
||||
statusValid
|
||||
@@ -32,12 +32,33 @@ const (
|
||||
// statusInvalidAncestor indicates that one of the block's ancestors has
|
||||
// has failed validation, thus the block is also invalid.
|
||||
statusInvalidAncestor
|
||||
|
||||
// statusUTXOPendingVerification indicates that the block is pending verification against its past UTXO-Set, either
|
||||
// because it was not yet verified since the block was never in the selected parent chain, or if the
|
||||
// block violates finality.
|
||||
statusUTXOPendingVerification
|
||||
|
||||
// statusDisqualifiedFromChain indicates that the block is not eligible to be a selected parent.
|
||||
statusDisqualifiedFromChain
|
||||
)
|
||||
|
||||
var blockStatusToString = map[blockStatus]string{
|
||||
statusDataStored: "statusDataStored",
|
||||
statusValid: "statusValid",
|
||||
statusValidateFailed: "statusValidateFailed",
|
||||
statusInvalidAncestor: "statusInvalidAncestor",
|
||||
statusUTXOPendingVerification: "statusUTXOPendingVerification",
|
||||
statusDisqualifiedFromChain: "statusDisqualifiedFromChain",
|
||||
}
|
||||
|
||||
func (status blockStatus) String() string {
|
||||
return blockStatusToString[status]
|
||||
}
|
||||
|
||||
// KnownValid returns whether the block is known to be valid. This will return
|
||||
// false for a valid block that has not been fully validated yet.
|
||||
func (status blockStatus) KnownValid() bool {
|
||||
return status&statusValid != 0
|
||||
return status == statusValid
|
||||
}
|
||||
|
||||
// KnownInvalid returns whether the block is known to be invalid. This may be
|
||||
@@ -45,7 +66,7 @@ func (status blockStatus) KnownValid() bool {
|
||||
// invalid. This will return false for invalid blocks that have not been proven
|
||||
// invalid yet.
|
||||
func (status blockStatus) KnownInvalid() bool {
|
||||
return status&(statusValidateFailed|statusInvalidAncestor) != 0
|
||||
return status == statusValidateFailed || status == statusInvalidAncestor
|
||||
}
|
||||
|
||||
// blockNode represents a block within the block DAG. The DAG is stored into
|
||||
@@ -58,6 +79,9 @@ type blockNode struct {
|
||||
// hundreds of thousands of these in memory, so a few extra bytes of
|
||||
// padding adds up.
|
||||
|
||||
// dag is the blockDAG in which this node resides
|
||||
dag *BlockDAG
|
||||
|
||||
// parents is the parent blocks for this node.
|
||||
parents blockSet
|
||||
|
||||
@@ -68,9 +92,12 @@ type blockNode struct {
|
||||
// children are all the blocks that refer to this block as a parent
|
||||
children blockSet
|
||||
|
||||
// blues are all blue blocks in this block's worldview that are in its selected parent anticone
|
||||
// blues are all blue blocks in this block's worldview that are in its merge set
|
||||
blues []*blockNode
|
||||
|
||||
// reds are all red blocks in this block's worldview that are in its merge set
|
||||
reds []*blockNode
|
||||
|
||||
// blueScore is the count of all the blue blocks in this block's past
|
||||
blueScore uint64
|
||||
|
||||
@@ -81,7 +108,7 @@ type blockNode struct {
|
||||
// hash is the double sha 256 of the block.
|
||||
hash *daghash.Hash
|
||||
|
||||
// Some fields from block headers to aid in reconstructing headers
|
||||
// Some fields from block headers to aid in reconstructing headers
|
||||
// from memory. These must be treated as immutable and are intentionally
|
||||
// ordered to avoid padding on 64-bit platforms.
|
||||
version int32
|
||||
@@ -94,12 +121,9 @@ type blockNode struct {
|
||||
|
||||
// status is a bitfield representing the validation state of the block. The
|
||||
// status field, unlike the other fields, may be written to and so should
|
||||
// only be accessed using the concurrent-safe NodeStatus method on
|
||||
// only be accessed using the concurrent-safe BlockNodeStatus method on
|
||||
// blockIndex once the node has been added to the global index.
|
||||
status blockStatus
|
||||
|
||||
// isFinalized determines whether the node is below the finality point.
|
||||
isFinalized bool
|
||||
}
|
||||
|
||||
// newBlockNode returns a new block node for the given block header and parents, and the
|
||||
@@ -108,6 +132,7 @@ type blockNode struct {
|
||||
// This function is NOT safe for concurrent access.
|
||||
func (dag *BlockDAG) newBlockNode(blockHeader *appmessage.BlockHeader, parents blockSet) (node *blockNode, selectedParentAnticone []*blockNode) {
|
||||
node = &blockNode{
|
||||
dag: dag,
|
||||
parents: parents,
|
||||
children: make(blockSet),
|
||||
blueScore: math.MaxUint64, // Initialized to the max value to avoid collisions with the genesis block
|
||||
@@ -205,8 +230,8 @@ func (node *blockNode) RelativeAncestor(distance uint64) *blockNode {
|
||||
// prior to, and including, the block node.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (node *blockNode) PastMedianTime(dag *BlockDAG) mstime.Time {
|
||||
window := blueBlockWindow(node, 2*dag.TimestampDeviationTolerance-1)
|
||||
func (node *blockNode) PastMedianTime() mstime.Time {
|
||||
window := blueBlockWindow(node, 2*node.dag.TimestampDeviationTolerance-1)
|
||||
medianTimestamp, err := window.medianTimestamp()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("blueBlockWindow: %s", err))
|
||||
@@ -214,6 +239,14 @@ func (node *blockNode) PastMedianTime(dag *BlockDAG) mstime.Time {
|
||||
return mstime.UnixMilliseconds(medianTimestamp)
|
||||
}
|
||||
|
||||
func (node *blockNode) selectedParentMedianTime() mstime.Time {
|
||||
medianTime := node.Header().Timestamp
|
||||
if !node.isGenesis() {
|
||||
medianTime = node.selectedParent.PastMedianTime()
|
||||
}
|
||||
return medianTime
|
||||
}
|
||||
|
||||
func (node *blockNode) ParentHashes() []*daghash.Hash {
|
||||
return node.parents.hashes()
|
||||
}
|
||||
@@ -223,8 +256,8 @@ func (node *blockNode) isGenesis() bool {
|
||||
return len(node.parents) == 0
|
||||
}
|
||||
|
||||
func (node *blockNode) finalityScore(dag *BlockDAG) uint64 {
|
||||
return node.blueScore / uint64(dag.FinalityInterval())
|
||||
func (node *blockNode) finalityScore() uint64 {
|
||||
return node.blueScore / node.dag.FinalityInterval()
|
||||
}
|
||||
|
||||
// String returns a string that contains the block hash.
|
||||
@@ -235,3 +268,111 @@ func (node blockNode) String() string {
|
||||
func (node *blockNode) time() mstime.Time {
|
||||
return mstime.UnixMilliseconds(node.timestamp)
|
||||
}
|
||||
|
||||
func (node *blockNode) blockAtDepth(depth uint64) *blockNode {
|
||||
if node.blueScore <= depth { // to prevent overflow of requiredBlueScore
|
||||
depth = node.blueScore
|
||||
}
|
||||
|
||||
current := node
|
||||
requiredBlueScore := node.blueScore - depth
|
||||
|
||||
for current.blueScore >= requiredBlueScore {
|
||||
if current.isGenesis() {
|
||||
return current
|
||||
}
|
||||
current = current.selectedParent
|
||||
}
|
||||
|
||||
return current
|
||||
}
|
||||
|
||||
func (node *blockNode) finalityPoint() *blockNode {
|
||||
return node.blockAtDepth(node.dag.FinalityInterval())
|
||||
}
|
||||
|
||||
func (node *blockNode) hasFinalityPointInOthersSelectedChain(other *blockNode) (bool, error) {
|
||||
finalityPoint := node.finalityPoint()
|
||||
return node.dag.isInSelectedParentChainOf(finalityPoint, other)
|
||||
}
|
||||
|
||||
func (node *blockNode) nonBoundedMergeDepthViolatingBlues() (blockSet, error) {
|
||||
nonBoundedMergeDepthViolatingBlues := newBlockSet()
|
||||
|
||||
for _, blueNode := range node.blues {
|
||||
notViolatingFinality, err := node.hasFinalityPointInOthersSelectedChain(blueNode)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if notViolatingFinality {
|
||||
nonBoundedMergeDepthViolatingBlues.add(blueNode)
|
||||
}
|
||||
}
|
||||
|
||||
return nonBoundedMergeDepthViolatingBlues, nil
|
||||
}
|
||||
|
||||
func (node *blockNode) checkBoundedMergeDepth() error {
|
||||
nonBoundedMergeDepthViolatingBlues, err := node.nonBoundedMergeDepthViolatingBlues()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
finalityPoint := node.finalityPoint()
|
||||
for _, red := range node.reds {
|
||||
doesRedHaveFinalityPointInPast, err := node.dag.isInPast(finalityPoint, red)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
isRedInPastOfAnyNonFinalityViolatingBlue, err := node.dag.isInPastOfAny(red, nonBoundedMergeDepthViolatingBlues)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !doesRedHaveFinalityPointInPast && !isRedInPastOfAnyNonFinalityViolatingBlue {
|
||||
return ruleError(ErrViolatingBoundedMergeDepth, "block is violating bounded merge depth")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (node *blockNode) isViolatingFinality() (bool, error) {
|
||||
if node.isGenesis() {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if node.dag.virtual.less(node) {
|
||||
isVirtualFinalityPointInNodesSelectedChain, err := node.dag.isInSelectedParentChainOf(
|
||||
node.dag.virtual.finalityPoint(), node.selectedParent) // use node.selectedParent because node still doesn't have reachability data
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !isVirtualFinalityPointInNodesSelectedChain {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (node *blockNode) checkMergeSizeLimit() error {
|
||||
mergeSetSize := len(node.reds) + len(node.blues)
|
||||
|
||||
if mergeSetSize > mergeSetSizeLimit {
|
||||
return ruleError(ErrViolatingMergeLimit,
|
||||
fmt.Sprintf("The block merges %d blocks > %d merge set size limit", mergeSetSize, mergeSetSizeLimit))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (node *blockNode) hasValidChildren() bool {
|
||||
for child := range node.children {
|
||||
if node.dag.index.BlockNodeStatus(child) == statusValid {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/dagconfig"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// This test is to ensure the size BlueAnticoneSizesSize is serialized to the size of KType.
|
||||
|
||||
@@ -36,7 +36,7 @@ func (bs blockSet) remove(node *blockNode) {
|
||||
|
||||
// clone clones thie block set
|
||||
func (bs blockSet) clone() blockSet {
|
||||
clone := newBlockSet()
|
||||
clone := make(blockSet, len(bs))
|
||||
for node := range bs {
|
||||
clone.add(node)
|
||||
}
|
||||
@@ -104,14 +104,48 @@ func (bs blockSet) String() string {
|
||||
|
||||
func (bs blockSet) bluest() *blockNode {
|
||||
var bluestNode *blockNode
|
||||
var maxScore uint64
|
||||
for node := range bs {
|
||||
if bluestNode == nil ||
|
||||
node.blueScore > maxScore ||
|
||||
(node.blueScore == maxScore && daghash.Less(node.hash, bluestNode.hash)) {
|
||||
if bluestNode == nil || bluestNode.less(node) {
|
||||
bluestNode = node
|
||||
maxScore = node.blueScore
|
||||
}
|
||||
}
|
||||
return bluestNode
|
||||
}
|
||||
|
||||
func (bs blockSet) isEqual(other blockSet) bool {
|
||||
if len(bs) != len(other) {
|
||||
return false
|
||||
}
|
||||
|
||||
for node := range bs {
|
||||
if !other.contains(node) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (bs blockSet) areAllIn(other blockSet) bool {
|
||||
for node := range bs {
|
||||
if !other.contains(node) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// isOnlyGenesis returns true if the only block in this blockSet is the genesis block
|
||||
func (bs blockSet) isOnlyGenesis() bool {
|
||||
if len(bs) != 1 {
|
||||
return false
|
||||
}
|
||||
for node := range bs {
|
||||
if node.isGenesis() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -243,3 +243,68 @@ func TestBlockSetUnion(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockSetAreAllIn(t *testing.T) {
|
||||
node1 := &blockNode{hash: &daghash.Hash{10}}
|
||||
node2 := &blockNode{hash: &daghash.Hash{20}}
|
||||
node3 := &blockNode{hash: &daghash.Hash{30}}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
set blockSet
|
||||
other blockSet
|
||||
expectedResult bool
|
||||
}{
|
||||
{
|
||||
name: "two empty sets",
|
||||
set: blockSetFromSlice(),
|
||||
other: blockSetFromSlice(),
|
||||
expectedResult: true,
|
||||
},
|
||||
{
|
||||
name: "set empty, other full",
|
||||
set: blockSetFromSlice(),
|
||||
other: blockSetFromSlice(node1, node2, node3),
|
||||
expectedResult: true,
|
||||
},
|
||||
{
|
||||
name: "set full, other empty",
|
||||
set: blockSetFromSlice(node1, node2, node3),
|
||||
other: blockSetFromSlice(),
|
||||
expectedResult: false,
|
||||
},
|
||||
{
|
||||
name: "same node in both",
|
||||
set: blockSetFromSlice(node1),
|
||||
other: blockSetFromSlice(node1),
|
||||
expectedResult: true,
|
||||
},
|
||||
{
|
||||
name: "different node in both",
|
||||
set: blockSetFromSlice(node1),
|
||||
other: blockSetFromSlice(node2),
|
||||
expectedResult: false,
|
||||
},
|
||||
{
|
||||
name: "set is subset of other",
|
||||
set: blockSetFromSlice(node1, node2),
|
||||
other: blockSetFromSlice(node2, node1, node3),
|
||||
expectedResult: true,
|
||||
},
|
||||
{
|
||||
name: "other is subset of set",
|
||||
set: blockSetFromSlice(node2, node1, node3),
|
||||
other: blockSetFromSlice(node1, node2),
|
||||
expectedResult: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
result := test.set.areAllIn(test.other)
|
||||
|
||||
if result != test.expectedResult {
|
||||
t.Errorf("blockSet.areAllIn: unexpected result in test '%s'. "+
|
||||
"Expected: '%t', got: '%t'", test.name, test.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,12 +1,13 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/domain/dagconfig"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/pkg/errors"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/dagconfig"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func TestBlueBlockWindow(t *testing.T) {
|
||||
@@ -53,12 +54,12 @@ func TestBlueBlockWindow(t *testing.T) {
|
||||
{
|
||||
parents: []string{"C", "D"},
|
||||
id: "E",
|
||||
expectedWindowWithGenesisPadding: []string{"C", "D", "B", "A", "A", "A", "A", "A", "A", "A"},
|
||||
expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"},
|
||||
},
|
||||
{
|
||||
parents: []string{"C", "D"},
|
||||
id: "F",
|
||||
expectedWindowWithGenesisPadding: []string{"C", "D", "B", "A", "A", "A", "A", "A", "A", "A"},
|
||||
expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"},
|
||||
},
|
||||
{
|
||||
parents: []string{"A"},
|
||||
@@ -73,37 +74,37 @@ func TestBlueBlockWindow(t *testing.T) {
|
||||
{
|
||||
parents: []string{"H", "F"},
|
||||
id: "I",
|
||||
expectedWindowWithGenesisPadding: []string{"F", "C", "D", "B", "A", "A", "A", "A", "A", "A"},
|
||||
expectedWindowWithGenesisPadding: []string{"F", "D", "C", "B", "A", "A", "A", "A", "A", "A"},
|
||||
},
|
||||
{
|
||||
parents: []string{"I"},
|
||||
id: "J",
|
||||
expectedWindowWithGenesisPadding: []string{"I", "F", "C", "D", "B", "A", "A", "A", "A", "A"},
|
||||
expectedWindowWithGenesisPadding: []string{"I", "F", "D", "C", "B", "A", "A", "A", "A", "A"},
|
||||
},
|
||||
{
|
||||
parents: []string{"J"},
|
||||
id: "K",
|
||||
expectedWindowWithGenesisPadding: []string{"J", "I", "F", "C", "D", "B", "A", "A", "A", "A"},
|
||||
expectedWindowWithGenesisPadding: []string{"J", "I", "F", "D", "C", "B", "A", "A", "A", "A"},
|
||||
},
|
||||
{
|
||||
parents: []string{"K"},
|
||||
id: "L",
|
||||
expectedWindowWithGenesisPadding: []string{"K", "J", "I", "F", "C", "D", "B", "A", "A", "A"},
|
||||
expectedWindowWithGenesisPadding: []string{"K", "J", "I", "F", "D", "C", "B", "A", "A", "A"},
|
||||
},
|
||||
{
|
||||
parents: []string{"L"},
|
||||
id: "M",
|
||||
expectedWindowWithGenesisPadding: []string{"L", "K", "J", "I", "F", "C", "D", "B", "A", "A"},
|
||||
expectedWindowWithGenesisPadding: []string{"L", "K", "J", "I", "F", "D", "C", "B", "A", "A"},
|
||||
},
|
||||
{
|
||||
parents: []string{"M"},
|
||||
id: "N",
|
||||
expectedWindowWithGenesisPadding: []string{"M", "L", "K", "J", "I", "F", "C", "D", "B", "A"},
|
||||
expectedWindowWithGenesisPadding: []string{"M", "L", "K", "J", "I", "F", "D", "C", "B", "A"},
|
||||
},
|
||||
{
|
||||
parents: []string{"N"},
|
||||
id: "O",
|
||||
expectedWindowWithGenesisPadding: []string{"N", "M", "L", "K", "J", "I", "F", "C", "D", "B"},
|
||||
expectedWindowWithGenesisPadding: []string{"N", "M", "L", "K", "J", "I", "F", "D", "C", "B"},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -117,13 +118,13 @@ func TestBlueBlockWindow(t *testing.T) {
|
||||
|
||||
block, err := PrepareBlockForTest(dag, parents.hashes(), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("block %v got unexpected error from PrepareBlockForTest: %v", blockData.id, err)
|
||||
t.Fatalf("block %v got unexpected error from PrepareBlockForTest: %+v", blockData.id, err)
|
||||
}
|
||||
|
||||
utilBlock := util.NewBlock(block)
|
||||
isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, BFNoPoWCheck)
|
||||
if err != nil {
|
||||
t.Fatalf("dag.ProcessBlock got unexpected error for block %v: %v", blockData.id, err)
|
||||
t.Fatalf("dag.ProcessBlock got unexpected error for block %v: %+v", blockData.id, err)
|
||||
}
|
||||
if isDelayed {
|
||||
t.Fatalf("block %s "+
|
||||
|
||||
@@ -1,98 +1,15 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/infrastructure/db/dbaccess"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/coinbasepayload"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/util/subnetworkid"
|
||||
"github.com/kaspanet/kaspad/util/txsort"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// compactFeeData is a specialized data type to store a compact list of fees
|
||||
// inside a block.
|
||||
// Every transaction gets a single uint64 value, stored as a plain binary list.
|
||||
// The transactions are ordered the same way they are ordered inside the block, making it easy
|
||||
// to traverse every transaction in a block and extract its fee.
|
||||
//
|
||||
// compactFeeFactory is used to create such a list.
|
||||
// compactFeeIterator is used to iterate over such a list.
|
||||
|
||||
type compactFeeData []byte
|
||||
|
||||
func (cfd compactFeeData) Len() int {
|
||||
return len(cfd) / 8
|
||||
}
|
||||
|
||||
type compactFeeFactory struct {
|
||||
buffer *bytes.Buffer
|
||||
writer *bufio.Writer
|
||||
}
|
||||
|
||||
func newCompactFeeFactory() *compactFeeFactory {
|
||||
buffer := bytes.NewBuffer([]byte{})
|
||||
return &compactFeeFactory{
|
||||
buffer: buffer,
|
||||
writer: bufio.NewWriter(buffer),
|
||||
}
|
||||
}
|
||||
|
||||
func (cfw *compactFeeFactory) add(txFee uint64) error {
|
||||
return binary.Write(cfw.writer, binary.LittleEndian, txFee)
|
||||
}
|
||||
|
||||
func (cfw *compactFeeFactory) data() (compactFeeData, error) {
|
||||
err := cfw.writer.Flush()
|
||||
|
||||
return compactFeeData(cfw.buffer.Bytes()), err
|
||||
}
|
||||
|
||||
type compactFeeIterator struct {
|
||||
reader io.Reader
|
||||
}
|
||||
|
||||
func (cfd compactFeeData) iterator() *compactFeeIterator {
|
||||
return &compactFeeIterator{
|
||||
reader: bufio.NewReader(bytes.NewBuffer(cfd)),
|
||||
}
|
||||
}
|
||||
|
||||
func (cfr *compactFeeIterator) next() (uint64, error) {
|
||||
var txFee uint64
|
||||
|
||||
err := binary.Read(cfr.reader, binary.LittleEndian, &txFee)
|
||||
|
||||
return txFee, err
|
||||
}
|
||||
|
||||
// The following functions relate to storing and retrieving fee data from the database
|
||||
|
||||
// getBluesFeeData returns the compactFeeData for all nodes's blues,
|
||||
// used to calculate the fees this blockNode needs to pay
|
||||
func (dag *BlockDAG) getBluesFeeData(node *blockNode) (map[daghash.Hash]compactFeeData, error) {
|
||||
bluesFeeData := make(map[daghash.Hash]compactFeeData)
|
||||
|
||||
for _, blueBlock := range node.blues {
|
||||
feeData, err := dbaccess.FetchFeeData(dag.databaseContext, blueBlock.hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
bluesFeeData[*blueBlock.hash] = feeData
|
||||
}
|
||||
|
||||
return bluesFeeData, nil
|
||||
}
|
||||
|
||||
// The following functions deal with building and validating the coinbase transaction
|
||||
|
||||
func (node *blockNode) validateCoinbaseTransaction(dag *BlockDAG, block *util.Block, txsAcceptanceData MultiBlockTxsAcceptanceData) error {
|
||||
if node.isGenesis() {
|
||||
return nil
|
||||
@@ -105,7 +22,7 @@ func (node *blockNode) validateCoinbaseTransaction(dag *BlockDAG, block *util.Bl
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
expectedCoinbaseTransaction, err := node.expectedCoinbaseTransaction(dag, txsAcceptanceData, scriptPubKey, extraData)
|
||||
expectedCoinbaseTransaction, err := node.expectedCoinbaseTransaction(txsAcceptanceData, scriptPubKey, extraData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -118,17 +35,12 @@ func (node *blockNode) validateCoinbaseTransaction(dag *BlockDAG, block *util.Bl
|
||||
}
|
||||
|
||||
// expectedCoinbaseTransaction returns the coinbase transaction for the current block
|
||||
func (node *blockNode) expectedCoinbaseTransaction(dag *BlockDAG, txsAcceptanceData MultiBlockTxsAcceptanceData, scriptPubKey []byte, extraData []byte) (*util.Tx, error) {
|
||||
bluesFeeData, err := dag.getBluesFeeData(node)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (node *blockNode) expectedCoinbaseTransaction(txsAcceptanceData MultiBlockTxsAcceptanceData, scriptPubKey []byte, extraData []byte) (*util.Tx, error) {
|
||||
txIns := []*appmessage.TxIn{}
|
||||
txOuts := []*appmessage.TxOut{}
|
||||
|
||||
for _, blue := range node.blues {
|
||||
txOut, err := coinbaseOutputForBlueBlock(dag, blue, txsAcceptanceData, bluesFeeData)
|
||||
txOut, err := coinbaseOutputForBlueBlock(node.dag, blue, txsAcceptanceData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -148,33 +60,18 @@ func (node *blockNode) expectedCoinbaseTransaction(dag *BlockDAG, txsAcceptanceD
|
||||
// coinbaseOutputForBlueBlock calculates the output that should go into the coinbase transaction of blueBlock
|
||||
// If blueBlock gets no fee - returns nil for txOut
|
||||
func coinbaseOutputForBlueBlock(dag *BlockDAG, blueBlock *blockNode,
|
||||
txsAcceptanceData MultiBlockTxsAcceptanceData, feeData map[daghash.Hash]compactFeeData) (*appmessage.TxOut, error) {
|
||||
txsAcceptanceData MultiBlockTxsAcceptanceData) (*appmessage.TxOut, error) {
|
||||
|
||||
blockTxsAcceptanceData, ok := txsAcceptanceData.FindAcceptanceData(blueBlock.hash)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("No txsAcceptanceData for block %s", blueBlock.hash)
|
||||
}
|
||||
blockFeeData, ok := feeData[*blueBlock.hash]
|
||||
if !ok {
|
||||
return nil, errors.Errorf("No feeData for block %s", blueBlock.hash)
|
||||
}
|
||||
|
||||
if len(blockTxsAcceptanceData.TxAcceptanceData) != blockFeeData.Len() {
|
||||
return nil, errors.Errorf(
|
||||
"length of accepted transaction data(%d) and fee data(%d) is not equal for block %s",
|
||||
len(blockTxsAcceptanceData.TxAcceptanceData), blockFeeData.Len(), blueBlock.hash)
|
||||
}
|
||||
|
||||
totalFees := uint64(0)
|
||||
feeIterator := blockFeeData.iterator()
|
||||
|
||||
for _, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
|
||||
fee, err := feeIterator.next()
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("Error retrieving fee from compactFeeData iterator: %s", err)
|
||||
}
|
||||
if txAcceptanceData.IsAccepted {
|
||||
totalFees += fee
|
||||
totalFees += txAcceptanceData.Fee
|
||||
}
|
||||
}
|
||||
|
||||
@@ -216,5 +113,5 @@ func (dag *BlockDAG) NextBlockCoinbaseTransactionNoLock(scriptPubKey []byte, ext
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dag.virtual.blockNode.expectedCoinbaseTransaction(dag, txsAcceptanceData, scriptPubKey, extraData)
|
||||
return dag.virtual.blockNode.expectedCoinbaseTransaction(txsAcceptanceData, scriptPubKey, extraData)
|
||||
}
|
||||
|
||||
@@ -1,60 +0,0 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"io"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFeeAccumulators(t *testing.T) {
|
||||
fees := []uint64{1, 2, 3, 4, 5, 6, 7, 0xffffffffffffffff}
|
||||
|
||||
factory := newCompactFeeFactory()
|
||||
|
||||
for _, fee := range fees {
|
||||
err := factory.add(fee)
|
||||
if err != nil {
|
||||
t.Fatalf("Error writing %d as tx fee: %s", fee, err)
|
||||
}
|
||||
}
|
||||
|
||||
expectedData := compactFeeData{
|
||||
1, 0, 0, 0, 0, 0, 0, 0,
|
||||
2, 0, 0, 0, 0, 0, 0, 0,
|
||||
3, 0, 0, 0, 0, 0, 0, 0,
|
||||
4, 0, 0, 0, 0, 0, 0, 0,
|
||||
5, 0, 0, 0, 0, 0, 0, 0,
|
||||
6, 0, 0, 0, 0, 0, 0, 0,
|
||||
7, 0, 0, 0, 0, 0, 0, 0,
|
||||
255, 255, 255, 255, 255, 255, 255, 255,
|
||||
}
|
||||
actualData, err := factory.data()
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error getting bytes from writer: %s", err)
|
||||
}
|
||||
if !reflect.DeepEqual(expectedData, actualData) {
|
||||
t.Errorf("Expected bytes: %v, but got: %v", expectedData, actualData)
|
||||
}
|
||||
|
||||
iterator := actualData.iterator()
|
||||
|
||||
for i, expectedFee := range fees {
|
||||
actualFee, err := iterator.next()
|
||||
if err != nil {
|
||||
t.Fatalf("Error getting fee for Tx#%d: %s", i, err)
|
||||
}
|
||||
|
||||
if actualFee != expectedFee {
|
||||
t.Errorf("Tx #%d: Expected fee: %d, but got %d", i, expectedFee, actualFee)
|
||||
}
|
||||
}
|
||||
|
||||
_, err = iterator.next()
|
||||
if err == nil {
|
||||
t.Fatal("No error from iterator.nextTxFee after done reading all transactions")
|
||||
}
|
||||
if err != io.EOF {
|
||||
t.Fatalf("Error from iterator.nextTxFee after done reading all transactions is not io.EOF: %s", err)
|
||||
}
|
||||
}
|
||||
@@ -78,7 +78,7 @@ func loadUTXOSet(filename string) (UTXOSet, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
utxoSet.utxoCollection[appmessage.Outpoint{TxID: txID, Index: index}] = entry
|
||||
utxoSet.utxoCache[appmessage.Outpoint{TxID: txID, Index: index}] = entry
|
||||
}
|
||||
|
||||
return utxoSet, nil
|
||||
|
||||
@@ -9,13 +9,6 @@ import (
|
||||
|
||||
// Config is a descriptor which specifies the blockDAG instance configuration.
|
||||
type Config struct {
|
||||
// Interrupt specifies a channel the caller can close to signal that
|
||||
// long running operations, such as catching up indexes or performing
|
||||
// database migrations, should be interrupted.
|
||||
//
|
||||
// This field can be nil if the caller does not desire the behavior.
|
||||
Interrupt <-chan struct{}
|
||||
|
||||
// DAGParams identifies which DAG parameters the DAG is associated
|
||||
// with.
|
||||
//
|
||||
@@ -51,4 +44,8 @@ type Config struct {
|
||||
// DatabaseContext is the context in which all database queries related to
|
||||
// this DAG are going to run.
|
||||
DatabaseContext *dbaccess.DatabaseContext
|
||||
|
||||
// MaxUTXOCacheSize is the Max size of loaded UTXO into ram from the disk in bytes
|
||||
// to support UTXO lazy-load
|
||||
MaxUTXOCacheSize uint64
|
||||
}
|
||||
|
||||
@@ -74,7 +74,7 @@ func (dag *BlockDAG) blockConfirmations(node *blockNode) (uint64, error) {
|
||||
// the given node
|
||||
func (dag *BlockDAG) acceptingBlock(node *blockNode) (*blockNode, error) {
|
||||
// Return an error if the node is the virtual block
|
||||
if node == &dag.virtual.blockNode {
|
||||
if node == dag.virtual.blockNode {
|
||||
return nil, errors.New("cannot get acceptingBlock for virtual")
|
||||
}
|
||||
|
||||
|
||||
@@ -6,12 +6,11 @@ package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
"sync"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/infrastructure/db/dbaccess"
|
||||
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kaspanet/kaspad/util/subnetworkid"
|
||||
@@ -87,8 +86,6 @@ type BlockDAG struct {
|
||||
notificationsLock sync.RWMutex
|
||||
notifications []NotificationCallback
|
||||
|
||||
lastFinalityPoint *blockNode
|
||||
|
||||
utxoDiffStore *utxoDiffStore
|
||||
multisetStore *multisetStore
|
||||
|
||||
@@ -96,6 +93,13 @@ type BlockDAG struct {
|
||||
|
||||
recentBlockProcessingTimestamps []mstime.Time
|
||||
startTime mstime.Time
|
||||
|
||||
maxUTXOCacheSize uint64
|
||||
tips blockSet
|
||||
|
||||
// validTips is a set of blocks with the status "valid", which have no valid descendants.
|
||||
// Note that some validTips might not be actual tips.
|
||||
validTips blockSet
|
||||
}
|
||||
|
||||
// New returns a BlockDAG instance using the provided configuration details.
|
||||
@@ -119,6 +123,7 @@ func New(config *Config) (*BlockDAG, error) {
|
||||
blockCount: 0,
|
||||
subnetworkID: config.SubnetworkID,
|
||||
startTime: mstime.Now(),
|
||||
maxUTXOCacheSize: config.MaxUTXOCacheSize,
|
||||
}
|
||||
|
||||
dag.virtual = newVirtualBlock(dag, nil)
|
||||
@@ -241,7 +246,7 @@ func (dag *BlockDAG) UTXOSet() *FullUTXOSet {
|
||||
|
||||
// CalcPastMedianTime returns the past median time of the DAG.
|
||||
func (dag *BlockDAG) CalcPastMedianTime() mstime.Time {
|
||||
return dag.virtual.tips().bluest().PastMedianTime(dag)
|
||||
return dag.virtual.selectedParent.PastMedianTime()
|
||||
}
|
||||
|
||||
// GetUTXOEntry returns the requested unspent transaction output. The returned
|
||||
@@ -308,7 +313,17 @@ func (dag *BlockDAG) BlockCount() uint64 {
|
||||
|
||||
// TipHashes returns the hashes of the DAG's tips
|
||||
func (dag *BlockDAG) TipHashes() []*daghash.Hash {
|
||||
return dag.virtual.tips().hashes()
|
||||
return dag.tips.hashes()
|
||||
}
|
||||
|
||||
// ValidTipHashes returns the hashes of the DAG's valid tips
|
||||
func (dag *BlockDAG) ValidTipHashes() []*daghash.Hash {
|
||||
return dag.validTips.hashes()
|
||||
}
|
||||
|
||||
// VirtualParentHashes returns the hashes of the virtual block's parents
|
||||
func (dag *BlockDAG) VirtualParentHashes() []*daghash.Hash {
|
||||
return dag.virtual.parents.hashes()
|
||||
}
|
||||
|
||||
// HeaderByHash returns the block header identified by the given hash or an
|
||||
@@ -356,13 +371,50 @@ func (dag *BlockDAG) SelectedParentHash(blockHash *daghash.Hash) (*daghash.Hash,
|
||||
return node.selectedParent.hash, nil
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) isInPast(this *blockNode, other *blockNode) (bool, error) {
|
||||
return dag.reachabilityTree.isInPast(this, other)
|
||||
// isInPast returns true if `node` is in the past of `other`
|
||||
//
|
||||
// Note: this method will return true if `node == other`
|
||||
func (dag *BlockDAG) isInPast(node *blockNode, other *blockNode) (bool, error) {
|
||||
return dag.reachabilityTree.isInPast(node, other)
|
||||
}
|
||||
|
||||
// isInPastOfAny returns true if `node` is in the past of any of `others`
|
||||
//
|
||||
// Note: this method will return true if `node` is in `others`
|
||||
func (dag *BlockDAG) isInPastOfAny(node *blockNode, others blockSet) (bool, error) {
|
||||
for other := range others {
|
||||
isInPast, err := dag.isInPast(node, other)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if isInPast {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// isInPastOfAny returns true if any one of `nodes` is in the past of any of `others`
|
||||
//
|
||||
// Note: this method will return true if `other` is in `nodes`
|
||||
func (dag *BlockDAG) isAnyInPastOf(nodes blockSet, other *blockNode) (bool, error) {
|
||||
for node := range nodes {
|
||||
isInPast, err := dag.isInPast(node, other)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if isInPast {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// GetTopHeaders returns the top appmessage.MaxBlockHeadersPerMsg block headers ordered by blue score.
|
||||
func (dag *BlockDAG) GetTopHeaders(highHash *daghash.Hash, maxHeaders uint64) ([]*appmessage.BlockHeader, error) {
|
||||
highNode := &dag.virtual.blockNode
|
||||
highNode := dag.virtual.blockNode
|
||||
if highHash != nil {
|
||||
var ok bool
|
||||
highNode, ok = dag.index.LookupNode(highHash)
|
||||
@@ -443,5 +495,191 @@ func (dag *BlockDAG) IsKnownInvalid(hash *daghash.Hash) bool {
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return dag.index.NodeStatus(node).KnownInvalid()
|
||||
return dag.index.BlockNodeStatus(node).KnownInvalid()
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) addTip(tip *blockNode) (
|
||||
didVirtualParentsChange bool, chainUpdates *selectedParentChainUpdates, err error) {
|
||||
|
||||
newTips := dag.tips.clone()
|
||||
for parent := range tip.parents {
|
||||
newTips.remove(parent)
|
||||
}
|
||||
|
||||
newTips.add(tip)
|
||||
|
||||
return dag.setTips(newTips)
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) setTips(newTips blockSet) (
|
||||
didVirtualParentsChange bool, chainUpdates *selectedParentChainUpdates, err error) {
|
||||
|
||||
didVirtualParentsChange, chainUpdates, err = dag.updateVirtualParents(newTips, dag.virtual.finalityPoint())
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
dag.tips = newTips
|
||||
|
||||
return didVirtualParentsChange, chainUpdates, nil
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) updateVirtualParents(newTips blockSet, finalityPoint *blockNode) (
|
||||
didVirtualParentsChange bool, chainUpdates *selectedParentChainUpdates, err error) {
|
||||
|
||||
var newVirtualParents blockSet
|
||||
// If only genesis is the newTips - we are still initializing the DAG and not all structures required
|
||||
// for calling dag.selectVirtualParents have been initialized yet.
|
||||
// Specifically - this function would be called with finalityPoint = dag.virtual.finalityPoint(), which has
|
||||
// not been initialized to anything real yet.
|
||||
//
|
||||
// Therefore, in this case - simply pick genesis as virtual's only parent
|
||||
if newTips.isOnlyGenesis() {
|
||||
newVirtualParents = newTips
|
||||
} else {
|
||||
newVirtualParents, err = dag.selectVirtualParents(newTips, finalityPoint)
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
oldVirtualParents := dag.virtual.parents
|
||||
didVirtualParentsChange = !oldVirtualParents.isEqual(newVirtualParents)
|
||||
|
||||
if !didVirtualParentsChange {
|
||||
return false, &selectedParentChainUpdates{}, nil
|
||||
}
|
||||
|
||||
oldSelectedParent := dag.virtual.selectedParent
|
||||
dag.virtual.blockNode, _ = dag.newBlockNode(nil, newVirtualParents)
|
||||
chainUpdates = dag.virtual.updateSelectedParentSet(oldSelectedParent)
|
||||
|
||||
return didVirtualParentsChange, chainUpdates, nil
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) addValidTip(newValidTip *blockNode) error {
|
||||
newValidTips := dag.validTips.clone()
|
||||
for validTip := range dag.validTips {
|
||||
// We use isInPastOfAny on newValidTip.parents instead of
|
||||
// isInPast on newValidTip because newValidTip does not
|
||||
// necessarily have reachability data associated with it yet.
|
||||
isInPastOfNewValidTip, err := dag.isInPastOfAny(validTip, newValidTip.parents)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if isInPastOfNewValidTip {
|
||||
newValidTips.remove(validTip)
|
||||
}
|
||||
}
|
||||
|
||||
newValidTips.add(newValidTip)
|
||||
|
||||
dag.validTips = newValidTips
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) selectVirtualParents(tips blockSet, finalityPoint *blockNode) (blockSet, error) {
|
||||
selected := newBlockSet()
|
||||
|
||||
candidatesHeap := newDownHeap()
|
||||
candidatesHeap.pushSet(tips)
|
||||
|
||||
// If the first candidate has been disqualified from the chain or violates finality -
|
||||
// it cannot be virtual's parent, since it will make it virtual's selectedParent - disqualifying virtual itself.
|
||||
// Therefore, in such a case we remove it from the list of virtual parent candidates, and replace with
|
||||
// its parents that have no disqualified children
|
||||
disqualifiedCandidates := newBlockSet()
|
||||
for {
|
||||
if candidatesHeap.Len() == 0 {
|
||||
return nil, errors.New("virtual has no valid parent candidates")
|
||||
}
|
||||
selectedParentCandidate := candidatesHeap.pop()
|
||||
|
||||
if dag.index.BlockNodeStatus(selectedParentCandidate) == statusValid {
|
||||
isFinalityPointInSelectedParentChain, err := dag.isInSelectedParentChainOf(finalityPoint, selectedParentCandidate)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if isFinalityPointInSelectedParentChain {
|
||||
selected.add(selectedParentCandidate)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
disqualifiedCandidates.add(selectedParentCandidate)
|
||||
|
||||
for parent := range selectedParentCandidate.parents {
|
||||
if parent.children.areAllIn(disqualifiedCandidates) {
|
||||
candidatesHeap.Push(parent)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mergeSetSize := 1 // starts counting from 1 because selectedParent is already in the mergeSet
|
||||
|
||||
for len(selected) < appmessage.MaxBlockParents && candidatesHeap.Len() > 0 {
|
||||
candidate := candidatesHeap.pop()
|
||||
|
||||
// check that the candidate doesn't increase the virtual's merge set over `mergeSetSizeLimit`
|
||||
mergeSetIncrease, err := dag.mergeSetIncrease(candidate, selected)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if mergeSetSize+mergeSetIncrease > mergeSetSizeLimit {
|
||||
continue
|
||||
}
|
||||
|
||||
selected.add(candidate)
|
||||
mergeSetSize += mergeSetIncrease
|
||||
}
|
||||
|
||||
tempVirtual, _ := dag.newBlockNode(nil, selected)
|
||||
|
||||
boundedMergeBreakingParents, err := dag.boundedMergeBreakingParents(tempVirtual)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
selected = selected.subtract(boundedMergeBreakingParents)
|
||||
|
||||
return selected, nil
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) mergeSetIncrease(candidate *blockNode, selected blockSet) (int, error) {
|
||||
visited := newBlockSet()
|
||||
queue := newDownHeap()
|
||||
queue.Push(candidate)
|
||||
mergeSetIncrease := 1 // starts with 1 for the candidate itself
|
||||
|
||||
for queue.Len() > 0 {
|
||||
current := queue.pop()
|
||||
isInPastOfSelected, err := dag.isInPastOfAny(current, selected)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if isInPastOfSelected {
|
||||
continue
|
||||
}
|
||||
mergeSetIncrease++
|
||||
|
||||
for parent := range current.parents {
|
||||
if !visited.contains(parent) {
|
||||
visited.add(parent)
|
||||
queue.Push(parent)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return mergeSetIncrease, nil
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) saveState(dbTx *dbaccess.TxContext) error {
|
||||
state := &dagState{
|
||||
TipHashes: dag.TipHashes(),
|
||||
ValidTipHashes: dag.ValidTipHashes(),
|
||||
VirtualParentsHashes: dag.VirtualParentHashes(),
|
||||
LocalSubnetworkID: dag.subnetworkID,
|
||||
}
|
||||
return saveDAGState(dbTx, state)
|
||||
}
|
||||
|
||||
@@ -14,15 +14,15 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/go-secp256k1"
|
||||
"github.com/kaspanet/kaspad/infrastructure/db/dbaccess"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/domain/dagconfig"
|
||||
"github.com/kaspanet/kaspad/domain/txscript"
|
||||
"github.com/kaspanet/kaspad/infrastructure/config"
|
||||
"github.com/kaspanet/kaspad/infrastructure/db/dbaccess"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/util/subnetworkid"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func TestBlockCount(t *testing.T) {
|
||||
@@ -237,19 +237,41 @@ func TestIsKnownBlock(t *testing.T) {
|
||||
// the returned SequenceLocks are correct for each test instance.
|
||||
func TestCalcSequenceLock(t *testing.T) {
|
||||
netParams := &dagconfig.SimnetParams
|
||||
|
||||
blockVersion := int32(0x10000000)
|
||||
dag, teardownFunc, err := DAGSetup("TestCalcSequenceLock", true, Config{
|
||||
DAGParams: netParams,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Error in DAGSetup: %+v", err)
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
// Generate enough synthetic blocks for the rest of the test
|
||||
dag := newTestDAG(netParams)
|
||||
node := dag.selectedTip()
|
||||
blockTime := node.Header().Timestamp
|
||||
numBlocksToGenerate := 5
|
||||
for i := 0; i < numBlocksToGenerate; i++ {
|
||||
blockTime = blockTime.Add(time.Second)
|
||||
node = newTestNode(dag, blockSetFromSlice(node), blockVersion, 0, blockTime)
|
||||
dag.index.AddNode(node)
|
||||
dag.virtual.SetTips(blockSetFromSlice(node))
|
||||
parents := blockSetFromSlice(node)
|
||||
|
||||
block, err := PrepareBlockForTest(dag, parents.hashes(), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("block No. %d got unexpected error from PrepareBlockForTest: %+v", i, err)
|
||||
}
|
||||
block.Header.Timestamp = blockTime.Add(time.Second)
|
||||
|
||||
utilBlock := util.NewBlock(block)
|
||||
isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, BFNoPoWCheck)
|
||||
if err != nil {
|
||||
t.Fatalf("block No. %d got unexpected error from ProcessBlock: %+v", i, err)
|
||||
}
|
||||
if isOrphan || isDelayed {
|
||||
t.Fatalf("Block No. %d is unexpectadly orphan: %t or delayed: %t", i, isOrphan, isDelayed)
|
||||
}
|
||||
|
||||
var ok bool
|
||||
node, ok = dag.index.LookupNode(block.BlockHash())
|
||||
if !ok {
|
||||
t.Errorf("Block No. %d not found in index after adding to dag", i)
|
||||
}
|
||||
}
|
||||
|
||||
// Create a utxo view with a fake utxo for the inputs used in the
|
||||
@@ -257,7 +279,10 @@ func TestCalcSequenceLock(t *testing.T) {
|
||||
// age of 4 blocks.
|
||||
msgTx := appmessage.NewNativeMsgTx(appmessage.TxVersion, nil, []*appmessage.TxOut{{ScriptPubKey: nil, Value: 10}})
|
||||
targetTx := util.NewTx(msgTx)
|
||||
utxoSet := NewFullUTXOSet()
|
||||
fullUTXOCacheSize := config.DefaultConfig().MaxUTXOCacheSize
|
||||
db, teardown := prepareDatabaseForTest(t, "TestCalcSequenceLock")
|
||||
defer teardown()
|
||||
utxoSet := NewFullUTXOSetFromContext(db, fullUTXOCacheSize)
|
||||
blueScore := uint64(numBlocksToGenerate) - 4
|
||||
if isAccepted, err := utxoSet.AddTx(targetTx.MsgTx(), blueScore); err != nil {
|
||||
t.Fatalf("AddTx unexpectedly failed. Error: %s", err)
|
||||
@@ -279,13 +304,13 @@ func TestCalcSequenceLock(t *testing.T) {
|
||||
// Obtain the past median time from the PoV of the input created above.
|
||||
// The past median time for the input is the past median time from the PoV
|
||||
// of the block *prior* to the one that included it.
|
||||
medianTime := node.RelativeAncestor(5).PastMedianTime(dag).UnixMilliseconds()
|
||||
medianTime := node.RelativeAncestor(5).PastMedianTime().UnixMilliseconds()
|
||||
|
||||
// The median time calculated from the PoV of the best block in the
|
||||
// test DAG. For unconfirmed inputs, this value will be used since
|
||||
// the MTP will be calculated from the PoV of the yet-to-be-mined
|
||||
// block.
|
||||
nextMedianTime := node.PastMedianTime(dag).UnixMilliseconds()
|
||||
nextMedianTime := node.PastMedianTime().UnixMilliseconds()
|
||||
nextBlockBlueScore := int32(numBlocksToGenerate) + 1
|
||||
|
||||
// Add an additional transaction which will serve as our unconfirmed
|
||||
@@ -538,7 +563,7 @@ func TestCalcPastMedianTime(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
millisecondsSinceGenesis := nodes[test.blockNumber].PastMedianTime(dag).UnixMilliseconds() -
|
||||
millisecondsSinceGenesis := nodes[test.blockNumber].PastMedianTime().UnixMilliseconds() -
|
||||
dag.genesis.Header().Timestamp.UnixMilliseconds()
|
||||
|
||||
if millisecondsSinceGenesis != test.expectedMillisecondsSinceGenesis {
|
||||
@@ -670,7 +695,7 @@ func TestAcceptingInInit(t *testing.T) {
|
||||
t.Fatalf("block %s does not exist in the DAG", testBlock.Hash())
|
||||
}
|
||||
|
||||
if testNode.status&statusValid == 0 {
|
||||
if testNode.status != statusValid {
|
||||
t.Fatalf("testNode is unexpectedly invalid")
|
||||
}
|
||||
}
|
||||
@@ -735,7 +760,7 @@ func TestConfirmations(t *testing.T) {
|
||||
}
|
||||
|
||||
// Check that each of the tips has a 0 confirmations
|
||||
tips := dag.virtual.tips()
|
||||
tips := dag.tips
|
||||
for tip := range tips {
|
||||
tipConfirmations, err := dag.blockConfirmations(tip)
|
||||
if err != nil {
|
||||
@@ -904,119 +929,6 @@ func TestAcceptingBlock(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestFinalizeNodesBelowFinalityPoint(t *testing.T) {
|
||||
testFinalizeNodesBelowFinalityPoint(t, true)
|
||||
testFinalizeNodesBelowFinalityPoint(t, false)
|
||||
}
|
||||
|
||||
func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 1
|
||||
dag, teardownFunc, err := DAGSetup("testFinalizeNodesBelowFinalityPoint", true, Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to setup DAG instance: %v", err)
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
blockVersion := int32(0x10000000)
|
||||
blockTime := dag.genesis.Header().Timestamp
|
||||
|
||||
flushUTXODiffStore := func() {
|
||||
dbTx, err := dag.databaseContext.NewTx()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to open database transaction: %s", err)
|
||||
}
|
||||
defer dbTx.RollbackUnlessClosed()
|
||||
err = dag.utxoDiffStore.flushToDB(dbTx)
|
||||
if err != nil {
|
||||
t.Fatalf("Error flushing utxoDiffStore data to DB: %s", err)
|
||||
}
|
||||
dag.utxoDiffStore.clearDirtyEntries()
|
||||
err = dbTx.Commit()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to commit database transaction: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
addNode := func(parent *blockNode) *blockNode {
|
||||
blockTime = blockTime.Add(time.Second)
|
||||
node := newTestNode(dag, blockSetFromSlice(parent), blockVersion, 0, blockTime)
|
||||
node.updateParentsChildren()
|
||||
dag.index.AddNode(node)
|
||||
|
||||
// Put dummy diff data in dag.utxoDiffStore
|
||||
err := dag.utxoDiffStore.setBlockDiff(node, NewUTXODiff())
|
||||
if err != nil {
|
||||
t.Fatalf("setBlockDiff: %s", err)
|
||||
}
|
||||
flushUTXODiffStore()
|
||||
return node
|
||||
}
|
||||
finalityInterval := dag.FinalityInterval()
|
||||
nodes := make([]*blockNode, 0, finalityInterval)
|
||||
currentNode := dag.genesis
|
||||
nodes = append(nodes, currentNode)
|
||||
for i := uint64(0); i <= finalityInterval*2; i++ {
|
||||
currentNode = addNode(currentNode)
|
||||
nodes = append(nodes, currentNode)
|
||||
}
|
||||
|
||||
// Manually set the last finality point
|
||||
dag.lastFinalityPoint = nodes[finalityInterval-1]
|
||||
|
||||
// Don't unload diffData
|
||||
currentDifference := maxBlueScoreDifferenceToKeepLoaded
|
||||
maxBlueScoreDifferenceToKeepLoaded = math.MaxUint64
|
||||
defer func() { maxBlueScoreDifferenceToKeepLoaded = currentDifference }()
|
||||
|
||||
dag.finalizeNodesBelowFinalityPoint(deleteDiffData)
|
||||
flushUTXODiffStore()
|
||||
|
||||
for _, node := range nodes[:finalityInterval-1] {
|
||||
if !node.isFinalized {
|
||||
t.Errorf("Node with blue score %d expected to be finalized", node.blueScore)
|
||||
}
|
||||
if _, ok := dag.utxoDiffStore.loaded[node]; deleteDiffData && ok {
|
||||
t.Errorf("The diff data of node with blue score %d should have been unloaded if deleteDiffData is %T", node.blueScore, deleteDiffData)
|
||||
} else if !deleteDiffData && !ok {
|
||||
t.Errorf("The diff data of node with blue score %d shouldn't have been unloaded if deleteDiffData is %T", node.blueScore, deleteDiffData)
|
||||
}
|
||||
|
||||
_, err := dag.utxoDiffStore.diffDataFromDB(node.hash)
|
||||
exists := !dbaccess.IsNotFoundError(err)
|
||||
if exists && err != nil {
|
||||
t.Errorf("diffDataFromDB: %s", err)
|
||||
continue
|
||||
}
|
||||
|
||||
if deleteDiffData && exists {
|
||||
t.Errorf("The diff data of node with blue score %d should have been deleted from the database if deleteDiffData is %T", node.blueScore, deleteDiffData)
|
||||
continue
|
||||
}
|
||||
|
||||
if !deleteDiffData && !exists {
|
||||
t.Errorf("The diff data of node with blue score %d shouldn't have been deleted from the database if deleteDiffData is %T", node.blueScore, deleteDiffData)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
for _, node := range nodes[finalityInterval-1:] {
|
||||
if node.isFinalized {
|
||||
t.Errorf("Node with blue score %d wasn't expected to be finalized", node.blueScore)
|
||||
}
|
||||
if _, ok := dag.utxoDiffStore.loaded[node]; !ok {
|
||||
t.Errorf("The diff data of node with blue score %d shouldn't have been unloaded", node.blueScore)
|
||||
}
|
||||
if diffData, err := dag.utxoDiffStore.diffDataFromDB(node.hash); err != nil {
|
||||
t.Errorf("diffDataFromDB: %s", err)
|
||||
} else if diffData == nil {
|
||||
t.Errorf("The diff data of node with blue score %d shouldn't have been deleted from the database", node.blueScore)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDAGIndexFailedStatus(t *testing.T) {
|
||||
params := dagconfig.SimnetParams
|
||||
dag, teardownFunc, err := DAGSetup("TestDAGIndexFailedStatus", true, Config{
|
||||
@@ -1027,8 +939,14 @@ func TestDAGIndexFailedStatus(t *testing.T) {
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
invalidCbTx := appmessage.NewSubnetworkMsgTx(appmessage.TxVersion, []*appmessage.TxIn{}, []*appmessage.TxOut{}, subnetworkid.SubnetworkIDCoinbase, 0, []byte{})
|
||||
txs := []*util.Tx{util.NewTx(invalidCbTx)}
|
||||
// Create a block with non-finalized transaction so that it's flagged as invalid
|
||||
coinbaseTx := appmessage.NewSubnetworkMsgTx(appmessage.TxVersion, []*appmessage.TxIn{},
|
||||
[]*appmessage.TxOut{}, subnetworkid.SubnetworkIDCoinbase, 0, []byte{})
|
||||
invalidTxIn := appmessage.NewTxIn(appmessage.NewOutpoint(dag.Params.GenesisBlock.Transactions[0].TxID(), 0), nil)
|
||||
invalidTxIn.Sequence = 0
|
||||
invalidTx := appmessage.NewNativeMsgTx(appmessage.TxVersion, []*appmessage.TxIn{invalidTxIn}, []*appmessage.TxOut{})
|
||||
invalidTx.LockTime = math.MaxUint64
|
||||
txs := []*util.Tx{util.NewTx(coinbaseTx), util.NewTx(invalidTx)}
|
||||
hashMerkleRoot := BuildHashMerkleTreeStore(txs).Root()
|
||||
invalidMsgBlock := appmessage.NewMsgBlock(
|
||||
appmessage.NewBlockHeader(
|
||||
@@ -1039,7 +957,8 @@ func TestDAGIndexFailedStatus(t *testing.T) {
|
||||
dag.genesis.bits,
|
||||
0),
|
||||
)
|
||||
invalidMsgBlock.AddTransaction(invalidCbTx)
|
||||
invalidMsgBlock.AddTransaction(coinbaseTx)
|
||||
invalidMsgBlock.AddTransaction(invalidTx)
|
||||
invalidBlock := util.NewBlock(invalidMsgBlock)
|
||||
isOrphan, isDelayed, err := dag.ProcessBlock(invalidBlock, BFNoPoWCheck)
|
||||
|
||||
@@ -1059,7 +978,7 @@ func TestDAGIndexFailedStatus(t *testing.T) {
|
||||
if !ok {
|
||||
t.Fatalf("invalidBlockNode wasn't added to the block index as expected")
|
||||
}
|
||||
if invalidBlockNode.status&statusValidateFailed != statusValidateFailed {
|
||||
if invalidBlockNode.status != statusValidateFailed {
|
||||
t.Fatalf("invalidBlockNode status to have %b flags raised (got: %b)", statusValidateFailed, invalidBlockNode.status)
|
||||
}
|
||||
|
||||
@@ -1068,7 +987,8 @@ func TestDAGIndexFailedStatus(t *testing.T) {
|
||||
invalidBlock.Hash(),
|
||||
}, hashMerkleRoot, &daghash.Hash{}, &daghash.Hash{}, dag.genesis.bits, 0),
|
||||
)
|
||||
invalidMsgBlockChild.AddTransaction(invalidCbTx)
|
||||
invalidMsgBlockChild.AddTransaction(coinbaseTx)
|
||||
invalidMsgBlockChild.AddTransaction(invalidTx)
|
||||
invalidBlockChild := util.NewBlock(invalidMsgBlockChild)
|
||||
|
||||
isOrphan, isDelayed, err = dag.ProcessBlock(invalidBlockChild, BFNoPoWCheck)
|
||||
@@ -1088,7 +1008,7 @@ func TestDAGIndexFailedStatus(t *testing.T) {
|
||||
if !ok {
|
||||
t.Fatalf("invalidBlockChild wasn't added to the block index as expected")
|
||||
}
|
||||
if invalidBlockChildNode.status&statusInvalidAncestor != statusInvalidAncestor {
|
||||
if invalidBlockChildNode.status != statusInvalidAncestor {
|
||||
t.Fatalf("invalidBlockNode status to have %b flags raised (got %b)", statusInvalidAncestor, invalidBlockChildNode.status)
|
||||
}
|
||||
|
||||
@@ -1097,7 +1017,8 @@ func TestDAGIndexFailedStatus(t *testing.T) {
|
||||
invalidBlockChild.Hash(),
|
||||
}, hashMerkleRoot, &daghash.Hash{}, &daghash.Hash{}, dag.genesis.bits, 0),
|
||||
)
|
||||
invalidMsgBlockGrandChild.AddTransaction(invalidCbTx)
|
||||
invalidMsgBlockGrandChild.AddTransaction(coinbaseTx)
|
||||
invalidMsgBlockGrandChild.AddTransaction(invalidTx)
|
||||
invalidBlockGrandChild := util.NewBlock(invalidMsgBlockGrandChild)
|
||||
|
||||
isOrphan, isDelayed, err = dag.ProcessBlock(invalidBlockGrandChild, BFNoPoWCheck)
|
||||
@@ -1116,25 +1037,60 @@ func TestDAGIndexFailedStatus(t *testing.T) {
|
||||
if !ok {
|
||||
t.Fatalf("invalidBlockGrandChild wasn't added to the block index as expected")
|
||||
}
|
||||
if invalidBlockGrandChildNode.status&statusInvalidAncestor != statusInvalidAncestor {
|
||||
if invalidBlockGrandChildNode.status != statusInvalidAncestor {
|
||||
t.Fatalf("invalidBlockGrandChildNode status to have %b flags raised (got %b)", statusInvalidAncestor, invalidBlockGrandChildNode.status)
|
||||
}
|
||||
}
|
||||
|
||||
func testProcessBlockRuleError(t *testing.T, dag *BlockDAG, block *appmessage.MsgBlock, expectedRuleErr error) {
|
||||
// testProcessBlockStatus submits the given block, and makes sure this block has got the expected status
|
||||
func testProcessBlockStatus(
|
||||
t *testing.T, testName string, dag *BlockDAG, block *appmessage.MsgBlock, expectedStatus blockStatus) {
|
||||
|
||||
isOrphan, isDelayed, err := dag.ProcessBlock(util.NewBlock(block), BFNoPoWCheck)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: Error submitting block: %+v", testName, err)
|
||||
}
|
||||
if isDelayed {
|
||||
t.Fatalf("%s: ProcessBlock: block is too far in the future", testName)
|
||||
}
|
||||
if isOrphan {
|
||||
t.Fatalf("%s: ProcessBlock: block got unexpectedly orphaned", testName)
|
||||
}
|
||||
|
||||
node, ok := dag.index.LookupNode(block.BlockHash())
|
||||
if !ok {
|
||||
t.Fatalf("%s: Error locating block %s after processing it", testName, block.BlockHash())
|
||||
}
|
||||
|
||||
actualStatus := dag.index.BlockNodeStatus(node)
|
||||
if actualStatus != expectedStatus {
|
||||
t.Errorf("%s: Expected block status: '%s' but got '%s'", testName, expectedStatus, actualStatus)
|
||||
}
|
||||
}
|
||||
|
||||
func testProcessBlockRuleError(t *testing.T, testName string, dag *BlockDAG, block *appmessage.MsgBlock, expectedRuleErr error) {
|
||||
isOrphan, isDelayed, err := dag.ProcessBlock(util.NewBlock(block), BFNoPoWCheck)
|
||||
|
||||
err = checkRuleError(err, expectedRuleErr)
|
||||
if err != nil {
|
||||
t.Errorf("checkRuleError: %s", err)
|
||||
t.Errorf("%s: checkRuleError: %s", err, testName)
|
||||
}
|
||||
|
||||
if isDelayed {
|
||||
t.Fatalf("ProcessBlock: block " +
|
||||
"is too far in the future")
|
||||
t.Fatalf("%s: ProcessBlock: block is too far in the future", testName)
|
||||
}
|
||||
if isOrphan {
|
||||
t.Fatalf("ProcessBlock: block got unexpectedly orphaned")
|
||||
t.Fatalf("%s: ProcessBlock: block got unexpectedly orphaned", testName)
|
||||
}
|
||||
}
|
||||
|
||||
// makeNextSelectedTip plays with block's nonce until its hash is smaller than dag's selectedTip
|
||||
// It is the callers responsibility to make sure block's blue score is equal to selected tip's
|
||||
func makeNextSelectedTip(dag *BlockDAG, block *appmessage.MsgBlock) {
|
||||
selectedTip := dag.selectedTip()
|
||||
|
||||
for daghash.Less(block.BlockHash(), selectedTip.hash) {
|
||||
block.Header.Nonce++
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1176,7 +1132,7 @@ func TestDoubleSpends(t *testing.T) {
|
||||
|
||||
blockWithTx1 := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{fundingBlock.BlockHash()}, []*appmessage.MsgTx{tx1})
|
||||
|
||||
// Check that a block will be rejected if it has a transaction that already exists in its past.
|
||||
// Check that a block will be disqualified if it has a transaction that already exists in its past.
|
||||
anotherBlockWithTx1, err := PrepareBlockForTest(dag, []*daghash.Hash{blockWithTx1.BlockHash()}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
@@ -1190,9 +1146,9 @@ func TestDoubleSpends(t *testing.T) {
|
||||
}
|
||||
anotherBlockWithTx1.Header.HashMerkleRoot = BuildHashMerkleTreeStore(anotherBlockWithTx1UtilTxs).Root()
|
||||
|
||||
testProcessBlockRuleError(t, dag, anotherBlockWithTx1, ruleError(ErrOverwriteTx, ""))
|
||||
testProcessBlockStatus(t, "anotherBlockWithTx1", dag, anotherBlockWithTx1, statusDisqualifiedFromChain)
|
||||
|
||||
// Check that a block will be rejected if it has a transaction that double spends
|
||||
// Check that a block will be disqualified if it has a transaction that double spends
|
||||
// a transaction from its past.
|
||||
blockWithDoubleSpendForTx1, err := PrepareBlockForTest(dag, []*daghash.Hash{blockWithTx1.BlockHash()}, nil)
|
||||
if err != nil {
|
||||
@@ -1207,16 +1163,18 @@ func TestDoubleSpends(t *testing.T) {
|
||||
}
|
||||
blockWithDoubleSpendForTx1.Header.HashMerkleRoot = BuildHashMerkleTreeStore(blockWithDoubleSpendForTx1UtilTxs).Root()
|
||||
|
||||
testProcessBlockRuleError(t, dag, blockWithDoubleSpendForTx1, ruleError(ErrMissingTxOut, ""))
|
||||
testProcessBlockStatus(t, "blockWithDoubleSpendForTx1", dag, blockWithDoubleSpendForTx1, statusDisqualifiedFromChain)
|
||||
|
||||
blockInAnticoneOfBlockWithTx1, err := PrepareBlockForTest(dag, []*daghash.Hash{fundingBlock.BlockHash()}, []*appmessage.MsgTx{doubleSpendTx1})
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
|
||||
// Check that a block will not get rejected if it has a transaction that double spends
|
||||
makeNextSelectedTip(dag, blockInAnticoneOfBlockWithTx1)
|
||||
|
||||
// Check that a block will not get disqualified if it has a transaction that double spends
|
||||
// a transaction from its anticone.
|
||||
testProcessBlockRuleError(t, dag, blockInAnticoneOfBlockWithTx1, nil)
|
||||
testProcessBlockStatus(t, "blockInAnticoneOfBlockWithTx1", dag, blockInAnticoneOfBlockWithTx1, statusValid)
|
||||
|
||||
// Check that a block will be rejected if it has two transactions that spend the same UTXO.
|
||||
blockWithDoubleSpendWithItself, err := PrepareBlockForTest(dag, []*daghash.Hash{fundingBlock.BlockHash()}, nil)
|
||||
@@ -1232,7 +1190,8 @@ func TestDoubleSpends(t *testing.T) {
|
||||
}
|
||||
blockWithDoubleSpendWithItself.Header.HashMerkleRoot = BuildHashMerkleTreeStore(blockWithDoubleSpendWithItselfUtilTxs).Root()
|
||||
|
||||
testProcessBlockRuleError(t, dag, blockWithDoubleSpendWithItself, ruleError(ErrDoubleSpendInSameBlock, ""))
|
||||
testProcessBlockRuleError(t, "blockWithDoubleSpendWithItself", dag,
|
||||
blockWithDoubleSpendWithItself, ruleError(ErrDoubleSpendInSameBlock, ""))
|
||||
|
||||
// Check that a block will be rejected if it has the same transaction twice.
|
||||
blockWithDuplicateTransaction, err := PrepareBlockForTest(dag, []*daghash.Hash{fundingBlock.BlockHash()}, nil)
|
||||
@@ -1247,7 +1206,8 @@ func TestDoubleSpends(t *testing.T) {
|
||||
blockWithDuplicateTransactionUtilTxs[i] = util.NewTx(tx)
|
||||
}
|
||||
blockWithDuplicateTransaction.Header.HashMerkleRoot = BuildHashMerkleTreeStore(blockWithDuplicateTransactionUtilTxs).Root()
|
||||
testProcessBlockRuleError(t, dag, blockWithDuplicateTransaction, ruleError(ErrDuplicateTx, ""))
|
||||
testProcessBlockRuleError(t, "blockWithDuplicateTransaction", dag,
|
||||
blockWithDuplicateTransaction, ruleError(ErrDuplicateTx, ""))
|
||||
}
|
||||
|
||||
func TestUTXOCommitment(t *testing.T) {
|
||||
@@ -1316,7 +1276,7 @@ func TestUTXOCommitment(t *testing.T) {
|
||||
|
||||
// Build a Multiset for block D
|
||||
multiset := secp256k1.NewMultiset()
|
||||
for outpoint, entry := range blockDPastDiffUTXOSet.base.utxoCollection {
|
||||
for outpoint, entry := range blockDPastDiffUTXOSet.base.utxoCache {
|
||||
var err error
|
||||
multiset, err = addUTXOToMultiset(multiset, entry, &outpoint)
|
||||
if err != nil {
|
||||
@@ -1371,7 +1331,7 @@ func TestPastUTXOMultiSet(t *testing.T) {
|
||||
if !ok {
|
||||
t.Fatalf("TestPastUTXOMultiSet: blockNode for blockC not found")
|
||||
}
|
||||
blockCSelectedParentMultiset, err := blockNodeC.selectedParentMultiset(dag)
|
||||
blockCSelectedParentMultiset, err := blockNodeC.selectedParentMultiset()
|
||||
if err != nil {
|
||||
t.Fatalf("TestPastUTXOMultiSet: selectedParentMultiset unexpectedly failed: %s", err)
|
||||
}
|
||||
@@ -1384,7 +1344,7 @@ func TestPastUTXOMultiSet(t *testing.T) {
|
||||
PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockC.BlockHash()}, nil)
|
||||
|
||||
// Get blockC's selectedParentMultiset again
|
||||
blockCSelectedParentMultiSetAfterAnotherBlock, err := blockNodeC.selectedParentMultiset(dag)
|
||||
blockCSelectedParentMultiSetAfterAnotherBlock, err := blockNodeC.selectedParentMultiset()
|
||||
if err != nil {
|
||||
t.Fatalf("TestPastUTXOMultiSet: selectedParentMultiset unexpectedly failed: %s", err)
|
||||
}
|
||||
|
||||
@@ -55,7 +55,14 @@ func serializeOutpoint(w io.Writer, outpoint *appmessage.Outpoint) error {
|
||||
return err
|
||||
}
|
||||
|
||||
return binaryserializer.PutUint32(w, outpointIndexByteOrder, outpoint.Index)
|
||||
var buf [4]byte
|
||||
outpointIndexByteOrder.PutUint32(buf[:], outpoint.Index)
|
||||
_, err = w.Write(buf[:])
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var outpointSerializeSize = daghash.TxIDSize + 4
|
||||
@@ -126,9 +133,10 @@ func updateUTXOSet(dbContext dbaccess.Context, virtualUTXODiff *UTXODiff) error
|
||||
}
|
||||
|
||||
type dagState struct {
|
||||
TipHashes []*daghash.Hash
|
||||
LastFinalityPoint *daghash.Hash
|
||||
LocalSubnetworkID *subnetworkid.SubnetworkID
|
||||
TipHashes []*daghash.Hash
|
||||
VirtualParentsHashes []*daghash.Hash
|
||||
ValidTipHashes []*daghash.Hash
|
||||
LocalSubnetworkID *subnetworkid.SubnetworkID
|
||||
}
|
||||
|
||||
// serializeDAGState returns the serialization of the DAG state.
|
||||
@@ -165,9 +173,10 @@ func saveDAGState(dbContext dbaccess.Context, state *dagState) error {
|
||||
// genesis block and the node's local subnetwork id.
|
||||
func (dag *BlockDAG) createDAGState(localSubnetworkID *subnetworkid.SubnetworkID) error {
|
||||
return saveDAGState(dag.databaseContext, &dagState{
|
||||
TipHashes: []*daghash.Hash{dag.Params.GenesisHash},
|
||||
LastFinalityPoint: dag.Params.GenesisHash,
|
||||
LocalSubnetworkID: localSubnetworkID,
|
||||
TipHashes: []*daghash.Hash{dag.Params.GenesisHash},
|
||||
VirtualParentsHashes: []*daghash.Hash{dag.Params.GenesisHash},
|
||||
ValidTipHashes: []*daghash.Hash{dag.Params.GenesisHash},
|
||||
LocalSubnetworkID: localSubnetworkID,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -202,12 +211,6 @@ func (dag *BlockDAG) initDAGState() error {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debugf("Loading UTXO set...")
|
||||
fullUTXOCollection, err := dag.initUTXOSet()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debugf("Loading reachability data...")
|
||||
err = dag.reachabilityTree.init(dag.databaseContext)
|
||||
if err != nil {
|
||||
@@ -220,27 +223,12 @@ func (dag *BlockDAG) initDAGState() error {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debugf("Applying the loaded utxoCollection to the virtual block...")
|
||||
dag.virtual.utxoSet, err = newFullUTXOSetFromUTXOCollection(fullUTXOCollection)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error loading UTXOSet")
|
||||
}
|
||||
|
||||
log.Debugf("Applying the stored tips to the virtual block...")
|
||||
err = dag.initVirtualBlockTips(dagState)
|
||||
err = dag.initTipsAndVirtualParents(dagState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debugf("Setting the last finality point...")
|
||||
var ok bool
|
||||
dag.lastFinalityPoint, ok = dag.index.LookupNode(dagState.LastFinalityPoint)
|
||||
if !ok {
|
||||
return errors.Errorf("finality point block %s "+
|
||||
"does not exist in the DAG", dagState.LastFinalityPoint)
|
||||
}
|
||||
dag.finalizeNodesBelowFinalityPoint(false)
|
||||
|
||||
log.Debugf("Processing unprocessed blockNodes...")
|
||||
err = dag.processUnprocessedBlockNodes(unprocessedBlockNodes)
|
||||
if err != nil {
|
||||
@@ -350,17 +338,28 @@ func (dag *BlockDAG) initUTXOSet() (fullUTXOCollection utxoCollection, err error
|
||||
return fullUTXOCollection, nil
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) initVirtualBlockTips(state *dagState) error {
|
||||
tips := newBlockSet()
|
||||
for _, tipHash := range state.TipHashes {
|
||||
tip, ok := dag.index.LookupNode(tipHash)
|
||||
if !ok {
|
||||
return errors.Errorf("cannot find "+
|
||||
"DAG tip %s in block index", state.TipHashes)
|
||||
}
|
||||
tips.add(tip)
|
||||
func (dag *BlockDAG) initTipsAndVirtualParents(state *dagState) error {
|
||||
tips, err := dag.index.LookupNodes(state.TipHashes)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Error loading tips")
|
||||
}
|
||||
dag.virtual.SetTips(tips)
|
||||
dag.tips = blockSetFromSlice(tips...)
|
||||
|
||||
validTips, err := dag.index.LookupNodes(state.ValidTipHashes)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Error loading tips")
|
||||
}
|
||||
dag.validTips = blockSetFromSlice(validTips...)
|
||||
|
||||
virtualParents, err := dag.index.LookupNodes(state.VirtualParentsHashes)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Error loading tips")
|
||||
}
|
||||
dag.virtual.blockNode, _ = dag.newBlockNode(nil, blockSetFromSlice(virtualParents...))
|
||||
|
||||
// call updateSelectedParentSet with genesis as oldSelectedParent, so that the selectedParentSet is fully calculated
|
||||
_ = dag.virtual.updateSelectedParentSet(dag.genesis)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -417,6 +416,7 @@ func (dag *BlockDAG) deserializeBlockNode(blockRow []byte) (*blockNode, error) {
|
||||
}
|
||||
|
||||
node := &blockNode{
|
||||
dag: dag,
|
||||
hash: header.BlockHash(),
|
||||
version: header.Version,
|
||||
bits: header.Bits,
|
||||
@@ -483,6 +483,25 @@ func (dag *BlockDAG) deserializeBlockNode(blockRow []byte) (*blockNode, error) {
|
||||
}
|
||||
}
|
||||
|
||||
redsCount, err := appmessage.ReadVarInt(buffer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
node.reds = make([]*blockNode, redsCount)
|
||||
for i := uint64(0); i < redsCount; i++ {
|
||||
hash := &daghash.Hash{}
|
||||
if _, err := io.ReadFull(buffer, hash[:]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var ok bool
|
||||
node.reds[i], ok = dag.index.LookupNode(hash)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("block %s does not exist in the DAG", selectedParentHash)
|
||||
}
|
||||
}
|
||||
|
||||
bluesAnticoneSizesLen, err := appmessage.ReadVarInt(buffer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -566,6 +585,18 @@ func serializeBlockNode(node *blockNode) ([]byte, error) {
|
||||
}
|
||||
}
|
||||
|
||||
err = appmessage.WriteVarInt(w, uint64(len(node.reds)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, red := range node.reds {
|
||||
_, err = w.Write(red.hash[:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
err = appmessage.WriteVarInt(w, uint64(len(node.bluesAnticoneSizes)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -7,10 +7,11 @@ package blockdag
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"github.com/pkg/errors"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
)
|
||||
|
||||
@@ -185,18 +186,20 @@ func TestDAGStateSerialization(t *testing.T) {
|
||||
{
|
||||
name: "genesis",
|
||||
state: &dagState{
|
||||
TipHashes: []*daghash.Hash{newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f")},
|
||||
LastFinalityPoint: newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"),
|
||||
TipHashes: []*daghash.Hash{newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f")},
|
||||
ValidTipHashes: []*daghash.Hash{newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f")},
|
||||
VirtualParentsHashes: []*daghash.Hash{newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f")},
|
||||
},
|
||||
serialized: []byte("{\"TipHashes\":[[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0],\"LocalSubnetworkID\":null}"),
|
||||
serialized: []byte(`{"TipHashes":[[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]],"VirtualParentsHashes":[[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]],"ValidTipHashes":[[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]],"LocalSubnetworkID":null}`),
|
||||
},
|
||||
{
|
||||
name: "block 1",
|
||||
state: &dagState{
|
||||
TipHashes: []*daghash.Hash{newHashFromStr("00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048")},
|
||||
LastFinalityPoint: newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"),
|
||||
TipHashes: []*daghash.Hash{newHashFromStr("00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048")},
|
||||
ValidTipHashes: []*daghash.Hash{newHashFromStr("00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048")},
|
||||
VirtualParentsHashes: []*daghash.Hash{newHashFromStr("00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048")},
|
||||
},
|
||||
serialized: []byte("{\"TipHashes\":[[72,96,235,24,191,27,22,32,227,126,148,144,252,138,66,117,20,65,111,215,81,89,171,134,104,142,154,131,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0],\"LocalSubnetworkID\":null}"),
|
||||
serialized: []byte(`{"TipHashes":[[72,96,235,24,191,27,22,32,227,126,148,144,252,138,66,117,20,65,111,215,81,89,171,134,104,142,154,131,0,0,0,0]],"VirtualParentsHashes":[[72,96,235,24,191,27,22,32,227,126,148,144,252,138,66,117,20,65,111,215,81,89,171,134,104,142,154,131,0,0,0,0]],"ValidTipHashes":[[72,96,235,24,191,27,22,32,227,126,148,144,252,138,66,117,20,65,111,215,81,89,171,134,104,142,154,131,0,0,0,0]],"LocalSubnetworkID":null}`),
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -5,12 +5,13 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/domain/dagconfig"
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
"math/big"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/dagconfig"
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
)
|
||||
|
||||
@@ -135,7 +136,7 @@ func TestDifficulty(t *testing.T) {
|
||||
t.Fatalf("As long as the block rate remains the same, the difficulty shouldn't change")
|
||||
}
|
||||
}
|
||||
nodeInThePast := addNode(blockSetFromSlice(tip), tip.PastMedianTime(dag))
|
||||
nodeInThePast := addNode(blockSetFromSlice(tip), tip.PastMedianTime())
|
||||
if nodeInThePast.bits != tip.bits {
|
||||
t.Fatalf("The difficulty should only change when nodeInThePast is in the past of a block bluest parent")
|
||||
}
|
||||
@@ -157,7 +158,7 @@ func TestDifficulty(t *testing.T) {
|
||||
|
||||
// Increase block rate to increase difficulty
|
||||
for i := uint64(0); i < dag.difficultyAdjustmentWindowSize; i++ {
|
||||
tip = addNode(blockSetFromSlice(tip), tip.PastMedianTime(dag))
|
||||
tip = addNode(blockSetFromSlice(tip), tip.PastMedianTime())
|
||||
if compareBits(tip.bits, tip.parents.bluest().bits) > 0 {
|
||||
t.Fatalf("Because we're increasing the block rate, the difficulty can't decrease")
|
||||
}
|
||||
@@ -203,7 +204,7 @@ func TestDifficulty(t *testing.T) {
|
||||
|
||||
redChainTip := splitNode
|
||||
for i := 0; i < 10; i++ {
|
||||
redChainTip = addNode(blockSetFromSlice(redChainTip), redChainTip.PastMedianTime(dag))
|
||||
redChainTip = addNode(blockSetFromSlice(redChainTip), redChainTip.PastMedianTime())
|
||||
}
|
||||
tipWithRedPast := addNode(blockSetFromSlice(redChainTip, blueTip), zeroTime)
|
||||
tipWithoutRedPast := addNode(blockSetFromSlice(blueTip), zeroTime)
|
||||
|
||||
@@ -203,6 +203,9 @@ const (
|
||||
// is also an ancestor of another parent
|
||||
ErrInvalidParentsRelation
|
||||
|
||||
// ErrTooManyParents indicates that a block points to more then `MaxNumParentBlocks` parents
|
||||
ErrTooManyParents
|
||||
|
||||
// ErrDelayedBlockIsNotAllowed indicates that a block with a delayed timestamp was
|
||||
// submitted with BFDisallowDelay flag raised.
|
||||
ErrDelayedBlockIsNotAllowed
|
||||
@@ -210,6 +213,20 @@ const (
|
||||
// ErrOrphanBlockIsNotAllowed indicates that an orphan block was submitted with
|
||||
// BFDisallowOrphans flag raised.
|
||||
ErrOrphanBlockIsNotAllowed
|
||||
|
||||
// ErrViolatingBoundedMergeDepth indicates that a block is violating finality from
|
||||
// its own point of view
|
||||
ErrViolatingBoundedMergeDepth
|
||||
|
||||
// ErrViolatingMergeLimit indicates that a block merges more than mergeLimit blocks
|
||||
ErrViolatingMergeLimit
|
||||
|
||||
// ErrChainedTransactions indicates that a block contains a transaction that spends an output of a transaction
|
||||
// In the same block
|
||||
ErrChainedTransactions
|
||||
|
||||
// ErrSelectedParentDisqualifiedFromChain indicates that a block's selectedParent has the status DisqualifiedFromChain
|
||||
ErrSelectedParentDisqualifiedFromChain
|
||||
)
|
||||
|
||||
// Map of ErrorCode values back to their constant names for pretty printing.
|
||||
|
||||
@@ -16,153 +16,10 @@ import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/domain/blockdag"
|
||||
"github.com/kaspanet/kaspad/domain/dagconfig"
|
||||
"github.com/kaspanet/kaspad/domain/mining"
|
||||
"github.com/kaspanet/kaspad/domain/txscript"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
)
|
||||
|
||||
// TestFinality checks that the finality mechanism works as expected.
|
||||
// This is how the flow goes:
|
||||
// 1) We build a chain of params.FinalityInterval blocks and call its tip altChainTip.
|
||||
// 2) We build another chain (let's call it mainChain) of 2 * params.FinalityInterval
|
||||
// blocks, which points to genesis, and then we check that the block in that
|
||||
// chain with height of params.FinalityInterval is marked as finality point (This is
|
||||
// very predictable, because the blue score of each new block in a chain is the
|
||||
// parents plus one).
|
||||
// 3) We make a new child to block with height (2 * params.FinalityInterval - 1)
|
||||
// in mainChain, and we check that connecting it to the DAG
|
||||
// doesn't affect the last finality point.
|
||||
// 4) We make a block that points to genesis, and check that it
|
||||
// gets rejected because its blue score is lower then the last finality
|
||||
// point.
|
||||
// 5) We make a block that points to altChainTip, and check that it
|
||||
// gets rejected because it doesn't have the last finality point in
|
||||
// its selected parent chain.
|
||||
func TestFinality(t *testing.T) {
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 1
|
||||
params.FinalityDuration = 100 * params.TargetTimePerBlock
|
||||
dag, teardownFunc, err := blockdag.DAGSetup("TestFinality", true, blockdag.Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to setup DAG instance: %v", err)
|
||||
}
|
||||
defer teardownFunc()
|
||||
buildNodeToDag := func(parentHashes []*daghash.Hash) (*util.Block, error) {
|
||||
msgBlock, err := mining.PrepareBlockForTest(dag, parentHashes, nil, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
block := util.NewBlock(msgBlock)
|
||||
|
||||
isOrphan, isDelayed, err := dag.ProcessBlock(block, blockdag.BFNoPoWCheck)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if isDelayed {
|
||||
return nil, errors.Errorf("ProcessBlock: block " +
|
||||
"is too far in the future")
|
||||
}
|
||||
if isOrphan {
|
||||
return nil, errors.Errorf("ProcessBlock: unexpected returned orphan block")
|
||||
}
|
||||
|
||||
return block, nil
|
||||
}
|
||||
|
||||
genesis := util.NewBlock(params.GenesisBlock)
|
||||
currentNode := genesis
|
||||
|
||||
// First we build a chain of params.FinalityInterval blocks for future use
|
||||
for i := uint64(0); i < dag.FinalityInterval(); i++ {
|
||||
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
|
||||
if err != nil {
|
||||
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
altChainTip := currentNode
|
||||
|
||||
// Now we build a new chain of 2 * params.FinalityInterval blocks, pointed to genesis, and
|
||||
// we expect the block with height 1 * params.FinalityInterval to be the last finality point
|
||||
currentNode = genesis
|
||||
for i := uint64(0); i < dag.FinalityInterval(); i++ {
|
||||
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
|
||||
if err != nil {
|
||||
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
expectedFinalityPoint := currentNode
|
||||
|
||||
for i := uint64(0); i < dag.FinalityInterval(); i++ {
|
||||
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
|
||||
if err != nil {
|
||||
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if !dag.LastFinalityPointHash().IsEqual(expectedFinalityPoint.Hash()) {
|
||||
t.Errorf("TestFinality: dag.lastFinalityPoint expected to be %v but got %v", expectedFinalityPoint, dag.LastFinalityPointHash())
|
||||
}
|
||||
|
||||
// Here we check that even if we create a parallel tip (a new tip with
|
||||
// the same parents as the current one) with the same blue score as the
|
||||
// current tip, it still won't affect the last finality point.
|
||||
_, err = buildNodeToDag(currentNode.MsgBlock().Header.ParentHashes)
|
||||
if err != nil {
|
||||
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
|
||||
}
|
||||
if !dag.LastFinalityPointHash().IsEqual(expectedFinalityPoint.Hash()) {
|
||||
t.Errorf("TestFinality: dag.lastFinalityPoint was unexpectly changed")
|
||||
}
|
||||
|
||||
// Here we check that a block with lower blue score than the last finality
|
||||
// point will get rejected
|
||||
fakeCoinbaseTx, err := dag.NextBlockCoinbaseTransaction(nil, nil)
|
||||
if err != nil {
|
||||
t.Errorf("NextBlockCoinbaseTransaction: %s", err)
|
||||
}
|
||||
merkleRoot := blockdag.BuildHashMerkleTreeStore([]*util.Tx{fakeCoinbaseTx}).Root()
|
||||
beforeFinalityBlock := appmessage.NewMsgBlock(&appmessage.BlockHeader{
|
||||
Version: 0x10000000,
|
||||
ParentHashes: []*daghash.Hash{genesis.Hash()},
|
||||
HashMerkleRoot: merkleRoot,
|
||||
AcceptedIDMerkleRoot: &daghash.ZeroHash,
|
||||
UTXOCommitment: &daghash.ZeroHash,
|
||||
Timestamp: dag.SelectedTipHeader().Timestamp,
|
||||
Bits: genesis.MsgBlock().Header.Bits,
|
||||
})
|
||||
beforeFinalityBlock.AddTransaction(fakeCoinbaseTx.MsgTx())
|
||||
_, _, err = dag.ProcessBlock(util.NewBlock(beforeFinalityBlock), blockdag.BFNoPoWCheck)
|
||||
if err == nil {
|
||||
t.Errorf("TestFinality: buildNodeToDag expected an error but got <nil>")
|
||||
}
|
||||
var ruleErr blockdag.RuleError
|
||||
if errors.As(err, &ruleErr) {
|
||||
if ruleErr.ErrorCode != blockdag.ErrFinality {
|
||||
t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, ruleErr.ErrorCode)
|
||||
}
|
||||
} else {
|
||||
t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Here we check that a block that doesn't have the last finality point in
|
||||
// its selected parent chain will get rejected
|
||||
_, err = buildNodeToDag([]*daghash.Hash{altChainTip.Hash()})
|
||||
if err == nil {
|
||||
t.Errorf("TestFinality: buildNodeToDag expected an error but got <nil>")
|
||||
}
|
||||
if errors.As(err, &ruleErr) {
|
||||
if ruleErr.ErrorCode != blockdag.ErrFinality {
|
||||
t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, ruleErr.ErrorCode)
|
||||
}
|
||||
} else {
|
||||
t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", ruleErr)
|
||||
}
|
||||
}
|
||||
|
||||
// TestFinalityInterval tests that the finality interval is
|
||||
// smaller then appmessage.MaxInvPerMsg, so when a peer receives
|
||||
// a getblocks message it should always be able to send
|
||||
@@ -231,7 +88,7 @@ func TestChainedTransactions(t *testing.T) {
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
block1, err := mining.PrepareBlockForTest(dag, []*daghash.Hash{params.GenesisHash}, nil, false)
|
||||
block1, err := blockdag.PrepareBlockForTest(dag, []*daghash.Hash{params.GenesisHash}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
@@ -279,7 +136,7 @@ func TestChainedTransactions(t *testing.T) {
|
||||
}
|
||||
chainedTx := appmessage.NewNativeMsgTx(appmessage.TxVersion, []*appmessage.TxIn{chainedTxIn}, []*appmessage.TxOut{chainedTxOut})
|
||||
|
||||
block2, err := mining.PrepareBlockForTest(dag, []*daghash.Hash{block1.BlockHash()}, []*appmessage.MsgTx{tx}, false)
|
||||
block2, err := blockdag.PrepareBlockForTest(dag, []*daghash.Hash{block1.BlockHash()}, []*appmessage.MsgTx{tx})
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
@@ -299,8 +156,8 @@ func TestChainedTransactions(t *testing.T) {
|
||||
} else {
|
||||
var ruleErr blockdag.RuleError
|
||||
if ok := errors.As(err, &ruleErr); ok {
|
||||
if ruleErr.ErrorCode != blockdag.ErrMissingTxOut {
|
||||
t.Errorf("ProcessBlock expected an %v error code but got %v", blockdag.ErrMissingTxOut, ruleErr.ErrorCode)
|
||||
if ruleErr.ErrorCode != blockdag.ErrChainedTransactions {
|
||||
t.Errorf("ProcessBlock expected an %v error code but got %v", blockdag.ErrChainedTransactions, ruleErr.ErrorCode)
|
||||
}
|
||||
} else {
|
||||
t.Errorf("ProcessBlock expected a blockdag.RuleError but got %v", err)
|
||||
@@ -325,7 +182,7 @@ func TestChainedTransactions(t *testing.T) {
|
||||
}
|
||||
nonChainedTx := appmessage.NewNativeMsgTx(appmessage.TxVersion, []*appmessage.TxIn{nonChainedTxIn}, []*appmessage.TxOut{nonChainedTxOut})
|
||||
|
||||
block3, err := mining.PrepareBlockForTest(dag, []*daghash.Hash{block1.BlockHash()}, []*appmessage.MsgTx{nonChainedTx}, false)
|
||||
block3, err := blockdag.PrepareBlockForTest(dag, []*daghash.Hash{block1.BlockHash()}, []*appmessage.MsgTx{nonChainedTx})
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
@@ -382,9 +239,9 @@ func TestOrderInDiffFromAcceptanceData(t *testing.T) {
|
||||
}
|
||||
|
||||
// Create the block
|
||||
msgBlock, err := mining.PrepareBlockForTest(dag, []*daghash.Hash{previousBlock.Hash()}, txs, false)
|
||||
msgBlock, err := blockdag.PrepareBlockForTest(dag, []*daghash.Hash{previousBlock.Hash()}, txs)
|
||||
if err != nil {
|
||||
t.Fatalf("TestOrderInDiffFromAcceptanceData: Failed to prepare block: %s", err)
|
||||
t.Fatalf("TestOrderInDiffFromAcceptanceData: Failed to prepare block: %+v", err)
|
||||
}
|
||||
|
||||
// Add the block to the DAG
|
||||
@@ -439,7 +296,7 @@ func TestGasLimit(t *testing.T) {
|
||||
|
||||
cbTxs := []*appmessage.MsgTx{}
|
||||
for i := 0; i < 4; i++ {
|
||||
fundsBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), nil, false)
|
||||
fundsBlock, err := blockdag.PrepareBlockForTest(dag, dag.VirtualParentHashes(), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
@@ -491,7 +348,7 @@ func TestGasLimit(t *testing.T) {
|
||||
tx2 := appmessage.NewSubnetworkMsgTx(appmessage.TxVersion, []*appmessage.TxIn{tx2In}, []*appmessage.TxOut{tx2Out}, subnetworkID, 10000, []byte{})
|
||||
|
||||
// Here we check that we can't process a block that has transactions that exceed the gas limit
|
||||
overLimitBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), []*appmessage.MsgTx{tx1, tx2}, true)
|
||||
overLimitBlock, err := blockdag.PrepareBlockForTest(dag, dag.VirtualParentHashes(), []*appmessage.MsgTx{tx1, tx2})
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
@@ -526,7 +383,7 @@ func TestGasLimit(t *testing.T) {
|
||||
subnetworkID, math.MaxUint64, []byte{})
|
||||
|
||||
// Here we check that we can't process a block that its transactions' gas overflows uint64
|
||||
overflowGasBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), []*appmessage.MsgTx{tx1, overflowGasTx}, true)
|
||||
overflowGasBlock, err := blockdag.PrepareBlockForTest(dag, dag.VirtualParentHashes(), []*appmessage.MsgTx{tx1, overflowGasTx})
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
@@ -560,7 +417,7 @@ func TestGasLimit(t *testing.T) {
|
||||
nonExistentSubnetworkTx := appmessage.NewSubnetworkMsgTx(appmessage.TxVersion, []*appmessage.TxIn{nonExistentSubnetworkTxIn},
|
||||
[]*appmessage.TxOut{nonExistentSubnetworkTxOut}, nonExistentSubnetwork, 1, []byte{})
|
||||
|
||||
nonExistentSubnetworkBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), []*appmessage.MsgTx{nonExistentSubnetworkTx, overflowGasTx}, true)
|
||||
nonExistentSubnetworkBlock, err := blockdag.PrepareBlockForTest(dag, dag.VirtualParentHashes(), []*appmessage.MsgTx{nonExistentSubnetworkTx, overflowGasTx})
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
@@ -581,7 +438,7 @@ func TestGasLimit(t *testing.T) {
|
||||
}
|
||||
|
||||
// Here we check that we can process a block with a transaction that doesn't exceed the gas limit
|
||||
validBlock, err := mining.PrepareBlockForTest(dag, dag.TipHashes(), []*appmessage.MsgTx{tx1}, true)
|
||||
validBlock, err := blockdag.PrepareBlockForTest(dag, dag.VirtualParentHashes(), []*appmessage.MsgTx{tx1})
|
||||
if err != nil {
|
||||
t.Fatalf("PrepareBlockForTest: %v", err)
|
||||
}
|
||||
|
||||
@@ -1,113 +1,6 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
)
|
||||
|
||||
// LastFinalityPointHash returns the hash of the last finality point
|
||||
func (dag *BlockDAG) LastFinalityPointHash() *daghash.Hash {
|
||||
if dag.lastFinalityPoint == nil {
|
||||
return nil
|
||||
}
|
||||
return dag.lastFinalityPoint.hash
|
||||
}
|
||||
|
||||
// FinalityInterval is the interval that determines the finality window of the DAG.
|
||||
func (dag *BlockDAG) FinalityInterval() uint64 {
|
||||
return uint64(dag.Params.FinalityDuration / dag.Params.TargetTimePerBlock)
|
||||
}
|
||||
|
||||
// checkFinalityViolation checks the new block does not violate the finality rules
|
||||
// specifically - the new block selectedParent chain should contain the old finality point.
|
||||
func (dag *BlockDAG) checkFinalityViolation(newNode *blockNode) error {
|
||||
// the genesis block can not violate finality rules
|
||||
if newNode.isGenesis() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Because newNode doesn't have reachability data we
|
||||
// need to check if the last finality point is in the
|
||||
// selected parent chain of newNode.selectedParent, so
|
||||
// we explicitly check if newNode.selectedParent is
|
||||
// the finality point.
|
||||
if dag.lastFinalityPoint == newNode.selectedParent {
|
||||
return nil
|
||||
}
|
||||
|
||||
isInSelectedChain, err := dag.isInSelectedParentChainOf(dag.lastFinalityPoint, newNode.selectedParent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !isInSelectedChain {
|
||||
return ruleError(ErrFinality, "the last finality point is not in the selected parent chain of this block")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateFinalityPoint updates the dag's last finality point if necessary.
|
||||
func (dag *BlockDAG) updateFinalityPoint() {
|
||||
selectedTip := dag.selectedTip()
|
||||
// if the selected tip is the genesis block - it should be the new finality point
|
||||
if selectedTip.isGenesis() {
|
||||
dag.lastFinalityPoint = selectedTip
|
||||
return
|
||||
}
|
||||
// We are looking for a new finality point only if the new block's finality score is higher
|
||||
// by 2 than the existing finality point's
|
||||
if selectedTip.finalityScore(dag) < dag.lastFinalityPoint.finalityScore(dag)+2 {
|
||||
return
|
||||
}
|
||||
|
||||
var currentNode *blockNode
|
||||
for currentNode = selectedTip.selectedParent; ; currentNode = currentNode.selectedParent {
|
||||
// We look for the first node in the selected parent chain that has a higher finality score than the last finality point.
|
||||
if currentNode.selectedParent.finalityScore(dag) == dag.lastFinalityPoint.finalityScore(dag) {
|
||||
break
|
||||
}
|
||||
}
|
||||
dag.lastFinalityPoint = currentNode
|
||||
spawn("dag.finalizeNodesBelowFinalityPoint", func() {
|
||||
dag.finalizeNodesBelowFinalityPoint(true)
|
||||
})
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) finalizeNodesBelowFinalityPoint(deleteDiffData bool) {
|
||||
queue := make([]*blockNode, 0, len(dag.lastFinalityPoint.parents))
|
||||
for parent := range dag.lastFinalityPoint.parents {
|
||||
queue = append(queue, parent)
|
||||
}
|
||||
var nodesToDelete []*blockNode
|
||||
if deleteDiffData {
|
||||
nodesToDelete = make([]*blockNode, 0, dag.FinalityInterval())
|
||||
}
|
||||
for len(queue) > 0 {
|
||||
var current *blockNode
|
||||
current, queue = queue[0], queue[1:]
|
||||
if !current.isFinalized {
|
||||
current.isFinalized = true
|
||||
if deleteDiffData {
|
||||
nodesToDelete = append(nodesToDelete, current)
|
||||
}
|
||||
for parent := range current.parents {
|
||||
queue = append(queue, parent)
|
||||
}
|
||||
}
|
||||
}
|
||||
if deleteDiffData {
|
||||
err := dag.utxoDiffStore.removeBlocksDiffData(dag.databaseContext, nodesToDelete)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Error removing diff data from utxoDiffStore: %s", err))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// IsKnownFinalizedBlock returns whether the block is below the finality point.
|
||||
// IsKnownFinalizedBlock might be false-negative because node finality status is
|
||||
// updated in a separate goroutine. To get a definite answer if a block
|
||||
// is finalized or not, use dag.checkFinalityViolation.
|
||||
func (dag *BlockDAG) IsKnownFinalizedBlock(blockHash *daghash.Hash) bool {
|
||||
node, ok := dag.index.LookupNode(blockHash)
|
||||
return ok && node.isFinalized
|
||||
}
|
||||
|
||||
79
domain/blockdag/finality_conflicts.go
Normal file
79
domain/blockdag/finality_conflicts.go
Normal file
@@ -0,0 +1,79 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
)
|
||||
|
||||
// ResolveFinalityConflict resolves all finality conflicts by setting an arbitrary finality block, and
|
||||
// re-selecting virtual parents in such a way that given finalityBlock will be in virtual's selectedParentChain
|
||||
func (dag *BlockDAG) ResolveFinalityConflict(finalityBlockHash *daghash.Hash) error {
|
||||
dag.dagLock.Lock()
|
||||
defer dag.dagLock.Unlock()
|
||||
|
||||
finalityBlock, ok := dag.index.LookupNode(finalityBlockHash)
|
||||
if !ok {
|
||||
return errors.Errorf("Couldn't find finality block with hash %s", finalityBlockHash)
|
||||
}
|
||||
|
||||
err := dag.prepareForFinalityConflictResolution(finalityBlock)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, chainUpdates, err := dag.updateVirtualParents(dag.tips, finalityBlock)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dag.sendNotification(NTChainChanged, ChainChangedNotificationData{
|
||||
RemovedChainBlockHashes: chainUpdates.removedChainBlockHashes,
|
||||
AddedChainBlockHashes: chainUpdates.addedChainBlockHashes,
|
||||
})
|
||||
dag.sendNotification(NTFinalityConflictResolved, FinalityConflictResolvedNotificationData{
|
||||
FinalityBlockHash: finalityBlockHash,
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// prepareForFinalityConflictResolution makes sure that the designated selectedTip once a finality conflict is resolved
|
||||
// is not UTXOPendingVerification.
|
||||
func (dag *BlockDAG) prepareForFinalityConflictResolution(finalityBlock *blockNode) error {
|
||||
queue := newDownHeap()
|
||||
queue.pushSet(dag.tips)
|
||||
|
||||
disqualifiedCandidates := newBlockSet()
|
||||
for {
|
||||
if queue.Len() == 0 {
|
||||
return errors.New("No valid selectedTip candidates")
|
||||
}
|
||||
candidate := queue.pop()
|
||||
|
||||
isFinalityBlockInSelectedParentChain, err := dag.isInSelectedParentChainOf(finalityBlock, candidate)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !isFinalityBlockInSelectedParentChain {
|
||||
continue
|
||||
}
|
||||
if dag.index.BlockNodeStatus(candidate) == statusUTXOPendingVerification {
|
||||
err := dag.resolveNodeStatusInNewTransaction(candidate)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if dag.index.BlockNodeStatus(candidate) == statusValid {
|
||||
return nil
|
||||
}
|
||||
|
||||
disqualifiedCandidates.add(candidate)
|
||||
|
||||
for parent := range candidate.parents {
|
||||
if parent.children.areAllIn(disqualifiedCandidates) {
|
||||
queue.Push(parent)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
124
domain/blockdag/finality_test.go
Normal file
124
domain/blockdag/finality_test.go
Normal file
@@ -0,0 +1,124 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/dagconfig"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
)
|
||||
|
||||
func TestFinality(t *testing.T) {
|
||||
// Set finalityInterval to 50 blocks, so that test runs quickly
|
||||
dagConfig := dagconfig.SimnetParams
|
||||
dagConfig.FinalityDuration = 50 * dagConfig.TargetTimePerBlock
|
||||
|
||||
dag, teardownFunc, err := DAGSetup("finality", true, Config{
|
||||
DAGParams: &dagConfig,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to setup dag instance: %v", err)
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
// Build a chain of `finalityInterval - 1` blocks
|
||||
finalityInterval := dag.FinalityInterval()
|
||||
mainChainTip := dag.genesis
|
||||
var ok bool
|
||||
for i := uint64(0); i < finalityInterval-1; i++ {
|
||||
block := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{mainChainTip.hash}, nil)
|
||||
mainChainTip, ok = dag.index.LookupNode(block.BlockHash())
|
||||
if !ok {
|
||||
t.Fatalf("Couldn't lookup in blockIndex that was just submitted: %s", block.BlockHash())
|
||||
}
|
||||
|
||||
status := dag.index.BlockNodeStatus(mainChainTip)
|
||||
if status != statusValid {
|
||||
t.Fatalf("Block #%d in main chain expected to have status '%s', but got '%s'",
|
||||
i, statusValid, status)
|
||||
}
|
||||
}
|
||||
|
||||
// Mine another chain of `finality-Interval - 2` blocks
|
||||
sideChainTip := dag.genesis
|
||||
for i := uint64(0); i < finalityInterval-2; i++ {
|
||||
block := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{sideChainTip.hash}, nil)
|
||||
sideChainTip, ok = dag.index.LookupNode(block.BlockHash())
|
||||
if !ok {
|
||||
t.Fatalf("Couldn't lookup in blockIndex that was just submitted: %s", block.BlockHash())
|
||||
}
|
||||
|
||||
status := dag.index.BlockNodeStatus(sideChainTip)
|
||||
if status != statusUTXOPendingVerification {
|
||||
t.Fatalf("Block #%d in side-chain expected to have status '%s', but got '%s'",
|
||||
i, statusUTXOPendingVerification, status)
|
||||
}
|
||||
}
|
||||
|
||||
// Add two more blocks in the side-chain until it becomes the selected chain
|
||||
for i := uint64(0); i < 2; i++ {
|
||||
block := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{sideChainTip.hash}, nil)
|
||||
sideChainTip, ok = dag.index.LookupNode(block.BlockHash())
|
||||
if !ok {
|
||||
t.Fatalf("Couldn't lookup in blockIndex that was just submitted: %s", block.BlockHash())
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure that now the sideChainTip is valid and selectedTip
|
||||
status := dag.index.BlockNodeStatus(sideChainTip)
|
||||
if status != statusValid {
|
||||
t.Fatalf("Overtaking block in side-chain expected to have status '%s', but got '%s'",
|
||||
statusValid, status)
|
||||
}
|
||||
if dag.selectedTip() != sideChainTip {
|
||||
t.Fatalf("Overtaking block in side-chain is not selectedTip")
|
||||
}
|
||||
|
||||
// Add two more blocks to main chain, to move finality point to first non-genesis block in mainChain
|
||||
for i := uint64(0); i < 2; i++ {
|
||||
block := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{mainChainTip.hash}, nil)
|
||||
mainChainTip, ok = dag.index.LookupNode(block.BlockHash())
|
||||
if !ok {
|
||||
t.Fatalf("Couldn't lookup in blockIndex that was just submitted: %s", block.BlockHash())
|
||||
}
|
||||
}
|
||||
|
||||
if dag.virtual.finalityPoint() == dag.genesis {
|
||||
t.Fatalf("virtual's finalityPoint is still genesis after adding finalityInterval + 1 blocks to the main chain")
|
||||
}
|
||||
|
||||
// Subscribe to finality conflict notifications
|
||||
notificationChan := make(chan struct{}, 1)
|
||||
dag.Subscribe(func(notification *Notification) {
|
||||
if notification.Type == NTFinalityConflict {
|
||||
notificationChan <- struct{}{}
|
||||
}
|
||||
})
|
||||
|
||||
// Add two more blocks to the side chain, so that it violates finality and gets status UTXOPendingVerification even
|
||||
// though it is the block with the highest blue score.
|
||||
for i := uint64(0); i < 2; i++ {
|
||||
block := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{sideChainTip.hash}, nil)
|
||||
sideChainTip, ok = dag.index.LookupNode(block.BlockHash())
|
||||
if !ok {
|
||||
t.Fatalf("Couldn't lookup in blockIndex that was just submitted: %s", block.BlockHash())
|
||||
}
|
||||
}
|
||||
|
||||
// Check that sideChainTip is the bluest tip now
|
||||
if dag.tips.bluest() != sideChainTip {
|
||||
t.Fatalf("sideChainTip is not the bluest tip when it is expected to be")
|
||||
}
|
||||
|
||||
status = dag.index.BlockNodeStatus(sideChainTip)
|
||||
if status != statusUTXOPendingVerification {
|
||||
t.Fatalf("Finality violating block expected to have status '%s', but got '%s'",
|
||||
statusUTXOPendingVerification, status)
|
||||
}
|
||||
|
||||
// Make sure that a finlality conflict notification was sent
|
||||
select {
|
||||
case <-notificationChan:
|
||||
default:
|
||||
t.Fatalf("No finality violation notification was sent")
|
||||
}
|
||||
}
|
||||
@@ -1,9 +1,10 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/dagconfig"
|
||||
"github.com/pkg/errors"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// ghostdag runs the GHOSTDAG protocol and updates newNode.blues,
|
||||
@@ -39,86 +40,97 @@ func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blo
|
||||
})
|
||||
|
||||
for _, blueCandidate := range selectedParentAnticone {
|
||||
candidateBluesAnticoneSizes := make(map[*blockNode]dagconfig.KType)
|
||||
var candidateAnticoneSize dagconfig.KType
|
||||
possiblyBlue := true
|
||||
|
||||
// Iterate over all blocks in the blue set of newNode that are not in the past
|
||||
// of blueCandidate, and check for each one of them if blueCandidate potentially
|
||||
// enlarges their blue anticone to be over K, or that they enlarge the blue anticone
|
||||
// of blueCandidate to be over K.
|
||||
for chainBlock := newNode; possiblyBlue; chainBlock = chainBlock.selectedParent {
|
||||
// If blueCandidate is in the future of chainBlock, it means
|
||||
// that all remaining blues are in the past of chainBlock and thus
|
||||
// in the past of blueCandidate. In this case we know for sure that
|
||||
// the anticone of blueCandidate will not exceed K, and we can mark
|
||||
// it as blue.
|
||||
//
|
||||
// newNode is always in the future of blueCandidate, so there's
|
||||
// no point in checking it.
|
||||
if chainBlock != newNode {
|
||||
if isAncestorOfBlueCandidate, err := dag.isInPast(chainBlock, blueCandidate); err != nil {
|
||||
return nil, err
|
||||
} else if isAncestorOfBlueCandidate {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
for _, block := range chainBlock.blues {
|
||||
// Skip blocks that exist in the past of blueCandidate.
|
||||
if isAncestorOfBlueCandidate, err := dag.isInPast(block, blueCandidate); err != nil {
|
||||
return nil, err
|
||||
} else if isAncestorOfBlueCandidate {
|
||||
continue
|
||||
}
|
||||
|
||||
candidateBluesAnticoneSizes[block], err = dag.blueAnticoneSize(block, newNode)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
candidateAnticoneSize++
|
||||
|
||||
if candidateAnticoneSize > dag.Params.K {
|
||||
// k-cluster violation: The candidate's blue anticone exceeded k
|
||||
possiblyBlue = false
|
||||
break
|
||||
}
|
||||
|
||||
if candidateBluesAnticoneSizes[block] == dag.Params.K {
|
||||
// k-cluster violation: A block in candidate's blue anticone already
|
||||
// has k blue blocks in its own anticone
|
||||
possiblyBlue = false
|
||||
break
|
||||
}
|
||||
|
||||
// This is a sanity check that validates that a blue
|
||||
// block's blue anticone is not already larger than K.
|
||||
if candidateBluesAnticoneSizes[block] > dag.Params.K {
|
||||
return nil, errors.New("found blue anticone size larger than k")
|
||||
}
|
||||
}
|
||||
isBlue, candidateAnticoneSize, candidateBluesAnticoneSizes, err := dag.checkBlueCandidate(newNode, blueCandidate)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if possiblyBlue {
|
||||
if isBlue {
|
||||
// No k-cluster violation found, we can now set the candidate block as blue
|
||||
newNode.blues = append(newNode.blues, blueCandidate)
|
||||
newNode.bluesAnticoneSizes[blueCandidate] = candidateAnticoneSize
|
||||
for blue, blueAnticoneSize := range candidateBluesAnticoneSizes {
|
||||
newNode.bluesAnticoneSizes[blue] = blueAnticoneSize + 1
|
||||
}
|
||||
|
||||
// The maximum length of node.blues can be K+1 because
|
||||
// it contains the selected parent.
|
||||
if dagconfig.KType(len(newNode.blues)) == dag.Params.K+1 {
|
||||
break
|
||||
}
|
||||
} else {
|
||||
newNode.reds = append(newNode.reds, blueCandidate)
|
||||
}
|
||||
}
|
||||
|
||||
newNode.blueScore = newNode.selectedParent.blueScore + uint64(len(newNode.blues))
|
||||
|
||||
return selectedParentAnticone, nil
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) checkBlueCandidate(newNode *blockNode, blueCandidate *blockNode) (
|
||||
isBlue bool, candidateAnticoneSize dagconfig.KType, candidateBluesAnticoneSizes map[*blockNode]dagconfig.KType,
|
||||
err error) {
|
||||
|
||||
// The maximum length of node.blues can be K+1 because
|
||||
// it contains the selected parent.
|
||||
if dagconfig.KType(len(newNode.blues)) == dag.Params.K+1 {
|
||||
return false, 0, nil, nil
|
||||
}
|
||||
|
||||
candidateBluesAnticoneSizes = make(map[*blockNode]dagconfig.KType)
|
||||
|
||||
// Iterate over all blocks in the blue set of newNode that are not in the past
|
||||
// of blueCandidate, and check for each one of them if blueCandidate potentially
|
||||
// enlarges their blue anticone to be over K, or that they enlarge the blue anticone
|
||||
// of blueCandidate to be over K.
|
||||
for chainBlock := newNode; ; chainBlock = chainBlock.selectedParent {
|
||||
// If blueCandidate is in the future of chainBlock, it means
|
||||
// that all remaining blues are in the past of chainBlock and thus
|
||||
// in the past of blueCandidate. In this case we know for sure that
|
||||
// the anticone of blueCandidate will not exceed K, and we can mark
|
||||
// it as blue.
|
||||
//
|
||||
// newNode is always in the future of blueCandidate, so there's
|
||||
// no point in checking it.
|
||||
if chainBlock != newNode {
|
||||
if isAncestorOfBlueCandidate, err := dag.isInPast(chainBlock, blueCandidate); err != nil {
|
||||
return false, 0, nil, err
|
||||
} else if isAncestorOfBlueCandidate {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
for _, block := range chainBlock.blues {
|
||||
// Skip blocks that exist in the past of blueCandidate.
|
||||
if isAncestorOfBlueCandidate, err := dag.isInPast(block, blueCandidate); err != nil {
|
||||
return false, 0, nil, err
|
||||
} else if isAncestorOfBlueCandidate {
|
||||
continue
|
||||
}
|
||||
|
||||
candidateBluesAnticoneSizes[block], err = dag.blueAnticoneSize(block, newNode)
|
||||
if err != nil {
|
||||
return false, 0, nil, err
|
||||
}
|
||||
candidateAnticoneSize++
|
||||
|
||||
if candidateAnticoneSize > dag.Params.K {
|
||||
// k-cluster violation: The candidate's blue anticone exceeded k
|
||||
return false, 0, nil, nil
|
||||
}
|
||||
|
||||
if candidateBluesAnticoneSizes[block] == dag.Params.K {
|
||||
// k-cluster violation: A block in candidate's blue anticone already
|
||||
// has k blue blocks in its own anticone
|
||||
return false, 0, nil, nil
|
||||
}
|
||||
|
||||
// This is a sanity check that validates that a blue
|
||||
// block's blue anticone is not already larger than K.
|
||||
if candidateBluesAnticoneSizes[block] > dag.Params.K {
|
||||
return false, 0, nil, errors.New("found blue anticone size larger than k")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true, candidateAnticoneSize, candidateBluesAnticoneSizes, nil
|
||||
}
|
||||
|
||||
// selectedParentAnticone returns the blocks in the anticone of the selected parent of the given node.
|
||||
// The function work as follows.
|
||||
// We start by adding all parents of the node (other than the selected parent) to a process queue.
|
||||
|
||||
@@ -21,6 +21,7 @@ type block struct {
|
||||
ExpectedScore uint64
|
||||
ExpectedSelectedParent string
|
||||
ExpectedBlues []string
|
||||
ExpectedReds []string
|
||||
Parents []string
|
||||
}
|
||||
|
||||
@@ -78,14 +79,14 @@ func TestGHOSTDAG(t *testing.T) {
|
||||
|
||||
block, err := PrepareBlockForTest(dag, parents.hashes(), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("TestGHOSTDAG: block %s got unexpected error from PrepareBlockForTest: %v", blockData.ID,
|
||||
t.Fatalf("TestGHOSTDAG: block %s got unexpected error from PrepareBlockForTest: %+v", blockData.ID,
|
||||
err)
|
||||
}
|
||||
|
||||
utilBlock := util.NewBlock(block)
|
||||
isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, BFNoPoWCheck)
|
||||
if err != nil {
|
||||
t.Fatalf("TestGHOSTDAG: dag.ProcessBlock got unexpected error for block %s: %v", blockData.ID, err)
|
||||
t.Fatalf("TestGHOSTDAG: dag.ProcessBlock got unexpected error for block %s: %+v", blockData.ID, err)
|
||||
}
|
||||
if isDelayed {
|
||||
t.Fatalf("TestGHOSTDAG: block %s "+
|
||||
@@ -104,9 +105,16 @@ func TestGHOSTDAG(t *testing.T) {
|
||||
idByBlockMap[node] = blockData.ID
|
||||
|
||||
bluesIDs := make([]string, 0, len(node.blues))
|
||||
redsIDs := make([]string, 0, len(node.reds))
|
||||
|
||||
for _, blue := range node.blues {
|
||||
bluesIDs = append(bluesIDs, idByBlockMap[blue])
|
||||
}
|
||||
|
||||
for _, red := range node.reds {
|
||||
redsIDs = append(redsIDs, idByBlockMap[red])
|
||||
}
|
||||
|
||||
selectedParentID := idByBlockMap[node.selectedParent]
|
||||
fullDataStr := fmt.Sprintf("blues: %v, selectedParent: %v, score: %v",
|
||||
bluesIDs, selectedParentID, node.blueScore)
|
||||
@@ -122,6 +130,10 @@ func TestGHOSTDAG(t *testing.T) {
|
||||
t.Errorf("Test %s: Block %s expected to have blues %v but got %v (fulldata: %v)",
|
||||
info.Name(), blockData.ID, blockData.ExpectedBlues, bluesIDs, fullDataStr)
|
||||
}
|
||||
if !reflect.DeepEqual(blockData.ExpectedReds, redsIDs) {
|
||||
t.Errorf("Test %s: Block %v expected to have reds %v but got %v (fulldata: %v)",
|
||||
info.Name(), blockData.ID, blockData.ExpectedReds, redsIDs, fullDataStr)
|
||||
}
|
||||
}
|
||||
|
||||
reds := make(map[string]bool)
|
||||
@@ -130,7 +142,7 @@ func TestGHOSTDAG(t *testing.T) {
|
||||
reds[id] = true
|
||||
}
|
||||
|
||||
for tip := &dag.virtual.blockNode; tip.selectedParent != nil; tip = tip.selectedParent {
|
||||
for tip := dag.virtual.blockNode; tip.selectedParent != nil; tip = tip.selectedParent {
|
||||
tipID := idByBlockMap[tip]
|
||||
delete(reds, tipID)
|
||||
for _, blue := range tip.blues {
|
||||
|
||||
@@ -77,7 +77,6 @@ func TestAcceptanceIndexRecover(t *testing.T) {
|
||||
|
||||
testFiles := []string{
|
||||
"blk_0_to_4.dat",
|
||||
"blk_3B.dat",
|
||||
}
|
||||
|
||||
var blocks []*util.Block
|
||||
@@ -110,7 +109,7 @@ func TestAcceptanceIndexRecover(t *testing.T) {
|
||||
|
||||
db1DAG, teardown, err := blockdag.DAGSetup("", false, db1Config)
|
||||
if err != nil {
|
||||
t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
|
||||
t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %+v", err)
|
||||
}
|
||||
if teardown != nil {
|
||||
defer teardown()
|
||||
@@ -178,7 +177,7 @@ func TestAcceptanceIndexRecover(t *testing.T) {
|
||||
|
||||
db2DAG, teardown, err := blockdag.DAGSetup("", false, db2Config)
|
||||
if err != nil {
|
||||
t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
|
||||
t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %+v", err)
|
||||
}
|
||||
if teardown != nil {
|
||||
defer teardown()
|
||||
|
||||
@@ -44,7 +44,7 @@ func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*appmessage.MsgBlo
|
||||
|
||||
msgBlock.Header = appmessage.BlockHeader{
|
||||
Version: blockVersion,
|
||||
ParentHashes: dag.TipHashes(),
|
||||
ParentHashes: dag.VirtualParentHashes(),
|
||||
HashMerkleRoot: hashMerkleTree.Root(),
|
||||
AcceptedIDMerkleRoot: acceptedIDMerkleRoot,
|
||||
UTXOCommitment: (*daghash.Hash)(multiset.Finalize()),
|
||||
@@ -60,12 +60,12 @@ func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*appmessage.MsgBlo
|
||||
//
|
||||
// This function MUST be called with the DAG state lock held (for reads).
|
||||
func (dag *BlockDAG) NextBlockMultiset() (*secp256k1.MultiSet, error) {
|
||||
_, selectedParentPastUTXO, txsAcceptanceData, err := dag.pastUTXO(&dag.virtual.blockNode)
|
||||
_, selectedParentPastUTXO, txsAcceptanceData, err := dag.pastUTXO(dag.virtual.blockNode)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return dag.virtual.blockNode.calcMultiset(dag, txsAcceptanceData, selectedParentPastUTXO)
|
||||
return dag.virtual.blockNode.calcMultiset(txsAcceptanceData, selectedParentPastUTXO)
|
||||
}
|
||||
|
||||
// CoinbasePayloadExtraData returns coinbase payload extra data parameter
|
||||
@@ -127,9 +127,8 @@ func (dag *BlockDAG) NextBlockTime() mstime.Time {
|
||||
|
||||
// CurrentBits returns the bits of the tip with the lowest bits, which also means it has highest difficulty.
|
||||
func (dag *BlockDAG) CurrentBits() uint32 {
|
||||
tips := dag.virtual.tips()
|
||||
minBits := uint32(math.MaxUint32)
|
||||
for tip := range tips {
|
||||
for tip := range dag.virtual.parents {
|
||||
if minBits > tip.Header().Bits {
|
||||
minBits = tip.Header().Bits
|
||||
}
|
||||
|
||||
@@ -7,16 +7,10 @@ import (
|
||||
)
|
||||
|
||||
// calcMultiset returns the multiset of the past UTXO of the given block.
|
||||
func (node *blockNode) calcMultiset(dag *BlockDAG, acceptanceData MultiBlockTxsAcceptanceData,
|
||||
func (node *blockNode) calcMultiset(acceptanceData MultiBlockTxsAcceptanceData,
|
||||
selectedParentPastUTXO UTXOSet) (*secp256k1.MultiSet, error) {
|
||||
|
||||
return node.pastUTXOMultiSet(dag, acceptanceData, selectedParentPastUTXO)
|
||||
}
|
||||
|
||||
func (node *blockNode) pastUTXOMultiSet(dag *BlockDAG, acceptanceData MultiBlockTxsAcceptanceData,
|
||||
selectedParentPastUTXO UTXOSet) (*secp256k1.MultiSet, error) {
|
||||
|
||||
ms, err := node.selectedParentMultiset(dag)
|
||||
ms, err := node.selectedParentMultiset()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -42,12 +36,12 @@ func (node *blockNode) pastUTXOMultiSet(dag *BlockDAG, acceptanceData MultiBlock
|
||||
// selectedParentMultiset returns the multiset of the node's selected
|
||||
// parent. If the node is the genesis blockNode then it does not have
|
||||
// a selected parent, in which case return a new, empty multiset.
|
||||
func (node *blockNode) selectedParentMultiset(dag *BlockDAG) (*secp256k1.MultiSet, error) {
|
||||
func (node *blockNode) selectedParentMultiset() (*secp256k1.MultiSet, error) {
|
||||
if node.isGenesis() {
|
||||
return secp256k1.NewMultiset(), nil
|
||||
}
|
||||
|
||||
ms, err := dag.multisetStore.multisetByBlockNode(node.selectedParent)
|
||||
ms, err := node.dag.multisetStore.multisetByBlockNode(node.selectedParent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
)
|
||||
@@ -26,13 +27,21 @@ const (
|
||||
// NTChainChanged indicates that selected parent
|
||||
// chain had changed.
|
||||
NTChainChanged
|
||||
|
||||
// NTFinalityConflict indicates that a finality conflict has just occurred
|
||||
NTFinalityConflict
|
||||
|
||||
// NTFinalityConflict indicates that a finality conflict has been resolved
|
||||
NTFinalityConflictResolved
|
||||
)
|
||||
|
||||
// notificationTypeStrings is a map of notification types back to their constant
|
||||
// names for pretty printing.
|
||||
var notificationTypeStrings = map[NotificationType]string{
|
||||
NTBlockAdded: "NTBlockAdded",
|
||||
NTChainChanged: "NTChainChanged",
|
||||
NTBlockAdded: "NTBlockAdded",
|
||||
NTChainChanged: "NTChainChanged",
|
||||
NTFinalityConflict: "NTFinalityConflict",
|
||||
NTFinalityConflictResolved: "NTFinalityConflictResolved",
|
||||
}
|
||||
|
||||
// String returns the NotificationType in human-readable form.
|
||||
@@ -87,3 +96,15 @@ type ChainChangedNotificationData struct {
|
||||
RemovedChainBlockHashes []*daghash.Hash
|
||||
AddedChainBlockHashes []*daghash.Hash
|
||||
}
|
||||
|
||||
// FinalityConflictNotificationData defines data to be sent along with a
|
||||
// FinalityConflict notification
|
||||
type FinalityConflictNotificationData struct {
|
||||
ViolatingBlockHash *daghash.Hash
|
||||
}
|
||||
|
||||
// FinalityConflictResolvedNotificationData defines data to be sent along with a
|
||||
// FinalityConflictResolved notification
|
||||
type FinalityConflictResolvedNotificationData struct {
|
||||
FinalityBlockHash *daghash.Hash
|
||||
}
|
||||
|
||||
@@ -2,17 +2,17 @@ package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/kaspanet/go-secp256k1"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/infrastructure/db/dbaccess"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/pkg/errors"
|
||||
"time"
|
||||
)
|
||||
|
||||
// chainUpdates represents the updates made to the selected parent chain after
|
||||
// selectedParentChainUpdates represents the updates made to the selected parent chain after
|
||||
// a block had been added to the DAG.
|
||||
type chainUpdates struct {
|
||||
type selectedParentChainUpdates struct {
|
||||
removedChainBlockHashes []*daghash.Hash
|
||||
addedChainBlockHashes []*daghash.Hash
|
||||
}
|
||||
@@ -58,8 +58,6 @@ func (dag *BlockDAG) processBlockNoLock(block *util.Block, flags BehaviorFlags)
|
||||
return false, false, err
|
||||
}
|
||||
|
||||
log.Debugf("Accepted block %s", blockHash)
|
||||
|
||||
return false, false, nil
|
||||
}
|
||||
|
||||
@@ -171,6 +169,8 @@ func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) er
|
||||
|
||||
dag.notifyBlockAccepted(block, chainUpdates, flags)
|
||||
|
||||
log.Debugf("Accepted block %s with status '%s'", newNode.hash, dag.index.BlockNodeStatus(newNode))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -185,8 +185,8 @@ func (dag *BlockDAG) createBlockNodeFromBlock(block *util.Block) (
|
||||
return nil, nil, err
|
||||
}
|
||||
newNode, selectedParentAnticone = dag.newBlockNode(&block.MsgBlock().Header, parents)
|
||||
newNode.status = statusDataStored
|
||||
dag.index.AddNode(newNode)
|
||||
dag.index.SetBlockNodeStatus(newNode, statusDataStored)
|
||||
|
||||
// Insert the block into the database if it's not already there. Even
|
||||
// though it is possible the block will ultimately fail to connect, it
|
||||
@@ -226,15 +226,21 @@ func (dag *BlockDAG) createBlockNodeFromBlock(block *util.Block) (
|
||||
// connectBlock handles connecting the passed node/block to the DAG.
|
||||
//
|
||||
// This function MUST be called with the DAG state lock held (for writes).
|
||||
func (dag *BlockDAG) connectBlock(node *blockNode,
|
||||
block *util.Block, selectedParentAnticone []*blockNode, flags BehaviorFlags) (*chainUpdates, error) {
|
||||
func (dag *BlockDAG) connectBlock(newNode *blockNode,
|
||||
block *util.Block, selectedParentAnticone []*blockNode, flags BehaviorFlags) (*selectedParentChainUpdates, error) {
|
||||
|
||||
err := dag.checkBlockTransactionsFinalized(block, node, flags)
|
||||
err := newNode.checkDAGRelations()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = dag.checkFinalityViolation(node); err != nil {
|
||||
err = dag.checkBlockTransactionsFinalized(block, newNode, flags)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = dag.checkBlockHasNoChainedTransactions(block, newNode, flags)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -242,108 +248,224 @@ func (dag *BlockDAG) connectBlock(node *blockNode,
|
||||
return nil, err
|
||||
}
|
||||
|
||||
newBlockPastUTXO, txsAcceptanceData, newBlockFeeData, newBlockMultiSet, err :=
|
||||
node.verifyAndBuildUTXO(dag, block.Transactions(), isBehaviorFlagRaised(flags, BFFastAdd))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error verifying UTXO for %s", node)
|
||||
isNewSelectedTip := dag.isNewSelectedTip(newNode)
|
||||
if !isNewSelectedTip {
|
||||
dag.index.SetBlockNodeStatus(newNode, statusUTXOPendingVerification)
|
||||
}
|
||||
|
||||
err = node.validateCoinbaseTransaction(dag, block, txsAcceptanceData)
|
||||
dbTx, err := dag.databaseContext.NewTx()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer dbTx.RollbackUnlessClosed()
|
||||
|
||||
if isNewSelectedTip {
|
||||
err = dag.resolveNodeStatus(newNode, dbTx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if dag.index.BlockNodeStatus(newNode) == statusValid {
|
||||
isViolatingFinality, err := newNode.isViolatingFinality()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if isViolatingFinality {
|
||||
dag.index.SetBlockNodeStatus(newNode, statusUTXOPendingVerification)
|
||||
dag.sendNotification(NTFinalityConflict, &FinalityConflictNotificationData{
|
||||
ViolatingBlockHash: newNode.hash,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
chainUpdates, err := dag.applyDAGChanges(newNode, selectedParentAnticone, dbTx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
virtualUTXODiff, chainUpdates, err :=
|
||||
dag.applyDAGChanges(node, newBlockPastUTXO, newBlockMultiSet, selectedParentAnticone)
|
||||
if err != nil {
|
||||
// Since all validation logic has already ran, if applyDAGChanges errors out,
|
||||
// this means we have a problem in the internal structure of the DAG - a problem which is
|
||||
// irrecoverable, and it would be a bad idea to attempt adding any more blocks to the DAG.
|
||||
// Therefore - in such cases we panic.
|
||||
panic(err)
|
||||
}
|
||||
|
||||
err = dag.saveChangesFromBlock(block, virtualUTXODiff, txsAcceptanceData, newBlockFeeData)
|
||||
err = dag.saveChangesFromBlock(block, dbTx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = dbTx.Commit()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dag.clearDirtyEntries()
|
||||
|
||||
dag.addBlockProcessingTimestamp()
|
||||
dag.blockCount++
|
||||
|
||||
return chainUpdates, nil
|
||||
}
|
||||
|
||||
// applyDAGChanges does the following:
|
||||
// 1. Connects each of the new block's parents to the block.
|
||||
// 2. Adds the new block to the DAG's tips.
|
||||
// 3. Updates the DAG's full UTXO set.
|
||||
// 4. Updates each of the tips' utxoDiff.
|
||||
// 5. Applies the new virtual's blue score to all the unaccepted UTXOs
|
||||
// 6. Adds the block to the reachability structures
|
||||
// 7. Adds the multiset of the block to the multiset store.
|
||||
// 8. Updates the finality point of the DAG (if required).
|
||||
//
|
||||
// It returns the diff in the virtual block's UTXO set.
|
||||
//
|
||||
// This function MUST be called with the DAG state lock held (for writes).
|
||||
func (dag *BlockDAG) applyDAGChanges(node *blockNode, newBlockPastUTXO UTXOSet,
|
||||
newBlockMultiset *secp256k1.MultiSet, selectedParentAnticone []*blockNode) (
|
||||
virtualUTXODiff *UTXODiff, chainUpdates *chainUpdates, err error) {
|
||||
|
||||
// Add the block to the reachability tree
|
||||
err = dag.reachabilityTree.addBlock(node, selectedParentAnticone)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "failed adding block to the reachability tree")
|
||||
}
|
||||
|
||||
dag.multisetStore.setMultiset(node, newBlockMultiset)
|
||||
|
||||
if err = node.updateParents(dag, newBlockPastUTXO); err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "failed updating parents of %s", node)
|
||||
}
|
||||
|
||||
// Update the virtual block's parents (the DAG tips) to include the new block.
|
||||
chainUpdates = dag.virtual.AddTip(node)
|
||||
|
||||
// Build a UTXO set for the new virtual block
|
||||
newVirtualUTXO, _, _, err := dag.pastUTXO(&dag.virtual.blockNode)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not restore past UTXO for virtual")
|
||||
}
|
||||
|
||||
// Apply new utxoDiffs to all the tips
|
||||
err = updateTipsUTXO(dag, newVirtualUTXO)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "failed updating the tips' UTXO")
|
||||
}
|
||||
|
||||
// It is now safe to meld the UTXO set to base.
|
||||
diffSet := newVirtualUTXO.(*DiffUTXOSet)
|
||||
virtualUTXODiff = diffSet.UTXODiff
|
||||
err = dag.meldVirtualUTXO(diffSet)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "failed melding the virtual UTXO")
|
||||
}
|
||||
|
||||
dag.index.SetStatusFlags(node, statusValid)
|
||||
|
||||
// And now we can update the finality point of the DAG (if required)
|
||||
dag.updateFinalityPoint()
|
||||
|
||||
return virtualUTXODiff, chainUpdates, nil
|
||||
// isNewSelectedTip determines if a new blockNode qualifies to be the next selectedTip
|
||||
func (dag *BlockDAG) isNewSelectedTip(newNode *blockNode) bool {
|
||||
return newNode.isGenesis() || dag.selectedTip().less(newNode)
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) saveChangesFromBlock(block *util.Block, virtualUTXODiff *UTXODiff,
|
||||
txsAcceptanceData MultiBlockTxsAcceptanceData, feeData compactFeeData) error {
|
||||
func (dag *BlockDAG) updateVirtualAndTips(node *blockNode, dbTx *dbaccess.TxContext) (*selectedParentChainUpdates, error) {
|
||||
didVirtualParentsChange, chainUpdates, err := dag.addTip(node)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if didVirtualParentsChange {
|
||||
// Build a UTXO set for the new virtual block
|
||||
newVirtualUTXO, _, _, err := dag.pastUTXO(dag.virtual.blockNode)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not restore past UTXO for virtual")
|
||||
}
|
||||
|
||||
// Apply new utxoDiffs to all the tips
|
||||
err = updateValidTipsUTXO(dag, newVirtualUTXO)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed updating the tips' UTXO")
|
||||
}
|
||||
|
||||
// It is now safe to meld the UTXO set to base.
|
||||
diffSet := newVirtualUTXO.(*DiffUTXOSet)
|
||||
virtualUTXODiff := diffSet.UTXODiff
|
||||
err = dag.meldVirtualUTXO(diffSet)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed melding the virtual UTXO")
|
||||
}
|
||||
|
||||
// Update the UTXO set using the diffSet that was melded into the
|
||||
// full UTXO set.
|
||||
err = updateUTXOSet(dbTx, virtualUTXODiff)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return chainUpdates, nil
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) validateAndApplyUTXOSet(
|
||||
node *blockNode, block *util.Block, dbTx *dbaccess.TxContext) error {
|
||||
|
||||
if !node.isGenesis() {
|
||||
err := dag.resolveNodeStatus(node.selectedParent, dbTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if dag.index.BlockNodeStatus(node.selectedParent) == statusDisqualifiedFromChain {
|
||||
return ruleError(ErrSelectedParentDisqualifiedFromChain,
|
||||
"Block's selected parent is disqualified from chain")
|
||||
}
|
||||
}
|
||||
|
||||
utxoVerificationData, err := node.verifyAndBuildUTXO(block.Transactions())
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error verifying UTXO for %s", node)
|
||||
}
|
||||
|
||||
err = node.validateCoinbaseTransaction(dag, block, utxoVerificationData.txsAcceptanceData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dag.applyUTXOSetChanges(node, utxoVerificationData, dbTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) applyUTXOSetChanges(
|
||||
node *blockNode, utxoVerificationData *utxoVerificationOutput, dbTx *dbaccess.TxContext) error {
|
||||
|
||||
dag.index.SetBlockNodeStatus(node, statusValid)
|
||||
|
||||
if !node.hasValidChildren() {
|
||||
err := dag.addValidTip(node)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
dag.multisetStore.setMultiset(node, utxoVerificationData.newBlockMultiset)
|
||||
|
||||
err := node.updateDiffAndDiffChild(utxoVerificationData.newBlockPastUTXO)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := node.updateParentsDiffs(dag, utxoVerificationData.newBlockPastUTXO); err != nil {
|
||||
return errors.Wrapf(err, "failed updating parents of %s", node)
|
||||
}
|
||||
|
||||
if dag.indexManager != nil {
|
||||
err := dag.indexManager.ConnectBlock(dbTx, node.hash, utxoVerificationData.txsAcceptanceData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) resolveNodeStatus(node *blockNode, dbTx *dbaccess.TxContext) error {
|
||||
blockStatus := dag.index.BlockNodeStatus(node)
|
||||
if blockStatus != statusValid && blockStatus != statusDisqualifiedFromChain {
|
||||
block, err := dag.fetchBlockByHash(node.hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dag.validateAndApplyUTXOSet(node, block, dbTx)
|
||||
if err != nil {
|
||||
if !errors.As(err, &(RuleError{})) {
|
||||
return err
|
||||
}
|
||||
dag.index.SetBlockNodeStatus(node, statusDisqualifiedFromChain)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) resolveNodeStatusInNewTransaction(node *blockNode) error {
|
||||
dbTx, err := dag.databaseContext.NewTx()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer dbTx.RollbackUnlessClosed()
|
||||
err = dag.resolveNodeStatus(node, dbTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dbTx.Commit()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
err = dag.index.flushToDB(dbTx)
|
||||
func (dag *BlockDAG) applyDAGChanges(node *blockNode, selectedParentAnticone []*blockNode, dbTx *dbaccess.TxContext) (
|
||||
*selectedParentChainUpdates, error) {
|
||||
|
||||
// Add the block to the reachability tree
|
||||
err := dag.reachabilityTree.addBlock(node, selectedParentAnticone)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed adding block to the reachability tree")
|
||||
}
|
||||
|
||||
node.updateParentsChildren()
|
||||
|
||||
chainUpdates, err := dag.updateVirtualAndTips(node, dbTx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return chainUpdates, nil
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) saveChangesFromBlock(block *util.Block, dbTx *dbaccess.TxContext) error {
|
||||
err := dag.index.flushToDB(dbTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -364,19 +486,7 @@ func (dag *BlockDAG) saveChangesFromBlock(block *util.Block, virtualUTXODiff *UT
|
||||
}
|
||||
|
||||
// Update DAG state.
|
||||
state := &dagState{
|
||||
TipHashes: dag.TipHashes(),
|
||||
LastFinalityPoint: dag.lastFinalityPoint.hash,
|
||||
LocalSubnetworkID: dag.subnetworkID,
|
||||
}
|
||||
err = saveDAGState(dbTx, state)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update the UTXO set using the diffSet that was melded into the
|
||||
// full UTXO set.
|
||||
err = updateUTXOSet(dbTx, virtualUTXODiff)
|
||||
err = dag.saveState(dbTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -389,39 +499,78 @@ func (dag *BlockDAG) saveChangesFromBlock(block *util.Block, virtualUTXODiff *UT
|
||||
return err
|
||||
}
|
||||
|
||||
// Allow the index manager to call each of the currently active
|
||||
// optional indexes with the block being connected so they can
|
||||
// update themselves accordingly.
|
||||
if dag.indexManager != nil {
|
||||
err := dag.indexManager.ConnectBlock(dbTx, block.Hash(), txsAcceptanceData)
|
||||
return nil
|
||||
}
|
||||
|
||||
// boundedMergeBreakingParents returns all parents of given `node` that break the bounded merge depth rule:
|
||||
// All blocks in node.MergeSet should be in future of node.finalityPoint, with the following exception:
|
||||
// If there exists a block C violating this, i.e., C is in node's merge set and node.finalityPoint's anticone,
|
||||
// then there must be a "kosherizing" block D in C's Future such that D is in node.blues
|
||||
// and node.finalityPoint is in D.SelectedChain
|
||||
func (dag *BlockDAG) boundedMergeBreakingParents(node *blockNode) (blockSet, error) {
|
||||
potentiallyKosherizingBlocks, err := node.nonBoundedMergeDepthViolatingBlues()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
badReds := []*blockNode{}
|
||||
|
||||
finalityPoint := node.finalityPoint()
|
||||
for _, redBlock := range node.reds {
|
||||
isFinalityPointInPast, err := dag.isInPast(finalityPoint, redBlock)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
if isFinalityPointInPast {
|
||||
continue
|
||||
}
|
||||
|
||||
isKosherized := false
|
||||
for potentiallyKosherizingBlock := range potentiallyKosherizingBlocks {
|
||||
isKosherized, err = dag.isInPast(redBlock, potentiallyKosherizingBlock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if isKosherized {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !isKosherized {
|
||||
badReds = append(badReds, redBlock)
|
||||
}
|
||||
}
|
||||
|
||||
// Apply the fee data into the database
|
||||
err = dbaccess.StoreFeeData(dbTx, block.Hash(), feeData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
boundedMergeBreakingParents := newBlockSet()
|
||||
for parent := range node.parents {
|
||||
isBadRedInPast := false
|
||||
for _, badRedBlock := range badReds {
|
||||
isBadRedInPast, err = dag.isInPast(badRedBlock, parent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if isBadRedInPast {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
err = dbTx.Commit()
|
||||
if err != nil {
|
||||
return err
|
||||
if isBadRedInPast {
|
||||
boundedMergeBreakingParents.add(parent)
|
||||
}
|
||||
}
|
||||
return boundedMergeBreakingParents, nil
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) clearDirtyEntries() {
|
||||
dag.index.clearDirtyEntries()
|
||||
dag.utxoDiffStore.clearDirtyEntries()
|
||||
dag.utxoDiffStore.clearOldEntries()
|
||||
dag.reachabilityTree.store.clearDirtyEntries()
|
||||
dag.multisetStore.clearNewEntries()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) handleConnectBlockError(err error, newNode *blockNode) error {
|
||||
if errors.As(err, &RuleError{}) {
|
||||
dag.index.SetStatusFlags(newNode, statusValidateFailed)
|
||||
dag.index.SetBlockNodeStatus(newNode, statusValidateFailed)
|
||||
|
||||
dbTx, err := dag.databaseContext.NewTx()
|
||||
if err != nil {
|
||||
@@ -444,7 +593,7 @@ func (dag *BlockDAG) handleConnectBlockError(err error, newNode *blockNode) erro
|
||||
// notifyBlockAccepted notifies the caller that the new block was
|
||||
// accepted into the block DAG. The caller would typically want to
|
||||
// react by relaying the inventory to other peers.
|
||||
func (dag *BlockDAG) notifyBlockAccepted(block *util.Block, chainUpdates *chainUpdates, flags BehaviorFlags) {
|
||||
func (dag *BlockDAG) notifyBlockAccepted(block *util.Block, chainUpdates *selectedParentChainUpdates, flags BehaviorFlags) {
|
||||
dag.sendNotification(NTBlockAdded, &BlockAddedNotificationData{
|
||||
Block: block,
|
||||
WasUnorphaned: flags&BFWasUnorphaned != 0,
|
||||
|
||||
@@ -1,11 +1,13 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"math"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/dagconfig"
|
||||
@@ -38,7 +40,10 @@ func TestProcessOrphans(t *testing.T) {
|
||||
// a. It gets added to the orphan pool
|
||||
// b. It gets rejected once it's unorphaned
|
||||
childBlock := blocks[2]
|
||||
childBlock.MsgBlock().Header.UTXOCommitment = &daghash.ZeroHash
|
||||
tx := childBlock.Transactions()[1].MsgTx()
|
||||
tx.LockTime = math.MaxUint64
|
||||
tx.TxIn[0].Sequence = 0
|
||||
childBlock.MsgBlock().Header.HashMerkleRoot = BuildHashMerkleTreeStore(childBlock.Transactions()).Root()
|
||||
|
||||
// Process the child block so that it gets added to the orphan pool
|
||||
isOrphan, isDelayed, err := dag.ProcessBlock(childBlock, BFNoPoWCheck)
|
||||
@@ -69,7 +74,7 @@ func TestProcessOrphans(t *testing.T) {
|
||||
if !ok {
|
||||
t.Fatalf("TestProcessOrphans: child block missing from block index")
|
||||
}
|
||||
if !dag.index.NodeStatus(node).KnownInvalid() {
|
||||
if !dag.index.BlockNodeStatus(node).KnownInvalid() {
|
||||
t.Fatalf("TestProcessOrphans: child block erroneously not marked as invalid")
|
||||
}
|
||||
}
|
||||
@@ -300,7 +305,7 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
|
||||
if !ok {
|
||||
t.Fatalf("block %s does not exist in the DAG", block1.Hash())
|
||||
}
|
||||
dag.index.SetStatusFlags(blockNode1, statusValidateFailed)
|
||||
dag.index.SetBlockNodeStatus(blockNode1, statusValidateFailed)
|
||||
|
||||
block2 := blocks[2]
|
||||
err = dag.maybeAcceptBlock(block2, BFNone)
|
||||
@@ -317,7 +322,7 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
|
||||
}
|
||||
|
||||
// Set block1's status back to valid for next tests
|
||||
dag.index.UnsetStatusFlags(blockNode1, statusValidateFailed)
|
||||
dag.index.SetBlockNodeStatus(blockNode1, statusValid)
|
||||
|
||||
// Test rejecting the block due to bad context
|
||||
originalBits := block2.MsgBlock().Header.Bits
|
||||
|
||||
9
domain/blockdag/pruning.go
Normal file
9
domain/blockdag/pruning.go
Normal file
@@ -0,0 +1,9 @@
|
||||
package blockdag
|
||||
|
||||
// pruningDepth is used to determine the depth from the virtual block where the pruning point is set once updated.
|
||||
// The pruningDepth is defined in a way that it's mathematically proven that a block
|
||||
// in virtual.blockAtDepth(pruningDepth).anticone that is not in virtual.past will never be in virtual.past.
|
||||
func (dag *BlockDAG) pruningDepth() uint64 {
|
||||
k := uint64(dag.Params.K)
|
||||
return 2*dag.FinalityInterval() + 4*mergeSetSizeLimit*k + 2*k + 2
|
||||
}
|
||||
46
domain/blockdag/pruning_test.go
Normal file
46
domain/blockdag/pruning_test.go
Normal file
@@ -0,0 +1,46 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/dagconfig"
|
||||
)
|
||||
|
||||
func TestPruningDepth(t *testing.T) {
|
||||
tests := []struct {
|
||||
params *dagconfig.Params
|
||||
expectedDepth uint64
|
||||
}{
|
||||
{
|
||||
params: &dagconfig.MainnetParams,
|
||||
expectedDepth: 244838,
|
||||
},
|
||||
{
|
||||
params: &dagconfig.TestnetParams,
|
||||
expectedDepth: 244838,
|
||||
},
|
||||
{
|
||||
params: &dagconfig.DevnetParams,
|
||||
expectedDepth: 244838,
|
||||
},
|
||||
{
|
||||
params: &dagconfig.SimnetParams,
|
||||
expectedDepth: 192038,
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
func() {
|
||||
dag, teardownFunc, err := DAGSetup("TestFinalityInterval", true, Config{
|
||||
DAGParams: test.params,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to setup dag instance for %s: %v", test.params.Name, err)
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
if dag.pruningDepth() != test.expectedDepth {
|
||||
t.Errorf("pruningDepth in %s is expected to be %d but got %d", test.params.Name, test.expectedDepth, dag.pruningDepth())
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
@@ -2,11 +2,13 @@ package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/infrastructure/db/dbaccess"
|
||||
"github.com/pkg/errors"
|
||||
"math"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/infrastructure/db/dbaccess"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -897,7 +899,7 @@ func newReachabilityTree(dag *BlockDAG) *reachabilityTree {
|
||||
}
|
||||
}
|
||||
|
||||
func (rt *reachabilityTree) init(dbContext dbaccess.Context) error {
|
||||
func (rt *reachabilityTree) init(dbContext *dbaccess.DatabaseContext) error {
|
||||
// Init the store
|
||||
err := rt.store.init(dbContext)
|
||||
if err != nil {
|
||||
@@ -1161,15 +1163,12 @@ func (rt *reachabilityTree) propagateChildIntervals(interval *reachabilityInterv
|
||||
return nil
|
||||
}
|
||||
|
||||
// isInPast returns true if `this` is in the past (exclusive) of `other`
|
||||
// isInPast returns true if `this` is in the past of `other`
|
||||
// in the DAG.
|
||||
//
|
||||
// Note: this method will return true if this == other
|
||||
// The complexity of this method is O(log(|this.futureCoveringTreeNodeSet|))
|
||||
func (rt *reachabilityTree) isInPast(this *blockNode, other *blockNode) (bool, error) {
|
||||
// By definition, a node is not in the past of itself.
|
||||
if this == other {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Check if this node is a reachability tree ancestor of the
|
||||
// other node
|
||||
isReachabilityTreeAncestor, err := rt.isReachabilityTreeAncestorOf(this, other)
|
||||
@@ -1194,6 +1193,8 @@ func (rt *reachabilityTree) isInPast(this *blockNode, other *blockNode) (bool, e
|
||||
}
|
||||
|
||||
// isReachabilityTreeAncestorOf returns whether `this` is in the selected parent chain of `other`.
|
||||
//
|
||||
// Note: this method will return true if this == other
|
||||
func (rt *reachabilityTree) isReachabilityTreeAncestorOf(this *blockNode, other *blockNode) (bool, error) {
|
||||
thisTreeNode, err := rt.store.treeNodeByBlockNode(this)
|
||||
if err != nil {
|
||||
|
||||
@@ -2,12 +2,13 @@ package blockdag
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/infrastructure/db/database"
|
||||
"github.com/kaspanet/kaspad/infrastructure/db/dbaccess"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/pkg/errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
type reachabilityData struct {
|
||||
|
||||
@@ -7,7 +7,6 @@ package blockdag
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/domain/txscript"
|
||||
@@ -25,12 +24,12 @@ type txValidateItem struct {
|
||||
// inputs. It provides several channels for communication and a processing
|
||||
// function that is intended to be in run multiple goroutines.
|
||||
type txValidator struct {
|
||||
validateChan chan *txValidateItem
|
||||
quitChan chan struct{}
|
||||
resultChan chan error
|
||||
utxoSet UTXOSet
|
||||
flags txscript.ScriptFlags
|
||||
sigCache *txscript.SigCache
|
||||
validateChan chan *txValidateItem
|
||||
quitChan chan struct{}
|
||||
resultChan chan error
|
||||
referencedUTXOEntries []*UTXOEntry
|
||||
flags txscript.ScriptFlags
|
||||
sigCache *txscript.SigCache
|
||||
}
|
||||
|
||||
// sendResult sends the result of a script pair validation on the internal
|
||||
@@ -52,19 +51,8 @@ out:
|
||||
for {
|
||||
select {
|
||||
case txVI := <-v.validateChan:
|
||||
// Ensure the referenced input utxo is available.
|
||||
txIn := txVI.txIn
|
||||
entry, ok := v.utxoSet.Get(txIn.PreviousOutpoint)
|
||||
if !ok {
|
||||
str := fmt.Sprintf("unable to find unspent "+
|
||||
"output %s referenced from "+
|
||||
"transaction %s input %d",
|
||||
txIn.PreviousOutpoint, txVI.tx.ID(),
|
||||
txVI.txInIndex)
|
||||
err := ruleError(ErrMissingTxOut, str)
|
||||
v.sendResult(err)
|
||||
break out
|
||||
}
|
||||
entry := v.referencedUTXOEntries[txVI.txInIndex]
|
||||
|
||||
// Create a new script engine for the script pair.
|
||||
sigScript := txIn.SignatureScript
|
||||
@@ -165,20 +153,20 @@ func (v *txValidator) Validate(items []*txValidateItem) error {
|
||||
|
||||
// newTxValidator returns a new instance of txValidator to be used for
|
||||
// validating transaction scripts asynchronously.
|
||||
func newTxValidator(utxoSet UTXOSet, flags txscript.ScriptFlags, sigCache *txscript.SigCache) *txValidator {
|
||||
func newTxValidator(referencedUTXOEntries []*UTXOEntry, flags txscript.ScriptFlags, sigCache *txscript.SigCache) *txValidator {
|
||||
return &txValidator{
|
||||
validateChan: make(chan *txValidateItem),
|
||||
quitChan: make(chan struct{}),
|
||||
resultChan: make(chan error),
|
||||
utxoSet: utxoSet,
|
||||
sigCache: sigCache,
|
||||
flags: flags,
|
||||
validateChan: make(chan *txValidateItem),
|
||||
quitChan: make(chan struct{}),
|
||||
resultChan: make(chan error),
|
||||
referencedUTXOEntries: referencedUTXOEntries,
|
||||
sigCache: sigCache,
|
||||
flags: flags,
|
||||
}
|
||||
}
|
||||
|
||||
// ValidateTransactionScripts validates the scripts for the passed transaction
|
||||
// using multiple goroutines.
|
||||
func ValidateTransactionScripts(tx *util.Tx, utxoSet UTXOSet, flags txscript.ScriptFlags, sigCache *txscript.SigCache) error {
|
||||
func ValidateTransactionScripts(tx *util.Tx, referencedUTXOEntries []*UTXOEntry, flags txscript.ScriptFlags, sigCache *txscript.SigCache) error {
|
||||
// Collect all of the transaction inputs and required information for
|
||||
// validation.
|
||||
txIns := tx.MsgTx().TxIn
|
||||
@@ -193,40 +181,6 @@ func ValidateTransactionScripts(tx *util.Tx, utxoSet UTXOSet, flags txscript.Scr
|
||||
}
|
||||
|
||||
// Validate all of the inputs.
|
||||
validator := newTxValidator(utxoSet, flags, sigCache)
|
||||
validator := newTxValidator(referencedUTXOEntries, flags, sigCache)
|
||||
return validator.Validate(txValItems)
|
||||
}
|
||||
|
||||
// checkBlockScripts executes and validates the scripts for all transactions in
|
||||
// the passed block using multiple goroutines.
|
||||
func checkBlockScripts(block *blockNode, utxoSet UTXOSet, transactions []*util.Tx, scriptFlags txscript.ScriptFlags, sigCache *txscript.SigCache) error {
|
||||
// Collect all of the transaction inputs and required information for
|
||||
// validation for all transactions in the block into a single slice.
|
||||
numInputs := 0
|
||||
for _, tx := range transactions {
|
||||
numInputs += len(tx.MsgTx().TxIn)
|
||||
}
|
||||
txValItems := make([]*txValidateItem, 0, numInputs)
|
||||
for _, tx := range transactions {
|
||||
for txInIdx, txIn := range tx.MsgTx().TxIn {
|
||||
txVI := &txValidateItem{
|
||||
txInIndex: txInIdx,
|
||||
txIn: txIn,
|
||||
tx: tx,
|
||||
}
|
||||
txValItems = append(txValItems, txVI)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate all of the inputs.
|
||||
validator := newTxValidator(utxoSet, scriptFlags, sigCache)
|
||||
start := time.Now()
|
||||
if err := validator.Validate(txValItems); err != nil {
|
||||
return err
|
||||
}
|
||||
elapsed := time.Since(start)
|
||||
|
||||
log.Tracef("block %s took %s to verify", block.hash, elapsed)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,55 +0,0 @@
|
||||
// Copyright (c) 2013-2017 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/txscript"
|
||||
)
|
||||
|
||||
// TestCheckBlockScripts ensures that validating the all of the scripts in a
|
||||
// known-good block doesn't return an error.
|
||||
func TestCheckBlockScripts(t *testing.T) {
|
||||
t.Skip()
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
|
||||
testBlockNum := 277647
|
||||
blockDataFile := fmt.Sprintf("%d.dat", testBlockNum)
|
||||
blocks, err := LoadBlocks(filepath.Join("testdata/", blockDataFile))
|
||||
if err != nil {
|
||||
t.Errorf("Error loading file: %v\n", err)
|
||||
return
|
||||
}
|
||||
if len(blocks) > 1 {
|
||||
t.Errorf("The test block file must only have one block in it")
|
||||
return
|
||||
}
|
||||
if len(blocks) == 0 {
|
||||
t.Errorf("The test block file may not be empty")
|
||||
return
|
||||
}
|
||||
|
||||
storeDataFile := fmt.Sprintf("%d.utxostore", testBlockNum)
|
||||
utxoSet, err := loadUTXOSet(storeDataFile)
|
||||
if err != nil {
|
||||
t.Errorf("Error loading txstore: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
node := &blockNode{
|
||||
hash: blocks[0].Hash(),
|
||||
}
|
||||
|
||||
scriptFlags := txscript.ScriptNoFlags
|
||||
err = checkBlockScripts(node, utxoSet, blocks[0].Transactions(), scriptFlags, nil)
|
||||
if err != nil {
|
||||
t.Errorf("Transaction script validation failed: %v\n", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -2,6 +2,7 @@ package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -77,11 +78,22 @@ func (dag *BlockDAG) IsInSelectedParentChain(blockHash *daghash.Hash) (bool, err
|
||||
}
|
||||
|
||||
// isInSelectedParentChainOf returns whether `node` is in the selected parent chain of `other`.
|
||||
//
|
||||
// Note: this method will return true if node == other
|
||||
func (dag *BlockDAG) isInSelectedParentChainOf(node *blockNode, other *blockNode) (bool, error) {
|
||||
// By definition, a node is not in the selected parent chain of itself.
|
||||
if node == other {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return dag.reachabilityTree.isReachabilityTreeAncestorOf(node, other)
|
||||
}
|
||||
|
||||
// isInSelectedParentChainOfAll returns true if `node` is in the selected parent chain of all `others`
|
||||
func (dag *BlockDAG) isInSelectedParentChainOfAll(node *blockNode, others blockSet) (bool, error) {
|
||||
for other := range others {
|
||||
isInSelectedParentChain, err := dag.isInSelectedParentChainOf(node, other)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !isInSelectedParentChain {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
)
|
||||
@@ -30,20 +29,41 @@ func (dag *BlockDAG) CalcSequenceLock(tx *util.Tx, utxoSet UTXOSet) (*SequenceLo
|
||||
dag.dagLock.RLock()
|
||||
defer dag.dagLock.RUnlock()
|
||||
|
||||
return dag.calcSequenceLock(dag.selectedTip(), utxoSet, tx)
|
||||
return dag.calcTxSequenceLock(dag.selectedTip(), tx, utxoSet)
|
||||
}
|
||||
|
||||
// CalcSequenceLockNoLock is lock free version of CalcSequenceLockWithLock
|
||||
// This function is unsafe for concurrent access.
|
||||
func (dag *BlockDAG) CalcSequenceLockNoLock(tx *util.Tx, utxoSet UTXOSet) (*SequenceLock, error) {
|
||||
return dag.calcSequenceLock(dag.selectedTip(), utxoSet, tx)
|
||||
return dag.calcTxSequenceLock(dag.selectedTip(), tx, utxoSet)
|
||||
}
|
||||
|
||||
// calcSequenceLock computes the relative lock-times for the passed
|
||||
// calcTxSequenceLock computes the relative lock-times for the passed
|
||||
// transaction. See the exported version, CalcSequenceLock for further details.
|
||||
//
|
||||
// This function MUST be called with the DAG state lock held (for writes).
|
||||
func (dag *BlockDAG) calcSequenceLock(node *blockNode, utxoSet UTXOSet, tx *util.Tx) (*SequenceLock, error) {
|
||||
func (dag *BlockDAG) calcTxSequenceLock(node *blockNode, tx *util.Tx, utxoSet UTXOSet) (*SequenceLock, error) {
|
||||
referencedUTXOEntries, err := dag.getReferencedUTXOEntries(tx, utxoSet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return dag.calcTxSequenceLockFromReferencedUTXOEntries(node, tx, referencedUTXOEntries)
|
||||
}
|
||||
|
||||
// CalcTxSequenceLockFromReferencedUTXOEntries computes the relative lock-times for the passed
|
||||
// transaction, with the given referenced UTXO entries. See CalcSequenceLock for further details.
|
||||
func (dag *BlockDAG) CalcTxSequenceLockFromReferencedUTXOEntries(
|
||||
tx *util.Tx, referencedUTXOEntries []*UTXOEntry) (*SequenceLock, error) {
|
||||
dag.dagLock.RLock()
|
||||
defer dag.dagLock.RUnlock()
|
||||
|
||||
return dag.calcTxSequenceLockFromReferencedUTXOEntries(dag.selectedTip(), tx, referencedUTXOEntries)
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) calcTxSequenceLockFromReferencedUTXOEntries(
|
||||
node *blockNode, tx *util.Tx, referencedUTXOEntries []*UTXOEntry) (*SequenceLock, error) {
|
||||
|
||||
// A value of -1 for each relative lock type represents a relative time
|
||||
// lock value that will allow a transaction to be included in a block
|
||||
// at any given height or time.
|
||||
@@ -56,22 +76,14 @@ func (dag *BlockDAG) calcSequenceLock(node *blockNode, utxoSet UTXOSet, tx *util
|
||||
return sequenceLock, nil
|
||||
}
|
||||
|
||||
mTx := tx.MsgTx()
|
||||
for txInIndex, txIn := range mTx.TxIn {
|
||||
entry, ok := utxoSet.Get(txIn.PreviousOutpoint)
|
||||
if !ok {
|
||||
str := fmt.Sprintf("output %s referenced from "+
|
||||
"transaction %s input %d either does not exist or "+
|
||||
"has already been spent", txIn.PreviousOutpoint,
|
||||
tx.ID(), txInIndex)
|
||||
return sequenceLock, ruleError(ErrMissingTxOut, str)
|
||||
}
|
||||
for i, txIn := range tx.MsgTx().TxIn {
|
||||
utxoEntry := referencedUTXOEntries[i]
|
||||
|
||||
// If the input blue score is set to the mempool blue score, then we
|
||||
// assume the transaction makes it into the next block when
|
||||
// evaluating its sequence blocks.
|
||||
inputBlueScore := entry.BlockBlueScore()
|
||||
if entry.IsUnaccepted() {
|
||||
inputBlueScore := utxoEntry.BlockBlueScore()
|
||||
if utxoEntry.IsUnaccepted() {
|
||||
inputBlueScore = dag.virtual.blueScore
|
||||
}
|
||||
|
||||
@@ -97,7 +109,7 @@ func (dag *BlockDAG) calcSequenceLock(node *blockNode, utxoSet UTXOSet, tx *util
|
||||
for blockNode.selectedParent.blueScore > inputBlueScore {
|
||||
blockNode = blockNode.selectedParent
|
||||
}
|
||||
medianTime := blockNode.PastMedianTime(dag)
|
||||
medianTime := blockNode.PastMedianTime()
|
||||
|
||||
// Time based relative time-locks have a time granularity of
|
||||
// appmessage.SequenceLockTimeGranularity, so we shift left by this
|
||||
|
||||
@@ -14,14 +14,14 @@ import (
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/infrastructure/db/database/ffldb/ldb"
|
||||
"github.com/kaspanet/kaspad/infrastructure/config"
|
||||
"github.com/kaspanet/kaspad/infrastructure/db/database/ldb"
|
||||
"github.com/kaspanet/kaspad/infrastructure/db/dbaccess"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/subnetworkid"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
|
||||
"github.com/kaspanet/kaspad/util/subnetworkid"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/domain/txscript"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
@@ -43,7 +43,7 @@ func FileExists(name string) bool {
|
||||
// The openDB parameter instructs DAGSetup whether or not to also open the
|
||||
// database. Setting it to false is useful in tests that handle database
|
||||
// opening/closing by themselves.
|
||||
func DAGSetup(dbName string, openDb bool, config Config) (*BlockDAG, func(), error) {
|
||||
func DAGSetup(dbName string, openDb bool, dagConfig Config) (*BlockDAG, func(), error) {
|
||||
var teardown func()
|
||||
|
||||
// To make sure that the teardown function is not called before any goroutines finished to run -
|
||||
@@ -81,7 +81,7 @@ func DAGSetup(dbName string, openDb bool, config Config) (*BlockDAG, func(), err
|
||||
return nil, nil, errors.Errorf("error creating db: %s", err)
|
||||
}
|
||||
|
||||
config.DatabaseContext = databaseContext
|
||||
dagConfig.DatabaseContext = databaseContext
|
||||
|
||||
// Setup a teardown function for cleaning up. This function is
|
||||
// returned to the caller to be invoked when it is done testing.
|
||||
@@ -99,14 +99,15 @@ func DAGSetup(dbName string, openDb bool, config Config) (*BlockDAG, func(), err
|
||||
}
|
||||
}
|
||||
|
||||
config.TimeSource = NewTimeSource()
|
||||
config.SigCache = txscript.NewSigCache(1000)
|
||||
dagConfig.TimeSource = NewTimeSource()
|
||||
dagConfig.SigCache = txscript.NewSigCache(1000)
|
||||
dagConfig.MaxUTXOCacheSize = config.DefaultConfig().MaxUTXOCacheSize
|
||||
|
||||
// Create the DAG instance.
|
||||
dag, err := New(&config)
|
||||
dag, err := New(&dagConfig)
|
||||
if err != nil {
|
||||
teardown()
|
||||
err := errors.Errorf("failed to create dag instance: %s", err)
|
||||
err := errors.Wrapf(err, "failed to create dag instance")
|
||||
return nil, nil, err
|
||||
}
|
||||
return dag, teardown, nil
|
||||
@@ -147,42 +148,6 @@ func createTxForTest(numInputs uint32, numOutputs uint32, outputValue uint64, su
|
||||
return appmessage.NewNativeMsgTx(appmessage.TxVersion, txIns, txOuts)
|
||||
}
|
||||
|
||||
// VirtualForTest is an exported version for virtualBlock, so that it can be returned by exported test_util methods
|
||||
type VirtualForTest *virtualBlock
|
||||
|
||||
// SetVirtualForTest replaces the dag's virtual block. This function is used for test purposes only
|
||||
func SetVirtualForTest(dag *BlockDAG, virtual VirtualForTest) VirtualForTest {
|
||||
oldVirtual := dag.virtual
|
||||
dag.virtual = virtual
|
||||
return VirtualForTest(oldVirtual)
|
||||
}
|
||||
|
||||
// GetVirtualFromParentsForTest generates a virtual block with the given parents.
|
||||
func GetVirtualFromParentsForTest(dag *BlockDAG, parentHashes []*daghash.Hash) (VirtualForTest, error) {
|
||||
parents := newBlockSet()
|
||||
for _, hash := range parentHashes {
|
||||
parent, ok := dag.index.LookupNode(hash)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("GetVirtualFromParentsForTest: didn't found node for hash %s", hash)
|
||||
}
|
||||
parents.add(parent)
|
||||
}
|
||||
virtual := newVirtualBlock(dag, parents)
|
||||
|
||||
pastUTXO, _, _, err := dag.pastUTXO(&virtual.blockNode)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
diffUTXO := pastUTXO.clone().(*DiffUTXOSet)
|
||||
err = diffUTXO.meldToBase()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
virtual.utxoSet = diffUTXO.base
|
||||
|
||||
return VirtualForTest(virtual), nil
|
||||
}
|
||||
|
||||
// LoadBlocks reads files containing kaspa gzipped block data from disk
|
||||
// and returns them as an array of util.Block.
|
||||
func LoadBlocks(filename string) (blocks []*util.Block, err error) {
|
||||
@@ -244,6 +209,10 @@ func opTrueAddress(prefix util.Bech32Prefix) (util.Address, error) {
|
||||
}
|
||||
|
||||
// PrepareBlockForTest generates a block with the proper merkle roots, coinbase transaction etc. This function is used for test purposes only
|
||||
//
|
||||
// Note: since we need to calculate acceptedIDMerkleRoot and utxoCommitment, we have to resolve selectedParent's utxo-set.
|
||||
// Therefore, this might skew the test results in a way where blocks that should have been status UTXOPendingVerification have
|
||||
// some other status.
|
||||
func PrepareBlockForTest(dag *BlockDAG, parentHashes []*daghash.Hash, transactions []*appmessage.MsgTx) (*appmessage.MsgBlock, error) {
|
||||
parents := newBlockSet()
|
||||
for _, hash := range parentHashes {
|
||||
@@ -255,6 +224,13 @@ func PrepareBlockForTest(dag *BlockDAG, parentHashes []*daghash.Hash, transactio
|
||||
}
|
||||
node, _ := dag.newBlockNode(nil, parents)
|
||||
|
||||
if dag.index.BlockNodeStatus(node.selectedParent) == statusUTXOPendingVerification {
|
||||
err := resolveNodeStatusForTest(node.selectedParent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
_, selectedParentPastUTXO, txsAcceptanceData, err := dag.pastUTXO(node)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -262,7 +238,7 @@ func PrepareBlockForTest(dag *BlockDAG, parentHashes []*daghash.Hash, transactio
|
||||
|
||||
calculatedAccepetedIDMerkleRoot := calculateAcceptedIDMerkleRoot(txsAcceptanceData)
|
||||
|
||||
multiset, err := node.calcMultiset(dag, txsAcceptanceData, selectedParentPastUTXO)
|
||||
multiset, err := node.calcMultiset(txsAcceptanceData, selectedParentPastUTXO)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -287,7 +263,8 @@ func PrepareBlockForTest(dag *BlockDAG, parentHashes []*daghash.Hash, transactio
|
||||
return nil, err
|
||||
}
|
||||
|
||||
blockTransactions[0], err = node.expectedCoinbaseTransaction(dag, txsAcceptanceData, coinbasePayloadScriptPubKey, coinbasePayloadExtraData)
|
||||
blockTransactions[0], err = node.expectedCoinbaseTransaction(
|
||||
txsAcceptanceData, coinbasePayloadScriptPubKey, coinbasePayloadExtraData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -315,7 +292,7 @@ func PrepareBlockForTest(dag *BlockDAG, parentHashes []*daghash.Hash, transactio
|
||||
msgBlock.AddTransaction(tx.MsgTx())
|
||||
}
|
||||
|
||||
timestamp := node.parents.bluest().PastMedianTime(dag)
|
||||
timestamp := node.parents.bluest().PastMedianTime()
|
||||
msgBlock.Header = appmessage.BlockHeader{
|
||||
Version: blockVersion,
|
||||
|
||||
@@ -331,18 +308,39 @@ func PrepareBlockForTest(dag *BlockDAG, parentHashes []*daghash.Hash, transactio
|
||||
return &msgBlock, nil
|
||||
}
|
||||
|
||||
func resolveNodeStatusForTest(node *blockNode) error {
|
||||
dbTx, err := node.dag.databaseContext.NewTx()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer dbTx.RollbackUnlessClosed()
|
||||
|
||||
err = node.dag.resolveNodeStatus(node, dbTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dbTx.Commit()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PrepareAndProcessBlockForTest prepares a block that points to the given parent
|
||||
// hashes and process it.
|
||||
func PrepareAndProcessBlockForTest(t *testing.T, dag *BlockDAG, parentHashes []*daghash.Hash, transactions []*appmessage.MsgTx) *appmessage.MsgBlock {
|
||||
func PrepareAndProcessBlockForTest(
|
||||
t *testing.T, dag *BlockDAG, parentHashes []*daghash.Hash, transactions []*appmessage.MsgTx) *appmessage.MsgBlock {
|
||||
|
||||
daghash.Sort(parentHashes)
|
||||
block, err := PrepareBlockForTest(dag, parentHashes, transactions)
|
||||
if err != nil {
|
||||
t.Fatalf("error in PrepareBlockForTest: %s", err)
|
||||
t.Fatalf("error in PrepareBlockForTest: %+v", err)
|
||||
}
|
||||
utilBlock := util.NewBlock(block)
|
||||
isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, BFNoPoWCheck)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error in ProcessBlock: %s", err)
|
||||
t.Fatalf("unexpected error in ProcessBlock: %+v", err)
|
||||
}
|
||||
if isDelayed {
|
||||
t.Fatalf("block is too far in the future")
|
||||
|
||||
19
domain/blockdag/testdata/dags/dag0.json
vendored
19
domain/blockdag/testdata/dags/dag0.json
vendored
@@ -11,6 +11,7 @@
|
||||
"ID": "B",
|
||||
"ExpectedScore": 1,
|
||||
"ExpectedSelectedParent": "A",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"A"
|
||||
],
|
||||
@@ -22,6 +23,7 @@
|
||||
"ID": "C",
|
||||
"ExpectedScore": 2,
|
||||
"ExpectedSelectedParent": "B",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"B"
|
||||
],
|
||||
@@ -33,6 +35,7 @@
|
||||
"ID": "D",
|
||||
"ExpectedScore": 1,
|
||||
"ExpectedSelectedParent": "A",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"A"
|
||||
],
|
||||
@@ -44,6 +47,7 @@
|
||||
"ID": "E",
|
||||
"ExpectedScore": 4,
|
||||
"ExpectedSelectedParent": "C",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"C",
|
||||
"D"
|
||||
@@ -57,6 +61,7 @@
|
||||
"ID": "F",
|
||||
"ExpectedScore": 1,
|
||||
"ExpectedSelectedParent": "A",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"A"
|
||||
],
|
||||
@@ -68,6 +73,7 @@
|
||||
"ID": "G",
|
||||
"ExpectedScore": 2,
|
||||
"ExpectedSelectedParent": "F",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"F"
|
||||
],
|
||||
@@ -79,6 +85,7 @@
|
||||
"ID": "H",
|
||||
"ExpectedScore": 1,
|
||||
"ExpectedSelectedParent": "A",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"A"
|
||||
],
|
||||
@@ -90,6 +97,7 @@
|
||||
"ID": "I",
|
||||
"ExpectedScore": 1,
|
||||
"ExpectedSelectedParent": "A",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"A"
|
||||
],
|
||||
@@ -101,6 +109,7 @@
|
||||
"ID": "J",
|
||||
"ExpectedScore": 7,
|
||||
"ExpectedSelectedParent": "E",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"E",
|
||||
"F",
|
||||
@@ -115,6 +124,7 @@
|
||||
"ID": "K",
|
||||
"ExpectedScore": 8,
|
||||
"ExpectedSelectedParent": "J",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"J"
|
||||
],
|
||||
@@ -126,6 +136,7 @@
|
||||
"ID": "L",
|
||||
"ExpectedScore": 9,
|
||||
"ExpectedSelectedParent": "K",
|
||||
"ExpectedReds": ["I"],
|
||||
"ExpectedBlues": [
|
||||
"K"
|
||||
],
|
||||
@@ -138,6 +149,7 @@
|
||||
"ID": "M",
|
||||
"ExpectedScore": 10,
|
||||
"ExpectedSelectedParent": "L",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"L"
|
||||
],
|
||||
@@ -149,6 +161,7 @@
|
||||
"ID": "N",
|
||||
"ExpectedScore": 11,
|
||||
"ExpectedSelectedParent": "M",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"M"
|
||||
],
|
||||
@@ -160,6 +173,7 @@
|
||||
"ID": "O",
|
||||
"ExpectedScore": 11,
|
||||
"ExpectedSelectedParent": "M",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"M"
|
||||
],
|
||||
@@ -171,6 +185,7 @@
|
||||
"ID": "P",
|
||||
"ExpectedScore": 11,
|
||||
"ExpectedSelectedParent": "M",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"M"
|
||||
],
|
||||
@@ -182,6 +197,7 @@
|
||||
"ID": "Q",
|
||||
"ExpectedScore": 11,
|
||||
"ExpectedSelectedParent": "M",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"M"
|
||||
],
|
||||
@@ -193,6 +209,7 @@
|
||||
"ID": "R",
|
||||
"ExpectedScore": 11,
|
||||
"ExpectedSelectedParent": "M",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"M"
|
||||
],
|
||||
@@ -204,6 +221,7 @@
|
||||
"ID": "S",
|
||||
"ExpectedScore": 12,
|
||||
"ExpectedSelectedParent": "R",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"R"
|
||||
],
|
||||
@@ -215,6 +233,7 @@
|
||||
"ID": "T",
|
||||
"ExpectedScore": 16,
|
||||
"ExpectedSelectedParent": "S",
|
||||
"ExpectedReds": ["Q"],
|
||||
"ExpectedBlues": [
|
||||
"S",
|
||||
"P",
|
||||
|
||||
83
domain/blockdag/testdata/dags/dag1.json
vendored
83
domain/blockdag/testdata/dags/dag1.json
vendored
@@ -21,6 +21,7 @@
|
||||
"ID": "1",
|
||||
"ExpectedScore": 1,
|
||||
"ExpectedSelectedParent": "0",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"0"
|
||||
],
|
||||
@@ -32,6 +33,7 @@
|
||||
"ID": "2",
|
||||
"ExpectedScore": 1,
|
||||
"ExpectedSelectedParent": "0",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"0"
|
||||
],
|
||||
@@ -43,6 +45,7 @@
|
||||
"ID": "3",
|
||||
"ExpectedScore": 1,
|
||||
"ExpectedSelectedParent": "0",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"0"
|
||||
],
|
||||
@@ -54,6 +57,7 @@
|
||||
"ID": "4",
|
||||
"ExpectedScore": 2,
|
||||
"ExpectedSelectedParent": "1",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"1"
|
||||
],
|
||||
@@ -64,10 +68,11 @@
|
||||
{
|
||||
"ID": "5",
|
||||
"ExpectedScore": 3,
|
||||
"ExpectedSelectedParent": "2",
|
||||
"ExpectedSelectedParent": "3",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"2",
|
||||
"3"
|
||||
"3",
|
||||
"2"
|
||||
],
|
||||
"Parents": [
|
||||
"2",
|
||||
@@ -78,6 +83,7 @@
|
||||
"ID": "6",
|
||||
"ExpectedScore": 2,
|
||||
"ExpectedSelectedParent": "3",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"3"
|
||||
],
|
||||
@@ -89,6 +95,7 @@
|
||||
"ID": "7",
|
||||
"ExpectedScore": 3,
|
||||
"ExpectedSelectedParent": "6",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"6"
|
||||
],
|
||||
@@ -99,10 +106,11 @@
|
||||
{
|
||||
"ID": "8",
|
||||
"ExpectedScore": 3,
|
||||
"ExpectedSelectedParent": "2",
|
||||
"ExpectedSelectedParent": "1",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"2",
|
||||
"1"
|
||||
"1",
|
||||
"2"
|
||||
],
|
||||
"Parents": [
|
||||
"1",
|
||||
@@ -113,6 +121,7 @@
|
||||
"ID": "9",
|
||||
"ExpectedScore": 5,
|
||||
"ExpectedSelectedParent": "5",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"5",
|
||||
"6"
|
||||
@@ -126,6 +135,7 @@
|
||||
"ID": "10",
|
||||
"ExpectedScore": 5,
|
||||
"ExpectedSelectedParent": "8",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"8",
|
||||
"4"
|
||||
@@ -139,6 +149,7 @@
|
||||
"ID": "11",
|
||||
"ExpectedScore": 7,
|
||||
"ExpectedSelectedParent": "9",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"9",
|
||||
"7"
|
||||
@@ -151,11 +162,15 @@
|
||||
{
|
||||
"ID": "12",
|
||||
"ExpectedScore": 8,
|
||||
"ExpectedSelectedParent": "9",
|
||||
"ExpectedSelectedParent": "10",
|
||||
"ExpectedReds": [
|
||||
"3",
|
||||
"6"
|
||||
],
|
||||
"ExpectedBlues": [
|
||||
"9",
|
||||
"8",
|
||||
"10"
|
||||
"10",
|
||||
"5",
|
||||
"9"
|
||||
],
|
||||
"Parents": [
|
||||
"10",
|
||||
@@ -166,6 +181,7 @@
|
||||
"ID": "13",
|
||||
"ExpectedScore": 6,
|
||||
"ExpectedSelectedParent": "8",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"8",
|
||||
"3",
|
||||
@@ -180,6 +196,9 @@
|
||||
"ID": "14",
|
||||
"ExpectedScore": 8,
|
||||
"ExpectedSelectedParent": "13",
|
||||
"ExpectedReds": [
|
||||
"4"
|
||||
],
|
||||
"ExpectedBlues": [
|
||||
"13",
|
||||
"10"
|
||||
@@ -193,6 +212,10 @@
|
||||
"ID": "15",
|
||||
"ExpectedScore": 9,
|
||||
"ExpectedSelectedParent": "11",
|
||||
"ExpectedReds": [
|
||||
"1",
|
||||
"8"
|
||||
],
|
||||
"ExpectedBlues": [
|
||||
"11",
|
||||
"13"
|
||||
@@ -206,6 +229,7 @@
|
||||
"ID": "16",
|
||||
"ExpectedScore": 8,
|
||||
"ExpectedSelectedParent": "11",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"11"
|
||||
],
|
||||
@@ -217,6 +241,7 @@
|
||||
"ID": "17",
|
||||
"ExpectedScore": 9,
|
||||
"ExpectedSelectedParent": "14",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"14"
|
||||
],
|
||||
@@ -228,6 +253,7 @@
|
||||
"ID": "18",
|
||||
"ExpectedScore": 7,
|
||||
"ExpectedSelectedParent": "13",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"13"
|
||||
],
|
||||
@@ -239,6 +265,9 @@
|
||||
"ID": "19",
|
||||
"ExpectedScore": 10,
|
||||
"ExpectedSelectedParent": "15",
|
||||
"ExpectedReds": [
|
||||
"18"
|
||||
],
|
||||
"ExpectedBlues": [
|
||||
"15"
|
||||
],
|
||||
@@ -251,6 +280,13 @@
|
||||
"ID": "20",
|
||||
"ExpectedScore": 10,
|
||||
"ExpectedSelectedParent": "17",
|
||||
"ExpectedReds": [
|
||||
"6",
|
||||
"7",
|
||||
"9",
|
||||
"11",
|
||||
"16"
|
||||
],
|
||||
"ExpectedBlues": [
|
||||
"17"
|
||||
],
|
||||
@@ -263,6 +299,7 @@
|
||||
"ID": "21",
|
||||
"ExpectedScore": 12,
|
||||
"ExpectedSelectedParent": "20",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"20",
|
||||
"18"
|
||||
@@ -276,6 +313,10 @@
|
||||
"ID": "22",
|
||||
"ExpectedScore": 13,
|
||||
"ExpectedSelectedParent": "21",
|
||||
"ExpectedReds": [
|
||||
"15",
|
||||
"19"
|
||||
],
|
||||
"ExpectedBlues": [
|
||||
"21"
|
||||
],
|
||||
@@ -288,6 +329,10 @@
|
||||
"ID": "23",
|
||||
"ExpectedScore": 11,
|
||||
"ExpectedSelectedParent": "17",
|
||||
"ExpectedReds": [
|
||||
"6",
|
||||
"9"
|
||||
],
|
||||
"ExpectedBlues": [
|
||||
"17",
|
||||
"12"
|
||||
@@ -301,6 +346,11 @@
|
||||
"ID": "24",
|
||||
"ExpectedScore": 13,
|
||||
"ExpectedSelectedParent": "23",
|
||||
"ExpectedReds": [
|
||||
"7",
|
||||
"11",
|
||||
"16"
|
||||
],
|
||||
"ExpectedBlues": [
|
||||
"23",
|
||||
"20"
|
||||
@@ -314,6 +364,7 @@
|
||||
"ID": "25",
|
||||
"ExpectedScore": 13,
|
||||
"ExpectedSelectedParent": "21",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"21"
|
||||
],
|
||||
@@ -325,6 +376,11 @@
|
||||
"ID": "26",
|
||||
"ExpectedScore": 15,
|
||||
"ExpectedSelectedParent": "22",
|
||||
"ExpectedReds": [
|
||||
"12",
|
||||
"23",
|
||||
"24"
|
||||
],
|
||||
"ExpectedBlues": [
|
||||
"22",
|
||||
"25"
|
||||
@@ -339,6 +395,7 @@
|
||||
"ID": "27",
|
||||
"ExpectedScore": 9,
|
||||
"ExpectedSelectedParent": "16",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"16"
|
||||
],
|
||||
@@ -350,6 +407,10 @@
|
||||
"ID": "28",
|
||||
"ExpectedScore": 14,
|
||||
"ExpectedSelectedParent": "25",
|
||||
"ExpectedReds": [
|
||||
"12",
|
||||
"23"
|
||||
],
|
||||
"ExpectedBlues": [
|
||||
"25"
|
||||
],
|
||||
@@ -362,6 +423,7 @@
|
||||
"ID": "29",
|
||||
"ExpectedScore": 17,
|
||||
"ExpectedSelectedParent": "26",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"26",
|
||||
"28"
|
||||
@@ -375,6 +437,7 @@
|
||||
"ID": "30",
|
||||
"ExpectedScore": 10,
|
||||
"ExpectedSelectedParent": "27",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"27"
|
||||
],
|
||||
|
||||
126
domain/blockdag/testdata/dags/dag2.json
vendored
Normal file
126
domain/blockdag/testdata/dags/dag2.json
vendored
Normal file
@@ -0,0 +1,126 @@
|
||||
{
|
||||
"K": 18,
|
||||
"GenesisID": "786",
|
||||
"ExpectedReds": [],
|
||||
"Blocks": [
|
||||
{
|
||||
"ID": "21d",
|
||||
"ExpectedScore": 1,
|
||||
"ExpectedSelectedParent": "786",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"786"
|
||||
],
|
||||
"Parents": [
|
||||
"786"
|
||||
]
|
||||
},
|
||||
{
|
||||
"ID": "6ef",
|
||||
"ExpectedScore": 2,
|
||||
"ExpectedSelectedParent": "21d",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"21d"
|
||||
],
|
||||
"Parents": [
|
||||
"21d"
|
||||
]
|
||||
},
|
||||
{
|
||||
"ID": "c98",
|
||||
"ExpectedScore": 3,
|
||||
"ExpectedSelectedParent": "6ef",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"6ef"
|
||||
],
|
||||
"Parents": [
|
||||
"6ef"
|
||||
]
|
||||
},
|
||||
{
|
||||
"ID": "d1c",
|
||||
"ExpectedScore": 1,
|
||||
"ExpectedSelectedParent": "786",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"786"
|
||||
],
|
||||
"Parents": [
|
||||
"786"
|
||||
]
|
||||
},
|
||||
{
|
||||
"ID": "ec9",
|
||||
"ExpectedScore": 5,
|
||||
"ExpectedSelectedParent": "c98",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"c98",
|
||||
"d1c"
|
||||
],
|
||||
"Parents": [
|
||||
"d1c",
|
||||
"c98"
|
||||
]
|
||||
},
|
||||
{
|
||||
"ID": "154",
|
||||
"ExpectedScore": 1,
|
||||
"ExpectedSelectedParent": "786",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"786"
|
||||
],
|
||||
"Parents": [
|
||||
"786"
|
||||
]
|
||||
},
|
||||
{
|
||||
"ID": "6c7",
|
||||
"ExpectedScore": 4,
|
||||
"ExpectedSelectedParent": "154",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"154",
|
||||
"d1c",
|
||||
"21d"
|
||||
],
|
||||
"Parents": [
|
||||
"d1c",
|
||||
"21d",
|
||||
"154"
|
||||
]
|
||||
},
|
||||
{
|
||||
"ID": "015",
|
||||
"ExpectedScore": 8,
|
||||
"ExpectedSelectedParent": "ec9",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"ec9",
|
||||
"154",
|
||||
"6c7"
|
||||
],
|
||||
"Parents": [
|
||||
"ec9",
|
||||
"6c7"
|
||||
]
|
||||
},
|
||||
{
|
||||
"ID": "crash",
|
||||
"ExpectedScore": 6,
|
||||
"ExpectedSelectedParent": "6c7",
|
||||
"ExpectedReds": [],
|
||||
"ExpectedBlues": [
|
||||
"6c7",
|
||||
"6ef"
|
||||
],
|
||||
"Parents": [
|
||||
"6ef",
|
||||
"6c7"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -5,10 +5,13 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/domain/dagconfig"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/dagconfig"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
)
|
||||
|
||||
func buildNode(t *testing.T, dag *BlockDAG, parents blockSet) *blockNode {
|
||||
@@ -30,16 +33,16 @@ func buildNode(t *testing.T, dag *BlockDAG, parents blockSet) *blockNode {
|
||||
return nodeByMsgBlock(t, dag, block)
|
||||
}
|
||||
|
||||
// TestVirtualBlock ensures that VirtualBlock works as expected.
|
||||
func TestVirtualBlock(t *testing.T) {
|
||||
// TestTips ensures that tips are updated as expected.
|
||||
func TestTips(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
params := dagconfig.SimnetParams
|
||||
params.K = 1
|
||||
dag, teardownFunc, err := DAGSetup("TestVirtualBlock", true, Config{
|
||||
dag, teardownFunc, err := DAGSetup("TestTips", true, Config{
|
||||
DAGParams: ¶ms,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("TestVirtualBlock: Failed to setup DAG instance: %s", err)
|
||||
t.Fatalf("TestTips: Failed to setup DAG instance: %s", err)
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
@@ -71,13 +74,6 @@ func TestVirtualBlock(t *testing.T) {
|
||||
expectedTips blockSet
|
||||
expectedSelectedParent *blockNode
|
||||
}{
|
||||
{
|
||||
name: "empty virtual",
|
||||
tipsToSet: []*blockNode{},
|
||||
tipsToAdd: []*blockNode{},
|
||||
expectedTips: newBlockSet(),
|
||||
expectedSelectedParent: nil,
|
||||
},
|
||||
{
|
||||
name: "virtual with genesis tip",
|
||||
tipsToSet: []*blockNode{node0},
|
||||
@@ -93,38 +89,41 @@ func TestVirtualBlock(t *testing.T) {
|
||||
expectedSelectedParent: node1,
|
||||
},
|
||||
{
|
||||
name: "empty virtual, add a full DAG",
|
||||
tipsToSet: []*blockNode{},
|
||||
tipsToAdd: []*blockNode{node0, node1, node2, node3, node4, node5, node6},
|
||||
name: "virtual with genesis, add a full DAG",
|
||||
tipsToSet: []*blockNode{node0},
|
||||
tipsToAdd: []*blockNode{node1, node2, node3, node4, node5, node6},
|
||||
expectedTips: blockSetFromSlice(node2, node5, node6),
|
||||
expectedSelectedParent: node5,
|
||||
expectedSelectedParent: node6,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
// Create an empty VirtualBlock
|
||||
virtual := newVirtualBlock(dag, nil)
|
||||
|
||||
// Set the tips. This will be the initial state
|
||||
virtual.SetTips(blockSetFromSlice(test.tipsToSet...))
|
||||
_, _, err := dag.setTips(blockSetFromSlice(test.tipsToSet...))
|
||||
if err != nil {
|
||||
t.Fatalf("%s: Error setting tips: %+v", test.name, err)
|
||||
}
|
||||
|
||||
// Add all blockNodes in tipsToAdd in order
|
||||
for _, tipToAdd := range test.tipsToAdd {
|
||||
addNodeAsChildToParents(tipToAdd)
|
||||
virtual.AddTip(tipToAdd)
|
||||
_, _, err := dag.addTip(tipToAdd)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: Error adding tip: %+v", test.name, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that the virtual block's tips are now equal to expectedTips
|
||||
resultTips := virtual.tips()
|
||||
// Ensure that the dag's tips are now equal to expectedTips
|
||||
resultTips := dag.tips
|
||||
if !reflect.DeepEqual(resultTips, test.expectedTips) {
|
||||
t.Errorf("unexpected tips in test \"%s\". "+
|
||||
t.Errorf("%s: unexpected tips. "+
|
||||
"Expected: %v, got: %v.", test.name, test.expectedTips, resultTips)
|
||||
}
|
||||
|
||||
// Ensure that the virtual block's selectedParent is now equal to expectedSelectedParent
|
||||
resultSelectedTip := virtual.selectedParent
|
||||
resultSelectedTip := dag.virtual.selectedParent
|
||||
if !reflect.DeepEqual(resultSelectedTip, test.expectedSelectedParent) {
|
||||
t.Errorf("unexpected selected tip in test \"%s\". "+
|
||||
t.Errorf("%s: unexpected selected tip. "+
|
||||
"Expected: %v, got: %v.", test.name, test.expectedSelectedParent, resultSelectedTip)
|
||||
}
|
||||
}
|
||||
@@ -142,16 +141,18 @@ func TestSelectedPath(t *testing.T) {
|
||||
}
|
||||
defer teardownFunc()
|
||||
|
||||
// Create an empty VirtualBlock
|
||||
virtual := newVirtualBlock(dag, nil)
|
||||
|
||||
initialPath := blockSetFromSlice(dag.genesis)
|
||||
tip := dag.genesis
|
||||
virtual.AddTip(tip)
|
||||
initialPath := blockSetFromSlice(tip)
|
||||
for i := 0; i < 5; i++ {
|
||||
tip = buildNode(t, dag, blockSetFromSlice(tip))
|
||||
tipBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{tip.hash}, nil)
|
||||
|
||||
var ok bool
|
||||
tip, ok = dag.index.LookupNode(tipBlock.BlockHash())
|
||||
if !ok {
|
||||
t.Fatalf("Couldn't lookup node that was just added")
|
||||
}
|
||||
|
||||
initialPath.add(tip)
|
||||
virtual.AddTip(tip)
|
||||
}
|
||||
initialTip := tip
|
||||
|
||||
@@ -159,17 +160,18 @@ func TestSelectedPath(t *testing.T) {
|
||||
for i := 0; i < 5; i++ {
|
||||
tip = buildNode(t, dag, blockSetFromSlice(tip))
|
||||
firstPath.add(tip)
|
||||
virtual.AddTip(tip)
|
||||
}
|
||||
// For now we don't have any DAG, just chain, the selected path should include all the blocks on the chain.
|
||||
if !reflect.DeepEqual(virtual.selectedParentChainSet, firstPath) {
|
||||
t.Fatalf("TestSelectedPath: selectedPathSet doesn't include the expected values. got %v, want %v", virtual.selectedParent, firstPath)
|
||||
if !reflect.DeepEqual(dag.virtual.selectedParentChainSet, firstPath) {
|
||||
t.Fatalf("TestSelectedPath: selectedPathSet doesn't include the expected values. got %v, want %v",
|
||||
dag.virtual.selectedParent, firstPath)
|
||||
}
|
||||
// We expect that selectedParentChainSlice should have all the blocks we've added so far
|
||||
wantLen := 11
|
||||
gotLen := len(virtual.selectedParentChainSlice)
|
||||
gotLen := len(dag.virtual.selectedParentChainSlice)
|
||||
if wantLen != gotLen {
|
||||
t.Fatalf("TestSelectedPath: selectedParentChainSlice doesn't have the expected length. got %d, want %d", gotLen, wantLen)
|
||||
t.Fatalf("TestSelectedPath: selectedParentChainSlice doesn't have the expected length. got %d, want %d",
|
||||
gotLen, wantLen)
|
||||
}
|
||||
|
||||
secondPath := initialPath.clone()
|
||||
@@ -177,15 +179,15 @@ func TestSelectedPath(t *testing.T) {
|
||||
for i := 0; i < 100; i++ {
|
||||
tip = buildNode(t, dag, blockSetFromSlice(tip))
|
||||
secondPath.add(tip)
|
||||
virtual.AddTip(tip)
|
||||
}
|
||||
// Because we added a chain that is much longer than the previous chain, the selected path should be re-organized.
|
||||
if !reflect.DeepEqual(virtual.selectedParentChainSet, secondPath) {
|
||||
t.Fatalf("TestSelectedPath: selectedPathSet didn't handle the re-org as expected. got %v, want %v", virtual.selectedParent, firstPath)
|
||||
if !reflect.DeepEqual(dag.virtual.selectedParentChainSet, secondPath) {
|
||||
t.Fatalf("TestSelectedPath: selectedPathSet didn't handle the re-org as expected. got %v, want %v",
|
||||
dag.virtual.selectedParent, firstPath)
|
||||
}
|
||||
// We expect that selectedParentChainSlice should have all the blocks we've added so far except the old chain
|
||||
wantLen = 106
|
||||
gotLen = len(virtual.selectedParentChainSlice)
|
||||
gotLen = len(dag.virtual.selectedParentChainSlice)
|
||||
if wantLen != gotLen {
|
||||
t.Fatalf("TestSelectedPath: selectedParentChainSlice doesn't have"+
|
||||
"the expected length, possibly because it didn't handle the re-org as expected. got %d, want %d", gotLen, wantLen)
|
||||
@@ -194,15 +196,15 @@ func TestSelectedPath(t *testing.T) {
|
||||
tip = initialTip
|
||||
for i := 0; i < 3; i++ {
|
||||
tip = buildNode(t, dag, blockSetFromSlice(tip))
|
||||
virtual.AddTip(tip)
|
||||
}
|
||||
// Because we added a very short chain, the selected path should not be affected.
|
||||
if !reflect.DeepEqual(virtual.selectedParentChainSet, secondPath) {
|
||||
t.Fatalf("TestSelectedPath: selectedPathSet did an unexpected re-org. got %v, want %v", virtual.selectedParent, firstPath)
|
||||
if !reflect.DeepEqual(dag.virtual.selectedParentChainSet, secondPath) {
|
||||
t.Fatalf("TestSelectedPath: selectedPathSet did an unexpected re-org. got %v, want %v",
|
||||
dag.virtual.selectedParent, firstPath)
|
||||
}
|
||||
// We expect that selectedParentChainSlice not to change
|
||||
wantLen = 106
|
||||
gotLen = len(virtual.selectedParentChainSlice)
|
||||
gotLen = len(dag.virtual.selectedParentChainSlice)
|
||||
if wantLen != gotLen {
|
||||
t.Fatalf("TestSelectedPath: selectedParentChainSlice doesn't"+
|
||||
"have the expected length, possibly due to unexpected did an unexpected re-org. got %d, want %d", gotLen, wantLen)
|
||||
@@ -218,6 +220,12 @@ func TestSelectedPath(t *testing.T) {
|
||||
virtual2.updateSelectedParentSet(buildNode(t, dag, blockSetFromSlice()))
|
||||
}
|
||||
|
||||
// TestChainUpdates makes sure the chainUpdates from setTips are correct:
|
||||
// It creates two chains: a main-chain to be removed and a side-chain to be added
|
||||
// The main-chain has to be longer than the side-chain, so that the natural selected tip of the DAG is the one
|
||||
// from the main chain.
|
||||
// Then dag.setTip is called with the tip of the side-chain to artificially re-org the DAG, and verify
|
||||
// the chainUpdates return value is correct.
|
||||
func TestChainUpdates(t *testing.T) {
|
||||
// Create a new database and DAG instance to run tests against.
|
||||
params := dagconfig.SimnetParams
|
||||
@@ -232,18 +240,15 @@ func TestChainUpdates(t *testing.T) {
|
||||
|
||||
genesis := dag.genesis
|
||||
|
||||
// Create a chain to be removed
|
||||
// Create the main-chain to be removed
|
||||
var toBeRemovedNodes []*blockNode
|
||||
toBeRemovedTip := genesis
|
||||
for i := 0; i < 5; i++ {
|
||||
for i := 0; i < 9; i++ {
|
||||
toBeRemovedTip = buildNode(t, dag, blockSetFromSlice(toBeRemovedTip))
|
||||
toBeRemovedNodes = append(toBeRemovedNodes, toBeRemovedTip)
|
||||
}
|
||||
|
||||
// Create a VirtualBlock with the toBeRemoved chain
|
||||
virtual := newVirtualBlock(dag, blockSetFromSlice(toBeRemovedNodes...))
|
||||
|
||||
// Create a chain to be added
|
||||
// Create the side-chain to be added
|
||||
var toBeAddedNodes []*blockNode
|
||||
toBeAddedTip := genesis
|
||||
for i := 0; i < 8; i++ {
|
||||
@@ -251,8 +256,16 @@ func TestChainUpdates(t *testing.T) {
|
||||
toBeAddedNodes = append(toBeAddedNodes, toBeAddedTip)
|
||||
}
|
||||
|
||||
// Set the virtual tip to be the tip of the toBeAdded chain
|
||||
chainUpdates := virtual.setTips(blockSetFromSlice(toBeAddedTip))
|
||||
err = resolveNodeStatusForTest(toBeAddedTip)
|
||||
if err != nil {
|
||||
t.Fatalf("Error resolving status of toBeAddedTip: %+v", err)
|
||||
}
|
||||
|
||||
// Set the virtual tip to be the tip of the toBeAdded side-chain
|
||||
_, chainUpdates, err := dag.setTips(blockSetFromSlice(toBeAddedTip))
|
||||
if err != nil {
|
||||
t.Fatalf("Error setting tips: %+v", err)
|
||||
}
|
||||
|
||||
// Make sure that the removed blocks are as expected (in reverse order)
|
||||
if len(chainUpdates.removedChainBlockHashes) != len(toBeRemovedNodes) {
|
||||
@@ -270,7 +283,7 @@ func TestChainUpdates(t *testing.T) {
|
||||
// Make sure that the added blocks are as expected (in forward order)
|
||||
if len(chainUpdates.addedChainBlockHashes) != len(toBeAddedNodes) {
|
||||
t.Fatalf("TestChainUpdates: wrong added amount. "+
|
||||
"Got: %d, want: %d", len(chainUpdates.removedChainBlockHashes), len(toBeAddedNodes))
|
||||
"Got: %d, want: %d", len(chainUpdates.addedChainBlockHashes), len(toBeAddedNodes))
|
||||
}
|
||||
for i, addedHash := range chainUpdates.addedChainBlockHashes {
|
||||
correspondingAddedNode := toBeAddedNodes[i]
|
||||
@@ -157,9 +157,9 @@ func (diffStore *utxoDiffStore) clearDirtyEntries() {
|
||||
var maxBlueScoreDifferenceToKeepLoaded uint64 = 100
|
||||
|
||||
// clearOldEntries removes entries whose blue score is lower than
|
||||
// virtual.blueScore - maxBlueScoreDifferenceToKeepLoaded. Note
|
||||
// that tips are not removed either even if their blue score is
|
||||
// lower than the above.
|
||||
// virtual.blueScore - maxBlueScoreDifferenceToKeepLoaded.
|
||||
// Note that parents of virtual are not removed even
|
||||
// if their blue score is lower than the above.
|
||||
func (diffStore *utxoDiffStore) clearOldEntries() {
|
||||
diffStore.mtx.HighPriorityWriteLock()
|
||||
defer diffStore.mtx.HighPriorityWriteUnlock()
|
||||
@@ -170,11 +170,11 @@ func (diffStore *utxoDiffStore) clearOldEntries() {
|
||||
minBlueScore = 0
|
||||
}
|
||||
|
||||
tips := diffStore.dag.virtual.tips()
|
||||
virtualParents := diffStore.dag.virtual.parents
|
||||
|
||||
toRemove := make(map[*blockNode]struct{})
|
||||
for node := range diffStore.loaded {
|
||||
if node.blueScore < minBlueScore && !tips.contains(node) {
|
||||
if node.blueScore < minBlueScore && !virtualParents.contains(node) {
|
||||
toRemove[node] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -113,7 +113,7 @@ func TestClearOldEntries(t *testing.T) {
|
||||
// Add 10 blocks
|
||||
blockNodes := make([]*blockNode, 10)
|
||||
for i := 0; i < 10; i++ {
|
||||
processedBlock := PrepareAndProcessBlockForTest(t, dag, dag.TipHashes(), nil)
|
||||
processedBlock := PrepareAndProcessBlockForTest(t, dag, dag.VirtualParentHashes(), nil)
|
||||
|
||||
node, ok := dag.index.LookupNode(processedBlock.BlockHash())
|
||||
if !ok {
|
||||
@@ -132,7 +132,7 @@ func TestClearOldEntries(t *testing.T) {
|
||||
|
||||
// Add 10 more blocks on top of the others
|
||||
for i := 0; i < 10; i++ {
|
||||
PrepareAndProcessBlockForTest(t, dag, dag.TipHashes(), nil)
|
||||
PrepareAndProcessBlockForTest(t, dag, dag.VirtualParentHashes(), nil)
|
||||
}
|
||||
|
||||
// Make sure that all the old nodes no longer exist in the loaded set
|
||||
@@ -142,26 +142,4 @@ func TestClearOldEntries(t *testing.T) {
|
||||
t.Fatalf("TestClearOldEntries: diffData for node %s is in the loaded set", node.hash)
|
||||
}
|
||||
}
|
||||
|
||||
// Add a block on top of the genesis to force the retrieval of all diffData
|
||||
processedBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
|
||||
node, ok := dag.index.LookupNode(processedBlock.BlockHash())
|
||||
if !ok {
|
||||
t.Fatalf("TestClearOldEntries: missing blockNode for hash %s", processedBlock.BlockHash())
|
||||
}
|
||||
|
||||
// Make sure that the child-of-genesis node is in the loaded set, since it
|
||||
// is a tip.
|
||||
_, ok = dag.utxoDiffStore.loaded[node]
|
||||
if !ok {
|
||||
t.Fatalf("TestClearOldEntries: diffData for node %s is not in the loaded set", node.hash)
|
||||
}
|
||||
|
||||
// Make sure that all the old nodes still do not exist in the loaded set
|
||||
for _, node := range blockNodes {
|
||||
_, ok := dag.utxoDiffStore.loaded[node]
|
||||
if ok {
|
||||
t.Fatalf("TestClearOldEntries: diffData for node %s is in the loaded set", node.hash)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package blockdag
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/util/binaryserializer"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
@@ -168,21 +169,18 @@ var p2pkhUTXOEntrySerializeSize = 8 + 8 + appmessage.VarIntSerializeSize(25) + 2
|
||||
// serializeUTXOEntry encodes the entry to the given io.Writer and use compression if useCompression is true.
|
||||
// The compression format is described in detail above.
|
||||
func serializeUTXOEntry(w io.Writer, entry *UTXOEntry) error {
|
||||
buf := [8 + 1 + 8]byte{}
|
||||
// Encode the blueScore.
|
||||
err := binaryserializer.PutUint64(w, byteOrder, entry.blockBlueScore)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
binary.LittleEndian.PutUint64(buf[:8], entry.blockBlueScore)
|
||||
|
||||
// Encode the packedFlags.
|
||||
err = binaryserializer.PutUint8(w, uint8(entry.packedFlags))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
buf[8] = uint8(entry.packedFlags)
|
||||
|
||||
err = binaryserializer.PutUint64(w, byteOrder, entry.Amount())
|
||||
binary.LittleEndian.PutUint64(buf[9:], entry.Amount())
|
||||
|
||||
_, err := w.Write(buf[:])
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
err = appmessage.WriteVarInt(w, uint64(len(entry.ScriptPubKey())))
|
||||
|
||||
36
domain/blockdag/utxoio_test.go
Normal file
36
domain/blockdag/utxoio_test.go
Normal file
@@ -0,0 +1,36 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func Benchmark_serializeUTXO(b *testing.B) {
|
||||
entry := &UTXOEntry{
|
||||
amount: 5000000000,
|
||||
scriptPubKey: hexToBytes("76a914ad06dd6ddee55cbca9a9e3713bd7587509a3056488ac"), // p2pkh
|
||||
blockBlueScore: 1432432,
|
||||
packedFlags: 0,
|
||||
}
|
||||
outpoint := &appmessage.Outpoint{
|
||||
TxID: daghash.TxID{
|
||||
0x16, 0x5e, 0x38, 0xe8, 0xb3, 0x91, 0x45, 0x95,
|
||||
0xd9, 0xc6, 0x41, 0xf3, 0xb8, 0xee, 0xc2, 0xf3,
|
||||
0x46, 0x11, 0x89, 0x6b, 0x82, 0x1a, 0x68, 0x3b,
|
||||
0x7a, 0x4e, 0xde, 0xfe, 0x2c, 0x00, 0x00, 0x00,
|
||||
},
|
||||
Index: 0xffffffff,
|
||||
}
|
||||
|
||||
buf := bytes.NewBuffer(make([]byte, 8+1+8+9+len(entry.scriptPubKey)+len(outpoint.TxID)+4))
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
buf.Reset()
|
||||
err := serializeUTXO(buf, entry, outpoint)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,14 +1,17 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"strings"
|
||||
"unsafe"
|
||||
|
||||
"github.com/kaspanet/kaspad/infrastructure/db/dbaccess"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kaspanet/go-secp256k1"
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
)
|
||||
|
||||
@@ -157,7 +160,7 @@ func (uc utxoCollection) containsWithBlueScore(outpoint appmessage.Outpoint, blu
|
||||
|
||||
// clone returns a clone of this collection
|
||||
func (uc utxoCollection) clone() utxoCollection {
|
||||
clone := utxoCollection{}
|
||||
clone := make(utxoCollection, len(uc))
|
||||
for outpoint, entry := range uc {
|
||||
clone.add(outpoint, entry)
|
||||
}
|
||||
@@ -484,29 +487,27 @@ type UTXOSet interface {
|
||||
|
||||
// FullUTXOSet represents a full list of transaction outputs and their values
|
||||
type FullUTXOSet struct {
|
||||
utxoCollection
|
||||
utxoCache utxoCollection
|
||||
dbContext dbaccess.Context
|
||||
estimatedSize uint64
|
||||
maxUTXOCacheSize uint64
|
||||
outpointBuff *bytes.Buffer
|
||||
}
|
||||
|
||||
// NewFullUTXOSet creates a new utxoSet with full list of transaction outputs and their values
|
||||
func NewFullUTXOSet() *FullUTXOSet {
|
||||
return &FullUTXOSet{
|
||||
utxoCollection: utxoCollection{},
|
||||
utxoCache: utxoCollection{},
|
||||
}
|
||||
}
|
||||
|
||||
// newFullUTXOSetFromUTXOCollection converts a utxoCollection to a FullUTXOSet
|
||||
func newFullUTXOSetFromUTXOCollection(collection utxoCollection) (*FullUTXOSet, error) {
|
||||
var err error
|
||||
multiset := secp256k1.NewMultiset()
|
||||
for outpoint, utxoEntry := range collection {
|
||||
multiset, err = addUTXOToMultiset(multiset, utxoEntry, &outpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// NewFullUTXOSetFromContext creates a new utxoSet and map the data context with caching
|
||||
func NewFullUTXOSetFromContext(context dbaccess.Context, cacheSize uint64) *FullUTXOSet {
|
||||
return &FullUTXOSet{
|
||||
utxoCollection: collection,
|
||||
}, nil
|
||||
dbContext: context,
|
||||
maxUTXOCacheSize: cacheSize,
|
||||
utxoCache: make(utxoCollection),
|
||||
}
|
||||
}
|
||||
|
||||
// diffFrom returns the difference between this utxoSet and another
|
||||
@@ -564,15 +565,93 @@ func (fus *FullUTXOSet) containsInputs(tx *appmessage.MsgTx) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// contains returns a boolean value indicating whether a UTXO entry is in the set
|
||||
func (fus *FullUTXOSet) contains(outpoint appmessage.Outpoint) bool {
|
||||
_, ok := fus.Get(outpoint)
|
||||
return ok
|
||||
}
|
||||
|
||||
// clone returns a clone of this utxoSet
|
||||
func (fus *FullUTXOSet) clone() UTXOSet {
|
||||
return &FullUTXOSet{utxoCollection: fus.utxoCollection.clone()}
|
||||
return &FullUTXOSet{
|
||||
utxoCache: fus.utxoCache.clone(),
|
||||
dbContext: fus.dbContext,
|
||||
estimatedSize: fus.estimatedSize,
|
||||
maxUTXOCacheSize: fus.maxUTXOCacheSize,
|
||||
}
|
||||
}
|
||||
|
||||
// get returns the UTXOEntry associated with the given Outpoint, and a boolean indicating if such entry was found
|
||||
func (fus *FullUTXOSet) get(outpoint appmessage.Outpoint) (*UTXOEntry, bool) {
|
||||
return fus.Get(outpoint)
|
||||
}
|
||||
|
||||
// getSizeOfUTXOEntryAndOutpoint returns estimated size of UTXOEntry & Outpoint in bytes
|
||||
func getSizeOfUTXOEntryAndOutpoint(entry *UTXOEntry) uint64 {
|
||||
const staticSize = uint64(unsafe.Sizeof(UTXOEntry{}) + unsafe.Sizeof(appmessage.Outpoint{}))
|
||||
return staticSize + uint64(len(entry.scriptPubKey))
|
||||
}
|
||||
|
||||
// checkAndCleanCachedData checks the FullUTXOSet estimated size and clean it if it reaches the limit
|
||||
func (fus *FullUTXOSet) checkAndCleanCachedData() {
|
||||
if fus.estimatedSize > fus.maxUTXOCacheSize {
|
||||
fus.utxoCache = make(utxoCollection)
|
||||
fus.estimatedSize = 0
|
||||
}
|
||||
}
|
||||
|
||||
// add adds a new UTXO entry to this FullUTXOSet
|
||||
func (fus *FullUTXOSet) add(outpoint appmessage.Outpoint, entry *UTXOEntry) {
|
||||
fus.utxoCache[outpoint] = entry
|
||||
fus.estimatedSize += getSizeOfUTXOEntryAndOutpoint(entry)
|
||||
fus.checkAndCleanCachedData()
|
||||
}
|
||||
|
||||
// remove removes a UTXO entry from this collection if it exists
|
||||
func (fus *FullUTXOSet) remove(outpoint appmessage.Outpoint) {
|
||||
entry, ok := fus.utxoCache.get(outpoint)
|
||||
if ok {
|
||||
delete(fus.utxoCache, outpoint)
|
||||
fus.estimatedSize -= getSizeOfUTXOEntryAndOutpoint(entry)
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns the UTXOEntry associated with the given Outpoint, and a boolean indicating if such entry was found
|
||||
// If the UTXOEntry doesn't not exist in the memory then check in the database
|
||||
func (fus *FullUTXOSet) Get(outpoint appmessage.Outpoint) (*UTXOEntry, bool) {
|
||||
utxoEntry, ok := fus.utxoCollection[outpoint]
|
||||
return utxoEntry, ok
|
||||
utxoEntry, ok := fus.utxoCache[outpoint]
|
||||
if ok {
|
||||
return utxoEntry, ok
|
||||
}
|
||||
|
||||
if fus.outpointBuff == nil {
|
||||
fus.outpointBuff = bytes.NewBuffer(make([]byte, outpointSerializeSize))
|
||||
}
|
||||
|
||||
fus.outpointBuff.Reset()
|
||||
err := serializeOutpoint(fus.outpointBuff, &outpoint)
|
||||
if err != nil {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
key := fus.outpointBuff.Bytes()
|
||||
value, err := dbaccess.GetFromUTXOSet(fus.dbContext, key)
|
||||
|
||||
if err != nil {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
entry, err := deserializeUTXOEntry(bytes.NewReader(value))
|
||||
if err != nil {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
fus.add(outpoint, entry)
|
||||
return entry, true
|
||||
}
|
||||
|
||||
func (fus *FullUTXOSet) String() string {
|
||||
return fus.utxoCache.String()
|
||||
}
|
||||
|
||||
// DiffUTXOSet represents a utxoSet with a base fullUTXOSet and a UTXODiff
|
||||
|
||||
@@ -1,15 +1,47 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/infrastructure/config"
|
||||
"github.com/kaspanet/kaspad/infrastructure/db/dbaccess"
|
||||
"github.com/kaspanet/kaspad/util/subnetworkid"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
)
|
||||
|
||||
func prepareDatabaseForTest(t *testing.T, testName string) (*dbaccess.DatabaseContext, func()) {
|
||||
var err error
|
||||
tmpDir, err := ioutil.TempDir("", "utxoset_test")
|
||||
if err != nil {
|
||||
t.Fatalf("error creating temp dir: %s", err)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
dbPath := filepath.Join(tmpDir, testName)
|
||||
_ = os.RemoveAll(dbPath)
|
||||
databaseContext, err := dbaccess.New(dbPath)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating db: %s", err)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Setup a teardown function for cleaning up. This function is
|
||||
// returned to the caller to be invoked when it is done testing.
|
||||
teardown := func() {
|
||||
databaseContext.Close()
|
||||
os.RemoveAll(dbPath)
|
||||
}
|
||||
|
||||
return databaseContext, teardown
|
||||
|
||||
}
|
||||
|
||||
// TestUTXOCollection makes sure that utxoCollection cloning and string representations work as expected.
|
||||
func TestUTXOCollection(t *testing.T) {
|
||||
txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
|
||||
@@ -619,7 +651,7 @@ func (d *UTXODiff) equal(other *UTXODiff) bool {
|
||||
}
|
||||
|
||||
func (fus *FullUTXOSet) equal(other *FullUTXOSet) bool {
|
||||
return reflect.DeepEqual(fus.utxoCollection, other.utxoCollection)
|
||||
return reflect.DeepEqual(fus.utxoCache, other.utxoCache)
|
||||
}
|
||||
|
||||
func (dus *DiffUTXOSet) equal(other *DiffUTXOSet) bool {
|
||||
@@ -642,7 +674,10 @@ func TestFullUTXOSet(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test fullUTXOSet creation
|
||||
emptySet := NewFullUTXOSet()
|
||||
fullUTXOCacheSize := config.DefaultConfig().MaxUTXOCacheSize
|
||||
db, teardown := prepareDatabaseForTest(t, "TestDiffUTXOSet")
|
||||
defer teardown()
|
||||
emptySet := NewFullUTXOSetFromContext(db, fullUTXOCacheSize)
|
||||
if len(emptySet.collection()) != 0 {
|
||||
t.Errorf("new set is not empty")
|
||||
}
|
||||
@@ -668,7 +703,8 @@ func TestFullUTXOSet(t *testing.T) {
|
||||
} else if isAccepted {
|
||||
t.Errorf("addTx unexpectedly succeeded")
|
||||
}
|
||||
emptySet = &FullUTXOSet{utxoCollection: utxoCollection{outpoint0: utxoEntry0}}
|
||||
emptySet = NewFullUTXOSetFromContext(db, fullUTXOCacheSize)
|
||||
emptySet.add(outpoint0, utxoEntry0)
|
||||
if isAccepted, err := emptySet.AddTx(transaction0, 0); err != nil {
|
||||
t.Errorf("addTx unexpectedly failed. Error: %s", err)
|
||||
} else if !isAccepted {
|
||||
@@ -676,7 +712,7 @@ func TestFullUTXOSet(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test fullUTXOSet collection
|
||||
if !reflect.DeepEqual(emptySet.collection(), emptySet.utxoCollection) {
|
||||
if !reflect.DeepEqual(emptySet.collection(), emptySet.utxoCache) {
|
||||
t.Errorf("collection does not equal the set's utxoCollection")
|
||||
}
|
||||
|
||||
@@ -704,9 +740,12 @@ func TestDiffUTXOSet(t *testing.T) {
|
||||
toAdd: utxoCollection{outpoint0: utxoEntry0},
|
||||
toRemove: utxoCollection{outpoint1: utxoEntry1},
|
||||
}
|
||||
fullUTXOCacheSize := config.DefaultConfig().MaxUTXOCacheSize
|
||||
db, teardown := prepareDatabaseForTest(t, "TestDiffUTXOSet")
|
||||
defer teardown()
|
||||
|
||||
// Test diffUTXOSet creation
|
||||
emptySet := NewDiffUTXOSet(NewFullUTXOSet(), NewUTXODiff())
|
||||
emptySet := NewDiffUTXOSet(NewFullUTXOSetFromContext(db, fullUTXOCacheSize), NewUTXODiff())
|
||||
if collection, err := emptySet.collection(); err != nil {
|
||||
t.Errorf("Error getting emptySet collection: %s", err)
|
||||
} else if len(collection) != 0 {
|
||||
@@ -726,7 +765,7 @@ func TestDiffUTXOSet(t *testing.T) {
|
||||
if !reflect.DeepEqual(withDiffUTXOSet.base, emptySet.base) || !reflect.DeepEqual(withDiffUTXOSet.UTXODiff, withDiff) {
|
||||
t.Errorf("WithDiff is of unexpected composition")
|
||||
}
|
||||
_, err = NewDiffUTXOSet(NewFullUTXOSet(), diff).WithDiff(diff)
|
||||
_, err = NewDiffUTXOSet(NewFullUTXOSetFromContext(db, fullUTXOCacheSize), diff).WithDiff(diff)
|
||||
if err == nil {
|
||||
t.Errorf("WithDiff unexpectedly succeeded")
|
||||
}
|
||||
@@ -748,14 +787,14 @@ func TestDiffUTXOSet(t *testing.T) {
|
||||
{
|
||||
name: "empty base, empty diff",
|
||||
diffSet: &DiffUTXOSet{
|
||||
base: NewFullUTXOSet(),
|
||||
base: NewFullUTXOSetFromContext(db, fullUTXOCacheSize),
|
||||
UTXODiff: &UTXODiff{
|
||||
toAdd: utxoCollection{},
|
||||
toRemove: utxoCollection{},
|
||||
},
|
||||
},
|
||||
expectedMeldSet: &DiffUTXOSet{
|
||||
base: NewFullUTXOSet(),
|
||||
base: NewFullUTXOSetFromContext(db, fullUTXOCacheSize),
|
||||
UTXODiff: &UTXODiff{
|
||||
toAdd: utxoCollection{},
|
||||
toRemove: utxoCollection{},
|
||||
@@ -767,14 +806,18 @@ func TestDiffUTXOSet(t *testing.T) {
|
||||
{
|
||||
name: "empty base, one member in diff toAdd",
|
||||
diffSet: &DiffUTXOSet{
|
||||
base: NewFullUTXOSet(),
|
||||
base: NewFullUTXOSetFromContext(db, fullUTXOCacheSize),
|
||||
UTXODiff: &UTXODiff{
|
||||
toAdd: utxoCollection{outpoint0: utxoEntry0},
|
||||
toRemove: utxoCollection{},
|
||||
},
|
||||
},
|
||||
expectedMeldSet: &DiffUTXOSet{
|
||||
base: &FullUTXOSet{utxoCollection: utxoCollection{outpoint0: utxoEntry0}},
|
||||
base: func() *FullUTXOSet {
|
||||
futxo := NewFullUTXOSetFromContext(db, fullUTXOCacheSize)
|
||||
futxo.add(outpoint0, utxoEntry0)
|
||||
return futxo
|
||||
}(),
|
||||
UTXODiff: &UTXODiff{
|
||||
toAdd: utxoCollection{},
|
||||
toRemove: utxoCollection{},
|
||||
@@ -786,7 +829,7 @@ func TestDiffUTXOSet(t *testing.T) {
|
||||
{
|
||||
name: "empty base, one member in diff toRemove",
|
||||
diffSet: &DiffUTXOSet{
|
||||
base: NewFullUTXOSet(),
|
||||
base: NewFullUTXOSetFromContext(db, fullUTXOCacheSize),
|
||||
UTXODiff: &UTXODiff{
|
||||
toAdd: utxoCollection{},
|
||||
toRemove: utxoCollection{outpoint0: utxoEntry0},
|
||||
@@ -800,19 +843,23 @@ func TestDiffUTXOSet(t *testing.T) {
|
||||
{
|
||||
name: "one member in base toAdd, one member in diff toAdd",
|
||||
diffSet: &DiffUTXOSet{
|
||||
base: &FullUTXOSet{utxoCollection: utxoCollection{outpoint0: utxoEntry0}},
|
||||
base: func() *FullUTXOSet {
|
||||
futxo := NewFullUTXOSetFromContext(db, fullUTXOCacheSize)
|
||||
futxo.add(outpoint0, utxoEntry0)
|
||||
return futxo
|
||||
}(),
|
||||
UTXODiff: &UTXODiff{
|
||||
toAdd: utxoCollection{outpoint1: utxoEntry1},
|
||||
toRemove: utxoCollection{},
|
||||
},
|
||||
},
|
||||
expectedMeldSet: &DiffUTXOSet{
|
||||
base: &FullUTXOSet{
|
||||
utxoCollection: utxoCollection{
|
||||
outpoint0: utxoEntry0,
|
||||
outpoint1: utxoEntry1,
|
||||
},
|
||||
},
|
||||
base: func() *FullUTXOSet {
|
||||
futxo := NewFullUTXOSetFromContext(db, fullUTXOCacheSize)
|
||||
futxo.add(outpoint0, utxoEntry0)
|
||||
futxo.add(outpoint1, utxoEntry1)
|
||||
return futxo
|
||||
}(),
|
||||
UTXODiff: &UTXODiff{
|
||||
toAdd: utxoCollection{},
|
||||
toRemove: utxoCollection{},
|
||||
@@ -827,16 +874,18 @@ func TestDiffUTXOSet(t *testing.T) {
|
||||
{
|
||||
name: "one member in base toAdd, same one member in diff toRemove",
|
||||
diffSet: &DiffUTXOSet{
|
||||
base: &FullUTXOSet{utxoCollection: utxoCollection{outpoint0: utxoEntry0}},
|
||||
base: func() *FullUTXOSet {
|
||||
futxo := NewFullUTXOSetFromContext(db, fullUTXOCacheSize)
|
||||
futxo.add(outpoint0, utxoEntry0)
|
||||
return futxo
|
||||
}(),
|
||||
UTXODiff: &UTXODiff{
|
||||
toAdd: utxoCollection{},
|
||||
toRemove: utxoCollection{outpoint0: utxoEntry0},
|
||||
},
|
||||
},
|
||||
expectedMeldSet: &DiffUTXOSet{
|
||||
base: &FullUTXOSet{
|
||||
utxoCollection: utxoCollection{},
|
||||
},
|
||||
base: NewFullUTXOSetFromContext(db, fullUTXOCacheSize),
|
||||
UTXODiff: &UTXODiff{
|
||||
toAdd: utxoCollection{},
|
||||
toRemove: utxoCollection{},
|
||||
@@ -949,6 +998,9 @@ func TestDiffUTXOSet_addTx(t *testing.T) {
|
||||
txOut0 := &appmessage.TxOut{ScriptPubKey: []byte{0}, Value: 10}
|
||||
utxoEntry0 := NewUTXOEntry(txOut0, true, 0)
|
||||
coinbaseTX := appmessage.NewSubnetworkMsgTx(1, []*appmessage.TxIn{}, []*appmessage.TxOut{txOut0}, subnetworkid.SubnetworkIDCoinbase, 0, nil)
|
||||
fullUTXOCacheSize := config.DefaultConfig().MaxUTXOCacheSize
|
||||
db, teardown := prepareDatabaseForTest(t, "TestDiffUTXOSet")
|
||||
defer teardown()
|
||||
|
||||
// transaction1 spends coinbaseTX
|
||||
id1 := coinbaseTX.TxID()
|
||||
@@ -982,11 +1034,11 @@ func TestDiffUTXOSet_addTx(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
name: "add coinbase transaction to empty set",
|
||||
startSet: NewDiffUTXOSet(NewFullUTXOSet(), NewUTXODiff()),
|
||||
startSet: NewDiffUTXOSet(NewFullUTXOSetFromContext(db, fullUTXOCacheSize), NewUTXODiff()),
|
||||
startHeight: 0,
|
||||
toAdd: []*appmessage.MsgTx{coinbaseTX},
|
||||
expectedSet: &DiffUTXOSet{
|
||||
base: &FullUTXOSet{utxoCollection: utxoCollection{}},
|
||||
base: NewFullUTXOSetFromContext(db, fullUTXOCacheSize),
|
||||
UTXODiff: &UTXODiff{
|
||||
toAdd: utxoCollection{outpoint1: utxoEntry0},
|
||||
toRemove: utxoCollection{},
|
||||
@@ -995,11 +1047,11 @@ func TestDiffUTXOSet_addTx(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "add regular transaction to empty set",
|
||||
startSet: NewDiffUTXOSet(NewFullUTXOSet(), NewUTXODiff()),
|
||||
startSet: NewDiffUTXOSet(NewFullUTXOSetFromContext(db, fullUTXOCacheSize), NewUTXODiff()),
|
||||
startHeight: 0,
|
||||
toAdd: []*appmessage.MsgTx{transaction1},
|
||||
expectedSet: &DiffUTXOSet{
|
||||
base: &FullUTXOSet{utxoCollection: utxoCollection{}},
|
||||
base: NewFullUTXOSetFromContext(db, fullUTXOCacheSize),
|
||||
UTXODiff: &UTXODiff{
|
||||
toAdd: utxoCollection{},
|
||||
toRemove: utxoCollection{},
|
||||
@@ -1009,7 +1061,11 @@ func TestDiffUTXOSet_addTx(t *testing.T) {
|
||||
{
|
||||
name: "add transaction to set with its input in base",
|
||||
startSet: &DiffUTXOSet{
|
||||
base: &FullUTXOSet{utxoCollection: utxoCollection{outpoint1: utxoEntry0}},
|
||||
base: func() *FullUTXOSet {
|
||||
futxo := NewFullUTXOSetFromContext(db, fullUTXOCacheSize)
|
||||
futxo.add(outpoint1, utxoEntry0)
|
||||
return futxo
|
||||
}(),
|
||||
UTXODiff: &UTXODiff{
|
||||
toAdd: utxoCollection{},
|
||||
toRemove: utxoCollection{},
|
||||
@@ -1018,7 +1074,11 @@ func TestDiffUTXOSet_addTx(t *testing.T) {
|
||||
startHeight: 1,
|
||||
toAdd: []*appmessage.MsgTx{transaction1},
|
||||
expectedSet: &DiffUTXOSet{
|
||||
base: &FullUTXOSet{utxoCollection: utxoCollection{outpoint1: utxoEntry0}},
|
||||
base: func() *FullUTXOSet {
|
||||
futxo := NewFullUTXOSetFromContext(db, fullUTXOCacheSize)
|
||||
futxo.add(outpoint1, utxoEntry0)
|
||||
return futxo
|
||||
}(),
|
||||
UTXODiff: &UTXODiff{
|
||||
toAdd: utxoCollection{outpoint2: utxoEntry1},
|
||||
toRemove: utxoCollection{outpoint1: utxoEntry0},
|
||||
@@ -1028,7 +1088,7 @@ func TestDiffUTXOSet_addTx(t *testing.T) {
|
||||
{
|
||||
name: "add transaction to set with its input in diff toAdd",
|
||||
startSet: &DiffUTXOSet{
|
||||
base: NewFullUTXOSet(),
|
||||
base: NewFullUTXOSetFromContext(db, fullUTXOCacheSize),
|
||||
UTXODiff: &UTXODiff{
|
||||
toAdd: utxoCollection{outpoint1: utxoEntry0},
|
||||
toRemove: utxoCollection{},
|
||||
@@ -1037,7 +1097,7 @@ func TestDiffUTXOSet_addTx(t *testing.T) {
|
||||
startHeight: 1,
|
||||
toAdd: []*appmessage.MsgTx{transaction1},
|
||||
expectedSet: &DiffUTXOSet{
|
||||
base: NewFullUTXOSet(),
|
||||
base: NewFullUTXOSetFromContext(db, fullUTXOCacheSize),
|
||||
UTXODiff: &UTXODiff{
|
||||
toAdd: utxoCollection{outpoint2: utxoEntry1},
|
||||
toRemove: utxoCollection{},
|
||||
@@ -1047,7 +1107,7 @@ func TestDiffUTXOSet_addTx(t *testing.T) {
|
||||
{
|
||||
name: "add transaction to set with its input in diff toAdd and its output in diff toRemove",
|
||||
startSet: &DiffUTXOSet{
|
||||
base: NewFullUTXOSet(),
|
||||
base: NewFullUTXOSetFromContext(db, fullUTXOCacheSize),
|
||||
UTXODiff: &UTXODiff{
|
||||
toAdd: utxoCollection{outpoint1: utxoEntry0},
|
||||
toRemove: utxoCollection{outpoint2: utxoEntry1},
|
||||
@@ -1056,7 +1116,7 @@ func TestDiffUTXOSet_addTx(t *testing.T) {
|
||||
startHeight: 1,
|
||||
toAdd: []*appmessage.MsgTx{transaction1},
|
||||
expectedSet: &DiffUTXOSet{
|
||||
base: NewFullUTXOSet(),
|
||||
base: NewFullUTXOSetFromContext(db, fullUTXOCacheSize),
|
||||
UTXODiff: &UTXODiff{
|
||||
toAdd: utxoCollection{},
|
||||
toRemove: utxoCollection{},
|
||||
@@ -1066,7 +1126,11 @@ func TestDiffUTXOSet_addTx(t *testing.T) {
|
||||
{
|
||||
name: "add two transactions, one spending the other, to set with the first input in base",
|
||||
startSet: &DiffUTXOSet{
|
||||
base: &FullUTXOSet{utxoCollection: utxoCollection{outpoint1: utxoEntry0}},
|
||||
base: func() *FullUTXOSet {
|
||||
futxo := NewFullUTXOSetFromContext(db, fullUTXOCacheSize)
|
||||
futxo.add(outpoint1, utxoEntry0)
|
||||
return futxo
|
||||
}(),
|
||||
UTXODiff: &UTXODiff{
|
||||
toAdd: utxoCollection{},
|
||||
toRemove: utxoCollection{},
|
||||
@@ -1075,7 +1139,11 @@ func TestDiffUTXOSet_addTx(t *testing.T) {
|
||||
startHeight: 1,
|
||||
toAdd: []*appmessage.MsgTx{transaction1, transaction2},
|
||||
expectedSet: &DiffUTXOSet{
|
||||
base: &FullUTXOSet{utxoCollection: utxoCollection{outpoint1: utxoEntry0}},
|
||||
base: func() *FullUTXOSet {
|
||||
futxo := NewFullUTXOSetFromContext(db, fullUTXOCacheSize)
|
||||
futxo.add(outpoint1, utxoEntry0)
|
||||
return futxo
|
||||
}(),
|
||||
UTXODiff: &UTXODiff{
|
||||
toAdd: utxoCollection{outpoint3: utxoEntry2},
|
||||
toRemove: utxoCollection{outpoint1: utxoEntry0},
|
||||
@@ -1108,7 +1176,7 @@ testLoop:
|
||||
|
||||
// collection returns a collection of all UTXOs in this set
|
||||
func (fus *FullUTXOSet) collection() utxoCollection {
|
||||
return fus.utxoCollection.clone()
|
||||
return fus.utxoCache.clone()
|
||||
}
|
||||
|
||||
// collection returns a collection of all UTXOs in this set
|
||||
|
||||
@@ -6,10 +6,11 @@ package blockdag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/kaspanet/go-secp256k1"
|
||||
"math"
|
||||
"sort"
|
||||
|
||||
"github.com/kaspanet/go-secp256k1"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/dagconfig"
|
||||
"github.com/kaspanet/kaspad/domain/txscript"
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
@@ -43,6 +44,9 @@ const (
|
||||
// MassPerSigOp is the number of grams that any
|
||||
// signature operation adds to a transaction.
|
||||
MassPerSigOp = 10000
|
||||
|
||||
// mergeSetSizeLimit is the maximum allowed merge set size for a block.
|
||||
mergeSetSizeLimit = 1000
|
||||
)
|
||||
|
||||
// isNullOutpoint determines whether or not a previous transaction outpoint
|
||||
@@ -85,13 +89,13 @@ func IsFinalizedTransaction(tx *util.Tx, blockBlueScore uint64, blockTime mstime
|
||||
// which the transaction is finalized or a timestamp depending on if the
|
||||
// value is before the txscript.LockTimeThreshold. When it is under the
|
||||
// threshold it is a block blue score.
|
||||
blockTimeOrBlueScore := int64(0)
|
||||
blockTimeOrBlueScore := uint64(0)
|
||||
if lockTime < txscript.LockTimeThreshold {
|
||||
blockTimeOrBlueScore = int64(blockBlueScore)
|
||||
blockTimeOrBlueScore = blockBlueScore
|
||||
} else {
|
||||
blockTimeOrBlueScore = blockTime.UnixMilliseconds()
|
||||
blockTimeOrBlueScore = uint64(blockTime.UnixMilliseconds())
|
||||
}
|
||||
if int64(lockTime) < blockTimeOrBlueScore {
|
||||
if lockTime < blockTimeOrBlueScore {
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -349,44 +353,29 @@ func (dag *BlockDAG) checkProofOfWork(header *appmessage.BlockHeader, flags Beha
|
||||
// ValidateTxMass makes sure that the given transaction's mass does not exceed
|
||||
// the maximum allowed limit. Currently, it is equivalent to the block mass limit.
|
||||
// See CalcTxMass for further details.
|
||||
func ValidateTxMass(tx *util.Tx, utxoSet UTXOSet) error {
|
||||
txMass, err := CalcTxMassFromUTXOSet(tx, utxoSet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if txMass > appmessage.MaxMassPerBlock {
|
||||
func ValidateTxMass(tx *util.Tx, referencedUTXOEntries []*UTXOEntry) (txMass uint64, err error) {
|
||||
txMass = calcTxMassFromReferencedUTXOEntries(tx, referencedUTXOEntries)
|
||||
if txMass > appmessage.MaxMassAcceptedByBlock {
|
||||
str := fmt.Sprintf("tx %s has mass %d, which is above the "+
|
||||
"allowed limit of %d", tx.ID(), txMass, appmessage.MaxMassPerBlock)
|
||||
return ruleError(ErrTxMassTooHigh, str)
|
||||
"allowed limit of %d", tx.ID(), txMass, appmessage.MaxMassAcceptedByBlock)
|
||||
return 0, ruleError(ErrTxMassTooHigh, str)
|
||||
}
|
||||
return nil
|
||||
return txMass, nil
|
||||
}
|
||||
|
||||
func validateBlockMass(pastUTXO UTXOSet, transactions []*util.Tx) error {
|
||||
_, err := CalcBlockMass(pastUTXO, transactions)
|
||||
return err
|
||||
}
|
||||
func calcTxMassFromReferencedUTXOEntries(
|
||||
tx *util.Tx, referencedUTXOEntries []*UTXOEntry) uint64 {
|
||||
|
||||
// CalcBlockMass sums up and returns the "mass" of a block. See CalcTxMass
|
||||
// for further details.
|
||||
func CalcBlockMass(pastUTXO UTXOSet, transactions []*util.Tx) (uint64, error) {
|
||||
totalMass := uint64(0)
|
||||
for _, tx := range transactions {
|
||||
txMass, err := CalcTxMassFromUTXOSet(tx, pastUTXO)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
totalMass += txMass
|
||||
|
||||
// We could potentially overflow the accumulator so check for
|
||||
// overflow as well.
|
||||
if totalMass < txMass || totalMass > appmessage.MaxMassPerBlock {
|
||||
str := fmt.Sprintf("block has total mass %d, which is "+
|
||||
"above the allowed limit of %d", totalMass, appmessage.MaxMassPerBlock)
|
||||
return 0, ruleError(ErrBlockMassTooHigh, str)
|
||||
}
|
||||
if tx.IsCoinBase() {
|
||||
return calcCoinbaseTxMass(tx)
|
||||
}
|
||||
return totalMass, nil
|
||||
|
||||
previousScriptPubKeys := make([][]byte, 0, len(tx.MsgTx().TxIn))
|
||||
|
||||
for _, utxoEntry := range referencedUTXOEntries {
|
||||
previousScriptPubKeys = append(previousScriptPubKeys, utxoEntry.ScriptPubKey())
|
||||
}
|
||||
return CalcTxMass(tx, previousScriptPubKeys)
|
||||
}
|
||||
|
||||
// CalcTxMassFromUTXOSet calculates the transaction mass based on the
|
||||
@@ -395,7 +384,7 @@ func CalcBlockMass(pastUTXO UTXOSet, transactions []*util.Tx) (uint64, error) {
|
||||
// See CalcTxMass for more details.
|
||||
func CalcTxMassFromUTXOSet(tx *util.Tx, utxoSet UTXOSet) (uint64, error) {
|
||||
if tx.IsCoinBase() {
|
||||
return CalcTxMass(tx, nil), nil
|
||||
return calcCoinbaseTxMass(tx), nil
|
||||
}
|
||||
previousScriptPubKeys := make([][]byte, len(tx.MsgTx().TxIn))
|
||||
for txInIndex, txIn := range tx.MsgTx().TxIn {
|
||||
@@ -412,6 +401,10 @@ func CalcTxMassFromUTXOSet(tx *util.Tx, utxoSet UTXOSet) (uint64, error) {
|
||||
return CalcTxMass(tx, previousScriptPubKeys), nil
|
||||
}
|
||||
|
||||
func calcCoinbaseTxMass(tx *util.Tx) uint64 {
|
||||
return CalcTxMass(tx, nil)
|
||||
}
|
||||
|
||||
// CalcTxMass sums up and returns the "mass" of a transaction. This number
|
||||
// is an approximation of how many resources (CPU, RAM, etc.) it would take
|
||||
// to process the transaction.
|
||||
@@ -562,9 +555,9 @@ func (dag *BlockDAG) checkBlockContainsLessThanMaxBlockMassTransactions(block *u
|
||||
// else it is certainly over the block mass limit.
|
||||
transactions := block.Transactions()
|
||||
numTx := len(transactions)
|
||||
if numTx > appmessage.MaxMassPerBlock {
|
||||
if numTx > appmessage.MaxMassAcceptedByBlock {
|
||||
str := fmt.Sprintf("block contains too many transactions - "+
|
||||
"got %d, max %d", numTx, appmessage.MaxMassPerBlock)
|
||||
"got %d, max %d", numTx, appmessage.MaxMassAcceptedByBlock)
|
||||
return ruleError(ErrBlockMassTooHigh, str)
|
||||
}
|
||||
return nil
|
||||
@@ -687,18 +680,18 @@ func (dag *BlockDAG) checkBlockHeaderContext(header *appmessage.BlockHeader, blu
|
||||
return err
|
||||
}
|
||||
|
||||
if err := validateMedianTime(dag, header, bluestParent); err != nil {
|
||||
if err := validateMedianTime(header, bluestParent); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateMedianTime(dag *BlockDAG, header *appmessage.BlockHeader, bluestParent *blockNode) error {
|
||||
func validateMedianTime(header *appmessage.BlockHeader, bluestParent *blockNode) error {
|
||||
if !header.IsGenesis() {
|
||||
// Ensure the timestamp for the block header is not before the
|
||||
// median time of the last several blocks (medianTimeBlocks).
|
||||
medianTime := bluestParent.PastMedianTime(dag)
|
||||
medianTime := bluestParent.PastMedianTime()
|
||||
if header.Timestamp.Before(medianTime) {
|
||||
str := fmt.Sprintf("block timestamp of %s is not after expected %s", header.Timestamp, medianTime)
|
||||
return ruleError(ErrTimeTooOld, str)
|
||||
@@ -725,15 +718,13 @@ func (dag *BlockDAG) validateDifficulty(header *appmessage.BlockHeader, bluestPa
|
||||
|
||||
// validateParents validates that no parent is an ancestor of another parent, and no parent is finalized
|
||||
func (dag *BlockDAG) validateParents(blockHeader *appmessage.BlockHeader, parents blockSet) error {
|
||||
for parentA := range parents {
|
||||
// isFinalized might be false-negative because node finality status is
|
||||
// updated in a separate goroutine. This is why later the block is
|
||||
// checked more thoroughly on the finality rules in dag.checkFinalityViolation.
|
||||
if parentA.isFinalized {
|
||||
return ruleError(ErrFinality, fmt.Sprintf("block %s is a finalized "+
|
||||
"parent of block %s", parentA.hash, blockHeader.BlockHash()))
|
||||
}
|
||||
if len(parents) > appmessage.MaxNumParentBlocks {
|
||||
return ruleError(ErrTooManyParents,
|
||||
fmt.Sprintf("block %s points to %d parents > MaxNumParentBlocks: %d",
|
||||
blockHeader.BlockHash(), len(parents), appmessage.MaxNumParentBlocks))
|
||||
}
|
||||
|
||||
for parentA := range parents {
|
||||
for parentB := range parents {
|
||||
if parentA == parentB {
|
||||
continue
|
||||
@@ -790,6 +781,20 @@ func (dag *BlockDAG) checkBlockContext(block *util.Block, flags BehaviorFlags) e
|
||||
return nil
|
||||
}
|
||||
|
||||
func (node *blockNode) checkDAGRelations() error {
|
||||
err := node.checkMergeSizeLimit()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = node.checkBoundedMergeDepth()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) handleLookupParentNodesError(block *util.Block, err error) error {
|
||||
var ruleErr RuleError
|
||||
if ok := errors.As(err, &ruleErr); ok && ruleErr.ErrorCode == ErrInvalidAncestorBlock {
|
||||
@@ -802,14 +807,14 @@ func (dag *BlockDAG) handleLookupParentNodesError(block *util.Block, err error)
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) checkBlockTransactionsFinalized(block *util.Block, node *blockNode, flags BehaviorFlags) error {
|
||||
fastAdd := flags&BFFastAdd == BFFastAdd || dag.index.NodeStatus(node).KnownValid()
|
||||
fastAdd := flags&BFFastAdd == BFFastAdd || dag.index.BlockNodeStatus(node).KnownValid()
|
||||
if fastAdd {
|
||||
return nil
|
||||
}
|
||||
|
||||
blockTime := block.MsgBlock().Header.Timestamp
|
||||
if !block.IsGenesis() {
|
||||
blockTime = node.selectedParent.PastMedianTime(dag)
|
||||
blockTime = node.selectedParent.PastMedianTime()
|
||||
}
|
||||
|
||||
// Ensure all transactions in the block are finalized.
|
||||
@@ -824,6 +829,31 @@ func (dag *BlockDAG) checkBlockTransactionsFinalized(block *util.Block, node *bl
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) checkBlockHasNoChainedTransactions(block *util.Block, node *blockNode, flags BehaviorFlags) error {
|
||||
fastAdd := flags&BFFastAdd == BFFastAdd || dag.index.BlockNodeStatus(node).KnownValid()
|
||||
if fastAdd {
|
||||
return nil
|
||||
}
|
||||
|
||||
transactions := block.Transactions()
|
||||
transactionsSet := make(map[daghash.TxID]struct{}, len(transactions))
|
||||
for _, transaction := range transactions {
|
||||
transactionsSet[*transaction.ID()] = struct{}{}
|
||||
}
|
||||
|
||||
for _, transaction := range transactions {
|
||||
for i, transactionInput := range transaction.MsgTx().TxIn {
|
||||
if _, ok := transactionsSet[transactionInput.PreviousOutpoint.TxID]; ok {
|
||||
str := fmt.Sprintf("block contains chained transactions: Input %d of transaction %s spend"+
|
||||
"an output of transaction %s", i, transaction.ID(), transactionInput.PreviousOutpoint.TxID)
|
||||
return ruleError(ErrChainedTransactions, str)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensureNoDuplicateTx ensures blocks do not contain duplicate transactions which
|
||||
// 'overwrite' older transactions that are not fully spent. This prevents an
|
||||
// attack where a coinbase and all of its dependent transactions could be
|
||||
@@ -858,6 +888,30 @@ func ensureNoDuplicateTx(utxoSet UTXOSet, transactions []*util.Tx) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkTxIsNotDuplicate(tx *util.Tx, utxoSet UTXOSet) error {
|
||||
fetchSet := make(map[appmessage.Outpoint]struct{})
|
||||
|
||||
// Fetch utxos for all of the ouputs in this transaction.
|
||||
// Typically, there will not be any utxos for any of the outputs.
|
||||
prevOut := appmessage.Outpoint{TxID: *tx.ID()}
|
||||
for txOutIdx := range tx.MsgTx().TxOut {
|
||||
prevOut.Index = uint32(txOutIdx)
|
||||
fetchSet[prevOut] = struct{}{}
|
||||
}
|
||||
|
||||
// Duplicate transactions are only allowed if the previous transaction
|
||||
// is fully spent.
|
||||
for outpoint := range fetchSet {
|
||||
if _, ok := utxoSet.Get(outpoint); ok {
|
||||
str := fmt.Sprintf("tried to overwrite transaction %s "+
|
||||
"that is not fully spent", outpoint.TxID)
|
||||
return ruleError(ErrOverwriteTx, str)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckTransactionInputsAndCalulateFee performs a series of checks on the inputs to a
|
||||
// transaction to ensure they are valid. An example of some of the checks
|
||||
// include verifying all inputs exist, ensuring the block reward seasoning
|
||||
@@ -868,7 +922,8 @@ func ensureNoDuplicateTx(utxoSet UTXOSet, transactions []*util.Tx) error {
|
||||
//
|
||||
// NOTE: The transaction MUST have already been sanity checked with the
|
||||
// CheckTransactionSanity function prior to calling this function.
|
||||
func CheckTransactionInputsAndCalulateFee(tx *util.Tx, txBlueScore uint64, utxoSet UTXOSet, dagParams *dagconfig.Params, fastAdd bool) (
|
||||
func CheckTransactionInputsAndCalulateFee(
|
||||
tx *util.Tx, txBlueScore uint64, referencedUTXOEntries []*UTXOEntry, dagParams *dagconfig.Params, fastAdd bool) (
|
||||
txFeeInSompi uint64, err error) {
|
||||
|
||||
// Coinbase transactions have no standard inputs to validate.
|
||||
@@ -876,18 +931,9 @@ func CheckTransactionInputsAndCalulateFee(tx *util.Tx, txBlueScore uint64, utxoS
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
txID := tx.ID()
|
||||
var totalSompiIn uint64
|
||||
for txInIndex, txIn := range tx.MsgTx().TxIn {
|
||||
// Ensure the referenced input transaction is available.
|
||||
entry, ok := utxoSet.Get(txIn.PreviousOutpoint)
|
||||
if !ok {
|
||||
str := fmt.Sprintf("output %s referenced from "+
|
||||
"transaction %s input %d either does not exist or "+
|
||||
"has already been spent", txIn.PreviousOutpoint,
|
||||
tx.ID(), txInIndex)
|
||||
return 0, ruleError(ErrMissingTxOut, str)
|
||||
}
|
||||
entry := referencedUTXOEntries[txInIndex]
|
||||
|
||||
if !fastAdd {
|
||||
if err = validateCoinbaseMaturity(dagParams, entry, txBlueScore, txIn); err != nil {
|
||||
@@ -895,56 +941,39 @@ func CheckTransactionInputsAndCalulateFee(tx *util.Tx, txBlueScore uint64, utxoS
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure the transaction amounts are in range. Each of the
|
||||
// output values of the input transactions must not be negative
|
||||
// or more than the max allowed per transaction. All amounts in
|
||||
// a transaction are in a unit value known as a sompi. One
|
||||
// kaspa is a quantity of sompi as defined by the
|
||||
// SompiPerKaspa constant.
|
||||
originTxSompi := entry.Amount()
|
||||
if originTxSompi > util.MaxSompi {
|
||||
str := fmt.Sprintf("transaction output value of %s is "+
|
||||
"higher than max allowed value of %d",
|
||||
util.Amount(originTxSompi),
|
||||
util.MaxSompi)
|
||||
return 0, ruleError(ErrBadTxOutValue, str)
|
||||
}
|
||||
|
||||
// The total of all outputs must not be more than the max
|
||||
// allowed per transaction. Also, we could potentially overflow
|
||||
// the accumulator so check for overflow.
|
||||
lastSompiIn := totalSompiIn
|
||||
totalSompiIn += originTxSompi
|
||||
if totalSompiIn < lastSompiIn ||
|
||||
totalSompiIn > util.MaxSompi {
|
||||
str := fmt.Sprintf("total value of all transaction "+
|
||||
"inputs is %d which is higher than max "+
|
||||
"allowed value of %d", totalSompiIn,
|
||||
util.MaxSompi)
|
||||
return 0, ruleError(ErrBadTxOutValue, str)
|
||||
totalSompiIn, err = checkEntryAmounts(entry, totalSompiIn)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
// Calculate the total output amount for this transaction. It is safe
|
||||
// to ignore overflow and out of range errors here because those error
|
||||
// conditions would have already been caught by checkTransactionSanity.
|
||||
var totalSompiOut uint64
|
||||
for _, txOut := range tx.MsgTx().TxOut {
|
||||
totalSompiOut += txOut.Value
|
||||
}
|
||||
|
||||
// Ensure the transaction does not spend more than its inputs.
|
||||
if totalSompiIn < totalSompiOut {
|
||||
str := fmt.Sprintf("total value of all transaction inputs for "+
|
||||
"transaction %s is %d which is less than the amount "+
|
||||
"spent of %d", txID, totalSompiIn, totalSompiOut)
|
||||
return 0, ruleError(ErrSpendTooHigh, str)
|
||||
totalSompiOut, err := checkTxOutputAmounts(tx, totalSompiIn)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
txFeeInSompi = totalSompiIn - totalSompiOut
|
||||
return txFeeInSompi, nil
|
||||
}
|
||||
|
||||
func checkEntryAmounts(entry *UTXOEntry, totalSompiInBefore uint64) (totalSompiInAfter uint64, err error) {
|
||||
// The total of all outputs must not be more than the max
|
||||
// allowed per transaction. Also, we could potentially overflow
|
||||
// the accumulator so check for overflow.
|
||||
lastSompiIn := totalSompiInBefore
|
||||
originTxSompi := entry.Amount()
|
||||
totalSompiInAfter = totalSompiInBefore + originTxSompi
|
||||
if totalSompiInBefore < lastSompiIn ||
|
||||
totalSompiInBefore > util.MaxSompi {
|
||||
str := fmt.Sprintf("total value of all transaction "+
|
||||
"inputs is %d which is higher than max "+
|
||||
"allowed value of %d", totalSompiInBefore,
|
||||
util.MaxSompi)
|
||||
return 0, ruleError(ErrBadTxOutValue, str)
|
||||
}
|
||||
return totalSompiInAfter, nil
|
||||
}
|
||||
|
||||
func validateCoinbaseMaturity(dagParams *dagconfig.Params, entry *UTXOEntry, txBlueScore uint64, txIn *appmessage.TxIn) error {
|
||||
// Ensure the transaction is not spending coins which have not
|
||||
// yet reached the required coinbase maturity.
|
||||
@@ -964,115 +993,214 @@ func validateCoinbaseMaturity(dagParams *dagconfig.Params, entry *UTXOEntry, txB
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkConnectToPastUTXO performs several checks to confirm connecting the passed
|
||||
// block to the DAG represented by the passed view does not violate any rules.
|
||||
//
|
||||
// An example of some of the checks performed are ensuring connecting the block
|
||||
// would not cause any duplicate transaction hashes for old transactions that
|
||||
// aren't already fully spent, double spends, exceeding the maximum allowed
|
||||
// signature operations per block, invalid values in relation to the expected
|
||||
// block subsidy, or fail transaction script validation.
|
||||
//
|
||||
// It also returns the feeAccumulator for this block.
|
||||
//
|
||||
// This function MUST be called with the dag state lock held (for writes).
|
||||
func (dag *BlockDAG) checkConnectToPastUTXO(block *blockNode, pastUTXO UTXOSet,
|
||||
transactions []*util.Tx, fastAdd bool) (compactFeeData, error) {
|
||||
func (dag *BlockDAG) checkConnectBlockToPastUTXO(
|
||||
node *blockNode, pastUTXO UTXOSet, transactions []*util.Tx) (err error) {
|
||||
|
||||
if !fastAdd {
|
||||
err := ensureNoDuplicateTx(pastUTXO, transactions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
selectedParentMedianTime := node.selectedParentMedianTime()
|
||||
|
||||
err = checkDoubleSpendsWithBlockPast(pastUTXO, transactions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := validateBlockMass(pastUTXO, transactions); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Perform several checks on the inputs for each transaction. Also
|
||||
// accumulate the total fees. This could technically be combined with
|
||||
// the loop above instead of running another loop over the transactions,
|
||||
// but by separating it we can avoid running the more expensive (though
|
||||
// still relatively cheap as compared to running the scripts) checks
|
||||
// against all the inputs when the signature operations are out of
|
||||
// bounds.
|
||||
// In addition - add all fees into a fee accumulator, to be stored and checked
|
||||
// when validating descendants' coinbase transactions.
|
||||
var totalFees uint64
|
||||
compactFeeFactory := newCompactFeeFactory()
|
||||
totalFee := uint64(0)
|
||||
|
||||
for _, tx := range transactions {
|
||||
txFee, err := CheckTransactionInputsAndCalulateFee(tx, block.blueScore, pastUTXO,
|
||||
dag.Params, fastAdd)
|
||||
txFee, _, err :=
|
||||
dag.checkConnectTransactionToPastUTXO(node, tx, pastUTXO, 0, selectedParentMedianTime)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
// Sum the total fees and ensure we don't overflow the
|
||||
// accumulator.
|
||||
lastTotalFees := totalFees
|
||||
totalFees += txFee
|
||||
if totalFees < lastTotalFees {
|
||||
return nil, ruleError(ErrBadFees, "total fees for block "+
|
||||
"overflows accumulator")
|
||||
}
|
||||
|
||||
err = compactFeeFactory.add(txFee)
|
||||
totalFee, err = dag.checkTotalFee(totalFee, txFee)
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("error adding tx %s fee to compactFeeFactory: %s", tx.ID(), err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
feeData, err := compactFeeFactory.data()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) checkConnectTransactionToPastUTXO(
|
||||
node *blockNode, tx *util.Tx, pastUTXO UTXOSet, accumulatedMassBefore uint64, selectedParentMedianTime mstime.Time) (
|
||||
txFee uint64, accumulatedMassAfter uint64, err error) {
|
||||
|
||||
err = checkTxIsNotDuplicate(tx, pastUTXO)
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("error getting bytes of fee data: %s", err)
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
if !fastAdd {
|
||||
scriptFlags := txscript.ScriptNoFlags
|
||||
referencedUTXOEntries, err := dag.getReferencedUTXOEntries(tx, pastUTXO)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
// We obtain the MTP of the *previous* block (unless it's genesis block)
|
||||
// in order to determine if transactions in the current block are final.
|
||||
medianTime := block.Header().Timestamp
|
||||
if !block.isGenesis() {
|
||||
medianTime = block.selectedParent.PastMedianTime(dag)
|
||||
}
|
||||
accumulatedMassAfter, err = dag.checkTxMass(tx, referencedUTXOEntries, accumulatedMassBefore)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
// We also enforce the relative sequence number based
|
||||
// lock-times within the inputs of all transactions in this
|
||||
// candidate block.
|
||||
for _, tx := range transactions {
|
||||
// A transaction can only be included within a block
|
||||
// once the sequence locks of *all* its inputs are
|
||||
// active.
|
||||
sequenceLock, err := dag.calcSequenceLock(block, pastUTXO, tx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !SequenceLockActive(sequenceLock, block.blueScore,
|
||||
medianTime) {
|
||||
str := fmt.Sprintf("block contains " +
|
||||
"transaction whose input sequence " +
|
||||
"locks are not met")
|
||||
return nil, ruleError(ErrUnfinalizedTx, str)
|
||||
}
|
||||
}
|
||||
err = dag.checkTxCoinbaseMaturity(node, tx, referencedUTXOEntries)
|
||||
if err != nil {
|
||||
return 0, 0, nil
|
||||
}
|
||||
|
||||
// Now that the inexpensive checks are done and have passed, verify the
|
||||
// transactions are actually allowed to spend the coins by running the
|
||||
// expensive SCHNORR signature check scripts. Doing this last helps
|
||||
// prevent CPU exhaustion attacks.
|
||||
err := checkBlockScripts(block, pastUTXO, transactions, scriptFlags, dag.sigCache)
|
||||
totalSompiIn, err := dag.checkTxInputAmounts(referencedUTXOEntries)
|
||||
if err != nil {
|
||||
return 0, 0, nil
|
||||
}
|
||||
|
||||
totalSompiOut, err := checkTxOutputAmounts(tx, totalSompiIn)
|
||||
if err != nil {
|
||||
return 0, 0, nil
|
||||
}
|
||||
|
||||
txFee = totalSompiIn - totalSompiOut
|
||||
|
||||
err = dag.checkTxSequenceLock(node, tx, referencedUTXOEntries, selectedParentMedianTime)
|
||||
if err != nil {
|
||||
return 0, 0, nil
|
||||
}
|
||||
|
||||
err = ValidateTransactionScripts(tx, referencedUTXOEntries, txscript.ScriptNoFlags, dag.sigCache)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
return txFee, accumulatedMassAfter, nil
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) checkTxSequenceLock(node *blockNode, tx *util.Tx,
|
||||
referencedUTXOEntries []*UTXOEntry, medianTime mstime.Time) error {
|
||||
|
||||
// A transaction can only be included within a block
|
||||
// once the sequence locks of *all* its inputs are
|
||||
// active.
|
||||
sequenceLock, err := dag.calcTxSequenceLockFromReferencedUTXOEntries(node, tx, referencedUTXOEntries)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !SequenceLockActive(sequenceLock, node.blueScore, medianTime) {
|
||||
str := fmt.Sprintf("block contains " +
|
||||
"transaction whose input sequence " +
|
||||
"locks are not met")
|
||||
return ruleError(ErrUnfinalizedTx, str)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkTxOutputAmounts(tx *util.Tx, totalSompiIn uint64) (uint64, error) {
|
||||
totalSompiOut := uint64(0)
|
||||
// Calculate the total output amount for this transaction. It is safe
|
||||
// to ignore overflow and out of range errors here because those error
|
||||
// conditions would have already been caught by checkTransactionSanity.
|
||||
for _, txOut := range tx.MsgTx().TxOut {
|
||||
totalSompiOut += txOut.Value
|
||||
}
|
||||
|
||||
// Ensure the transaction does not spend more than its inputs.
|
||||
if totalSompiIn < totalSompiOut {
|
||||
str := fmt.Sprintf("total value of all transaction inputs for "+
|
||||
"transaction %s is %d which is less than the amount "+
|
||||
"spent of %d", tx.ID(), totalSompiIn, totalSompiOut)
|
||||
return 0, ruleError(ErrSpendTooHigh, str)
|
||||
}
|
||||
return totalSompiOut, nil
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) checkTxInputAmounts(
|
||||
inputUTXOEntries []*UTXOEntry) (totalSompiIn uint64, err error) {
|
||||
|
||||
totalSompiIn = 0
|
||||
|
||||
for _, utxoEntry := range inputUTXOEntries {
|
||||
|
||||
// Ensure the transaction amounts are in range. Each of the
|
||||
// output values of the input transactions must not be negative
|
||||
// or more than the max allowed per transaction. All amounts in
|
||||
// a transaction are in a unit value known as a sompi. One
|
||||
// kaspa is a quantity of sompi as defined by the
|
||||
// SompiPerKaspa constant.
|
||||
totalSompiIn, err = checkEntryAmounts(utxoEntry, totalSompiIn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
return feeData, nil
|
||||
|
||||
return totalSompiIn, nil
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) checkTxCoinbaseMaturity(
|
||||
node *blockNode, tx *util.Tx, referencedUTXOEntries []*UTXOEntry) error {
|
||||
txBlueScore := node.blueScore
|
||||
for i, txIn := range tx.MsgTx().TxIn {
|
||||
utxoEntry := referencedUTXOEntries[i]
|
||||
|
||||
if utxoEntry.IsCoinbase() {
|
||||
originBlueScore := utxoEntry.BlockBlueScore()
|
||||
blueScoreSincePrev := txBlueScore - originBlueScore
|
||||
if blueScoreSincePrev < dag.Params.BlockCoinbaseMaturity {
|
||||
str := fmt.Sprintf("tried to spend coinbase "+
|
||||
"transaction output %s from blue score %d "+
|
||||
"to blue score %d before required maturity "+
|
||||
"of %d", txIn.PreviousOutpoint,
|
||||
originBlueScore, txBlueScore,
|
||||
dag.Params.BlockCoinbaseMaturity)
|
||||
|
||||
return ruleError(ErrImmatureSpend, str)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) checkTxMass(tx *util.Tx, referencedUTXOEntries []*UTXOEntry,
|
||||
accumulatedMassBefore uint64) (accumulatedMassAfter uint64, err error) {
|
||||
|
||||
txMass := calcTxMassFromReferencedUTXOEntries(tx, referencedUTXOEntries)
|
||||
|
||||
accumulatedMassAfter = accumulatedMassBefore + txMass
|
||||
|
||||
// We could potentially overflow the accumulator so check for
|
||||
// overflow as well.
|
||||
if accumulatedMassAfter < txMass || accumulatedMassAfter > appmessage.MaxMassAcceptedByBlock {
|
||||
str := fmt.Sprintf("block accepts transactions with accumulated mass higher then allowed limit of %d",
|
||||
appmessage.MaxMassAcceptedByBlock)
|
||||
return 0, ruleError(ErrBlockMassTooHigh, str)
|
||||
}
|
||||
|
||||
return accumulatedMassAfter, nil
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) getReferencedUTXOEntries(tx *util.Tx, utxoSet UTXOSet) ([]*UTXOEntry, error) {
|
||||
|
||||
txIns := tx.MsgTx().TxIn
|
||||
referencedUTXOEntries := make([]*UTXOEntry, 0, len(txIns))
|
||||
|
||||
for txInIndex, txIn := range txIns {
|
||||
utxoEntry, ok := utxoSet.Get(txIn.PreviousOutpoint)
|
||||
if !ok {
|
||||
str := fmt.Sprintf("output %s referenced from "+
|
||||
"transaction %s input %d either does not exist or "+
|
||||
"has already been spent", txIn.PreviousOutpoint,
|
||||
tx.ID(), txInIndex)
|
||||
return nil, ruleError(ErrMissingTxOut, str)
|
||||
}
|
||||
|
||||
referencedUTXOEntries = append(referencedUTXOEntries, utxoEntry)
|
||||
}
|
||||
|
||||
return referencedUTXOEntries, nil
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) checkTotalFee(totalFees uint64, txFee uint64) (uint64, error) {
|
||||
// Sum the total fees and ensure we don't overflow the
|
||||
// accumulator.
|
||||
lastTotalFees := totalFees
|
||||
totalFees += txFee
|
||||
if totalFees < lastTotalFees || totalFees > util.MaxSompi {
|
||||
str := fmt.Sprintf("total fees are higher then max allowed value of %d", util.MaxSompi)
|
||||
return 0, ruleError(ErrBadFees, str)
|
||||
}
|
||||
return totalFees, nil
|
||||
}
|
||||
|
||||
func (node *blockNode) validateUTXOCommitment(multiset *secp256k1.MultiSet) error {
|
||||
@@ -1102,7 +1230,6 @@ func (dag *BlockDAG) CheckConnectBlockTemplate(block *util.Block) error {
|
||||
// the DAG does not violate any consensus rules, aside from the proof of
|
||||
// work requirement. The block must connect to the current tip of the main dag.
|
||||
func (dag *BlockDAG) CheckConnectBlockTemplateNoLock(block *util.Block) error {
|
||||
|
||||
// Skip the proof of work check as this is just a block template.
|
||||
flags := BFNoPoWCheck
|
||||
|
||||
@@ -1123,12 +1250,14 @@ func (dag *BlockDAG) CheckConnectBlockTemplateNoLock(block *util.Block) error {
|
||||
return err
|
||||
}
|
||||
|
||||
templateNode, _ := dag.newBlockNode(&header, dag.virtual.tips())
|
||||
templateParents, err := dag.index.LookupNodes(header.ParentHashes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = dag.checkConnectToPastUTXO(templateNode,
|
||||
dag.UTXOSet(), block.Transactions(), false)
|
||||
templateNode, _ := dag.newBlockNode(&header, blockSetFromSlice(templateParents...))
|
||||
|
||||
return err
|
||||
return dag.checkConnectBlockToPastUTXO(templateNode, dag.UTXOSet(), block.Transactions())
|
||||
}
|
||||
|
||||
func (dag *BlockDAG) checkDuplicateBlock(blockHash *daghash.Hash, flags BehaviorFlags) error {
|
||||
|
||||
@@ -113,13 +113,6 @@ func TestCheckConnectBlockTemplate(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// Block 3 should fail to connect since it's already inserted.
|
||||
err = dag.CheckConnectBlockTemplateNoLock(blocks[3])
|
||||
if err == nil {
|
||||
t.Fatal("CheckConnectBlockTemplate: Did not received expected error " +
|
||||
"on block 3")
|
||||
}
|
||||
|
||||
// Block 4 should connect successfully to tip of chain.
|
||||
err = dag.CheckConnectBlockTemplateNoLock(blocks[4])
|
||||
if err != nil {
|
||||
@@ -488,7 +481,7 @@ func TestPastMedianTime(t *testing.T) {
|
||||
node := newTestNode(dag, blockSetFromSlice(tip),
|
||||
blockVersion,
|
||||
dag.powMaxBits,
|
||||
tip.PastMedianTime(dag))
|
||||
tip.PastMedianTime())
|
||||
|
||||
header := node.Header()
|
||||
err := dag.checkBlockHeaderContext(header, node.parents.bluest(), false)
|
||||
@@ -501,7 +494,7 @@ func TestPastMedianTime(t *testing.T) {
|
||||
node = newTestNode(dag, blockSetFromSlice(tip),
|
||||
blockVersion,
|
||||
dag.powMaxBits,
|
||||
tip.PastMedianTime(dag).Add(time.Second))
|
||||
tip.PastMedianTime().Add(time.Second))
|
||||
|
||||
header = node.Header()
|
||||
err = dag.checkBlockHeaderContext(header, node.parents.bluest(), false)
|
||||
@@ -514,7 +507,7 @@ func TestPastMedianTime(t *testing.T) {
|
||||
node = newTestNode(dag, blockSetFromSlice(tip),
|
||||
blockVersion,
|
||||
0,
|
||||
tip.PastMedianTime(dag).Add(-time.Second))
|
||||
tip.PastMedianTime().Add(-time.Second))
|
||||
|
||||
header = node.Header()
|
||||
err = dag.checkBlockHeaderContext(header, node.parents.bluest(), false)
|
||||
|
||||
@@ -5,8 +5,9 @@
|
||||
package blockdag
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"sync"
|
||||
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
)
|
||||
|
||||
// virtualBlock is a virtual block whose parents are the tips of the DAG.
|
||||
@@ -14,7 +15,7 @@ type virtualBlock struct {
|
||||
mtx sync.Mutex
|
||||
dag *BlockDAG
|
||||
utxoSet *FullUTXOSet
|
||||
blockNode
|
||||
*blockNode
|
||||
|
||||
// selectedParentChainSet is a block set that includes all the blocks
|
||||
// that belong to the chain of selected parents from the virtual block.
|
||||
@@ -27,30 +28,18 @@ type virtualBlock struct {
|
||||
}
|
||||
|
||||
// newVirtualBlock creates and returns a new VirtualBlock.
|
||||
func newVirtualBlock(dag *BlockDAG, tips blockSet) *virtualBlock {
|
||||
func newVirtualBlock(dag *BlockDAG, parents blockSet) *virtualBlock {
|
||||
// The mutex is intentionally not held since this is a constructor.
|
||||
var virtual virtualBlock
|
||||
virtual.dag = dag
|
||||
virtual.utxoSet = NewFullUTXOSet()
|
||||
virtual.utxoSet = NewFullUTXOSetFromContext(dag.databaseContext, dag.maxUTXOCacheSize)
|
||||
virtual.selectedParentChainSet = newBlockSet()
|
||||
virtual.selectedParentChainSlice = nil
|
||||
virtual.setTips(tips)
|
||||
virtual.blockNode, _ = dag.newBlockNode(nil, parents)
|
||||
|
||||
return &virtual
|
||||
}
|
||||
|
||||
// setTips replaces the tips of the virtual block with the blocks in the
|
||||
// given blockSet. This only differs from the exported version in that it
|
||||
// is up to the caller to ensure the lock is held.
|
||||
//
|
||||
// This function MUST be called with the view mutex locked (for writes).
|
||||
func (v *virtualBlock) setTips(tips blockSet) *chainUpdates {
|
||||
oldSelectedParent := v.selectedParent
|
||||
node, _ := v.dag.newBlockNode(nil, tips)
|
||||
v.blockNode = *node
|
||||
return v.updateSelectedParentSet(oldSelectedParent)
|
||||
}
|
||||
|
||||
// updateSelectedParentSet updates the selectedParentSet to match the
|
||||
// new selected parent of the virtual block.
|
||||
// Every time the new selected parent is not a child of
|
||||
@@ -59,7 +48,7 @@ func (v *virtualBlock) setTips(tips blockSet) *chainUpdates {
|
||||
// parent and are not selected ancestors of the new one, and adding
|
||||
// blocks that are selected ancestors of the new selected parent
|
||||
// and aren't selected ancestors of the old one.
|
||||
func (v *virtualBlock) updateSelectedParentSet(oldSelectedParent *blockNode) *chainUpdates {
|
||||
func (v *virtualBlock) updateSelectedParentSet(oldSelectedParent *blockNode) *selectedParentChainUpdates {
|
||||
var intersectionNode *blockNode
|
||||
nodesToAdd := make([]*blockNode, 0)
|
||||
for node := v.blockNode.selectedParent; intersectionNode == nil && node != nil; node = node.selectedParent {
|
||||
@@ -101,53 +90,8 @@ func (v *virtualBlock) updateSelectedParentSet(oldSelectedParent *blockNode) *ch
|
||||
}
|
||||
v.selectedParentChainSlice = append(v.selectedParentChainSlice, nodesToAdd...)
|
||||
|
||||
return &chainUpdates{
|
||||
return &selectedParentChainUpdates{
|
||||
removedChainBlockHashes: removedChainBlockHashes,
|
||||
addedChainBlockHashes: addedChainBlockHashes,
|
||||
}
|
||||
}
|
||||
|
||||
// SetTips replaces the tips of the virtual block with the blocks in the
|
||||
// given blockSet.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (v *virtualBlock) SetTips(tips blockSet) {
|
||||
v.mtx.Lock()
|
||||
defer v.mtx.Unlock()
|
||||
v.setTips(tips)
|
||||
}
|
||||
|
||||
// addTip adds the given tip to the set of tips in the virtual block.
|
||||
// All former tips that happen to be the given tips parents are removed
|
||||
// from the set. This only differs from the exported version in that it
|
||||
// is up to the caller to ensure the lock is held.
|
||||
//
|
||||
// This function MUST be called with the view mutex locked (for writes).
|
||||
func (v *virtualBlock) addTip(newTip *blockNode) *chainUpdates {
|
||||
updatedTips := v.tips().clone()
|
||||
for parent := range newTip.parents {
|
||||
updatedTips.remove(parent)
|
||||
}
|
||||
|
||||
updatedTips.add(newTip)
|
||||
return v.setTips(updatedTips)
|
||||
}
|
||||
|
||||
// AddTip adds the given tip to the set of tips in the virtual block.
|
||||
// All former tips that happen to be the given tip's parents are removed
|
||||
// from the set.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (v *virtualBlock) AddTip(newTip *blockNode) *chainUpdates {
|
||||
v.mtx.Lock()
|
||||
defer v.mtx.Unlock()
|
||||
return v.addTip(newTip)
|
||||
}
|
||||
|
||||
// tips returns the current tip block nodes for the DAG. It will return
|
||||
// an empty blockSet if there is no tip.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (v *virtualBlock) tips() blockSet {
|
||||
return v.parents
|
||||
}
|
||||
|
||||
@@ -42,7 +42,7 @@ var (
|
||||
)
|
||||
|
||||
const (
|
||||
ghostdagK = 15
|
||||
ghostdagK = 18
|
||||
difficultyAdjustmentWindowSize = 2640
|
||||
timestampDeviationTolerance = 132
|
||||
finalityDuration = 24 * time.Hour
|
||||
|
||||
@@ -48,16 +48,13 @@ type Config struct {
|
||||
// to policy.
|
||||
Policy Policy
|
||||
|
||||
// CalcSequenceLockNoLock defines the function to use in order to generate
|
||||
// the current sequence lock for the given transaction using the passed
|
||||
// utxo set.
|
||||
CalcSequenceLockNoLock func(*util.Tx, blockdag.UTXOSet) (*blockdag.SequenceLock, error)
|
||||
|
||||
// SigCache defines a signature cache to use.
|
||||
SigCache *txscript.SigCache
|
||||
|
||||
// DAG is the BlockDAG we want to use (mainly for UTXO checks)
|
||||
DAG *blockdag.BlockDAG
|
||||
|
||||
CalcTxSequenceLockFromReferencedUTXOEntries func(tx *util.Tx, referencedUTXOEntries []*blockdag.UTXOEntry) (*blockdag.SequenceLock, error)
|
||||
}
|
||||
|
||||
// Policy houses the policy (configuration parameters) which is used to
|
||||
@@ -92,7 +89,7 @@ type Policy struct {
|
||||
type TxDesc struct {
|
||||
mining.TxDesc
|
||||
|
||||
// depCount is not 0 for dependent transaction. Dependent transaction is
|
||||
// depCount is not 0 for a chained transaction. A chained transaction is
|
||||
// one that is accepted to pool, but cannot be mined in next block because it
|
||||
// depends on outputs of accepted, but still not mined transaction
|
||||
depCount int
|
||||
@@ -113,22 +110,24 @@ type TxPool struct {
|
||||
// The following variables must only be used atomically.
|
||||
lastUpdated int64 // last time pool was updated
|
||||
|
||||
mtx sync.RWMutex
|
||||
cfg Config
|
||||
pool map[daghash.TxID]*TxDesc
|
||||
depends map[daghash.TxID]*TxDesc
|
||||
dependsByPrev map[appmessage.Outpoint]map[daghash.TxID]*TxDesc
|
||||
mtx sync.RWMutex
|
||||
cfg Config
|
||||
|
||||
pool map[daghash.TxID]*TxDesc
|
||||
|
||||
chainedTransactions map[daghash.TxID]*TxDesc
|
||||
chainedTransactionByPreviousOutpoint map[appmessage.Outpoint]*TxDesc
|
||||
|
||||
orphans map[daghash.TxID]*orphanTx
|
||||
orphansByPrev map[appmessage.Outpoint]map[daghash.TxID]*util.Tx
|
||||
outpoints map[appmessage.Outpoint]*util.Tx
|
||||
|
||||
mempoolUTXOSet *mempoolUTXOSet
|
||||
|
||||
// nextExpireScan is the time after which the orphan pool will be
|
||||
// scanned in order to evict orphans. This is NOT a hard deadline as
|
||||
// the scan will only run when an orphan is added to the pool as opposed
|
||||
// to on an unconditional timer.
|
||||
nextExpireScan mstime.Time
|
||||
|
||||
mpUTXOSet blockdag.UTXOSet
|
||||
}
|
||||
|
||||
// Ensure the TxPool type implements the mining.TxSource interface.
|
||||
@@ -341,7 +340,7 @@ func (mp *TxPool) IsTransactionInPool(hash *daghash.TxID) bool {
|
||||
//
|
||||
// This function MUST be called with the mempool lock held (for reads).
|
||||
func (mp *TxPool) isInDependPool(hash *daghash.TxID) bool {
|
||||
if _, exists := mp.depends[*hash]; exists {
|
||||
if _, exists := mp.chainedTransactions[*hash]; exists {
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -405,221 +404,129 @@ func (mp *TxPool) HaveTransaction(txID *daghash.TxID) bool {
|
||||
return haveTx
|
||||
}
|
||||
|
||||
// removeTransactions is the internal function which implements the public
|
||||
// RemoveTransactions. See the comment for RemoveTransactions for more details.
|
||||
//
|
||||
// This method, in contrast to removeTransaction (singular), creates one utxoDiff
|
||||
// and calls removeTransactionWithDiff on it for every transaction. This is an
|
||||
// optimization to save us a good amount of allocations (specifically in
|
||||
// UTXODiff.WithDiff) every time we accept a block.
|
||||
// removeBlockTransactionsFromPool removes the transactions that are found in the block
|
||||
// from the mempool, and move their chained mempool transactions (if any) to the main pool.
|
||||
//
|
||||
// This function MUST be called with the mempool lock held (for writes).
|
||||
func (mp *TxPool) removeTransactions(txs []*util.Tx) error {
|
||||
diff := blockdag.NewUTXODiff()
|
||||
|
||||
for _, tx := range txs {
|
||||
func (mp *TxPool) removeBlockTransactionsFromPool(block *util.Block) error {
|
||||
for _, tx := range block.Transactions()[util.CoinbaseTransactionIndex+1:] {
|
||||
txID := tx.ID()
|
||||
|
||||
if _, exists := mp.fetchTxDesc(txID); !exists {
|
||||
continue
|
||||
}
|
||||
|
||||
err := mp.removeTransactionWithDiff(tx, diff, false)
|
||||
err := mp.cleanTransactionFromSets(tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mp.updateBlockTransactionChainedTransactions(tx)
|
||||
}
|
||||
|
||||
var err error
|
||||
mp.mpUTXOSet, err = mp.mpUTXOSet.WithDiff(diff)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
atomic.StoreInt64(&mp.lastUpdated, mstime.Now().UnixMilliseconds())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// removeTransaction is the internal function which implements the public
|
||||
// RemoveTransaction. See the comment for RemoveTransaction for more details.
|
||||
// removeTransactionAndItsChainedTransactions removes a transaction and all of its chained transaction from the mempool.
|
||||
//
|
||||
// This function MUST be called with the mempool lock held (for writes).
|
||||
func (mp *TxPool) removeTransaction(tx *util.Tx, removeDependants bool, restoreInputs bool) error {
|
||||
func (mp *TxPool) removeTransactionAndItsChainedTransactions(tx *util.Tx) error {
|
||||
txID := tx.ID()
|
||||
if removeDependants {
|
||||
// Remove any transactions which rely on this one.
|
||||
for i := uint32(0); i < uint32(len(tx.MsgTx().TxOut)); i++ {
|
||||
prevOut := appmessage.Outpoint{TxID: *txID, Index: i}
|
||||
if txRedeemer, exists := mp.outpoints[prevOut]; exists {
|
||||
err := mp.removeTransaction(txRedeemer, true, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if _, exists := mp.fetchTxDesc(txID); !exists {
|
||||
return nil
|
||||
}
|
||||
|
||||
diff := blockdag.NewUTXODiff()
|
||||
err := mp.removeTransactionWithDiff(tx, diff, restoreInputs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mp.mpUTXOSet, err = mp.mpUTXOSet.WithDiff(diff)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
atomic.StoreInt64(&mp.lastUpdated, mstime.Now().UnixMilliseconds())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// removeTransactionWithDiff removes the transaction tx from the mempool while
|
||||
// updating the UTXODiff diff with appropriate changes. diff is later meant to
|
||||
// be withDiff'd against the mempool UTXOSet to update it.
|
||||
//
|
||||
// This method assumes that tx exists in the mempool.
|
||||
func (mp *TxPool) removeTransactionWithDiff(tx *util.Tx, diff *blockdag.UTXODiff, restoreInputs bool) error {
|
||||
txID := tx.ID()
|
||||
|
||||
err := mp.removeTransactionUTXOEntriesFromDiff(tx, diff)
|
||||
if err != nil {
|
||||
return errors.Errorf("could not remove UTXOEntry from diff: %s", err)
|
||||
}
|
||||
|
||||
err = mp.markTransactionOutputsUnspent(tx, diff, restoreInputs)
|
||||
if err != nil {
|
||||
return errors.Errorf("could not mark transaction output as unspent: %s", err)
|
||||
}
|
||||
|
||||
txDesc, _ := mp.fetchTxDesc(txID)
|
||||
if txDesc.depCount == 0 {
|
||||
delete(mp.pool, *txID)
|
||||
} else {
|
||||
delete(mp.depends, *txID)
|
||||
}
|
||||
|
||||
mp.processRemovedTransactionDependencies(tx)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// removeTransactionUTXOEntriesFromDiff removes tx's UTXOEntries from the diff
|
||||
func (mp *TxPool) removeTransactionUTXOEntriesFromDiff(tx *util.Tx, diff *blockdag.UTXODiff) error {
|
||||
for idx := range tx.MsgTx().TxOut {
|
||||
outpoint := *appmessage.NewOutpoint(tx.ID(), uint32(idx))
|
||||
entry, exists := mp.mpUTXOSet.Get(outpoint)
|
||||
if exists {
|
||||
err := diff.RemoveEntry(outpoint, entry)
|
||||
// Remove any transactions which rely on this one.
|
||||
for i := uint32(0); i < uint32(len(tx.MsgTx().TxOut)); i++ {
|
||||
prevOut := appmessage.Outpoint{TxID: *txID, Index: i}
|
||||
if txRedeemer, exists := mp.mempoolUTXOSet.poolTransactionBySpendingOutpoint(prevOut); exists {
|
||||
err := mp.removeTransactionAndItsChainedTransactions(txRedeemer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// markTransactionOutputsUnspent updates the mempool so that tx's TXOs are unspent
|
||||
// Iff restoreInputs is true then the inputs are restored back into the supplied diff
|
||||
func (mp *TxPool) markTransactionOutputsUnspent(tx *util.Tx, diff *blockdag.UTXODiff, restoreInputs bool) error {
|
||||
for _, txIn := range tx.MsgTx().TxIn {
|
||||
if restoreInputs {
|
||||
if prevTxDesc, exists := mp.pool[txIn.PreviousOutpoint.TxID]; exists {
|
||||
prevOut := prevTxDesc.Tx.MsgTx().TxOut[txIn.PreviousOutpoint.Index]
|
||||
entry := blockdag.NewUTXOEntry(prevOut, false, blockdag.UnacceptedBlueScore)
|
||||
err := diff.AddEntry(txIn.PreviousOutpoint, entry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if prevTxDesc, exists := mp.depends[txIn.PreviousOutpoint.TxID]; exists {
|
||||
prevOut := prevTxDesc.Tx.MsgTx().TxOut[txIn.PreviousOutpoint.Index]
|
||||
entry := blockdag.NewUTXOEntry(prevOut, false, blockdag.UnacceptedBlueScore)
|
||||
err := diff.AddEntry(txIn.PreviousOutpoint, entry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
delete(mp.outpoints, txIn.PreviousOutpoint)
|
||||
if _, exists := mp.chainedTransactions[*tx.ID()]; exists {
|
||||
mp.removeChainTransaction(tx)
|
||||
}
|
||||
|
||||
err := mp.cleanTransactionFromSets(tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
atomic.StoreInt64(&mp.lastUpdated, mstime.Now().UnixMilliseconds())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// processRemovedTransactionDependencies processes the dependencies of a
|
||||
// transaction tx that was just now removed from the mempool
|
||||
func (mp *TxPool) processRemovedTransactionDependencies(tx *util.Tx) {
|
||||
// cleanTransactionFromSets removes the transaction from all mempool related transaction sets.
|
||||
// It assumes that any chained transaction is already cleaned from the mempool.
|
||||
//
|
||||
// This function MUST be called with the mempool lock held (for writes).
|
||||
func (mp *TxPool) cleanTransactionFromSets(tx *util.Tx) error {
|
||||
err := mp.mempoolUTXOSet.removeTx(tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
txID := *tx.ID()
|
||||
delete(mp.pool, txID)
|
||||
delete(mp.chainedTransactions, txID)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateBlockTransactionChainedTransactions processes the dependencies of a
|
||||
// transaction that was included in a block and was just now removed from the mempool.
|
||||
//
|
||||
// This function MUST be called with the mempool lock held (for writes).
|
||||
|
||||
func (mp *TxPool) updateBlockTransactionChainedTransactions(tx *util.Tx) {
|
||||
prevOut := appmessage.Outpoint{TxID: *tx.ID()}
|
||||
for txOutIdx := range tx.MsgTx().TxOut {
|
||||
// Skip to the next available output if there are none.
|
||||
prevOut.Index = uint32(txOutIdx)
|
||||
depends, exists := mp.dependsByPrev[prevOut]
|
||||
txDesc, exists := mp.chainedTransactionByPreviousOutpoint[prevOut]
|
||||
if !exists {
|
||||
continue
|
||||
}
|
||||
|
||||
// Move independent transactions into main pool
|
||||
for _, txD := range depends {
|
||||
txD.depCount--
|
||||
if txD.depCount == 0 {
|
||||
// Transaction may be already removed by recursive calls, if removeRedeemers is true.
|
||||
// So avoid moving it into main pool
|
||||
if _, ok := mp.depends[*txD.Tx.ID()]; ok {
|
||||
delete(mp.depends, *txD.Tx.ID())
|
||||
mp.pool[*txD.Tx.ID()] = txD
|
||||
}
|
||||
txDesc.depCount--
|
||||
// If the transaction is not chained anymore, move it into the main pool
|
||||
if txDesc.depCount == 0 {
|
||||
// Transaction may be already removed by recursive calls, if removeRedeemers is true.
|
||||
// So avoid moving it into main pool
|
||||
if _, ok := mp.chainedTransactions[*txDesc.Tx.ID()]; ok {
|
||||
delete(mp.chainedTransactions, *txDesc.Tx.ID())
|
||||
mp.pool[*txDesc.Tx.ID()] = txDesc
|
||||
}
|
||||
}
|
||||
delete(mp.dependsByPrev, prevOut)
|
||||
delete(mp.chainedTransactionByPreviousOutpoint, prevOut)
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveTransaction removes the passed transaction from the mempool. When the
|
||||
// removeDependants flag is set, any transactions that depend on the removed
|
||||
// transaction (that is to say, redeem outputs from it) will also be removed
|
||||
// recursively from the mempool, as they would otherwise become orphans.
|
||||
// removeChainTransaction removes a chain transaction and all of its relation as a result of double spend.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (mp *TxPool) RemoveTransaction(tx *util.Tx, removeDependants bool, restoreInputs bool) error {
|
||||
// Protect concurrent access.
|
||||
mp.mtx.Lock()
|
||||
defer mp.mtx.Unlock()
|
||||
return mp.removeTransaction(tx, removeDependants, restoreInputs)
|
||||
// This function MUST be called with the mempool lock held (for writes).
|
||||
func (mp *TxPool) removeChainTransaction(tx *util.Tx) {
|
||||
delete(mp.chainedTransactions, *tx.ID())
|
||||
for _, txIn := range tx.MsgTx().TxIn {
|
||||
delete(mp.chainedTransactionByPreviousOutpoint, txIn.PreviousOutpoint)
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveTransactions removes the passed transactions from the mempool.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (mp *TxPool) RemoveTransactions(txs []*util.Tx) error {
|
||||
// Protect concurrent access.
|
||||
mp.mtx.Lock()
|
||||
defer mp.mtx.Unlock()
|
||||
return mp.removeTransactions(txs)
|
||||
}
|
||||
|
||||
// RemoveDoubleSpends removes all transactions which spend outputs spent by the
|
||||
// removeDoubleSpends removes all transactions which spend outputs spent by the
|
||||
// passed transaction from the memory pool. Removing those transactions then
|
||||
// leads to removing all transactions which rely on them, recursively. This is
|
||||
// necessary when a block is connected to the DAG because the block may
|
||||
// contain transactions which were previously unknown to the memory pool.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (mp *TxPool) RemoveDoubleSpends(tx *util.Tx) error {
|
||||
// Protect concurrent access.
|
||||
mp.mtx.Lock()
|
||||
defer mp.mtx.Unlock()
|
||||
return mp.removeDoubleSpends(tx)
|
||||
}
|
||||
|
||||
// This function MUST be called with the mempool lock held (for writes).
|
||||
func (mp *TxPool) removeDoubleSpends(tx *util.Tx) error {
|
||||
for _, txIn := range tx.MsgTx().TxIn {
|
||||
if txRedeemer, ok := mp.outpoints[txIn.PreviousOutpoint]; ok {
|
||||
if txRedeemer, ok := mp.mempoolUTXOSet.poolTransactionBySpendingOutpoint(txIn.PreviousOutpoint); ok {
|
||||
if !txRedeemer.ID().IsEqual(tx.ID()) {
|
||||
err := mp.removeTransaction(txRedeemer, true, false)
|
||||
err := mp.removeTransactionAndItsChainedTransactions(txRedeemer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -634,13 +541,9 @@ func (mp *TxPool) removeDoubleSpends(tx *util.Tx) error {
|
||||
// helper for maybeAcceptTransaction.
|
||||
//
|
||||
// This function MUST be called with the mempool lock held (for writes).
|
||||
func (mp *TxPool) addTransaction(tx *util.Tx, fee uint64, parentsInPool []*appmessage.Outpoint) (*TxDesc, error) {
|
||||
func (mp *TxPool) addTransaction(tx *util.Tx, mass uint64, fee uint64, parentsInPool []*appmessage.Outpoint) (*TxDesc, error) {
|
||||
// Add the transaction to the pool and mark the referenced outpoints
|
||||
// as spent by the pool.
|
||||
mass, err := blockdag.CalcTxMassFromUTXOSet(tx, mp.mpUTXOSet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
txD := &TxDesc{
|
||||
TxDesc: mining.TxDesc{
|
||||
Tx: tx,
|
||||
@@ -654,23 +557,17 @@ func (mp *TxPool) addTransaction(tx *util.Tx, fee uint64, parentsInPool []*appme
|
||||
if len(parentsInPool) == 0 {
|
||||
mp.pool[*tx.ID()] = txD
|
||||
} else {
|
||||
mp.depends[*tx.ID()] = txD
|
||||
mp.chainedTransactions[*tx.ID()] = txD
|
||||
for _, previousOutpoint := range parentsInPool {
|
||||
if _, exists := mp.dependsByPrev[*previousOutpoint]; !exists {
|
||||
mp.dependsByPrev[*previousOutpoint] = make(map[daghash.TxID]*TxDesc)
|
||||
}
|
||||
mp.dependsByPrev[*previousOutpoint][*tx.ID()] = txD
|
||||
mp.chainedTransactionByPreviousOutpoint[*previousOutpoint] = txD
|
||||
}
|
||||
}
|
||||
|
||||
for _, txIn := range tx.MsgTx().TxIn {
|
||||
mp.outpoints[txIn.PreviousOutpoint] = tx
|
||||
}
|
||||
if isAccepted, err := mp.mpUTXOSet.AddTx(tx.MsgTx(), blockdag.UnacceptedBlueScore); err != nil {
|
||||
err := mp.mempoolUTXOSet.addTx(tx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if !isAccepted {
|
||||
return nil, errors.Errorf("unexpectedly failed to add tx %s to the mempool utxo set", tx.ID())
|
||||
}
|
||||
|
||||
atomic.StoreInt64(&mp.lastUpdated, mstime.Now().UnixMilliseconds())
|
||||
|
||||
return txD, nil
|
||||
@@ -684,7 +581,7 @@ func (mp *TxPool) addTransaction(tx *util.Tx, fee uint64, parentsInPool []*appme
|
||||
// This function MUST be called with the mempool lock held (for reads).
|
||||
func (mp *TxPool) checkPoolDoubleSpend(tx *util.Tx) error {
|
||||
for _, txIn := range tx.MsgTx().TxIn {
|
||||
if txR, exists := mp.outpoints[txIn.PreviousOutpoint]; exists {
|
||||
if txR, exists := mp.mempoolUTXOSet.poolTransactionBySpendingOutpoint(txIn.PreviousOutpoint); exists {
|
||||
str := fmt.Sprintf("output %s already spent by "+
|
||||
"transaction %s in the memory pool",
|
||||
txIn.PreviousOutpoint, txR.ID())
|
||||
@@ -695,22 +592,11 @@ func (mp *TxPool) checkPoolDoubleSpend(tx *util.Tx) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckSpend checks whether the passed outpoint is already spent by a
|
||||
// transaction in the mempool. If that's the case the spending transaction will
|
||||
// be returned, if not nil will be returned.
|
||||
func (mp *TxPool) CheckSpend(op appmessage.Outpoint) *util.Tx {
|
||||
mp.mtx.RLock()
|
||||
defer mp.mtx.RUnlock()
|
||||
txR := mp.outpoints[op]
|
||||
|
||||
return txR
|
||||
}
|
||||
|
||||
// This function MUST be called with the mempool lock held (for reads).
|
||||
func (mp *TxPool) fetchTxDesc(txID *daghash.TxID) (*TxDesc, bool) {
|
||||
txDesc, exists := mp.pool[*txID]
|
||||
if !exists {
|
||||
txDesc, exists = mp.depends[*txID]
|
||||
txDesc, exists = mp.chainedTransactions[*txID]
|
||||
}
|
||||
return txDesc, exists
|
||||
}
|
||||
@@ -755,7 +641,7 @@ func checkTransactionMassSanity(tx *util.Tx) error {
|
||||
serializedTxSize := tx.MsgTx().SerializeSize()
|
||||
if serializedTxSize*blockdag.MassPerTxByte > appmessage.MaxMassPerTx {
|
||||
str := fmt.Sprintf("serialized transaction is too big - got "+
|
||||
"%d, max %d", serializedTxSize, appmessage.MaxMassPerBlock)
|
||||
"%d, max %d", serializedTxSize, appmessage.MaxMassAcceptedByBlock)
|
||||
return txRuleError(RejectInvalid, str)
|
||||
}
|
||||
return nil
|
||||
@@ -885,7 +771,7 @@ func (mp *TxPool) maybeAcceptTransaction(tx *util.Tx, rejectDupOrphans bool) ([]
|
||||
prevOut := appmessage.Outpoint{TxID: *txID}
|
||||
for txOutIdx := range tx.MsgTx().TxOut {
|
||||
prevOut.Index = uint32(txOutIdx)
|
||||
_, ok := mp.mpUTXOSet.Get(prevOut)
|
||||
_, _, ok := mp.mempoolUTXOSet.utxoEntryByOutpoint(prevOut)
|
||||
if ok {
|
||||
return nil, nil, txRuleError(RejectDuplicate,
|
||||
"transaction already exists")
|
||||
@@ -896,21 +782,7 @@ func (mp *TxPool) maybeAcceptTransaction(tx *util.Tx, rejectDupOrphans bool) ([]
|
||||
// don't exist or are already spent. Adding orphans to the orphan pool
|
||||
// is not handled by this function, and the caller should use
|
||||
// maybeAddOrphan if this behavior is desired.
|
||||
var missingParents []*daghash.TxID
|
||||
var parentsInPool []*appmessage.Outpoint
|
||||
for _, txIn := range tx.MsgTx().TxIn {
|
||||
if _, ok := mp.mpUTXOSet.Get(txIn.PreviousOutpoint); !ok {
|
||||
// Must make a copy of the hash here since the iterator
|
||||
// is replaced and taking its address directly would
|
||||
// result in all of the entries pointing to the same
|
||||
// memory location and thus all be the final hash.
|
||||
txIDCopy := txIn.PreviousOutpoint.TxID
|
||||
missingParents = append(missingParents, &txIDCopy)
|
||||
}
|
||||
if mp.isTransactionInPool(&txIn.PreviousOutpoint.TxID) {
|
||||
parentsInPool = append(parentsInPool, &txIn.PreviousOutpoint)
|
||||
}
|
||||
}
|
||||
spentUTXOEntries, parentsInPool, missingParents := mp.mempoolUTXOSet.transactionRelatedUTXOEntries(tx)
|
||||
if len(missingParents) > 0 {
|
||||
return missingParents, nil, nil
|
||||
}
|
||||
@@ -918,7 +790,7 @@ func (mp *TxPool) maybeAcceptTransaction(tx *util.Tx, rejectDupOrphans bool) ([]
|
||||
// Don't allow the transaction into the mempool unless its sequence
|
||||
// lock is active, meaning that it'll be allowed into the next block
|
||||
// with respect to its defined relative lock times.
|
||||
sequenceLock, err := mp.cfg.CalcSequenceLockNoLock(tx, mp.mpUTXOSet)
|
||||
sequenceLock, err := mp.cfg.CalcTxSequenceLockFromReferencedUTXOEntries(tx, spentUTXOEntries)
|
||||
if err != nil {
|
||||
var dagRuleErr blockdag.RuleError
|
||||
if ok := errors.As(err, &dagRuleErr); ok {
|
||||
@@ -934,7 +806,7 @@ func (mp *TxPool) maybeAcceptTransaction(tx *util.Tx, rejectDupOrphans bool) ([]
|
||||
|
||||
// Don't allow transactions that exceed the maximum allowed
|
||||
// transaction mass.
|
||||
err = blockdag.ValidateTxMass(tx, mp.mpUTXOSet)
|
||||
mass, err := blockdag.ValidateTxMass(tx, spentUTXOEntries)
|
||||
if err != nil {
|
||||
var ruleError blockdag.RuleError
|
||||
if ok := errors.As(err, &ruleError); ok {
|
||||
@@ -948,7 +820,7 @@ func (mp *TxPool) maybeAcceptTransaction(tx *util.Tx, rejectDupOrphans bool) ([]
|
||||
// Also returns the fees associated with the transaction which will be
|
||||
// used later.
|
||||
txFee, err := blockdag.CheckTransactionInputsAndCalulateFee(tx, nextBlockBlueScore,
|
||||
mp.mpUTXOSet, mp.cfg.DAG.Params, false)
|
||||
spentUTXOEntries, mp.cfg.DAG.Params, false)
|
||||
if err != nil {
|
||||
var dagRuleErr blockdag.RuleError
|
||||
if ok := errors.As(err, &dagRuleErr); ok {
|
||||
@@ -960,7 +832,7 @@ func (mp *TxPool) maybeAcceptTransaction(tx *util.Tx, rejectDupOrphans bool) ([]
|
||||
// Don't allow transactions with non-standard inputs if the network
|
||||
// parameters forbid their acceptance.
|
||||
if !mp.cfg.Policy.AcceptNonStd {
|
||||
err := checkInputsStandard(tx, mp.mpUTXOSet)
|
||||
err := checkInputsStandard(tx, spentUTXOEntries)
|
||||
if err != nil {
|
||||
// Attempt to extract a reject code from the error so
|
||||
// it can be retained. When not possible, fall back to
|
||||
@@ -1008,7 +880,7 @@ func (mp *TxPool) maybeAcceptTransaction(tx *util.Tx, rejectDupOrphans bool) ([]
|
||||
|
||||
// Verify crypto signatures for each input and reject the transaction if
|
||||
// any don't verify.
|
||||
err = blockdag.ValidateTransactionScripts(tx, mp.mpUTXOSet,
|
||||
err = blockdag.ValidateTransactionScripts(tx, spentUTXOEntries,
|
||||
txscript.StandardVerifyFlags, mp.cfg.SigCache)
|
||||
if err != nil {
|
||||
var dagRuleErr blockdag.RuleError
|
||||
@@ -1019,7 +891,7 @@ func (mp *TxPool) maybeAcceptTransaction(tx *util.Tx, rejectDupOrphans bool) ([]
|
||||
}
|
||||
|
||||
// Add to transaction pool.
|
||||
txD, err := mp.addTransaction(tx, txFee, parentsInPool)
|
||||
txDesc, err := mp.addTransaction(tx, mass, txFee, parentsInPool)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -1027,7 +899,7 @@ func (mp *TxPool) maybeAcceptTransaction(tx *util.Tx, rejectDupOrphans bool) ([]
|
||||
log.Debugf("Accepted transaction %s (pool size: %d)", txID,
|
||||
len(mp.pool))
|
||||
|
||||
return nil, txD, nil
|
||||
return nil, txDesc, nil
|
||||
}
|
||||
|
||||
// processOrphans is the internal function which implements the public
|
||||
@@ -1124,8 +996,6 @@ func (mp *TxPool) processOrphans(acceptedTx *util.Tx) []*TxDesc {
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (mp *TxPool) ProcessOrphans(acceptedTx *util.Tx) []*TxDesc {
|
||||
mp.cfg.DAG.RLock()
|
||||
defer mp.cfg.DAG.RUnlock()
|
||||
mp.mtx.Lock()
|
||||
defer mp.mtx.Unlock()
|
||||
acceptedTxns := mp.processOrphans(acceptedTx)
|
||||
@@ -1148,8 +1018,6 @@ func (mp *TxPool) ProcessTransaction(tx *util.Tx, allowOrphan bool) ([]*TxDesc,
|
||||
log.Tracef("Processing transaction %s", tx.ID())
|
||||
|
||||
// Protect concurrent access.
|
||||
mp.cfg.DAG.RLock()
|
||||
defer mp.cfg.DAG.RUnlock()
|
||||
mp.mtx.Lock()
|
||||
defer mp.mtx.Unlock()
|
||||
|
||||
@@ -1210,14 +1078,14 @@ func (mp *TxPool) Count() int {
|
||||
return count
|
||||
}
|
||||
|
||||
// DepCount returns the number of dependent transactions in the main pool. It does not
|
||||
// ChainedCount returns the number of chained transactions in the mempool. It does not
|
||||
// include the orphan pool.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (mp *TxPool) DepCount() int {
|
||||
func (mp *TxPool) ChainedCount() int {
|
||||
mp.mtx.RLock()
|
||||
defer mp.mtx.RUnlock()
|
||||
return len(mp.depends)
|
||||
return len(mp.chainedTransactions)
|
||||
}
|
||||
|
||||
// TxIDs returns a slice of IDs for all of the transactions in the memory
|
||||
@@ -1287,13 +1155,9 @@ func (mp *TxPool) LastUpdated() mstime.Time {
|
||||
// transaction that is already in the DAG
|
||||
func (mp *TxPool) HandleNewBlock(block *util.Block) ([]*util.Tx, error) {
|
||||
// Protect concurrent access.
|
||||
mp.cfg.DAG.RLock()
|
||||
defer mp.cfg.DAG.RUnlock()
|
||||
mp.mtx.Lock()
|
||||
defer mp.mtx.Unlock()
|
||||
|
||||
oldUTXOSet := mp.mpUTXOSet
|
||||
|
||||
// Remove all of the transactions (except the coinbase) in the
|
||||
// connected block from the transaction pool. Secondly, remove any
|
||||
// transactions which are now double spends as a result of these
|
||||
@@ -1301,9 +1165,8 @@ func (mp *TxPool) HandleNewBlock(block *util.Block) ([]*util.Tx, error) {
|
||||
// no longer an orphan. Transactions which depend on a confirmed
|
||||
// transaction are NOT removed recursively because they are still
|
||||
// valid.
|
||||
err := mp.removeTransactions(block.Transactions()[util.CoinbaseTransactionIndex+1:])
|
||||
err := mp.removeBlockTransactionsFromPool(block)
|
||||
if err != nil {
|
||||
mp.mpUTXOSet = oldUTXOSet
|
||||
return nil, err
|
||||
}
|
||||
acceptedTxs := make([]*util.Tx, 0)
|
||||
@@ -1324,17 +1187,14 @@ func (mp *TxPool) HandleNewBlock(block *util.Block) ([]*util.Tx, error) {
|
||||
// New returns a new memory pool for validating and storing standalone
|
||||
// transactions until they are mined into a block.
|
||||
func New(cfg *Config) *TxPool {
|
||||
virtualUTXO := cfg.DAG.UTXOSet()
|
||||
mpUTXO := blockdag.NewDiffUTXOSet(virtualUTXO, blockdag.NewUTXODiff())
|
||||
return &TxPool{
|
||||
cfg: *cfg,
|
||||
pool: make(map[daghash.TxID]*TxDesc),
|
||||
depends: make(map[daghash.TxID]*TxDesc),
|
||||
dependsByPrev: make(map[appmessage.Outpoint]map[daghash.TxID]*TxDesc),
|
||||
orphans: make(map[daghash.TxID]*orphanTx),
|
||||
orphansByPrev: make(map[appmessage.Outpoint]map[daghash.TxID]*util.Tx),
|
||||
nextExpireScan: mstime.Now().Add(orphanExpireScanInterval),
|
||||
outpoints: make(map[appmessage.Outpoint]*util.Tx),
|
||||
mpUTXOSet: mpUTXO,
|
||||
cfg: *cfg,
|
||||
pool: make(map[daghash.TxID]*TxDesc),
|
||||
chainedTransactions: make(map[daghash.TxID]*TxDesc),
|
||||
chainedTransactionByPreviousOutpoint: make(map[appmessage.Outpoint]*TxDesc),
|
||||
orphans: make(map[daghash.TxID]*orphanTx),
|
||||
orphansByPrev: make(map[appmessage.Outpoint]map[daghash.TxID]*util.Tx),
|
||||
nextExpireScan: mstime.Now().Add(orphanExpireScanInterval),
|
||||
mempoolUTXOSet: newMempoolUTXOSet(cfg.DAG),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -23,7 +23,6 @@ import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/domain/blockdag"
|
||||
"github.com/kaspanet/kaspad/domain/dagconfig"
|
||||
"github.com/kaspanet/kaspad/domain/mining"
|
||||
"github.com/kaspanet/kaspad/domain/txscript"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
@@ -70,8 +69,7 @@ func (s *fakeDAG) SetMedianTimePast(mtp mstime.Time) {
|
||||
s.medianTimePast = mtp
|
||||
}
|
||||
|
||||
func calcSequenceLock(tx *util.Tx,
|
||||
utxoSet blockdag.UTXOSet) (*blockdag.SequenceLock, error) {
|
||||
func calcTxSequenceLockFromReferencedUTXOEntries(tx *util.Tx, referencedUTXOEntries []*blockdag.UTXOEntry) (*blockdag.SequenceLock, error) {
|
||||
|
||||
return &blockdag.SequenceLock{
|
||||
Milliseconds: -1,
|
||||
@@ -256,7 +254,8 @@ func (tc *testContext) mineTransactions(transactions []*util.Tx, numberOfBlocks
|
||||
if i == 0 {
|
||||
blockTxs = msgTxs
|
||||
}
|
||||
block, err := mining.PrepareBlockForTest(tc.harness.txPool.cfg.DAG, tc.harness.txPool.cfg.DAG.TipHashes(), blockTxs, false)
|
||||
block, err := blockdag.PrepareBlockForTest(
|
||||
tc.harness.txPool.cfg.DAG, tc.harness.txPool.cfg.DAG.VirtualParentHashes(), blockTxs)
|
||||
if err != nil {
|
||||
tc.t.Fatalf("PrepareBlockForTest: %s", err)
|
||||
}
|
||||
@@ -339,8 +338,8 @@ func newPoolHarness(t *testing.T, dagParams *dagconfig.Params, numOutputs uint32
|
||||
MinRelayTxFee: 1000, // 1 sompi per byte
|
||||
MaxTxVersion: 1,
|
||||
},
|
||||
CalcSequenceLockNoLock: calcSequenceLock,
|
||||
SigCache: nil,
|
||||
CalcTxSequenceLockFromReferencedUTXOEntries: calcTxSequenceLockFromReferencedUTXOEntries,
|
||||
SigCache: nil,
|
||||
}),
|
||||
}
|
||||
|
||||
@@ -646,10 +645,8 @@ func TestProcessTransaction(t *testing.T) {
|
||||
t.Fatalf("PayToAddrScript: unexpected error: %v", err)
|
||||
}
|
||||
p2shTx := util.NewTx(appmessage.NewNativeMsgTx(1, nil, []*appmessage.TxOut{{Value: 5000000000, ScriptPubKey: p2shScriptPubKey}}))
|
||||
if isAccepted, err := harness.txPool.mpUTXOSet.AddTx(p2shTx.MsgTx(), currentBlueScore+1); err != nil {
|
||||
if err := harness.txPool.mempoolUTXOSet.addTx(p2shTx); err != nil {
|
||||
t.Fatalf("AddTx unexpectedly failed. Error: %s", err)
|
||||
} else if !isAccepted {
|
||||
t.Fatalf("AddTx unexpectedly didn't add tx %s", p2shTx.ID())
|
||||
}
|
||||
|
||||
txIns := []*appmessage.TxIn{{
|
||||
@@ -691,8 +688,7 @@ func TestProcessTransaction(t *testing.T) {
|
||||
}
|
||||
|
||||
// Checks that transactions get rejected from mempool if sequence lock is not active
|
||||
harness.txPool.cfg.CalcSequenceLockNoLock = func(tx *util.Tx,
|
||||
view blockdag.UTXOSet) (*blockdag.SequenceLock, error) {
|
||||
harness.txPool.cfg.CalcTxSequenceLockFromReferencedUTXOEntries = func(tx *util.Tx, referencedUTXOEntries []*blockdag.UTXOEntry) (*blockdag.SequenceLock, error) {
|
||||
|
||||
return &blockdag.SequenceLock{
|
||||
Milliseconds: math.MaxInt64,
|
||||
@@ -714,7 +710,7 @@ func TestProcessTransaction(t *testing.T) {
|
||||
if err.Error() != expectedErrStr {
|
||||
t.Errorf("Unexpected error message. Expected \"%s\" but got \"%s\"", expectedErrStr, err.Error())
|
||||
}
|
||||
harness.txPool.cfg.CalcSequenceLockNoLock = calcSequenceLock
|
||||
harness.txPool.cfg.CalcTxSequenceLockFromReferencedUTXOEntries = calcTxSequenceLockFromReferencedUTXOEntries
|
||||
|
||||
// Transaction should be rejected from mempool because it has low fee, and its priority is above mining.MinHighPriority
|
||||
tx, err = harness.createTx(spendableOuts[4], 0, 1000)
|
||||
@@ -796,7 +792,7 @@ func TestDoubleSpends(t *testing.T) {
|
||||
|
||||
// Then we assume tx3 is already in the DAG, so we need to remove
|
||||
// transactions that spends the same outpoints from the mempool
|
||||
harness.txPool.RemoveDoubleSpends(tx3)
|
||||
harness.txPool.removeDoubleSpends(tx3)
|
||||
// Ensures that only the transaction that double spends the same
|
||||
// funds as tx3 is removed, and the other one remains unaffected
|
||||
testPoolMembership(tc, tx1, false, false, false)
|
||||
@@ -817,7 +813,7 @@ func TestDoubleSpendsFromDAG(t *testing.T) {
|
||||
}
|
||||
|
||||
dag := harness.txPool.cfg.DAG
|
||||
blockdag.PrepareAndProcessBlockForTest(t, dag, dag.TipHashes(), []*appmessage.MsgTx{tx.MsgTx()})
|
||||
blockdag.PrepareAndProcessBlockForTest(t, dag, dag.VirtualParentHashes(), []*appmessage.MsgTx{tx.MsgTx()})
|
||||
|
||||
// Check that a transaction that double spends the DAG UTXO set is orphaned.
|
||||
doubleSpendTx, err := harness.createTx(spendableOuts[0], uint64(txRelayFeeForTest), 2)
|
||||
@@ -1132,10 +1128,10 @@ func TestRemoveTransaction(t *testing.T) {
|
||||
testPoolMembership(tc, chainedTxns[3], false, true, true)
|
||||
testPoolMembership(tc, chainedTxns[4], false, true, true)
|
||||
|
||||
// Checks that when removeRedeemers is true, all of the transaction that are dependent on it get removed
|
||||
err = harness.txPool.RemoveTransaction(chainedTxns[1], true, true)
|
||||
// Checks that all of the transaction that are dependent on it get removed
|
||||
err = harness.txPool.removeTransactionAndItsChainedTransactions(chainedTxns[1])
|
||||
if err != nil {
|
||||
t.Fatalf("RemoveTransaction: %v", err)
|
||||
t.Fatalf("removeTransactionAndItsChainedTransactions: %v", err)
|
||||
}
|
||||
testPoolMembership(tc, chainedTxns[1], false, false, false)
|
||||
testPoolMembership(tc, chainedTxns[2], false, false, false)
|
||||
@@ -1429,9 +1425,9 @@ func TestMultiInputOrphanDoubleSpend(t *testing.T) {
|
||||
testPoolMembership(tc, doubleSpendTx, false, false, false)
|
||||
}
|
||||
|
||||
// TestCheckSpend tests that CheckSpend returns the expected spends found in
|
||||
// TestPoolTransactionBySpendingOutpoint tests that poolTransactionBySpendingOutpoint returns the expected spends found in
|
||||
// the mempool.
|
||||
func TestCheckSpend(t *testing.T) {
|
||||
func TestPoolTransactionBySpendingOutpoint(t *testing.T) {
|
||||
tc, outputs, teardownFunc, err := newPoolHarness(t, &dagconfig.SimnetParams, 1, "TestCheckSpend")
|
||||
if err != nil {
|
||||
t.Fatalf("unable to create test pool: %v", err)
|
||||
@@ -1442,8 +1438,8 @@ func TestCheckSpend(t *testing.T) {
|
||||
// The mempool is empty, so none of the spendable outputs should have a
|
||||
// spend there.
|
||||
for _, op := range outputs {
|
||||
spend := harness.txPool.CheckSpend(op.outpoint)
|
||||
if spend != nil {
|
||||
spend, ok := harness.txPool.mempoolUTXOSet.poolTransactionBySpendingOutpoint(op.outpoint)
|
||||
if ok {
|
||||
t.Fatalf("Unexpeced spend found in pool: %v", spend)
|
||||
}
|
||||
}
|
||||
@@ -1466,7 +1462,7 @@ func TestCheckSpend(t *testing.T) {
|
||||
// The first tx in the chain should be the spend of the spendable
|
||||
// output.
|
||||
op := outputs[0].outpoint
|
||||
spend := harness.txPool.CheckSpend(op)
|
||||
spend, _ := harness.txPool.mempoolUTXOSet.poolTransactionBySpendingOutpoint(op)
|
||||
if spend != chainedTxns[0] {
|
||||
t.Fatalf("expected %v to be spent by %v, instead "+
|
||||
"got %v", op, chainedTxns[0], spend)
|
||||
@@ -1479,7 +1475,7 @@ func TestCheckSpend(t *testing.T) {
|
||||
Index: 0,
|
||||
}
|
||||
expSpend := chainedTxns[i+1]
|
||||
spend = harness.txPool.CheckSpend(op)
|
||||
spend, _ = harness.txPool.mempoolUTXOSet.poolTransactionBySpendingOutpoint(op)
|
||||
if spend != expSpend {
|
||||
t.Fatalf("expected %v to be spent by %v, instead "+
|
||||
"got %v", op, expSpend, spend)
|
||||
@@ -1491,7 +1487,7 @@ func TestCheckSpend(t *testing.T) {
|
||||
TxID: *chainedTxns[txChainLength-1].ID(),
|
||||
Index: 0,
|
||||
}
|
||||
spend = harness.txPool.CheckSpend(op)
|
||||
spend, _ = harness.txPool.mempoolUTXOSet.poolTransactionBySpendingOutpoint(op)
|
||||
if spend != nil {
|
||||
t.Fatalf("Unexpeced spend found in pool: %v", spend)
|
||||
}
|
||||
@@ -1518,16 +1514,21 @@ func TestCount(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Errorf("ProcessTransaction: unexpected error: %v", err)
|
||||
}
|
||||
if harness.txPool.Count()+harness.txPool.DepCount() != i+1 {
|
||||
if harness.txPool.Count()+harness.txPool.ChainedCount() != i+1 {
|
||||
t.Errorf("TestCount: txPool expected to have %v transactions but got %v", i+1, harness.txPool.Count())
|
||||
}
|
||||
}
|
||||
|
||||
err = harness.txPool.RemoveTransaction(chainedTxns[0], false, false)
|
||||
// Mimic a situation where the first transaction is found in a block
|
||||
fakeBlock := appmessage.NewMsgBlock(&appmessage.BlockHeader{})
|
||||
fakeCoinbase := &appmessage.MsgTx{}
|
||||
fakeBlock.AddTransaction(fakeCoinbase)
|
||||
fakeBlock.AddTransaction(chainedTxns[0].MsgTx())
|
||||
err = harness.txPool.removeBlockTransactionsFromPool(util.NewBlock(fakeBlock))
|
||||
if err != nil {
|
||||
t.Fatalf("harness.CreateTxChain: unexpected error: %v", err)
|
||||
}
|
||||
if harness.txPool.Count()+harness.txPool.DepCount() != 2 {
|
||||
if harness.txPool.Count()+harness.txPool.ChainedCount() != 2 {
|
||||
t.Errorf("TestCount: txPool expected to have 2 transactions but got %v", harness.txPool.Count())
|
||||
}
|
||||
}
|
||||
@@ -1636,82 +1637,15 @@ func TestHandleNewBlock(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("unable to create transaction 1: %v", err)
|
||||
}
|
||||
dummyBlock.Transactions = append(dummyBlock.Transactions, blockTx1.MsgTx(), blockTx2.MsgTx())
|
||||
|
||||
// Create block and add its transactions to UTXO set
|
||||
block := util.NewBlock(&dummyBlock)
|
||||
for i, tx := range block.Transactions() {
|
||||
if isAccepted, err := harness.txPool.mpUTXOSet.AddTx(tx.MsgTx(), 1); err != nil {
|
||||
t.Fatalf("Failed to add transaction (%v,%v) to UTXO set: %v", i, tx.ID(), err)
|
||||
} else if !isAccepted {
|
||||
t.Fatalf("AddTx unexpectedly didn't add tx %s", tx.ID())
|
||||
}
|
||||
}
|
||||
block := blockdag.PrepareAndProcessBlockForTest(t, harness.txPool.cfg.DAG, harness.txPool.cfg.DAG.TipHashes(), []*appmessage.MsgTx{blockTx1.MsgTx(), blockTx2.MsgTx()})
|
||||
|
||||
// Handle new block by pool
|
||||
_, err = harness.txPool.HandleNewBlock(block)
|
||||
_, err = harness.txPool.HandleNewBlock(util.NewBlock(block))
|
||||
|
||||
// ensure that orphan transaction moved to main pool
|
||||
testPoolMembership(tc, orphanTx, false, true, false)
|
||||
}
|
||||
|
||||
// dummyBlock defines a block on the block DAG. It is used to test block operations.
|
||||
var dummyBlock = appmessage.MsgBlock{
|
||||
Header: appmessage.BlockHeader{
|
||||
Version: 1,
|
||||
ParentHashes: []*daghash.Hash{
|
||||
{
|
||||
0x82, 0xdc, 0xbd, 0xe6, 0x88, 0x37, 0x74, 0x5b,
|
||||
0x78, 0x6b, 0x03, 0x1d, 0xa3, 0x48, 0x3c, 0x45,
|
||||
0x3f, 0xc3, 0x2e, 0xd4, 0x53, 0x5b, 0x6f, 0x26,
|
||||
0x26, 0xb0, 0x48, 0x4f, 0x09, 0x00, 0x00, 0x00,
|
||||
}, // Mainnet genesis
|
||||
{
|
||||
0xc1, 0x5b, 0x71, 0xfe, 0x20, 0x70, 0x0f, 0xd0,
|
||||
0x08, 0x49, 0x88, 0x1b, 0x32, 0xb5, 0xbd, 0x13,
|
||||
0x17, 0xbe, 0x75, 0xe7, 0x29, 0x46, 0xdd, 0x03,
|
||||
0x01, 0x92, 0x90, 0xf1, 0xca, 0x8a, 0x88, 0x11,
|
||||
}}, // Simnet genesis
|
||||
HashMerkleRoot: &daghash.Hash{
|
||||
0x66, 0x57, 0xa9, 0x25, 0x2a, 0xac, 0xd5, 0xc0,
|
||||
0xb2, 0x94, 0x09, 0x96, 0xec, 0xff, 0x95, 0x22,
|
||||
0x28, 0xc3, 0x06, 0x7c, 0xc3, 0x8d, 0x48, 0x85,
|
||||
0xef, 0xb5, 0xa4, 0xac, 0x42, 0x47, 0xe9, 0xf3,
|
||||
}, // f3e94742aca4b5ef85488dc37c06c3282295ffec960994b2c0d5ac2a25a95766
|
||||
Timestamp: mstime.UnixMilliseconds(1529483563000), // 2018-06-20 08:32:43 +0000 UTC
|
||||
Bits: 0x1e00ffff, // 503382015
|
||||
Nonce: 0x000ae53f, // 714047
|
||||
},
|
||||
Transactions: []*appmessage.MsgTx{
|
||||
{
|
||||
Version: 1,
|
||||
TxIn: []*appmessage.TxIn{},
|
||||
TxOut: []*appmessage.TxOut{
|
||||
{
|
||||
Value: 0x12a05f200, // 5000000000
|
||||
ScriptPubKey: []byte{
|
||||
0xa9, 0x14, 0xda, 0x17, 0x45, 0xe9, 0xb5, 0x49,
|
||||
0xbd, 0x0b, 0xfa, 0x1a, 0x56, 0x99, 0x71, 0xc7,
|
||||
0x7e, 0xba, 0x30, 0xcd, 0x5a, 0x4b, 0x87,
|
||||
},
|
||||
},
|
||||
},
|
||||
LockTime: 0,
|
||||
SubnetworkID: *subnetworkid.SubnetworkIDCoinbase,
|
||||
Payload: []byte{
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00,
|
||||
},
|
||||
PayloadHash: &daghash.Hash{
|
||||
0x14, 0x06, 0xe0, 0x58, 0x81, 0xe2, 0x99, 0x36,
|
||||
0x77, 0x66, 0xd3, 0x13, 0xe2, 0x6c, 0x05, 0x56,
|
||||
0x4e, 0xc9, 0x1b, 0xf7, 0x21, 0xd3, 0x17, 0x26,
|
||||
0xbd, 0x6e, 0x46, 0xe6, 0x06, 0x89, 0x53, 0x9a,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func TestTransactionGas(t *testing.T) {
|
||||
params := dagconfig.SimnetParams
|
||||
params.BlockCoinbaseMaturity = 0
|
||||
|
||||
115
domain/mempool/mempool_utxoset.go
Normal file
115
domain/mempool/mempool_utxoset.go
Normal file
@@ -0,0 +1,115 @@
|
||||
package mempool
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/domain/blockdag"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func newMempoolUTXOSet(dag *blockdag.BlockDAG) *mempoolUTXOSet {
|
||||
return &mempoolUTXOSet{
|
||||
transactionByPreviousOutpoint: make(map[appmessage.Outpoint]*util.Tx),
|
||||
poolUnspentOutputs: make(map[appmessage.Outpoint]*blockdag.UTXOEntry),
|
||||
dag: dag,
|
||||
}
|
||||
}
|
||||
|
||||
type mempoolUTXOSet struct {
|
||||
transactionByPreviousOutpoint map[appmessage.Outpoint]*util.Tx
|
||||
poolUnspentOutputs map[appmessage.Outpoint]*blockdag.UTXOEntry
|
||||
dag *blockdag.BlockDAG
|
||||
}
|
||||
|
||||
func (mpus *mempoolUTXOSet) utxoEntryByOutpoint(outpoint appmessage.Outpoint) (entry *blockdag.UTXOEntry, isInPool bool, exists bool) {
|
||||
entry, exists = mpus.dag.GetUTXOEntry(outpoint)
|
||||
if !exists {
|
||||
entry, exists := mpus.poolUnspentOutputs[outpoint]
|
||||
if !exists {
|
||||
return nil, false, false
|
||||
}
|
||||
return entry, true, true
|
||||
}
|
||||
return entry, false, true
|
||||
}
|
||||
|
||||
// addTx adds a transaction to the mempool UTXO set. It assumes that it doesn't double spend another transaction
|
||||
// in the mempool, and that its outputs doesn't exist in the mempool UTXO set, and returns error otherwise.
|
||||
func (mpus *mempoolUTXOSet) addTx(tx *util.Tx) error {
|
||||
msgTx := tx.MsgTx()
|
||||
for _, txIn := range msgTx.TxIn {
|
||||
if existingTx, exists := mpus.transactionByPreviousOutpoint[txIn.PreviousOutpoint]; exists {
|
||||
return errors.Errorf("outpoint %s is already used by %s", txIn.PreviousOutpoint, existingTx.ID())
|
||||
}
|
||||
mpus.transactionByPreviousOutpoint[txIn.PreviousOutpoint] = tx
|
||||
}
|
||||
|
||||
for i, txOut := range msgTx.TxOut {
|
||||
outpoint := appmessage.NewOutpoint(tx.ID(), uint32(i))
|
||||
if _, exists := mpus.poolUnspentOutputs[*outpoint]; exists {
|
||||
return errors.Errorf("outpoint %s already exists", outpoint)
|
||||
}
|
||||
mpus.poolUnspentOutputs[*outpoint] = blockdag.NewUTXOEntry(txOut, false, blockdag.UnacceptedBlueScore)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// removeTx removes a transaction to the mempool UTXO set.
|
||||
// Note: it doesn't re-add its previous outputs to the mempool UTXO set.
|
||||
func (mpus *mempoolUTXOSet) removeTx(tx *util.Tx) error {
|
||||
msgTx := tx.MsgTx()
|
||||
for _, txIn := range msgTx.TxIn {
|
||||
if _, exists := mpus.transactionByPreviousOutpoint[txIn.PreviousOutpoint]; !exists {
|
||||
return errors.Errorf("outpoint %s doesn't exist", txIn.PreviousOutpoint)
|
||||
}
|
||||
delete(mpus.transactionByPreviousOutpoint, txIn.PreviousOutpoint)
|
||||
}
|
||||
|
||||
for i := range msgTx.TxOut {
|
||||
outpoint := appmessage.NewOutpoint(tx.ID(), uint32(i))
|
||||
if _, exists := mpus.poolUnspentOutputs[*outpoint]; !exists {
|
||||
return errors.Errorf("outpoint %s doesn't exist", outpoint)
|
||||
}
|
||||
delete(mpus.poolUnspentOutputs, *outpoint)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mpus *mempoolUTXOSet) poolTransactionBySpendingOutpoint(outpoint appmessage.Outpoint) (*util.Tx, bool) {
|
||||
tx, exists := mpus.transactionByPreviousOutpoint[outpoint]
|
||||
return tx, exists
|
||||
}
|
||||
|
||||
func (mpus *mempoolUTXOSet) transactionRelatedUTXOEntries(tx *util.Tx) (spentUTXOEntries []*blockdag.UTXOEntry, parentsInPool []*appmessage.Outpoint, missingParents []*daghash.TxID) {
|
||||
msgTx := tx.MsgTx()
|
||||
spentUTXOEntries = make([]*blockdag.UTXOEntry, len(msgTx.TxIn))
|
||||
missingParents = make([]*daghash.TxID, 0)
|
||||
parentsInPool = make([]*appmessage.Outpoint, 0)
|
||||
|
||||
isOrphan := false
|
||||
for i, txIn := range msgTx.TxIn {
|
||||
entry, isInPool, exists := mpus.utxoEntryByOutpoint(txIn.PreviousOutpoint)
|
||||
if !exists {
|
||||
isOrphan = true
|
||||
missingParents = append(missingParents, &txIn.PreviousOutpoint.TxID)
|
||||
}
|
||||
|
||||
if isOrphan {
|
||||
continue
|
||||
}
|
||||
|
||||
if isInPool {
|
||||
parentsInPool = append(parentsInPool, &txIn.PreviousOutpoint)
|
||||
}
|
||||
|
||||
spentUTXOEntries[i] = entry
|
||||
}
|
||||
|
||||
if isOrphan {
|
||||
return nil, nil, missingParents
|
||||
}
|
||||
|
||||
return spentUTXOEntries, parentsInPool, nil
|
||||
}
|
||||
@@ -80,7 +80,7 @@ func calcMinRequiredTxRelayFee(serializedSize int64, minRelayTxFee util.Amount)
|
||||
// context of this function is one whose referenced public key script is of a
|
||||
// standard form and, for pay-to-script-hash, does not have more than
|
||||
// maxStandardP2SHSigOps signature operations.
|
||||
func checkInputsStandard(tx *util.Tx, utxoSet blockdag.UTXOSet) error {
|
||||
func checkInputsStandard(tx *util.Tx, referencedUTXOEntries []*blockdag.UTXOEntry) error {
|
||||
// NOTE: The reference implementation also does a coinbase check here,
|
||||
// but coinbases have already been rejected prior to calling this
|
||||
// function so no need to recheck.
|
||||
@@ -89,7 +89,7 @@ func checkInputsStandard(tx *util.Tx, utxoSet blockdag.UTXOSet) error {
|
||||
// It is safe to elide existence and index checks here since
|
||||
// they have already been checked prior to calling this
|
||||
// function.
|
||||
entry, _ := utxoSet.Get(txIn.PreviousOutpoint)
|
||||
entry := referencedUTXOEntries[i]
|
||||
originScriptPubKey := entry.ScriptPubKey()
|
||||
switch txscript.GetScriptClass(originScriptPubKey) {
|
||||
case txscript.ScriptHashTy:
|
||||
|
||||
@@ -79,9 +79,6 @@ type BlockTemplate struct {
|
||||
// coinbase, the first entry (offset 0) will contain the negative of the
|
||||
// sum of the fees of all other transactions.
|
||||
Fees []uint64
|
||||
|
||||
// Height is the height at which the block template connects to the DAG
|
||||
Height uint64
|
||||
}
|
||||
|
||||
// BlkTmplGenerator provides a type that can be used to generate block templates
|
||||
@@ -176,10 +173,17 @@ func NewBlkTmplGenerator(policy *Policy,
|
||||
// | <= policy.BlockMinSize) | |
|
||||
// ----------------------------------- --
|
||||
func (g *BlkTmplGenerator) NewBlockTemplate(payToAddress util.Address, extraNonce uint64) (*BlockTemplate, error) {
|
||||
|
||||
mempoolTransactions := g.txSource.MiningDescs()
|
||||
|
||||
// The lock is called only after MiningDescs() to avoid a potential deadlock:
|
||||
// MiningDescs() requires the TxPool's read lock, and TxPool.ProcessTransaction
|
||||
// requires the dag's read lock, so if NewBlockTemplate will call the lock before, it
|
||||
// might cause a dead lock.
|
||||
g.dag.Lock()
|
||||
defer g.dag.Unlock()
|
||||
|
||||
txsForBlockTemplate, err := g.selectTxs(payToAddress, extraNonce)
|
||||
txsForBlockTemplate, err := g.selectTxs(mempoolTransactions, payToAddress, extraNonce)
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("failed to select transactions: %s", err)
|
||||
}
|
||||
|
||||
@@ -3,14 +3,8 @@ package mining
|
||||
// This file functions are not considered safe for regular use, and should be used for test purposes only.
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/domain/blockdag"
|
||||
"github.com/kaspanet/kaspad/domain/txscript"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
)
|
||||
|
||||
// fakeTxSource is a simple implementation of TxSource interface
|
||||
@@ -34,94 +28,3 @@ func (txs *fakeTxSource) HaveTransaction(txID *daghash.TxID) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// PrepareBlockForTest generates a block with the proper merkle roots, coinbase transaction etc. This function is used for test purposes only
|
||||
func PrepareBlockForTest(dag *blockdag.BlockDAG, parentHashes []*daghash.Hash, transactions []*appmessage.MsgTx, forceTransactions bool,
|
||||
) (*appmessage.MsgBlock, error) {
|
||||
|
||||
newVirtual, err := blockdag.GetVirtualFromParentsForTest(dag, parentHashes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
oldVirtual := blockdag.SetVirtualForTest(dag, newVirtual)
|
||||
defer blockdag.SetVirtualForTest(dag, oldVirtual)
|
||||
policy := Policy{
|
||||
BlockMaxMass: 50000,
|
||||
}
|
||||
|
||||
txSource := &fakeTxSource{
|
||||
txDescs: make([]*TxDesc, len(transactions)),
|
||||
}
|
||||
|
||||
for i, tx := range transactions {
|
||||
txSource.txDescs[i] = &TxDesc{
|
||||
Tx: util.NewTx(tx),
|
||||
Fee: 1,
|
||||
}
|
||||
}
|
||||
|
||||
blockTemplateGenerator := NewBlkTmplGenerator(&policy, txSource, dag, txscript.NewSigCache(100000))
|
||||
|
||||
OpTrueAddr, err := OpTrueAddress(dag.Params.Prefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// We create a deterministic extra nonce in order of
|
||||
// creating deterministic coinbase tx ids.
|
||||
extraNonce := GenerateDeterministicExtraNonceForTest()
|
||||
|
||||
template, err := blockTemplateGenerator.NewBlockTemplate(OpTrueAddr, extraNonce)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
txsToAdd := make([]*appmessage.MsgTx, 0)
|
||||
for _, tx := range transactions {
|
||||
found := false
|
||||
for _, blockTx := range template.Block.Transactions {
|
||||
if blockTx.TxHash().IsEqual(tx.TxHash()) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
if !forceTransactions {
|
||||
return nil, errors.Errorf("tx %s wasn't found in the block", tx.TxHash())
|
||||
}
|
||||
txsToAdd = append(txsToAdd, tx)
|
||||
}
|
||||
}
|
||||
if forceTransactions && len(txsToAdd) > 0 {
|
||||
template.Block.Transactions = append(template.Block.Transactions, txsToAdd...)
|
||||
}
|
||||
updateHeaderFields := forceTransactions && len(txsToAdd) > 0
|
||||
if updateHeaderFields {
|
||||
utilTxs := make([]*util.Tx, len(template.Block.Transactions))
|
||||
for i, tx := range template.Block.Transactions {
|
||||
utilTxs[i] = util.NewTx(tx)
|
||||
}
|
||||
template.Block.Header.HashMerkleRoot = blockdag.BuildHashMerkleTreeStore(utilTxs).Root()
|
||||
|
||||
ms, err := dag.NextBlockMultiset()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
template.Block.Header.UTXOCommitment = (*daghash.Hash)(ms.Finalize())
|
||||
}
|
||||
return template.Block, nil
|
||||
}
|
||||
|
||||
// GenerateDeterministicExtraNonceForTest returns a unique deterministic extra nonce for coinbase data, in order to create unique coinbase transactions.
|
||||
func GenerateDeterministicExtraNonceForTest() uint64 {
|
||||
extraNonceForTest++
|
||||
return extraNonceForTest
|
||||
}
|
||||
|
||||
// OpTrueAddress returns an address pointing to a P2SH anyone-can-spend script
|
||||
func OpTrueAddress(prefix util.Bech32Prefix) (util.Address, error) {
|
||||
return util.NewAddressScriptHash(blockdag.OpTrueScript, prefix)
|
||||
}
|
||||
|
||||
var extraNonceForTest = uint64(0)
|
||||
|
||||
@@ -65,9 +65,8 @@ type txsForBlockTemplate struct {
|
||||
// Once the sum of probabilities of marked transactions is greater than
|
||||
// rebalanceThreshold percent of the sum of probabilities of all transactions,
|
||||
// rebalance.
|
||||
func (g *BlkTmplGenerator) selectTxs(payToAddress util.Address, extraNonce uint64) (*txsForBlockTemplate, error) {
|
||||
// Fetch the source transactions.
|
||||
sourceTxs := g.txSource.MiningDescs()
|
||||
func (g *BlkTmplGenerator) selectTxs(mempoolTransactions []*TxDesc, payToAddress util.Address,
|
||||
extraNonce uint64) (*txsForBlockTemplate, error) {
|
||||
|
||||
// Create a new txsForBlockTemplate struct, onto which all selectedTxs
|
||||
// will be appended.
|
||||
@@ -78,7 +77,7 @@ func (g *BlkTmplGenerator) selectTxs(payToAddress util.Address, extraNonce uint6
|
||||
|
||||
// Collect candidateTxs while excluding txs that will certainly not
|
||||
// be selected.
|
||||
candidateTxs := g.collectCandidatesTxs(sourceTxs)
|
||||
candidateTxs := g.collectCandidatesTxs(mempoolTransactions)
|
||||
|
||||
log.Debugf("Considering %d transactions for inclusion to new block",
|
||||
len(candidateTxs))
|
||||
|
||||
21
go.sum
21
go.sum
@@ -1,4 +1,6 @@
|
||||
cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
|
||||
@@ -6,18 +8,25 @@ github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0 h1:J9B4L7e3oqhXOcm+2IuNApwzQec85lE+QaikUcCs+dk=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4 h1:rEvIZUSZ3fx39WIi3JkQqQBitGwpELBIYWeBVh6wn+E=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
@@ -40,6 +49,7 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
@@ -51,6 +61,7 @@ github.com/kaspanet/go-secp256k1 v0.0.2 h1:KZGXddYHxzS02rx6EPPQYYe2tZ/rREj4P6Xxg
|
||||
github.com/kaspanet/go-secp256k1 v0.0.2/go.mod h1:W9OcWBKzH8P/PN2WAUn9k2YmZG/Uc660WAL1NTS3G3M=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
@@ -62,6 +73,7 @@ github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
|
||||
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
|
||||
@@ -72,10 +84,13 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59 h1:3zb4D3T4G8jdExgVU/95+vQXfpEPiMdCaZgmGVxjNHM=
|
||||
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4 h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -85,9 +100,11 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -103,10 +120,13 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20200228224639-71482053b885 h1:y09Juz/HD0YjGlyEd4bLUWG0s8Yx6iPniPqUGzUxNrU=
|
||||
golang.org/x/tools v0.0.0-20200228224639-71482053b885/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
@@ -143,4 +163,5 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
||||
@@ -51,10 +51,11 @@ const (
|
||||
defaultMinRelayTxFee = 1e-5 // 1 sompi per byte
|
||||
defaultMaxOrphanTransactions = 100
|
||||
//DefaultMaxOrphanTxSize is the default maximum size for an orphan transaction
|
||||
DefaultMaxOrphanTxSize = 100000
|
||||
defaultSigCacheMaxSize = 100000
|
||||
sampleConfigFilename = "sample-kaspad.conf"
|
||||
defaultAcceptanceIndex = false
|
||||
DefaultMaxOrphanTxSize = 100000
|
||||
defaultSigCacheMaxSize = 100000
|
||||
sampleConfigFilename = "sample-kaspad.conf"
|
||||
defaultAcceptanceIndex = false
|
||||
defaultMaxUTXOCacheSize = 5000000000
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -121,6 +122,7 @@ type Flags struct {
|
||||
RelayNonStd bool `long:"relaynonstd" description:"Relay non-standard transactions regardless of the default settings for the active network."`
|
||||
RejectNonStd bool `long:"rejectnonstd" description:"Reject non-standard transactions regardless of the default settings for the active network."`
|
||||
ResetDatabase bool `long:"reset-db" description:"Reset database before starting node. It's needed when switching between subnetworks."`
|
||||
MaxUTXOCacheSize uint64 `long:"maxutxocachesize" description:"Max size of loaded UTXO into ram from the disk in bytes"`
|
||||
NetworkFlags
|
||||
}
|
||||
|
||||
@@ -186,6 +188,7 @@ func defaultFlags() *Flags {
|
||||
SigCacheMaxSize: defaultSigCacheMaxSize,
|
||||
MinRelayTxFee: defaultMinRelayTxFee,
|
||||
AcceptanceIndex: defaultAcceptanceIndex,
|
||||
MaxUTXOCacheSize: defaultMaxUTXOCacheSize,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2,10 +2,11 @@ package database_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/infrastructure/db/database"
|
||||
"github.com/kaspanet/kaspad/infrastructure/db/database/ffldb"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/infrastructure/db/database"
|
||||
"github.com/kaspanet/kaspad/infrastructure/db/database/ldb"
|
||||
)
|
||||
|
||||
type databasePrepareFunc func(t *testing.T, testName string) (db database.Database, name string, teardownFunc func())
|
||||
@@ -14,17 +15,17 @@ type databasePrepareFunc func(t *testing.T, testName string) (db database.Databa
|
||||
// prepares a separate database type for testing.
|
||||
// See testForAllDatabaseTypes for further details.
|
||||
var databasePrepareFuncs = []databasePrepareFunc{
|
||||
prepareFFLDBForTest,
|
||||
prepareLDBForTest,
|
||||
}
|
||||
|
||||
func prepareFFLDBForTest(t *testing.T, testName string) (db database.Database, name string, teardownFunc func()) {
|
||||
func prepareLDBForTest(t *testing.T, testName string) (db database.Database, name string, teardownFunc func()) {
|
||||
// Create a temp db to run tests against
|
||||
path, err := ioutil.TempDir("", testName)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: TempDir unexpectedly "+
|
||||
"failed: %s", testName, err)
|
||||
}
|
||||
db, err = ffldb.Open(path)
|
||||
db, err = ldb.NewLevelDB(path)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: Open unexpectedly "+
|
||||
"failed: %s", testName, err)
|
||||
@@ -36,7 +37,7 @@ func prepareFFLDBForTest(t *testing.T, testName string) (db database.Database, n
|
||||
"failed: %s", testName, err)
|
||||
}
|
||||
}
|
||||
return db, "ffldb", teardownFunc
|
||||
return db, "ldb", teardownFunc
|
||||
}
|
||||
|
||||
// testForAllDatabaseTypes runs the given testFunc for every database
|
||||
|
||||
@@ -19,18 +19,6 @@ type DataAccessor interface {
|
||||
// return an error if the key doesn't exist.
|
||||
Delete(key *Key) error
|
||||
|
||||
// AppendToStore appends the given data to the store
|
||||
// defined by storeName. This function returns a serialized
|
||||
// location handle that's meant to be stored and later used
|
||||
// when querying the data that has just now been inserted.
|
||||
AppendToStore(storeName string, data []byte) ([]byte, error)
|
||||
|
||||
// RetrieveFromStore retrieves data from the store defined by
|
||||
// storeName using the given serialized location handle. It
|
||||
// returns ErrNotFound if the location does not exist. See
|
||||
// AppendToStore for further details.
|
||||
RetrieveFromStore(storeName string, location []byte) ([]byte, error)
|
||||
|
||||
// Cursor begins a new cursor over the given bucket.
|
||||
Cursor(bucket *Bucket) (Cursor, error)
|
||||
}
|
||||
|
||||
@@ -7,8 +7,9 @@ package database_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/kaspanet/kaspad/infrastructure/db/database"
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/infrastructure/db/database"
|
||||
)
|
||||
|
||||
func TestDatabasePut(t *testing.T) {
|
||||
@@ -166,42 +167,3 @@ func testDatabaseDelete(t *testing.T, db database.Database, testName string) {
|
||||
"unexpectedly returned that the value exists", testName)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDatabaseAppendToStoreAndRetrieveFromStore(t *testing.T) {
|
||||
testForAllDatabaseTypes(t, "TestDatabaseAppendToStoreAndRetrieveFromStore", testDatabaseAppendToStoreAndRetrieveFromStore)
|
||||
}
|
||||
|
||||
func testDatabaseAppendToStoreAndRetrieveFromStore(t *testing.T, db database.Database, testName string) {
|
||||
// Append some data into the store
|
||||
storeName := "store"
|
||||
data := []byte("data")
|
||||
location, err := db.AppendToStore(storeName, data)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: AppendToStore "+
|
||||
"unexpectedly failed: %s", testName, err)
|
||||
}
|
||||
|
||||
// Retrieve the data and make sure it's equal to what was appended
|
||||
retrievedData, err := db.RetrieveFromStore(storeName, location)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: RetrieveFromStore "+
|
||||
"unexpectedly failed: %s", testName, err)
|
||||
}
|
||||
if !bytes.Equal(retrievedData, data) {
|
||||
t.Fatalf("%s: RetrieveFromStore "+
|
||||
"returned unexpected data. Want: %s, got: %s",
|
||||
testName, string(data), string(retrievedData))
|
||||
}
|
||||
|
||||
// Make sure that an invalid location returns ErrNotFound
|
||||
fakeLocation := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}
|
||||
_, err = db.RetrieveFromStore(storeName, fakeLocation)
|
||||
if err == nil {
|
||||
t.Fatalf("%s: RetrieveFromStore "+
|
||||
"unexpectedly succeeded", testName)
|
||||
}
|
||||
if !database.IsNotFoundError(err) {
|
||||
t.Fatalf("%s: RetrieveFromStore "+
|
||||
"returned wrong error: %s", testName, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,231 +0,0 @@
|
||||
package ff
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"github.com/pkg/errors"
|
||||
"hash/crc32"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// maxOpenFiles is the max number of open files to maintain in each store's
|
||||
// cache. Note that this does not include the current/write file, so there
|
||||
// will typically be one more than this value open.
|
||||
maxOpenFiles = 25
|
||||
)
|
||||
|
||||
var (
|
||||
// maxFileSize is the maximum size for each file used to store data.
|
||||
//
|
||||
// NOTE: The current code uses uint32 for all offsets, so this value
|
||||
// must be less than 2^32 (4 GiB).
|
||||
// NOTE: This is a var rather than a const for testing purposes.
|
||||
maxFileSize uint32 = 512 * 1024 * 1024 // 512 MiB
|
||||
)
|
||||
|
||||
var (
|
||||
// byteOrder is the preferred byte order used through the flat files.
|
||||
// Sometimes big endian will be used to allow ordered byte sortable
|
||||
// integer values.
|
||||
byteOrder = binary.LittleEndian
|
||||
|
||||
// crc32ByteOrder is the byte order used for CRC-32 checksums.
|
||||
crc32ByteOrder = binary.BigEndian
|
||||
|
||||
// crc32ChecksumLength is the length in bytes of a CRC-32 checksum.
|
||||
crc32ChecksumLength = 4
|
||||
|
||||
// dataLengthLength is the length in bytes of the "data length" section
|
||||
// of a serialized entry in a flat file store.
|
||||
dataLengthLength = 4
|
||||
|
||||
// castagnoli houses the Catagnoli polynomial used for CRC-32 checksums.
|
||||
castagnoli = crc32.MakeTable(crc32.Castagnoli)
|
||||
)
|
||||
|
||||
// flatFileStore houses information used to handle reading and writing data
|
||||
// into flat files with support for multiple concurrent readers.
|
||||
type flatFileStore struct {
|
||||
// basePath is the base path used for the flat files.
|
||||
basePath string
|
||||
|
||||
// storeName is the name of this flat-file store.
|
||||
storeName string
|
||||
|
||||
// The following fields are related to the flat files which hold the
|
||||
// actual data. The number of open files is limited by maxOpenFiles.
|
||||
//
|
||||
// openFilesMutex protects concurrent access to the openFiles map. It
|
||||
// is a RWMutex so multiple readers can simultaneously access open
|
||||
// files.
|
||||
//
|
||||
// openFiles houses the open file handles for existing files which have
|
||||
// been opened read-only along with an individual RWMutex. This scheme
|
||||
// allows multiple concurrent readers to the same file while preventing
|
||||
// the file from being closed out from under them.
|
||||
//
|
||||
// lruMutex protects concurrent access to the least recently used list
|
||||
// and lookup map.
|
||||
//
|
||||
// openFilesLRU tracks how the open files are referenced by pushing the
|
||||
// most recently used files to the front of the list thereby trickling
|
||||
// the least recently used files to end of the list. When a file needs
|
||||
// to be closed due to exceeding the max number of allowed open
|
||||
// files, the one at the end of the list is closed.
|
||||
//
|
||||
// fileNumberToLRUElement is a mapping between a specific file number and
|
||||
// the associated list element on the least recently used list.
|
||||
//
|
||||
// Thus, with the combination of these fields, the database supports
|
||||
// concurrent non-blocking reads across multiple and individual files
|
||||
// along with intelligently limiting the number of open file handles by
|
||||
// closing the least recently used files as needed.
|
||||
//
|
||||
// NOTE: The locking order used throughout is well-defined and MUST be
|
||||
// followed. Failure to do so could lead to deadlocks. In particular,
|
||||
// the locking order is as follows:
|
||||
// 1) openFilesMutex
|
||||
// 2) lruMutex
|
||||
// 3) writeCursor mutex
|
||||
// 4) specific file mutexes
|
||||
//
|
||||
// None of the mutexes are required to be locked at the same time, and
|
||||
// often aren't. However, if they are to be locked simultaneously, they
|
||||
// MUST be locked in the order previously specified.
|
||||
//
|
||||
// Due to the high performance and multi-read concurrency requirements,
|
||||
// write locks should only be held for the minimum time necessary.
|
||||
openFilesMutex sync.RWMutex
|
||||
openFiles map[uint32]*lockableFile
|
||||
lruMutex sync.Mutex
|
||||
openFilesLRU *list.List // Contains uint32 file numbers.
|
||||
fileNumberToLRUElement map[uint32]*list.Element
|
||||
|
||||
// writeCursor houses the state for the current file and location that
|
||||
// new data is written to.
|
||||
writeCursor *writeCursor
|
||||
|
||||
// isClosed is true when the store is closed. Any operations on a closed
|
||||
// store will fail.
|
||||
isClosed bool
|
||||
}
|
||||
|
||||
// writeCursor represents the current file and offset of the flat file on disk
|
||||
// for performing all writes. It also contains a read-write mutex to support
|
||||
// multiple concurrent readers which can reuse the file handle.
|
||||
type writeCursor struct {
|
||||
sync.RWMutex
|
||||
|
||||
// currentFile is the current file that will be appended to when writing
|
||||
// new data.
|
||||
currentFile *lockableFile
|
||||
|
||||
// currentFileNumber is the current file number and is used to allow
|
||||
// readers to use the same open file handle.
|
||||
currentFileNumber uint32
|
||||
|
||||
// currentOffset is the offset in the current file where the next new
|
||||
// data will be written.
|
||||
currentOffset uint32
|
||||
}
|
||||
|
||||
// openFlatFileStore returns a new flat file store with the current file number
|
||||
// and offset set and all fields initialized.
|
||||
func openFlatFileStore(basePath string, storeName string) (*flatFileStore, error) {
|
||||
// Look for the end of the latest file to determine what the write cursor
|
||||
// position is from the viewpoint of the flat files on disk.
|
||||
fileNumber, fileOffset, err := findCurrentLocation(basePath, storeName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
store := &flatFileStore{
|
||||
basePath: basePath,
|
||||
storeName: storeName,
|
||||
openFiles: make(map[uint32]*lockableFile),
|
||||
openFilesLRU: list.New(),
|
||||
fileNumberToLRUElement: make(map[uint32]*list.Element),
|
||||
writeCursor: &writeCursor{
|
||||
currentFile: &lockableFile{},
|
||||
currentFileNumber: fileNumber,
|
||||
currentOffset: fileOffset,
|
||||
},
|
||||
isClosed: false,
|
||||
}
|
||||
return store, nil
|
||||
}
|
||||
|
||||
func (s *flatFileStore) Close() error {
|
||||
if s.isClosed {
|
||||
return errors.Errorf("cannot close a closed store %s",
|
||||
s.storeName)
|
||||
}
|
||||
s.isClosed = true
|
||||
|
||||
// Close the write cursor. We lock the write cursor here
|
||||
// to let it finish any undergoing writing.
|
||||
s.writeCursor.Lock()
|
||||
defer s.writeCursor.Unlock()
|
||||
err := s.writeCursor.currentFile.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Close all open files
|
||||
for _, openFile := range s.openFiles {
|
||||
err := openFile.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *flatFileStore) currentLocation() *flatFileLocation {
|
||||
return &flatFileLocation{
|
||||
fileNumber: s.writeCursor.currentFileNumber,
|
||||
fileOffset: s.writeCursor.currentOffset,
|
||||
dataLength: 0,
|
||||
}
|
||||
}
|
||||
|
||||
// findCurrentLocation searches the database directory for all flat files for a given
|
||||
// store to find the end of the most recent file. This position is considered
|
||||
// the current write cursor.
|
||||
func findCurrentLocation(dbPath string, storeName string) (fileNumber uint32, fileLength uint32, err error) {
|
||||
currentFileNumber := uint32(0)
|
||||
currentFileLength := uint32(0)
|
||||
for {
|
||||
currentFilePath := flatFilePath(dbPath, storeName, currentFileNumber)
|
||||
stat, err := os.Stat(currentFilePath)
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return 0, 0, errors.WithStack(err)
|
||||
}
|
||||
if currentFileNumber > 0 {
|
||||
fileNumber = currentFileNumber - 1
|
||||
}
|
||||
fileLength = currentFileLength
|
||||
break
|
||||
}
|
||||
currentFileLength = uint32(stat.Size())
|
||||
currentFileNumber++
|
||||
}
|
||||
|
||||
log.Tracef("Scan for store '%s' found latest file #%d with length %d",
|
||||
storeName, fileNumber, fileLength)
|
||||
return fileNumber, fileLength, nil
|
||||
}
|
||||
|
||||
// flatFilePath return the file path for the provided store's flat file number.
|
||||
func flatFilePath(dbPath string, storeName string, fileNumber uint32) string {
|
||||
// Choose 9 digits of precision for the filenames. 9 digits provide
|
||||
// 10^9 files @ 512MiB each a total of ~476.84PiB.
|
||||
|
||||
fileName := fmt.Sprintf("%s-%09d.fdb", storeName, fileNumber)
|
||||
return filepath.Join(dbPath, fileName)
|
||||
}
|
||||
@@ -1,175 +0,0 @@
|
||||
package ff
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/kaspanet/kaspad/infrastructure/db/database"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func prepareStoreForTest(t *testing.T, testName string) (store *flatFileStore, teardownFunc func()) {
|
||||
// Create a temp db to run tests against
|
||||
path, err := ioutil.TempDir("", testName)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: TempDir unexpectedly "+
|
||||
"failed: %s", testName, err)
|
||||
}
|
||||
name := "test"
|
||||
store, err = openFlatFileStore(path, name)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: openFlatFileStore "+
|
||||
"unexpectedly failed: %s", testName, err)
|
||||
}
|
||||
teardownFunc = func() {
|
||||
err = store.Close()
|
||||
if err != nil {
|
||||
t.Fatalf("%s: Close unexpectedly "+
|
||||
"failed: %s", testName, err)
|
||||
}
|
||||
}
|
||||
return store, teardownFunc
|
||||
}
|
||||
|
||||
func TestFlatFileStoreSanity(t *testing.T) {
|
||||
store, teardownFunc := prepareStoreForTest(t, "TestFlatFileStoreSanity")
|
||||
defer teardownFunc()
|
||||
|
||||
// Write something to the store
|
||||
writeData := []byte("Hello world!")
|
||||
location, err := store.write(writeData)
|
||||
if err != nil {
|
||||
t.Fatalf("TestFlatFileStoreSanity: Write returned "+
|
||||
"unexpected error: %s", err)
|
||||
}
|
||||
|
||||
// Read from the location previously written to
|
||||
readData, err := store.read(location)
|
||||
if err != nil {
|
||||
t.Fatalf("TestFlatFileStoreSanity: read returned "+
|
||||
"unexpected error: %s", err)
|
||||
}
|
||||
|
||||
// Make sure that the written data and the read data are equal
|
||||
if !reflect.DeepEqual(readData, writeData) {
|
||||
t.Fatalf("TestFlatFileStoreSanity: read data and "+
|
||||
"write data are not equal. Wrote: %s, read: %s",
|
||||
string(writeData), string(readData))
|
||||
}
|
||||
}
|
||||
|
||||
func TestFlatFilePath(t *testing.T) {
|
||||
tests := []struct {
|
||||
dbPath string
|
||||
storeName string
|
||||
fileNumber uint32
|
||||
expectedPath string
|
||||
}{
|
||||
{
|
||||
dbPath: "path",
|
||||
storeName: "store",
|
||||
fileNumber: 0,
|
||||
expectedPath: "path/store-000000000.fdb",
|
||||
},
|
||||
{
|
||||
dbPath: "path/to/database",
|
||||
storeName: "blocks",
|
||||
fileNumber: 123456789,
|
||||
expectedPath: "path/to/database/blocks-123456789.fdb",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
path := flatFilePath(test.dbPath, test.storeName, test.fileNumber)
|
||||
if path != test.expectedPath {
|
||||
t.Errorf("TestFlatFilePath: unexpected path. Want: %s, got: %s",
|
||||
test.expectedPath, path)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestFlatFileMultiFileRollback verifies that rolling a flat-file store
// back to an earlier location deletes every file created after that
// location, makes data written after it unreadable, and leaves data
// written before it intact.
func TestFlatFileMultiFileRollback(t *testing.T) {
	store, teardownFunc := prepareStoreForTest(t, "TestFlatFileMultiFileRollback")
	defer teardownFunc()

	// Set the maxFileSize to 16 bytes so that we don't have to write
	// an enormous amount of data to disk to get multiple files, all
	// for the sake of this test.
	// NOTE: maxFileSize is package-level mutable state, so it is
	// restored via defer to avoid leaking into other tests.
	currentMaxFileSize := maxFileSize
	maxFileSize = 16
	defer func() {
		maxFileSize = currentMaxFileSize
	}()

	// Write five 8 byte chunks and keep the last location written to.
	// Each chunk is filled with its loop index so chunks are distinguishable.
	var lastWriteLocation1 *flatFileLocation
	for i := byte(0); i < 5; i++ {
		writeData := []byte{i, i, i, i, i, i, i, i}
		var err error
		lastWriteLocation1, err = store.write(writeData)
		if err != nil {
			t.Fatalf("TestFlatFileMultiFileRollback: write returned "+
				"unexpected error: %s", err)
		}
	}

	// Grab the current location and the current file number. The store
	// will later be rolled back to exactly this point.
	currentLocation := store.currentLocation()
	fileNumberBeforeWriting := store.writeCursor.currentFileNumber

	// Write (2 * maxFileSize) more 8 byte chunks and keep the last
	// location written to. With maxFileSize == 16 this forces the store
	// to spill across multiple files.
	var lastWriteLocation2 *flatFileLocation
	for i := byte(0); i < byte(2*maxFileSize); i++ {
		writeData := []byte{0, 1, 2, 3, 4, 5, 6, 7}
		var err error
		lastWriteLocation2, err = store.write(writeData)
		if err != nil {
			t.Fatalf("TestFlatFileMultiFileRollback: write returned "+
				"unexpected error: %s", err)
		}
	}

	// Grab the file number again to later make sure its file no longer exists
	fileNumberAfterWriting := store.writeCursor.currentFileNumber

	// Rollback to the location captured before the second batch of writes
	err := store.rollback(currentLocation)
	if err != nil {
		t.Fatalf("TestFlatFileMultiFileRollback: rollback returned "+
			"unexpected error: %s", err)
	}

	// Make sure that lastWriteLocation1 still exists.
	// The last chunk of the first batch was written with i == 4.
	expectedData := []byte{4, 4, 4, 4, 4, 4, 4, 4}
	data, err := store.read(lastWriteLocation1)
	if err != nil {
		t.Fatalf("TestFlatFileMultiFileRollback: read returned "+
			"unexpected error: %s", err)
	}
	if !bytes.Equal(data, expectedData) {
		t.Fatalf("TestFlatFileMultiFileRollback: read returned "+
			"unexpected data. Want: %s, got: %s", string(expectedData),
			string(data))
	}

	// Make sure that lastWriteLocation2 does NOT exist: the read must
	// fail, and it must fail specifically with a not-found error.
	_, err = store.read(lastWriteLocation2)
	if err == nil {
		t.Fatalf("TestFlatFileMultiFileRollback: read " +
			"unexpectedly succeeded")
	}
	if !database.IsNotFoundError(err) {
		t.Fatalf("TestFlatFileMultiFileRollback: read "+
			"returned unexpected error: %s", err)
	}

	// Make sure that all the appropriate files have been deleted:
	// every file created during the second batch of writes (numbers
	// after fileNumberBeforeWriting up to fileNumberAfterWriting)
	// should no longer exist on disk.
	for i := fileNumberAfterWriting; i > fileNumberBeforeWriting; i-- {
		filePath := flatFilePath(store.basePath, store.storeName, i)
		if _, err := os.Stat(filePath); err == nil || !os.IsNotExist(err) {
			t.Fatalf("TestFlatFileMultiFileRollback: file "+
				"unexpectedly still exists: %s", filePath)
		}
	}
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user