Mirror of https://github.com/kaspanet/kaspad.git, synced 2026-02-21 11:17:05 +00:00

Compare commits (47 commits)
| Author | SHA1 | Date |
|---|---|---|
| | beb038c815 | |
| | 35a959b56f | |
| | 57c6118be8 | |
| | 723aebbec9 | |
| | 2b395e34b1 | |
| | ada559f007 | |
| | 357e8ce73c | |
| | 6725902663 | |
| | 99bb21c512 | |
| | a4669f3fb5 | |
| | e8f40bdff9 | |
| | 68a407ea37 | |
| | 80879cabe1 | |
| | 71afc62298 | |
| | ca5c8549b9 | |
| | ab73def07a | |
| | 3f840233d8 | |
| | 90d9edb8e5 | |
| | b9b360bce4 | |
| | 27654961f9 | |
| | d45af760d8 | |
| | 95fa045297 | |
| | cb65dae63d | |
| | 21b82d7efc | |
| | 63c6d7443b | |
| | 753f4a2ec1 | |
| | ed667f7e54 | |
| | c4a034eb43 | |
| | 2eca0f0b5f | |
| | 58d627e05a | |
| | 639183ba0e | |
| | 9fa08442cf | |
| | 0dd50394ec | |
| | ac8d4e1341 | |
| | 2488fbde78 | |
| | 2ab8065142 | |
| | 25410b86ae | |
| | 4e44dd8510 | |
| | 1e56a22b32 | |
| | 7a95f0c7a4 | |
| | c81506220b | |
| | e5598c15a7 | |
| | 433af5e0fe | |
| | b7be807167 | |
| | e687ceeae7 | |
| | 04e35321aa | |
| | 061e65be93 | |
7 .github/workflows/deploy.yaml (vendored)

```diff
@@ -19,16 +19,11 @@ jobs:
       - name: Check out code into the Go module directory
         uses: actions/checkout@v2
 
-      # Increase the pagefile size on Windows to aviod running out of memory
-      - name: Increase pagefile size on Windows
-        if: runner.os == 'Windows'
-        run: powershell -command .github\workflows\SetPageFileSize.ps1
-
       - name: Setup Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.16
+          go-version: 1.18
 
       - name: Build on Linux
         if: runner.os == 'Linux'
 
```
2 .github/workflows/race.yaml (vendored)

```diff
@@ -22,7 +22,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.16
+          go-version: 1.18
 
       - name: Set scheduled branch name
         shell: bash
```
6 .github/workflows/tests.yaml (vendored)

```diff
@@ -33,7 +33,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.16
+          go-version: 1.18
 
 
       # Source: https://github.com/actions/cache/blob/main/examples.md#go---modules
@@ -58,7 +58,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.16
+          go-version: 1.18
 
       - name: Checkout
         uses: actions/checkout@v2
@@ -86,7 +86,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.16
+          go-version: 1.18
 
       - name: Delete the stability tests from coverage
         run: rm -r stability-tests
```
```diff
@@ -15,7 +15,7 @@ Kaspa is an attempt at a proof-of-work cryptocurrency with instant confirmations
 
 ## Requirements
 
-Go 1.16 or later.
+Go 1.18 or later.
 
 ## Installation
 
```
```diff
@@ -69,6 +69,10 @@ const (
 	CmdReady
 	CmdTrustedData
 	CmdBlockWithTrustedDataV4
+	CmdRequestNextPruningPointAndItsAnticoneBlocks
+	CmdRequestIBDChainBlockLocator
+	CmdIBDChainBlockLocator
+	CmdRequestAnticone
 
 	// rpc
 	CmdGetCurrentNetworkRequestMessage
@@ -152,6 +156,9 @@ const (
 	CmdVirtualDaaScoreChangedNotificationMessage
 	CmdGetBalancesByAddressesRequestMessage
 	CmdGetBalancesByAddressesResponseMessage
+	CmdNotifyNewBlockTemplateRequestMessage
+	CmdNotifyNewBlockTemplateResponseMessage
+	CmdNewBlockTemplateNotificationMessage
 )
 
 // ProtocolMessageCommandToString maps all MessageCommands to their string representation
@@ -195,6 +202,10 @@ var ProtocolMessageCommandToString = map[MessageCommand]string{
 	CmdReady:                  "Ready",
 	CmdTrustedData:            "TrustedData",
 	CmdBlockWithTrustedDataV4: "BlockWithTrustedDataV4",
+	CmdRequestNextPruningPointAndItsAnticoneBlocks: "RequestNextPruningPointAndItsAnticoneBlocks",
+	CmdRequestIBDChainBlockLocator:                 "RequestIBDChainBlockLocator",
+	CmdIBDChainBlockLocator:                        "IBDChainBlockLocator",
+	CmdRequestAnticone:                             "RequestAnticone",
 }
 
 // RPCMessageCommandToString maps all MessageCommands to their string representation
@@ -278,6 +289,9 @@ var RPCMessageCommandToString = map[MessageCommand]string{
 	CmdVirtualDaaScoreChangedNotificationMessage: "VirtualDaaScoreChangedNotification",
 	CmdGetBalancesByAddressesRequestMessage:      "GetBalancesByAddressesRequest",
 	CmdGetBalancesByAddressesResponseMessage:     "GetBalancesByAddressesResponse",
+	CmdNotifyNewBlockTemplateRequestMessage:  "NotifyNewBlockTemplateRequest",
+	CmdNotifyNewBlockTemplateResponseMessage: "NotifyNewBlockTemplateResponse",
+	CmdNewBlockTemplateNotificationMessage:   "NewBlockTemplateNotification",
 }
 
 // Message is an interface that describes a kaspa message. A type that
```
27 app/appmessage/p2p_msgibdchainblocklocator.go (new file)

```go
package appmessage

import (
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

// MsgIBDChainBlockLocator implements the Message interface and represents a kaspa
// locator message. It is used to find the blockLocator of a peer that is
// syncing with you.
type MsgIBDChainBlockLocator struct {
	baseMessage
	BlockLocatorHashes []*externalapi.DomainHash
}

// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgIBDChainBlockLocator) Command() MessageCommand {
	return CmdIBDChainBlockLocator
}

// NewMsgIBDChainBlockLocator returns a new kaspa locator message that conforms to
// the Message interface. See MsgBlockLocator for details.
func NewMsgIBDChainBlockLocator(locatorHashes []*externalapi.DomainHash) *MsgIBDChainBlockLocator {
	return &MsgIBDChainBlockLocator{
		BlockLocatorHashes: locatorHashes,
	}
}
```
33 app/appmessage/p2p_msgrequestanticone.go (new file)

```go
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package appmessage

import (
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

// MsgRequestAnticone implements the Message interface and represents a kaspa
// RequestHeaders message. It is used to request the set past(ContextHash) \cap anticone(BlockHash)
type MsgRequestAnticone struct {
	baseMessage
	BlockHash   *externalapi.DomainHash
	ContextHash *externalapi.DomainHash
}

// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestAnticone) Command() MessageCommand {
	return CmdRequestAnticone
}

// NewMsgRequestAnticone returns a new kaspa RequestPastDiff message that conforms to the
// Message interface using the passed parameters and defaults for the remaining
// fields.
func NewMsgRequestAnticone(blockHash, contextHash *externalapi.DomainHash) *MsgRequestAnticone {
	return &MsgRequestAnticone{
		BlockHash:   blockHash,
		ContextHash: contextHash,
	}
}
```
31 app/appmessage/p2p_msgrequestibdchainblocklocator.go (new file)

```go
package appmessage

import (
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

// MsgRequestIBDChainBlockLocator implements the Message interface and represents a kaspa
// IBDRequestChainBlockLocator message. It is used to request a block locator between low
// and high hash.
// The locator is returned via a locator message (MsgIBDChainBlockLocator).
type MsgRequestIBDChainBlockLocator struct {
	baseMessage
	HighHash *externalapi.DomainHash
	LowHash  *externalapi.DomainHash
}

// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestIBDChainBlockLocator) Command() MessageCommand {
	return CmdRequestIBDChainBlockLocator
}

// NewMsgIBDRequestChainBlockLocator returns a new IBDRequestChainBlockLocator message that conforms to the
// Message interface using the passed parameters and defaults for the remaining
// fields.
func NewMsgIBDRequestChainBlockLocator(highHash, lowHash *externalapi.DomainHash) *MsgRequestIBDChainBlockLocator {
	return &MsgRequestIBDChainBlockLocator{
		HighHash: highHash,
		LowHash:  lowHash,
	}
}
```
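Taken together, the two new locator messages form a request/response pair: the syncee asks for a selected-chain block locator between two hashes and the syncer answers with MsgIBDChainBlockLocator. The sketch below shows that exchange from the requesting side; it is illustrative only (the function name and the simplified error handling are assumptions, not part of this changeset), using the same route API the flows in this diff use.

```go
package example // illustrative only; not part of the changeset

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"github.com/pkg/errors"
)

// requestChainLocator asks the syncer for its selected-chain block locator between
// lowHash and highHash. Passing nil for both requests the full chain locator.
func requestChainLocator(outgoingRoute, incomingRoute *router.Route,
	highHash, lowHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {

	// Send the request message introduced in this changeset.
	err := outgoingRoute.Enqueue(appmessage.NewMsgIBDRequestChainBlockLocator(highHash, lowHash))
	if err != nil {
		return nil, err
	}

	// Wait for the matching locator response.
	message, err := incomingRoute.Dequeue()
	if err != nil {
		return nil, err
	}
	locatorMessage, ok := message.(*appmessage.MsgIBDChainBlockLocator)
	if !ok {
		return nil, errors.Errorf("unexpected message command %s", message.Command())
	}
	return locatorMessage.BlockLocatorHashes, nil
}
```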
@@ -0,0 +1,22 @@ (new file)

```go
package appmessage

// MsgRequestNextPruningPointAndItsAnticoneBlocks implements the Message interface and represents a kaspa
// RequestNextPruningPointAndItsAnticoneBlocks message. It is used to notify the IBD syncer peer to send
// more blocks from the pruning anticone.
//
// This message has no payload.
type MsgRequestNextPruningPointAndItsAnticoneBlocks struct {
	baseMessage
}

// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestNextPruningPointAndItsAnticoneBlocks) Command() MessageCommand {
	return CmdRequestNextPruningPointAndItsAnticoneBlocks
}

// NewMsgRequestNextPruningPointAndItsAnticoneBlocks returns a new kaspa RequestNextPruningPointAndItsAnticoneBlocks message that conforms to the
// Message interface.
func NewMsgRequestNextPruningPointAndItsAnticoneBlocks() *MsgRequestNextPruningPointAndItsAnticoneBlocks {
	return &MsgRequestNextPruningPointAndItsAnticoneBlocks{}
}
```
```diff
@@ -5,6 +5,7 @@ package appmessage
 type GetBlockTemplateRequestMessage struct {
 	baseMessage
 	PayAddress string
+	ExtraData  string
 }
 
 // Command returns the protocol command string for the message
@@ -13,9 +14,10 @@ func (msg *GetBlockTemplateRequestMessage) Command() MessageCommand {
 }
 
 // NewGetBlockTemplateRequestMessage returns a instance of the message
-func NewGetBlockTemplateRequestMessage(payAddress string) *GetBlockTemplateRequestMessage {
+func NewGetBlockTemplateRequestMessage(payAddress, extraData string) *GetBlockTemplateRequestMessage {
 	return &GetBlockTemplateRequestMessage{
 		PayAddress: payAddress,
+		ExtraData:  extraData,
 	}
 }
```
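The constructor change above means every GetBlockTemplate caller now supplies an extra-data string alongside the payout address. A minimal usage sketch, assuming a hypothetical caller and a made-up extra-data value:

```go
package example // illustrative only

import "github.com/kaspanet/kaspad/app/appmessage"

// buildTemplateRequest shows the updated constructor signature: callers pass both
// the payout address and an arbitrary extra-data string alongside the request.
func buildTemplateRequest(payAddress string) *appmessage.GetBlockTemplateRequestMessage {
	// "my-miner/1.0" is a hypothetical value chosen purely for illustration.
	return appmessage.NewGetBlockTemplateRequestMessage(payAddress, "my-miner/1.0")
}
```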
50 app/appmessage/rpc_notify_new_block_template.go (new file)

```go
package appmessage

// NotifyNewBlockTemplateRequestMessage is an appmessage corresponding to
// its respective RPC message
type NotifyNewBlockTemplateRequestMessage struct {
	baseMessage
}

// Command returns the protocol command string for the message
func (msg *NotifyNewBlockTemplateRequestMessage) Command() MessageCommand {
	return CmdNotifyNewBlockTemplateRequestMessage
}

// NewNotifyNewBlockTemplateRequestMessage returns an instance of the message
func NewNotifyNewBlockTemplateRequestMessage() *NotifyNewBlockTemplateRequestMessage {
	return &NotifyNewBlockTemplateRequestMessage{}
}

// NotifyNewBlockTemplateResponseMessage is an appmessage corresponding to
// its respective RPC message
type NotifyNewBlockTemplateResponseMessage struct {
	baseMessage
	Error *RPCError
}

// Command returns the protocol command string for the message
func (msg *NotifyNewBlockTemplateResponseMessage) Command() MessageCommand {
	return CmdNotifyNewBlockTemplateResponseMessage
}

// NewNotifyNewBlockTemplateResponseMessage returns an instance of the message
func NewNotifyNewBlockTemplateResponseMessage() *NotifyNewBlockTemplateResponseMessage {
	return &NotifyNewBlockTemplateResponseMessage{}
}

// NewBlockTemplateNotificationMessage is an appmessage corresponding to
// its respective RPC message
type NewBlockTemplateNotificationMessage struct {
	baseMessage
}

// Command returns the protocol command string for the message
func (msg *NewBlockTemplateNotificationMessage) Command() MessageCommand {
	return CmdNewBlockTemplateNotificationMessage
}

// NewNewBlockTemplateNotificationMessage returns an instance of the message
func NewNewBlockTemplateNotificationMessage() *NewBlockTemplateNotificationMessage {
	return &NewBlockTemplateNotificationMessage{}
}
```
```diff
@@ -153,6 +153,7 @@ func setupRPC(
 		shutDownChan,
 	)
 	protocolManager.SetOnVirtualChange(rpcManager.NotifyVirtualChange)
+	protocolManager.SetOnNewBlockTemplateHandler(rpcManager.NotifyNewBlockTemplate)
 	protocolManager.SetOnBlockAddedToDAGHandler(rpcManager.NotifyBlockAddedToDAG)
 	protocolManager.SetOnPruningPointUTXOSetOverrideHandler(rpcManager.NotifyPruningPointUTXOSetOverride)
 
```
```diff
@@ -20,8 +20,8 @@ func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock,
 	virtualChangeSet *externalapi.VirtualChangeSet) error {
 
 	hash := consensushashing.BlockHash(block)
-	log.Debugf("OnNewBlock start for block %s", hash)
-	defer log.Debugf("OnNewBlock end for block %s", hash)
+	log.Tracef("OnNewBlock start for block %s", hash)
+	defer log.Tracef("OnNewBlock end for block %s", hash)
 
 	unorphaningResults, err := f.UnorphanBlocks(block)
 	if err != nil {
@@ -68,6 +68,15 @@ func (f *FlowContext) OnVirtualChange(virtualChangeSet *externalapi.VirtualChang
 	return nil
 }
 
+// OnNewBlockTemplate calls the handler function whenever a new block template is available for miners.
+func (f *FlowContext) OnNewBlockTemplate() error {
+	if f.onNewBlockTemplateHandler != nil {
+		return f.onNewBlockTemplateHandler()
+	}
+
+	return nil
+}
+
 // OnPruningPointUTXOSetOverride calls the handler function whenever the UTXO set
 // resets due to pruning point change via IBD.
 func (f *FlowContext) OnPruningPointUTXOSetOverride() error {
@@ -125,6 +134,10 @@ func (f *FlowContext) AddBlock(block *externalapi.DomainBlock) error {
 		}
 		return err
 	}
+	err = f.OnNewBlockTemplate()
+	if err != nil {
+		return err
+	}
 	err = f.OnNewBlock(block, virtualChangeSet)
 	if err != nil {
 		return err
@@ -150,7 +163,7 @@ func (f *FlowContext) TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool {
 		return false
 	}
 	f.ibdPeer = ibdPeer
-	log.Infof("IBD started")
+	log.Infof("IBD started with peer %s", ibdPeer)
 
 	return true
 }
```
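The OnNewBlockTemplate hook added above is a plain callback chain: whoever owns the FlowContext registers a handler, and the flow context invokes it from AddBlock (and, further down in this changeset, from the relay flow when virtual gains new parents). A minimal registration sketch, assuming the flowcontext package path used elsewhere in this diff; the wiring function itself is hypothetical:

```go
package example // illustrative only

import "github.com/kaspanet/kaspad/app/protocol/flowcontext"

// wireTemplateNotifications installs a new-block-template callback on the flow
// context. In kaspad itself this is done by the protocol manager on behalf of the
// RPC manager (see the setupRPC hunk above); here the caller is hypothetical.
func wireTemplateNotifications(f *flowcontext.FlowContext, notify func() error) {
	f.SetOnNewBlockTemplateHandler(notify)
	// From this point on, FlowContext.AddBlock will call notify() via
	// FlowContext.OnNewBlockTemplate() whenever a locally added block may have
	// produced a fresh template.
}
```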
```diff
@@ -2,6 +2,7 @@ package flowcontext
 
 import (
 	"errors"
+	"strings"
 	"sync/atomic"
 
 	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
@@ -9,6 +10,11 @@ import (
 	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
 )
 
+var (
+	// ErrPingTimeout signifies that a ping operation timed out.
+	ErrPingTimeout = protocolerrors.New(false, "timeout expired on ping")
+)
+
 // HandleError handles an error from a flow,
 // It sends the error to errChan if isStopping == 0 and increments isStopping
 //
@@ -21,8 +27,15 @@ func (*FlowContext) HandleError(err error, flowName string, isStopping *uint32,
 		if protocolErr := (protocolerrors.ProtocolError{}); !errors.As(err, &protocolErr) {
 			panic(err)
 		}
 
-		log.Errorf("error from %s: %s", flowName, err)
+		if errors.Is(err, ErrPingTimeout) {
+			// Avoid printing the call stack on ping timeouts, since users get panicked and this case is not interesting
+			log.Errorf("error from %s: %s", flowName, err)
+		} else {
+			// Explain to the user that this is not a panic, but only a protocol error with a specific peer
+			logFrame := strings.Repeat("=", 52)
+			log.Errorf("Non-critical peer protocol error from %s, printing the full stack for debug purposes: \n%s\n%+v \n%s",
+				flowName, logFrame, err, logFrame)
+		}
 	}
 
 	if atomic.AddUint32(isStopping, 1) == 1 {
```
```diff
@@ -25,6 +25,9 @@ type OnBlockAddedToDAGHandler func(block *externalapi.DomainBlock, virtualChange
 // OnVirtualChangeHandler is a handler function that's triggered when the virtual changes
 type OnVirtualChangeHandler func(virtualChangeSet *externalapi.VirtualChangeSet) error
 
+// OnNewBlockTemplateHandler is a handler function that's triggered when a new block template is available
+type OnNewBlockTemplateHandler func() error
+
 // OnPruningPointUTXOSetOverrideHandler is a handle function that's triggered whenever the UTXO set
 // resets due to pruning point change via IBD.
 type OnPruningPointUTXOSetOverrideHandler func() error
@@ -46,11 +49,13 @@ type FlowContext struct {
 
 	onVirtualChangeHandler               OnVirtualChangeHandler
 	onBlockAddedToDAGHandler             OnBlockAddedToDAGHandler
+	onNewBlockTemplateHandler            OnNewBlockTemplateHandler
 	onPruningPointUTXOSetOverrideHandler OnPruningPointUTXOSetOverrideHandler
 	onTransactionAddedToMempoolHandler   OnTransactionAddedToMempoolHandler
 
-	lastRebroadcastTime         time.Time
-	sharedRequestedTransactions *SharedRequestedTransactions
+	expectedDAAWindowDurationInMilliseconds int64
+	lastRebroadcastTime                     time.Time
+	sharedRequestedTransactions             *SharedRequestedTransactions
 
 	sharedRequestedBlocks *SharedRequestedBlocks
 
@@ -88,6 +93,8 @@ func New(cfg *config.Config, domain domain.Domain, addressManager *addressmanage
 		transactionIDsToPropagate:        []*externalapi.DomainTransactionID{},
 		lastTransactionIDPropagationTime: time.Now(),
 		shutdownChan:                     make(chan struct{}),
+		expectedDAAWindowDurationInMilliseconds: cfg.NetParams().TargetTimePerBlock.Milliseconds() *
+			int64(cfg.NetParams().DifficultyAdjustmentWindowSize),
 	}
 }
 
@@ -112,6 +119,11 @@ func (f *FlowContext) SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler OnBlo
 	f.onBlockAddedToDAGHandler = onBlockAddedToDAGHandler
 }
 
+// SetOnNewBlockTemplateHandler sets the onNewBlockTemplateHandler handler
+func (f *FlowContext) SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler OnNewBlockTemplateHandler) {
+	f.onNewBlockTemplateHandler = onNewBlockTemplateHandler
+}
+
 // SetOnPruningPointUTXOSetOverrideHandler sets the onPruningPointUTXOSetOverrideHandler handler
 func (f *FlowContext) SetOnPruningPointUTXOSetOverrideHandler(onPruningPointUTXOSetOverrideHandler OnPruningPointUTXOSetOverrideHandler) {
 	f.onPruningPointUTXOSetOverrideHandler = onPruningPointUTXOSetOverrideHandler
```
```diff
@@ -2,21 +2,12 @@ package flowcontext
 
 import "github.com/kaspanet/kaspad/util/mstime"
 
-const (
-	maxSelectedParentTimeDiffToAllowMiningInMilliSeconds = 60 * 60 * 1000 // 1 Hour
-)
-
-// ShouldMine returns whether it's ok to use block template from this node
-// for mining purposes.
-func (f *FlowContext) ShouldMine() (bool, error) {
+// IsNearlySynced returns whether this node is considered synced or close to being synced. This info
+// is used to determine if it's ok to use a block template from this node for mining purposes.
+func (f *FlowContext) IsNearlySynced() (bool, error) {
 	peers := f.Peers()
 	if len(peers) == 0 {
-		log.Debugf("The node is not connected, so ShouldMine returns false")
+		log.Debugf("The node is not connected to peers, so IsNearlySynced returns false")
 		return false, nil
 	}
-
-	if f.IsIBDRunning() {
-		log.Debugf("IBD is running, so ShouldMine returns false")
-		return false, nil
-	}
 
@@ -35,13 +26,15 @@ func (f *FlowContext) ShouldMine() (bool, error) {
 	}
 
 	now := mstime.Now().UnixMilliseconds()
-	if now-virtualSelectedParentHeader.TimeInMilliseconds() < maxSelectedParentTimeDiffToAllowMiningInMilliSeconds {
-		log.Debugf("The selected tip timestamp is recent (%d), so ShouldMine returns true",
+	// As a heuristic, we allow the node to mine if he is likely to be within the current DAA window of fully synced nodes.
+	// Such blocks contribute to security by maintaining the current difficulty despite possibly being slightly out of sync.
+	if now-virtualSelectedParentHeader.TimeInMilliseconds() < f.expectedDAAWindowDurationInMilliseconds {
+		log.Debugf("The selected tip timestamp is recent (%d), so IsNearlySynced returns true",
 			virtualSelectedParentHeader.TimeInMilliseconds())
 		return true, nil
 	}
 
-	log.Debugf("The selected tip timestamp is old (%d), so ShouldMine returns false",
+	log.Debugf("The selected tip timestamp is old (%d), so IsNearlySynced returns false",
 		virtualSelectedParentHeader.TimeInMilliseconds())
 	return false, nil
 }
```
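The replacement heuristic bounds the acceptable selected-tip age by the expected duration of one difficulty-adjustment window instead of a fixed hour. A quick back-of-the-envelope check of what that bound works out to, assuming mainnet-like parameters of a 1-second target block time and a 2641-block DAA window (treat these numbers as illustrative; the real values come from cfg.NetParams() at runtime):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed, mainnet-like parameters (illustrative only).
	targetTimePerBlock := 1 * time.Second
	difficultyAdjustmentWindowSize := 2641

	// Same formula as the FlowContext initialization above.
	expectedDAAWindowDurationInMilliseconds := targetTimePerBlock.Milliseconds() *
		int64(difficultyAdjustmentWindowSize)

	fmt.Println(expectedDAAWindowDurationInMilliseconds) // 2641000
	fmt.Println(time.Duration(expectedDAAWindowDurationInMilliseconds) *
		time.Millisecond) // 44m1s, i.e. roughly 44 minutes instead of the old 1-hour constant
}
```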
```diff
@@ -18,9 +18,9 @@ var (
 
 	// minAcceptableProtocolVersion is the lowest protocol version that a
 	// connected peer may support.
-	minAcceptableProtocolVersion = uint32(4)
-
-	maxAcceptableProtocolVersion = uint32(4)
+	minAcceptableProtocolVersion = uint32(5)
+
+	maxAcceptableProtocolVersion = uint32(5)
 )
 
 type receiveVersionFlow struct {
```
16 app/protocol/flows/v5/blockrelay/batch_size_test.go (new file)

```go
package blockrelay

import (
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"testing"
)

func TestIBDBatchSizeLessThanRouteCapacity(t *testing.T) {
	// The `ibdBatchSize` constant must be equal at both syncer and syncee. Therefore, we do not want
	// to set it to `router.DefaultMaxMessages` to avoid confusion and human errors.
	// However, nonetheless we must enforce that it does not exceed `router.DefaultMaxMessages`
	if ibdBatchSize >= router.DefaultMaxMessages {
		t.Fatalf("IBD batch size (%d) must be smaller than router.DefaultMaxMessages (%d)",
			ibdBatchSize, router.DefaultMaxMessages)
	}
}
```
```diff
@@ -21,7 +21,7 @@ func (flow *handleRelayInvsFlow) receiveBlockLocator() (blockLocatorHashes []*ex
 
 		switch message := message.(type) {
 		case *appmessage.MsgInvRelayBlock:
-			flow.invsQueue = append(flow.invsQueue, message)
+			flow.invsQueue = append(flow.invsQueue, invRelayBlock{Hash: message.Hash, IsOrphanRoot: false})
 		case *appmessage.MsgBlockLocator:
 			return message.BlockLocatorHashes, nil
 		default:
```
```diff
@@ -33,7 +33,7 @@ func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute
 			return err
 		}
 		if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
-			return protocolerrors.Errorf(true, "block %s not found", hash)
+			return protocolerrors.Errorf(true, "block %s not found (v5)", hash)
 		}
 		block, err := context.Domain().Consensus().GetBlock(hash)
 		if err != nil {
```
@@ -0,0 +1,85 @@ (new file)

```go
package blockrelay

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"github.com/pkg/errors"
)

// RequestIBDChainBlockLocatorContext is the interface for the context needed for the HandleRequestBlockLocator flow.
type RequestIBDChainBlockLocatorContext interface {
	Domain() domain.Domain
}

type handleRequestIBDChainBlockLocatorFlow struct {
	RequestIBDChainBlockLocatorContext
	incomingRoute, outgoingRoute *router.Route
}

// HandleRequestIBDChainBlockLocator handles getBlockLocator messages
func HandleRequestIBDChainBlockLocator(context RequestIBDChainBlockLocatorContext, incomingRoute *router.Route,
	outgoingRoute *router.Route) error {

	flow := &handleRequestIBDChainBlockLocatorFlow{
		RequestIBDChainBlockLocatorContext: context,
		incomingRoute:                      incomingRoute,
		outgoingRoute:                      outgoingRoute,
	}
	return flow.start()
}

func (flow *handleRequestIBDChainBlockLocatorFlow) start() error {
	for {
		highHash, lowHash, err := flow.receiveRequestIBDChainBlockLocator()
		if err != nil {
			return err
		}
		log.Debugf("Received getIBDChainBlockLocator with highHash: %s, lowHash: %s", highHash, lowHash)

		var locator externalapi.BlockLocator
		if highHash == nil || lowHash == nil {
			locator, err = flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
		} else {
			locator, err = flow.Domain().Consensus().CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
			if errors.Is(model.ErrBlockNotInSelectedParentChain, err) {
				// The chain has been modified, signal it by sending an empty locator
				locator, err = externalapi.BlockLocator{}, nil
			}
		}

		if err != nil {
			log.Debugf("Received error from CreateHeadersSelectedChainBlockLocator: %s", err)
			return protocolerrors.Errorf(true, "couldn't build a block "+
				"locator between %s and %s", lowHash, highHash)
		}

		err = flow.sendIBDChainBlockLocator(locator)
		if err != nil {
			return err
		}
	}
}

func (flow *handleRequestIBDChainBlockLocatorFlow) receiveRequestIBDChainBlockLocator() (highHash, lowHash *externalapi.DomainHash, err error) {

	message, err := flow.incomingRoute.Dequeue()
	if err != nil {
		return nil, nil, err
	}
	msgGetBlockLocator := message.(*appmessage.MsgRequestIBDChainBlockLocator)

	return msgGetBlockLocator.HighHash, msgGetBlockLocator.LowHash, nil
}

func (flow *handleRequestIBDChainBlockLocatorFlow) sendIBDChainBlockLocator(locator externalapi.BlockLocator) error {
	msgIBDChainBlockLocator := appmessage.NewMsgIBDChainBlockLocator(locator)
	err := flow.outgoingRoute.Enqueue(msgIBDChainBlockLocator)
	if err != nil {
		return err
	}
	return nil
}
```
```diff
@@ -118,7 +118,7 @@ func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticone
 			return err
 		}
 
-		for _, blockHash := range pointAndItsAnticone {
+		for i, blockHash := range pointAndItsAnticone {
 			block, err := context.Domain().Consensus().GetBlock(blockHash)
 			if err != nil {
 				return err
@@ -128,6 +128,19 @@ func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticone
 			if err != nil {
 				return err
 			}
+
+			if (i+1)%ibdBatchSize == 0 {
+				// No timeout here, as we don't care if the syncee takes its time computing,
+				// since it only blocks this dedicated flow
+				message, err := incomingRoute.Dequeue()
+				if err != nil {
+					return err
+				}
+				if _, ok := message.(*appmessage.MsgRequestNextPruningPointAndItsAnticoneBlocks); !ok {
+					return protocolerrors.Errorf(true, "received unexpected message type. "+
+						"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointAndItsAnticoneBlocks, message.Command())
+				}
+			}
 		}
 
 		err = outgoingRoute.Enqueue(appmessage.NewMsgDoneBlocksWithTrustedData())
```
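The hunk above is the syncer side of the batching handshake: after every ibdBatchSize blocks it pauses until the syncee explicitly asks for more. The counterpart on the syncee side is simply to enqueue MsgRequestNextPruningPointAndItsAnticoneBlocks at the same cadence. A minimal sketch of that counterpart (the helper function is hypothetical; the constant and message type are the ones introduced in this changeset):

```go
package example // illustrative only

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// ackBatchIfNeeded mirrors the syncee side of the handshake: after every
// ibdBatchSize blocks it has processed, it asks the syncer to continue sending
// blocks from the pruning point anticone.
func ackBatchIfNeeded(outgoingRoute *router.Route, blocksProcessed, ibdBatchSize int) error {
	if blocksProcessed%ibdBatchSize != 0 {
		return nil
	}
	return outgoingRoute.Enqueue(appmessage.NewMsgRequestNextPruningPointAndItsAnticoneBlocks())
}
```

Keeping the batch size a shared protocol constant (99) rather than tying it to router.DefaultMaxMessages is what the new TestIBDBatchSizeLessThanRouteCapacity test guards.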
```diff
@@ -7,9 +7,11 @@ import (
 	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
 	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
 	"github.com/kaspanet/kaspad/domain"
+	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
+	"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
 	"github.com/kaspanet/kaspad/infrastructure/config"
 	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
 	"github.com/pkg/errors"
@@ -26,6 +28,7 @@ type RelayInvsContext interface {
 	Config() *config.Config
 	OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
 	OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
+	OnNewBlockTemplate() error
 	OnPruningPointUTXOSetOverride() error
 	SharedRequestedBlocks() *flowcontext.SharedRequestedBlocks
 	Broadcast(message appmessage.Message) error
@@ -34,13 +37,19 @@ type RelayInvsContext interface {
 	IsOrphan(blockHash *externalapi.DomainHash) bool
 	IsIBDRunning() bool
 	IsRecoverableError(err error) bool
+	IsNearlySynced() (bool, error)
 }
 
+type invRelayBlock struct {
+	Hash         *externalapi.DomainHash
+	IsOrphanRoot bool
+}
+
 type handleRelayInvsFlow struct {
 	RelayInvsContext
 	incomingRoute, outgoingRoute *router.Route
 	peer                         *peerpkg.Peer
-	invsQueue                    []*appmessage.MsgInvRelayBlock
+	invsQueue                    []invRelayBlock
 }
 
 // HandleRelayInvs listens to appmessage.MsgInvRelayBlock messages, requests their corresponding blocks if they
@@ -53,7 +62,7 @@ func HandleRelayInvs(context RelayInvsContext, incomingRoute *router.Route, outg
 		incomingRoute: incomingRoute,
 		outgoingRoute: outgoingRoute,
 		peer:          peer,
-		invsQueue:     make([]*appmessage.MsgInvRelayBlock, 0),
+		invsQueue:     make([]invRelayBlock, 0),
 	}
 	err := flow.start()
 	// Currently, HandleRelayInvs flow is the only place where IBD is triggered, so the channel can be closed now
@@ -104,10 +113,16 @@ func (flow *handleRelayInvsFlow) start() error {
 			continue
 		}
 
-		// Block relay is disabled during IBD
+		// Block relay is disabled if the node is already during IBD AND considered out of sync
 		if flow.IsIBDRunning() {
-			log.Debugf("Got block %s while in IBD. continuing...", inv.Hash)
-			continue
+			isNearlySynced, err := flow.IsNearlySynced()
+			if err != nil {
+				return err
+			}
+			if !isNearlySynced {
+				log.Debugf("Got block %s while in IBD and the node is out of sync. Continuing...", inv.Hash)
+				continue
+			}
 		}
 
 		log.Debugf("Requesting block %s", inv.Hash)
@@ -130,7 +145,35 @@ func (flow *handleRelayInvsFlow) start() error {
 			continue
 		}
 
+		// Note we do not apply the heuristic below if inv was queued as an orphan root, since
+		// that means the process started by a proper and relevant relay block
+		if !inv.IsOrphanRoot {
+			// Check bounded merge depth to avoid requesting irrelevant data which cannot be merged under virtual
+			virtualMergeDepthRoot, err := flow.Domain().Consensus().VirtualMergeDepthRoot()
+			if err != nil {
+				return err
+			}
+			if !virtualMergeDepthRoot.Equal(model.VirtualGenesisBlockHash) {
+				mergeDepthRootHeader, err := flow.Domain().Consensus().GetBlockHeader(virtualMergeDepthRoot)
+				if err != nil {
+					return err
+				}
+				// Since `BlueWork` respects topology, this condition means that the relay
+				// block is not in the future of virtual's merge depth root, and thus cannot be merged unless
+				// other valid blocks Kosherize it, in which case it will be obtained once the merger is relayed
+				if block.Header.BlueWork().Cmp(mergeDepthRootHeader.BlueWork()) <= 0 {
+					log.Debugf("Block %s has lower blue work than virtual's merge root %s (%d <= %d), hence we are skipping it",
+						inv.Hash, virtualMergeDepthRoot, block.Header.BlueWork(), mergeDepthRootHeader.BlueWork())
+					continue
+				}
+			}
+		}
+
 		log.Debugf("Processing block %s", inv.Hash)
+		oldVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo()
+		if err != nil {
+			return err
+		}
 		missingParents, virtualChangeSet, err := flow.processBlock(block)
 		if err != nil {
 			if errors.Is(err, ruleerrors.ErrPrunedBlock) {
@@ -153,11 +196,42 @@ func (flow *handleRelayInvsFlow) start() error {
 			continue
 		}
 
-		log.Debugf("Relaying block %s", inv.Hash)
-		err = flow.relayBlock(block)
+		oldVirtualParents := hashset.New()
+		for _, parent := range oldVirtualInfo.ParentHashes {
+			oldVirtualParents.Add(parent)
+		}
+
+		newVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo()
+		if err != nil {
+			return err
+		}
+
+		virtualHasNewParents := false
+		for _, parent := range newVirtualInfo.ParentHashes {
+			if oldVirtualParents.Contains(parent) {
+				continue
+			}
+			virtualHasNewParents = true
+			block, err := flow.Domain().Consensus().GetBlock(parent)
+			if err != nil {
+				return err
+			}
+			blockHash := consensushashing.BlockHash(block)
+			log.Debugf("Relaying block %s", blockHash)
+			err = flow.relayBlock(block)
 			if err != nil {
 				return err
 			}
+		}
+
+		if virtualHasNewParents {
+			log.Debugf("Virtual %d has new parents, raising new block template event", newVirtualInfo.DAAScore)
+			err = flow.OnNewBlockTemplate()
+			if err != nil {
+				return err
+			}
+		}
+
 		log.Infof("Accepted block %s via relay", inv.Hash)
 		err = flow.OnNewBlock(block, virtualChangeSet)
 		if err != nil {
@@ -175,24 +249,24 @@ func (flow *handleRelayInvsFlow) banIfBlockIsHeaderOnly(block *externalapi.Domai
 	return nil
 }
 
-func (flow *handleRelayInvsFlow) readInv() (*appmessage.MsgInvRelayBlock, error) {
+func (flow *handleRelayInvsFlow) readInv() (invRelayBlock, error) {
 	if len(flow.invsQueue) > 0 {
-		var inv *appmessage.MsgInvRelayBlock
+		var inv invRelayBlock
 		inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:]
 		return inv, nil
 	}
 
 	msg, err := flow.incomingRoute.Dequeue()
 	if err != nil {
-		return nil, err
+		return invRelayBlock{}, err
 	}
 
-	inv, ok := msg.(*appmessage.MsgInvRelayBlock)
+	msgInv, ok := msg.(*appmessage.MsgInvRelayBlock)
 	if !ok {
-		return nil, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+
+		return invRelayBlock{}, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+
 			"expecting an inv message", msg.Command())
 	}
-	return inv, nil
+	return invRelayBlock{Hash: msgInv.Hash, IsOrphanRoot: false}, nil
 }
 
 func (flow *handleRelayInvsFlow) requestBlock(requestHash *externalapi.DomainHash) (*externalapi.DomainBlock, bool, error) {
@@ -237,7 +311,7 @@ func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock,
 
 		switch message := message.(type) {
 		case *appmessage.MsgInvRelayBlock:
-			flow.invsQueue = append(flow.invsQueue, message)
+			flow.invsQueue = append(flow.invsQueue, invRelayBlock{Hash: message.Hash, IsOrphanRoot: false})
 		case *appmessage.MsgBlock:
 			return message, nil
 		default:
@@ -258,7 +332,10 @@ func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([
 		if errors.As(err, missingParentsError) {
 			return missingParentsError.MissingParentHashes, nil, nil
 		}
-		log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
+		// A duplicate block should not appear to the user as a warning and is already reported in the calling function
+		if !errors.Is(err, ruleerrors.ErrDuplicateBlock) {
+			log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
+		}
 		return nil, nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
 	}
 	return nil, virtualChangeSet, nil
@@ -369,12 +446,16 @@ func (flow *handleRelayInvsFlow) AddOrphanRootsToQueue(orphan *externalapi.Domai
 			"probably happened because it was randomly evicted immediately after it was added.", orphan)
 	}
 
+	if len(orphanRoots) == 0 {
+		// In some rare cases we get here when there are no orphan roots already
+		return nil
+	}
 	log.Infof("Block %s has %d missing ancestors. Adding them to the invs queue...", orphan, len(orphanRoots))
 
-	invMessages := make([]*appmessage.MsgInvRelayBlock, len(orphanRoots))
+	invMessages := make([]invRelayBlock, len(orphanRoots))
 	for i, root := range orphanRoots {
 		log.Debugf("Adding block %s missing ancestor %s to the invs queue", orphan, root)
-		invMessages[i] = appmessage.NewMsgInvBlock(root)
+		invMessages[i] = invRelayBlock{Hash: root, IsOrphanRoot: true}
 	}
 
 	flow.invsQueue = append(invMessages, flow.invsQueue...)
```
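For intuition on the bounded-merge-depth check added to the relay flow: blue work is a cumulative big.Int quantity that grows along the DAG, so a relay block whose blue work does not exceed the virtual merge depth root's blue work cannot currently be merged under virtual and is skipped. A tiny worked example with made-up numbers (illustrative only; real values come from the block headers):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Hypothetical blue-work values chosen purely for illustration.
	mergeDepthRootBlueWork := big.NewInt(1_000_000)
	relayBlockBlueWork := big.NewInt(900_000)

	// Mirrors the comparison in the relay flow: since blue work respects topology,
	// a relay block with blue work <= the merge depth root's blue work is not in the
	// root's future and cannot be merged, so requesting/processing it is pointless.
	if relayBlockBlueWork.Cmp(mergeDepthRootBlueWork) <= 0 {
		fmt.Println("skip: relay block is not in the future of virtual's merge depth root")
	}
}
```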
95 app/protocol/flows/v5/blockrelay/handle_request_anticone.go (new file)

```go
package blockrelay

import (
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/app/protocol/peer"
	"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
	"github.com/kaspanet/kaspad/domain"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/infrastructure/config"
	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
	"sort"
)

// RequestAnticoneContext is the interface for the context needed for the HandleRequestHeaders flow.
type RequestAnticoneContext interface {
	Domain() domain.Domain
	Config() *config.Config
}

type handleRequestAnticoneFlow struct {
	RequestAnticoneContext
	incomingRoute, outgoingRoute *router.Route
	peer                         *peer.Peer
}

// HandleRequestAnticone handles RequestAnticone messages
func HandleRequestAnticone(context RequestAnticoneContext, incomingRoute *router.Route,
	outgoingRoute *router.Route, peer *peer.Peer) error {

	flow := &handleRequestAnticoneFlow{
		RequestAnticoneContext: context,
		incomingRoute:          incomingRoute,
		outgoingRoute:          outgoingRoute,
		peer:                   peer,
	}
	return flow.start()
}

func (flow *handleRequestAnticoneFlow) start() error {
	for {
		blockHash, contextHash, err := receiveRequestAnticone(flow.incomingRoute)
		if err != nil {
			return err
		}
		log.Debugf("Received requestAnticone with blockHash: %s, contextHash: %s", blockHash, contextHash)
		log.Debugf("Getting past(%s) cap anticone(%s) for peer %s", contextHash, blockHash, flow.peer)

		// GetAnticone is expected to be called by the syncee for getting the anticone of the header selected tip
		// intersected by past of relayed block, and is thus expected to be bounded by mergeset limit since
		// we relay blocks only if they enter virtual's mergeset. We add 2 for a small margin error.
		blockHashes, err := flow.Domain().Consensus().GetAnticone(blockHash, contextHash,
			flow.Config().ActiveNetParams.MergeSetSizeLimit+2)
		if err != nil {
			return protocolerrors.Wrap(true, err, "Failed querying anticone")
		}
		log.Debugf("Got %d header hashes in past(%s) cap anticone(%s)", len(blockHashes), contextHash, blockHash)

		blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
		for i, blockHash := range blockHashes {
			blockHeader, err := flow.Domain().Consensus().GetBlockHeader(blockHash)
			if err != nil {
				return err
			}
			blockHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(blockHeader)
		}

		// We sort the headers in bottom-up topological order before sending
		sort.Slice(blockHeaders, func(i, j int) bool {
			return blockHeaders[i].BlueWork.Cmp(blockHeaders[j].BlueWork) < 0
		})

		blockHeadersMessage := appmessage.NewBlockHeadersMessage(blockHeaders)
		err = flow.outgoingRoute.Enqueue(blockHeadersMessage)
		if err != nil {
			return err
		}

		err = flow.outgoingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
		if err != nil {
			return err
		}
	}
}

func receiveRequestAnticone(incomingRoute *router.Route) (blockHash *externalapi.DomainHash,
	contextHash *externalapi.DomainHash, err error) {

	message, err := incomingRoute.Dequeue()
	if err != nil {
		return nil, nil, err
	}
	msgRequestAnticone := message.(*appmessage.MsgRequestAnticone)

	return msgRequestAnticone.BlockHash, msgRequestAnticone.ContextHash, nil
}
```
```diff
@@ -10,7 +10,9 @@ import (
 	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
 )
 
-const ibdBatchSize = router.DefaultMaxMessages
+// This constant must be equal at both syncer and syncee. Therefore, never (!!) change this constant unless a new p2p
+// version is introduced. See `TestIBDBatchSizeLessThanRouteCapacity` as well.
+const ibdBatchSize = 99
 
 // RequestHeadersContext is the interface for the context needed for the HandleRequestHeaders flow.
 type RequestHeadersContext interface {
@@ -42,7 +44,16 @@ func (flow *handleRequestHeadersFlow) start() error {
 		if err != nil {
 			return err
 		}
-		log.Debugf("Recieved requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash)
+		log.Debugf("Received requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash)
+
+		isLowSelectedAncestorOfHigh, err := flow.Domain().Consensus().IsInSelectedParentChainOf(lowHash, highHash)
+		if err != nil {
+			return err
+		}
+		if !isLowSelectedAncestorOfHigh {
+			return protocolerrors.Errorf(true, "Expected %s to be on the selected chain of %s",
+				lowHash, highHash)
+		}
 
 		for !lowHash.Equal(highHash) {
 			log.Debugf("Getting block headers between %s and %s to %s", lowHash, highHash, flow.peer)
```
@@ -6,7 +6,6 @@ import (
|
||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
@@ -23,6 +22,7 @@ type IBDContext interface {
|
||||
Config() *config.Config
|
||||
OnNewBlock(block *externalapi.DomainBlock, virtualChangeSet *externalapi.VirtualChangeSet) error
|
||||
OnVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error
|
||||
OnNewBlockTemplate() error
|
||||
OnPruningPointUTXOSetOverride() error
|
||||
IsIBDRunning() bool
|
||||
TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool
|
||||
@@ -76,17 +76,19 @@ func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) er
|
||||
flow.logIBDFinished(isFinishedSuccessfully)
|
||||
}()
|
||||
|
||||
highHash := consensushashing.BlockHash(block)
|
||||
log.Debugf("IBD started with peer %s and highHash %s", flow.peer, highHash)
|
||||
log.Debugf("Syncing blocks up to %s", highHash)
|
||||
log.Debugf("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
|
||||
highestSharedBlockHash, highestSharedBlockFound, err := flow.findHighestSharedBlockHash(highHash)
|
||||
relayBlockHash := consensushashing.BlockHash(block)
|
||||
|
||||
log.Debugf("IBD started with peer %s and relayBlockHash %s", flow.peer, relayBlockHash)
|
||||
log.Debugf("Syncing blocks up to %s", relayBlockHash)
|
||||
log.Debugf("Trying to find highest known syncer chain block from peer %s with relay hash %s", flow.peer, relayBlockHash)
|
||||
|
||||
syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, err := flow.negotiateMissingSyncerChainSegment()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debugf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)
|
||||
|
||||
shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof(block, highestSharedBlockFound)
|
||||
shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof(
|
||||
block, highestKnownSyncerChainHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -97,7 +99,7 @@ func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) er
|
||||
|
||||
if shouldDownloadHeadersProof {
|
||||
log.Infof("Starting IBD with headers proof")
|
||||
err := flow.ibdWithHeadersProof(highHash, block.Header.DAAScore())
|
||||
err := flow.ibdWithHeadersProof(syncerHeaderSelectedTipHash, relayBlockHash, block.Header.DAAScore())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -110,27 +112,162 @@ func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) er
|
||||
|
||||
if isGenesisVirtualSelectedParent {
|
||||
log.Infof("Cannot IBD to %s because it won't change the pruning point. The node needs to IBD "+
|
||||
"to the recent pruning point before normal operation can resume.", highHash)
|
||||
"to the recent pruning point before normal operation can resume.", relayBlockHash)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
err = flow.syncPruningPointFutureHeaders(flow.Domain().Consensus(), highestSharedBlockHash, highHash, block.Header.DAAScore())
|
||||
err = flow.syncPruningPointFutureHeaders(
|
||||
flow.Domain().Consensus(),
|
||||
syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, relayBlockHash, block.Header.DAAScore())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err = flow.syncMissingBlockBodies(highHash)
|
||||
// We start by syncing missing bodies over the syncer selected chain
|
||||
err = flow.syncMissingBlockBodies(syncerHeaderSelectedTipHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
relayBlockInfo, err := flow.Domain().Consensus().GetBlockInfo(relayBlockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Relay block might be in the anticone of syncer selected tip, thus
|
||||
// check his chain for missing bodies as well.
|
||||
// Note: this operation can be slightly optimized to avoid the full chain search since relay block
|
||||
// is in syncer virtual mergeset which has bounded size.
|
||||
if relayBlockInfo.BlockStatus == externalapi.StatusHeaderOnly {
|
||||
err = flow.syncMissingBlockBodies(relayBlockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
log.Debugf("Finished syncing blocks up to %s", highHash)
|
||||
log.Debugf("Finished syncing blocks up to %s", relayBlockHash)
|
||||
isFinishedSuccessfully = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) negotiateMissingSyncerChainSegment() (*externalapi.DomainHash, *externalapi.DomainHash, error) {
|
||||
/*
|
||||
Algorithm:
|
||||
Request full selected chain block locator from syncer
|
||||
Find the highest block which we know
|
||||
Repeat the locator step over the new range until finding max(past(syncee) \cap chain(syncer))
|
||||
*/
|
||||
|
||||
// Empty hashes indicate that the full chain is queried
|
||||
locatorHashes, err := flow.getSyncerChainBlockLocator(nil, nil, common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if len(locatorHashes) == 0 {
|
||||
return nil, nil, protocolerrors.Errorf(true, "Expecting initial syncer chain block locator "+
|
||||
"to contain at least one element")
|
||||
}
|
||||
log.Debugf("IBD chain negotiation with peer %s started and received %d hashes (%s, %s)", flow.peer,
|
||||
len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])
|
||||
syncerHeaderSelectedTipHash := locatorHashes[0]
|
||||
var highestKnownSyncerChainHash *externalapi.DomainHash
|
||||
chainNegotiationRestartCounter := 0
|
||||
chainNegotiationZoomCounts := 0
|
||||
initialLocatorLen := len(locatorHashes)
|
||||
for {
|
||||
var lowestUnknownSyncerChainHash, currentHighestKnownSyncerChainHash *externalapi.DomainHash
|
||||
for _, syncerChainHash := range locatorHashes {
|
||||
info, err := flow.Domain().Consensus().GetBlockInfo(syncerChainHash)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if info.Exists {
|
||||
currentHighestKnownSyncerChainHash = syncerChainHash
|
||||
break
|
||||
}
|
||||
lowestUnknownSyncerChainHash = syncerChainHash
|
||||
}
|
||||
// No unknown blocks, break. Note this can only happen in the first iteration
|
||||
if lowestUnknownSyncerChainHash == nil {
|
||||
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
|
||||
break
|
||||
}
|
||||
// No shared block, break
|
||||
if currentHighestKnownSyncerChainHash == nil {
|
||||
highestKnownSyncerChainHash = nil
|
||||
break
|
||||
}
|
||||
// No point in zooming further
|
||||
if len(locatorHashes) == 1 {
|
||||
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
|
||||
break
|
||||
}
|
||||
// Zoom in
|
||||
locatorHashes, err = flow.getSyncerChainBlockLocator(
|
||||
lowestUnknownSyncerChainHash,
|
||||
currentHighestKnownSyncerChainHash, time.Second*10)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if len(locatorHashes) > 0 {
|
||||
if !locatorHashes[0].Equal(lowestUnknownSyncerChainHash) ||
|
||||
!locatorHashes[len(locatorHashes)-1].Equal(currentHighestKnownSyncerChainHash) {
|
||||
return nil, nil, protocolerrors.Errorf(true, "Expecting the high and low "+
|
||||
"hashes to match the locator bounds")
|
||||
}
|
||||
|
||||
chainNegotiationZoomCounts++
|
||||
log.Debugf("IBD chain negotiation with peer %s zoomed in (%d) and received %d hashes (%s, %s)", flow.peer,
|
||||
chainNegotiationZoomCounts, len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])
|
||||
|
||||
if len(locatorHashes) == 2 {
|
||||
// We found our search target
|
||||
highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash
|
||||
break
|
||||
}
|
||||
|
||||
if chainNegotiationZoomCounts > initialLocatorLen*2 {
|
||||
// Since the zoom-in always queries two consecutive entries in the previous locator, it is
|
||||
// expected to decrease in size at least every two iterations
|
||||
return nil, nil, protocolerrors.Errorf(true,
|
||||
"IBD chain negotiation: Number of zoom-in steps %d exceeded the upper bound of 2*%d",
|
||||
chainNegotiationZoomCounts, initialLocatorLen)
|
||||
}
|
||||
|
||||
} else { // Empty locator signals a restart due to chain changes
|
||||
chainNegotiationZoomCounts = 0
|
||||
chainNegotiationRestartCounter++
|
||||
if chainNegotiationRestartCounter > 32 {
|
||||
return nil, nil, protocolerrors.Errorf(false,
|
||||
"IBD chain negotiation with syncer %s exceeded restart limit %d", flow.peer, chainNegotiationRestartCounter)
|
||||
}
|
||||
log.Warnf("IBD chain negotiation with syncer %s restarted %d times", flow.peer, chainNegotiationRestartCounter)
|
||||
|
||||
// An empty locator signals that the syncer chain was modified and no longer contains one of
|
||||
// the queried hashes, so we restart the search. We use a shorter timeout here to avoid a timeout attack
|
||||
locatorHashes, err = flow.getSyncerChainBlockLocator(nil, nil, time.Second*10)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if len(locatorHashes) == 0 {
|
||||
return nil, nil, protocolerrors.Errorf(true, "Expecting initial syncer chain block locator "+
|
||||
"to contain at least one element")
|
||||
}
|
||||
log.Infof("IBD chain negotiation with peer %s restarted (%d) and received %d hashes (%s, %s)", flow.peer,
|
||||
chainNegotiationRestartCounter, len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1])
|
||||
|
||||
initialLocatorLen = len(locatorHashes)
|
||||
// Reset syncer's header selected tip
|
||||
syncerHeaderSelectedTipHash = locatorHashes[0]
|
||||
}
|
||||
}
|
||||
|
||||
log.Debugf("Found highest known syncer chain block %s from peer %s",
|
||||
highestKnownSyncerChainHash, flow.peer)
|
||||
|
||||
return syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) isGenesisVirtualSelectedParent() (bool, error) {
|
||||
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
|
||||
if err != nil {
|
||||
@@ -145,141 +282,57 @@ func (flow *handleIBDFlow) logIBDFinished(isFinishedSuccessfully bool) {
|
||||
if !isFinishedSuccessfully {
|
||||
successString = "(interrupted)"
|
||||
}
|
||||
log.Infof("IBD finished %s", successString)
|
||||
log.Infof("IBD with peer %s finished %s", flow.peer, successString)
|
||||
}
|
||||
|
||||
// findHighestSharedBlock attempts to find the highest shared block between the peer
|
||||
// and this node. This method may fail because the peer and us have conflicting pruning
|
||||
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
|
||||
func (flow *handleIBDFlow) findHighestSharedBlockHash(
|
||||
targetHash *externalapi.DomainHash) (*externalapi.DomainHash, bool, error) {
|
||||
func (flow *handleIBDFlow) getSyncerChainBlockLocator(
|
||||
highHash, lowHash *externalapi.DomainHash, timeout time.Duration) ([]*externalapi.DomainHash, error) {
|
||||
|
||||
log.Debugf("Sending a blockLocator to %s between pruning point and headers selected tip", flow.peer)
|
||||
blockLocator, err := flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
|
||||
requestIbdChainBlockLocatorMessage := appmessage.NewMsgIBDRequestChainBlockLocator(highHash, lowHash)
|
||||
err := flow.outgoingRoute.Enqueue(requestIbdChainBlockLocatorMessage)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for {
|
||||
highestHash, highestHashFound, err := flow.fetchHighestHash(targetHash, blockLocator)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
if !highestHashFound {
|
||||
return nil, false, nil
|
||||
}
|
||||
highestHashIndex, err := flow.findHighestHashIndex(highestHash, blockLocator)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
if highestHashIndex == 0 ||
|
||||
// If the block locator contains only two adjacent chain blocks, the
|
||||
// syncer will always find the same highest chain block, so to avoid
|
||||
// an endless loop, we explicitly stop the loop in such situation.
|
||||
(len(blockLocator) == 2 && highestHashIndex == 1) {
|
||||
|
||||
return highestHash, true, nil
|
||||
}
|
||||
|
||||
locatorHashAboveHighestHash := highestHash
|
||||
if highestHashIndex > 0 {
|
||||
locatorHashAboveHighestHash = blockLocator[highestHashIndex-1]
|
||||
}
|
||||
|
||||
blockLocator, err = flow.nextBlockLocator(highestHash, locatorHashAboveHighestHash)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) nextBlockLocator(lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
|
||||
log.Debugf("Sending a blockLocator to %s between %s and %s", flow.peer, lowHash, highHash)
|
||||
blockLocator, err := flow.Domain().Consensus().CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(timeout)
|
||||
if err != nil {
|
||||
if errors.Is(model.ErrBlockNotInSelectedParentChain, err) {
|
||||
return nil, err
|
||||
}
|
||||
log.Debugf("Headers selected parent chain moved since findHighestSharedBlockHash - " +
|
||||
"restarting with full block locator")
|
||||
blockLocator, err = flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return blockLocator, nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) findHighestHashIndex(
|
||||
highestHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (int, error) {
|
||||
|
||||
highestHashIndex := 0
|
||||
highestHashIndexFound := false
|
||||
for i, blockLocatorHash := range blockLocator {
|
||||
if highestHash.Equal(blockLocatorHash) {
|
||||
highestHashIndex = i
|
||||
highestHashIndexFound = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !highestHashIndexFound {
|
||||
return 0, protocolerrors.Errorf(true, "highest hash %s "+
|
||||
"returned from peer %s is not in the original blockLocator", highestHash, flow.peer)
|
||||
}
|
||||
log.Debugf("The index of the highest hash in the original "+
|
||||
"blockLocator sent to %s is %d", flow.peer, highestHashIndex)
|
||||
|
||||
return highestHashIndex, nil
|
||||
}
|
||||
|
||||
// fetchHighestHash attempts to fetch the highest hash the peer knows amongst the given
|
||||
// blockLocator. This method may fail because we and the peer have conflicting pruning
|
||||
// points. In that case we return (nil, false, nil) so that we may stop IBD gracefully.
|
||||
func (flow *handleIBDFlow) fetchHighestHash(
|
||||
targetHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (*externalapi.DomainHash, bool, error) {
|
||||
|
||||
ibdBlockLocatorMessage := appmessage.NewMsgIBDBlockLocator(targetHash, blockLocator)
|
||||
err := flow.outgoingRoute.Enqueue(ibdBlockLocatorMessage)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
return nil, err
|
||||
}
|
||||
switch message := message.(type) {
|
||||
case *appmessage.MsgIBDBlockLocatorHighestHash:
|
||||
highestHash := message.HighestHash
|
||||
log.Debugf("The highest hash the peer %s knows is %s", flow.peer, highestHash)
|
||||
|
||||
return highestHash, true, nil
|
||||
case *appmessage.MsgIBDBlockLocatorHighestHashNotFound:
|
||||
log.Debugf("Peer %s does not know any block within our blockLocator. "+
|
||||
"This should only happen if there's a DAG split deeper than the pruning point.", flow.peer)
|
||||
return nil, false, nil
|
||||
case *appmessage.MsgIBDChainBlockLocator:
|
||||
if len(message.BlockLocatorHashes) > 64 {
|
||||
return nil, protocolerrors.Errorf(true,
|
||||
"Got block locator of size %d>64 while expecting locator to have size "+
|
||||
"which is logarithmic in DAG size (which should never exceed 2^64)",
|
||||
len(message.BlockLocatorHashes))
|
||||
}
|
||||
return message.BlockLocatorHashes, nil
|
||||
default:
|
||||
return nil, false, protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s, got: %s", appmessage.CmdIBDBlockLocatorHighestHash, message.Command())
|
||||
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s, got: %s", appmessage.CmdIBDChainBlockLocator, message.Command())
|
||||
}
|
||||
}
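For context on the 64-entry sanity bound above: chain block locators are built with exponentially growing gaps along the selected chain, so their length grows logarithmically with the chain length. A self-contained sketch of that spacing idea, not taken from the kaspad codebase (the string-based chain and the doubling-from-the-start policy are illustrative assumptions):

package main

import "fmt"

// buildLocator picks chain blocks at exponentially growing distances from the tip,
// so a chain of n blocks yields roughly log2(n) entries. This is why a locator
// larger than 64 entries can be rejected as malformed above.
func buildLocator(chain []string) []string {
	if len(chain) == 0 {
		return nil
	}
	locator := []string{}
	step := 1
	for i := len(chain) - 1; i >= 0; i -= step {
		locator = append(locator, chain[i])
		step *= 2 // exponentially growing gaps => O(log n) entries
	}
	if locator[len(locator)-1] != chain[0] {
		locator = append(locator, chain[0]) // always anchor at the lowest block
	}
	return locator
}

func main() {
	chain := make([]string, 1_000_000)
	for i := range chain {
		chain[i] = fmt.Sprintf("block-%d", i)
	}
	fmt.Println("locator entries:", len(buildLocator(chain))) // ~21 entries for a million blocks
}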
|
||||
|
||||
func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.Consensus, highestSharedBlockHash *externalapi.DomainHash,
|
||||
highHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
|
||||
func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.Consensus,
|
||||
syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, relayBlockHash *externalapi.DomainHash,
|
||||
highBlockDAAScoreHint uint64) error {
|
||||
|
||||
log.Infof("Downloading headers from %s", flow.peer)
|
||||
|
||||
err := flow.sendRequestHeaders(highestSharedBlockHash, highHash)
|
||||
if highestKnownSyncerChainHash.Equal(syncerHeaderSelectedTipHash) {
|
||||
// No need to get syncer selected tip headers, so sync relay past and return
|
||||
return flow.syncMissingRelayPast(consensus, syncerHeaderSelectedTipHash, relayBlockHash)
|
||||
}
|
||||
|
||||
err := flow.sendRequestHeaders(highestKnownSyncerChainHash, syncerHeaderSelectedTipHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
highestSharedBlockHeader, err := consensus.GetBlockHeader(highestSharedBlockHash)
|
||||
highestSharedBlockHeader, err := consensus.GetBlockHeader(highestKnownSyncerChainHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
progressReporter := newIBDProgressReporter(highestSharedBlockHeader.DAAScore(), highBlockDAAScore, "block headers")
|
||||
progressReporter := newIBDProgressReporter(highestSharedBlockHeader.DAAScore(), highBlockDAAScoreHint, "block headers")
|
||||
|
||||
// Keep a short queue of BlockHeadersMessages so that there's
|
||||
// never a moment when the node is not validating and inserting
|
||||
@@ -297,6 +350,11 @@ func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.C
|
||||
close(blockHeadersMessageChan)
|
||||
return
|
||||
}
|
||||
if len(blockHeadersMessage.BlockHeaders) == 0 {
|
||||
// The syncer should have sent a done message if the search completed, and not an empty list
|
||||
errChan <- protocolerrors.Errorf(true, "Received an empty headers message from peer %s", flow.peer)
|
||||
return
|
||||
}
|
||||
|
||||
blockHeadersMessageChan <- blockHeadersMessage
|
||||
|
||||
@@ -312,16 +370,7 @@ func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.C
|
||||
select {
|
||||
case ibdBlocksMessage, ok := <-blockHeadersMessageChan:
|
||||
if !ok {
|
||||
// If the highHash has not been received, the peer is misbehaving
|
||||
highHashBlockInfo, err := consensus.GetBlockInfo(highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !highHashBlockInfo.Exists {
|
||||
return protocolerrors.Errorf(true, "did not receive "+
|
||||
"highHash block %s from peer %s during block download", highHash, flow.peer)
|
||||
}
|
||||
return nil
|
||||
return flow.syncMissingRelayPast(consensus, syncerHeaderSelectedTipHash, relayBlockHash)
|
||||
}
|
||||
for _, header := range ibdBlocksMessage.BlockHeaders {
|
||||
err = flow.processHeader(consensus, header)
|
||||
@@ -338,11 +387,70 @@ func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.C
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) sendRequestHeaders(highestSharedBlockHash *externalapi.DomainHash,
|
||||
peerSelectedTipHash *externalapi.DomainHash) error {
|
||||
func (flow *handleIBDFlow) syncMissingRelayPast(consensus externalapi.Consensus, syncerHeaderSelectedTipHash *externalapi.DomainHash, relayBlockHash *externalapi.DomainHash) error {
|
||||
// Finished downloading syncer selected tip blocks,
|
||||
// check if we already have the triggering relayBlockHash
|
||||
relayBlockInfo, err := consensus.GetBlockInfo(relayBlockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !relayBlockInfo.Exists {
|
||||
// Send a special header request for the selected tip anticone. This is expected to
|
||||
// be a small set, as it is bounded to the size of virtual's mergeset.
|
||||
err = flow.sendRequestAnticone(syncerHeaderSelectedTipHash, relayBlockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
anticoneHeadersMessage, anticoneDone, err := flow.receiveHeaders()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if anticoneDone {
|
||||
return protocolerrors.Errorf(true,
|
||||
"Expected one anticone header chunk for past(%s) cap anticone(%s) but got zero",
|
||||
relayBlockHash, syncerHeaderSelectedTipHash)
|
||||
}
|
||||
_, anticoneDone, err = flow.receiveHeaders()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !anticoneDone {
|
||||
return protocolerrors.Errorf(true,
|
||||
"Expected only one anticone header chunk for past(%s) cap anticone(%s)",
|
||||
relayBlockHash, syncerHeaderSelectedTipHash)
|
||||
}
|
||||
for _, header := range anticoneHeadersMessage.BlockHeaders {
|
||||
err = flow.processHeader(consensus, header)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
msgGetBlockInvs := appmessage.NewMsgRequstHeaders(highestSharedBlockHash, peerSelectedTipHash)
|
||||
return flow.outgoingRoute.Enqueue(msgGetBlockInvs)
|
||||
// If the relayBlockHash has still not been received, the peer is misbehaving
|
||||
relayBlockInfo, err = consensus.GetBlockInfo(relayBlockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !relayBlockInfo.Exists {
|
||||
return protocolerrors.Errorf(true, "did not receive "+
|
||||
"relayBlockHash block %s from peer %s during block download", relayBlockHash, flow.peer)
|
||||
}
|
||||
return nil
|
||||
}
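To restate the exchange above in isolation: after requesting the anticone of the syncer's header selected tip (capped with the relay block's past), the syncee expects exactly one headers chunk followed by a done signal, and anything else is treated as peer misbehavior. A condensed sketch of that expectation, where requestAnticone, receiveChunk and process are illustrative stand-ins for the flow methods shown in the diff:

package main

import "errors"

type Header struct{ ID string }

// syncAnticone mirrors the protocol expectation in syncMissingRelayPast:
// exactly one chunk of anticone headers, then a done signal.
func syncAnticone(
	requestAnticone func() error,
	receiveChunk func() (headers []Header, done bool, err error),
	process func(Header) error,
) error {
	if err := requestAnticone(); err != nil {
		return err
	}
	headers, done, err := receiveChunk()
	if err != nil {
		return err
	}
	if done {
		return errors.New("expected one anticone header chunk but got zero")
	}
	_, done, err = receiveChunk()
	if err != nil {
		return err
	}
	if !done {
		return errors.New("expected exactly one anticone header chunk")
	}
	for _, header := range headers {
		if err := process(header); err != nil {
			return err
		}
	}
	return nil
}

func main() {}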
|
||||
|
||||
func (flow *handleIBDFlow) sendRequestAnticone(
|
||||
syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash) error {
|
||||
|
||||
msgRequestAnticone := appmessage.NewMsgRequestAnticone(syncerHeaderSelectedTipHash, relayBlockHash)
|
||||
return flow.outgoingRoute.Enqueue(msgRequestAnticone)
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) sendRequestHeaders(
|
||||
highestKnownSyncerChainHash, syncerHeaderSelectedTipHash *externalapi.DomainHash) error {
|
||||
|
||||
msgRequestHeaders := appmessage.NewMsgRequstHeaders(highestKnownSyncerChainHash, syncerHeaderSelectedTipHash)
|
||||
return flow.outgoingRoute.Enqueue(msgRequestHeaders)
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) receiveHeaders() (msgIBDBlock *appmessage.BlockHeadersMessage, doneHeaders bool, err error) {
|
||||
@@ -459,7 +567,7 @@ func (flow *handleIBDFlow) receiveAndInsertPruningPointUTXOSet(
|
||||
|
||||
receivedChunkCount++
|
||||
if receivedChunkCount%ibdBatchSize == 0 {
|
||||
log.Debugf("Received %d UTXO set chunks so far, totaling in %d UTXOs",
|
||||
log.Infof("Received %d UTXO set chunks so far, totaling in %d UTXOs",
|
||||
receivedChunkCount, receivedUTXOCount)
|
||||
|
||||
requestNextPruningPointUTXOSetChunkMessage := appmessage.NewMsgRequestNextPruningPointUTXOSetChunk()
|
||||
@@ -610,6 +718,10 @@ func (flow *handleIBDFlow) resolveVirtual(estimatedVirtualDAAScoreTarget uint64)
|
||||
|
||||
if isCompletelyResolved {
|
||||
log.Infof("Resolved virtual")
|
||||
err = flow.OnNewBlockTemplate()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -10,6 +10,10 @@ type ibdProgressReporter struct {
}

func newIBDProgressReporter(lowDAAScore uint64, highDAAScore uint64, objectName string) *ibdProgressReporter {
if highDAAScore <= lowDAAScore {
// Avoid a zero or negative diff
highDAAScore = lowDAAScore + 1
}
return &ibdProgressReporter{
lowDAAScore: lowDAAScore,
highDAAScore: highDAAScore,
@@ -23,7 +27,16 @@ func newIBDProgressReporter(lowDAAScore uint64, highDAAScore uint64, objectName
func (ipr *ibdProgressReporter) reportProgress(processedDelta int, highestProcessedDAAScore uint64) {
ipr.processed += processedDelta

relativeDAAScore := highestProcessedDAAScore - ipr.lowDAAScore
// Avoid exploding numbers in the percentage report, since the original `highDAAScore` might have been only a hint
if highestProcessedDAAScore > ipr.highDAAScore {
ipr.highDAAScore = highestProcessedDAAScore + 1 // + 1 for keeping it at 99%
ipr.totalDAAScoreDifference = ipr.highDAAScore - ipr.lowDAAScore
}
relativeDAAScore := uint64(0)
if highestProcessedDAAScore > ipr.lowDAAScore {
// Avoid a negative diff
relativeDAAScore = highestProcessedDAAScore - ipr.lowDAAScore
}
progressPercent := int((float64(relativeDAAScore) / float64(ipr.totalDAAScoreDifference)) * 100)
if progressPercent > ipr.lastReportedProgressPercent {
log.Infof("IBD: Processed %d %s (%d%%)", ipr.processed, ipr.objectName, progressPercent)
|
||||
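A small worked example of the guard added above: when the high DAA score given at construction was only a hint, a header observed beyond it stretches the denominator so the report caps at 99% instead of overshooting. The concrete scores below are made up for illustration:

package main

import "fmt"

func main() {
	// Constructor hint: low=1_000, high=2_000 (the high score is only an estimate).
	low, high := uint64(1_000), uint64(2_000)
	total := high - low

	// A header beyond the hinted high score arrives.
	highestProcessed := uint64(2_500)
	if highestProcessed > high {
		high = highestProcessed + 1 // +1 keeps the report at 99% rather than 100%
		total = high - low
	}
	relative := uint64(0)
	if highestProcessed > low {
		relative = highestProcessed - low
	}
	// Without the clamp this would print 150%; with it, 99%.
	fmt.Printf("progress: %d%%\n", int(float64(relative)/float64(total)*100))
}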
@@ -12,18 +12,20 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
func (flow *handleIBDFlow) ibdWithHeadersProof(highHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
|
||||
err := flow.Domain().InitStagingConsensus()
|
||||
func (flow *handleIBDFlow) ibdWithHeadersProof(
|
||||
syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
|
||||
err := flow.Domain().InitStagingConsensusWithoutGenesis()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = flow.downloadHeadersAndPruningUTXOSet(highHash, highBlockDAAScore)
|
||||
err = flow.downloadHeadersAndPruningUTXOSet(syncerHeaderSelectedTipHash, relayBlockHash, highBlockDAAScore)
|
||||
if err != nil {
|
||||
if !flow.IsRecoverableError(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Infof("IBD with pruning proof from %s was unsuccessful. Deleting the staging consensus.", flow.peer)
|
||||
deleteStagingConsensusErr := flow.Domain().DeleteStagingConsensus()
|
||||
if deleteStagingConsensusErr != nil {
|
||||
return deleteStagingConsensusErr
|
||||
@@ -32,6 +34,8 @@ func (flow *handleIBDFlow) ibdWithHeadersProof(highHash *externalapi.DomainHash,
|
||||
return err
|
||||
}
|
||||
|
||||
log.Infof("Header download stage of IBD with pruning proof completed successfully from %s. "+
|
||||
"Committing the staging consensus and deleting the previous obsolete one if such exists.", flow.peer)
|
||||
err = flow.Domain().CommitStagingConsensus()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -45,11 +49,29 @@ func (flow *handleIBDFlow) ibdWithHeadersProof(highHash *externalapi.DomainHash,
|
||||
return nil
|
||||
}
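The function above follows a stage-then-commit pattern: work happens in a staging consensus, which is committed only if the header download succeeded and deleted on a recoverable failure, so the active consensus is never left half-updated. A generic sketch of that pattern, with the function parameters standing in for the Domain calls shown in the diff:

package main

// stageAndCommit captures the pattern used by ibdWithHeadersProof: do the work in a
// staging area, commit it only on success, and discard it on a recoverable error.
func stageAndCommit(initStaging, work, commitStaging, deleteStaging func() error, isRecoverable func(error) bool) error {
	if err := initStaging(); err != nil {
		return err
	}
	if err := work(); err != nil {
		if !isRecoverable(err) {
			return err
		}
		if deleteErr := deleteStaging(); deleteErr != nil {
			return deleteErr
		}
		return err
	}
	return commitStaging()
}

func main() {}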
|
||||
|
||||
func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(highBlock *externalapi.DomainBlock,
|
||||
highestSharedBlockFound bool) (shouldDownload, shouldSync bool, err error) {
|
||||
func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(
|
||||
relayBlock *externalapi.DomainBlock,
|
||||
highestKnownSyncerChainHash *externalapi.DomainHash) (shouldDownload, shouldSync bool, err error) {
|
||||
|
||||
if !highestSharedBlockFound {
|
||||
hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore, err := flow.checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(highBlock)
|
||||
var highestSharedBlockFound, isPruningPointInSharedBlockChain bool
|
||||
if highestKnownSyncerChainHash != nil {
|
||||
highestSharedBlockFound = true
|
||||
pruningPoint, err := flow.Domain().Consensus().PruningPoint()
|
||||
if err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
|
||||
isPruningPointInSharedBlockChain, err = flow.Domain().Consensus().IsInSelectedParentChainOf(
|
||||
pruningPoint, highestKnownSyncerChainHash)
|
||||
if err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
}
|
||||
// Note: in the case where `highestSharedBlockFound == true && isPruningPointInSharedBlockChain == false`
|
||||
// we might have info here that is relevant to finality conflict decisions. This should be taken into
|
||||
// account when we improve this aspect.
|
||||
if !highestSharedBlockFound || !isPruningPointInSharedBlockChain {
|
||||
hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore, err := flow.checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(relayBlock)
|
||||
if err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
@@ -64,7 +86,7 @@ func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof(highBlock *ex
|
||||
return false, true, nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(highBlock *externalapi.DomainBlock) (bool, error) {
|
||||
func (flow *handleIBDFlow) checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(relayBlock *externalapi.DomainBlock) (bool, error) {
|
||||
headersSelectedTip, err := flow.Domain().Consensus().GetHeadersSelectedTip()
|
||||
if err != nil {
|
||||
return false, err
|
||||
@@ -75,11 +97,11 @@ func (flow *handleIBDFlow) checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruni
|
||||
return false, err
|
||||
}
|
||||
|
||||
if highBlock.Header.BlueScore() < headersSelectedTipInfo.BlueScore+flow.Config().NetParams().PruningDepth() {
|
||||
if relayBlock.Header.BlueScore() < headersSelectedTipInfo.BlueScore+flow.Config().NetParams().PruningDepth() {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return highBlock.Header.BlueWork().Cmp(headersSelectedTipInfo.BlueWork) > 0, nil
|
||||
return relayBlock.Header.BlueWork().Cmp(headersSelectedTipInfo.BlueWork) > 0, nil
|
||||
}
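Condensing the two functions above: whether to sync at all, and whether to do so with a pruning proof, depends on whether a chain block is shared with the syncer, whether our pruning point lies on that shared chain, and whether the relay block has more blue work than our selected tip and a blue score at least a pruning depth above it. A high-level sketch of that decision, with booleans standing in for the consensus queries:

package main

import "fmt"

// decide mirrors shouldSyncAndShouldDownloadHeadersProof at a high level:
//   - a shared chain block that also has our pruning point on its chain => regular IBD, no proof;
//   - otherwise, sync with a pruning proof only if the relay block is heavy enough
//     (more blue work than our selected tip, blue score a pruning depth beyond it);
//   - otherwise, don't sync from this peer.
func decide(sharedChainBlockFound, pruningPointOnSharedChain, relayBlockHeavyAndDeep bool) (shouldDownloadProof, shouldSync bool) {
	if sharedChainBlockFound && pruningPointOnSharedChain {
		return false, true
	}
	if relayBlockHeavyAndDeep {
		return true, true
	}
	return false, false
}

func main() {
	fmt.Println(decide(true, true, false))   // false true  -> regular IBD
	fmt.Println(decide(false, false, true))  // true true   -> IBD with headers proof
	fmt.Println(decide(false, false, false)) // false false -> skip IBD
}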
|
||||
|
||||
func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.DomainHash, error) {
|
||||
@@ -114,7 +136,10 @@ func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.Doma
|
||||
return consensushashing.HeaderHash(pruningPointProof.Headers[0][len(pruningPointProof.Headers[0])-1]), nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(highHash *externalapi.DomainHash, highBlockDAAScore uint64) error {
|
||||
func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(
|
||||
syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash,
|
||||
highBlockDAAScore uint64) error {
|
||||
|
||||
proofPruningPoint, err := flow.syncAndValidatePruningPointProof()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -131,19 +156,20 @@ func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet(highHash *externalap
|
||||
return protocolerrors.Errorf(true, "the genesis pruning point violates finality")
|
||||
}
|
||||
|
||||
err = flow.syncPruningPointFutureHeaders(flow.Domain().StagingConsensus(), proofPruningPoint, highHash, highBlockDAAScore)
|
||||
err = flow.syncPruningPointFutureHeaders(flow.Domain().StagingConsensus(),
|
||||
syncerHeaderSelectedTipHash, proofPruningPoint, relayBlockHash, highBlockDAAScore)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Infof("Headers downloaded from peer %s", flow.peer)
|
||||
|
||||
highHashInfo, err := flow.Domain().StagingConsensus().GetBlockInfo(highHash)
|
||||
relayBlockInfo, err := flow.Domain().StagingConsensus().GetBlockInfo(relayBlockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !highHashInfo.Exists {
|
||||
if !relayBlockInfo.Exists {
|
||||
return protocolerrors.Errorf(true, "the triggering IBD block was not sent")
|
||||
}
|
||||
|
||||
@@ -206,7 +232,8 @@ func (flow *handleIBDFlow) syncPruningPointsAndPruningPointAnticone(proofPruning
|
||||
return err
|
||||
}
|
||||
|
||||
for {
|
||||
i := 0
|
||||
for ; ; i++ {
|
||||
blockWithTrustedData, done, err := flow.receiveBlockWithTrustedData()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -220,9 +247,19 @@ func (flow *handleIBDFlow) syncPruningPointsAndPruningPointAnticone(proofPruning
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// We're using i+2 because we want to check if the next block will belong to the next batch, but we already downloaded
|
||||
// the pruning point outside the loop so we use i+2 instead of i+1.
|
||||
if (i+2)%ibdBatchSize == 0 {
|
||||
log.Infof("Downloaded %d blocks from the pruning point anticone", i+1)
|
||||
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextPruningPointAndItsAnticoneBlocks())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.Infof("Finished downloading pruning point and its anticone from %s", flow.peer)
|
||||
log.Infof("Finished downloading pruning point and its anticone from %s. Total blocks downloaded: %d", flow.peer, i+1)
|
||||
return nil
|
||||
}
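A worked example of the i+2 batching check above, assuming for illustration a batch size of 4 (the real ibdBatchSize constant is defined elsewhere in the package): the pruning point itself arrives before the loop, so at loop index i = 2 the node has received 4 blocks in total, the first batch is exhausted, and the next chunk must be requested before block i = 3 can arrive.

package main

import "fmt"

func main() {
	const ibdBatchSize = 4 // illustrative value, not the real constant

	for i := 0; i < 10; i++ {
		// i+2 counts the pruning point (received before the loop) plus the i+1
		// anticone blocks received so far; when that total is a multiple of the
		// batch size, the current batch is exhausted and the next one must be
		// requested explicitly.
		if (i+2)%ibdBatchSize == 0 {
			fmt.Printf("after block %d: downloaded %d anticone blocks, requesting next batch\n", i, i+1)
		}
	}
}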
|
||||
|
||||
@@ -344,6 +381,7 @@ func (flow *handleIBDFlow) syncPruningPointUTXOSet(consensus externalapi.Consens
|
||||
log.Info("Fetching the pruning point UTXO set")
|
||||
isSuccessful, err := flow.fetchMissingUTXOSet(consensus, pruningPoint)
|
||||
if err != nil {
|
||||
log.Infof("An error occurred while fetching the pruning point UTXO set. Stopping IBD. (%s)", err)
|
||||
return false, err
|
||||
}
|
||||
|
||||
@@ -2,6 +2,8 @@ package ping
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
|
||||
"github.com/pkg/errors"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
@@ -61,6 +63,9 @@ func (flow *sendPingsFlow) start() error {
|
||||
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
if errors.Is(err, router.ErrTimeout) {
|
||||
return errors.Wrapf(flowcontext.ErrPingTimeout, err.Error())
|
||||
}
|
||||
return err
|
||||
}
|
||||
pongMessage := message.(*appmessage.MsgPong)
|
||||
@@ -1,14 +1,14 @@
|
||||
package v4
|
||||
package v5
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v4/addressexchange"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v4/blockrelay"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v4/ping"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v4/rejects"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v4/transactionrelay"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v5/addressexchange"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v5/blockrelay"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v5/ping"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v5/rejects"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v5/transactionrelay"
|
||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
)
|
||||
@@ -78,6 +78,7 @@ func registerBlockRelayFlows(m protocolManager, router *routerpkg.Router, isStop
|
||||
appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdIBDBlock, appmessage.CmdPruningPoints,
|
||||
appmessage.CmdPruningPointProof,
|
||||
appmessage.CmdTrustedData,
|
||||
appmessage.CmdIBDChainBlockLocator,
|
||||
},
|
||||
isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return blockrelay.HandleIBD(m.Context(), incomingRoute,
|
||||
@@ -121,7 +122,7 @@ func registerBlockRelayFlows(m protocolManager, router *routerpkg.Router, isStop
|
||||
),
|
||||
|
||||
m.RegisterFlow("HandlePruningPointAndItsAnticoneRequests", router,
|
||||
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointAndItsAnticone}, isStopping, errChan,
|
||||
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointAndItsAnticone, appmessage.CmdRequestNextPruningPointAndItsAnticoneBlocks}, isStopping, errChan,
|
||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return blockrelay.HandlePruningPointAndItsAnticoneRequests(m.Context(), incomingRoute, outgoingRoute, peer)
|
||||
},
|
||||
@@ -134,6 +135,20 @@ func registerBlockRelayFlows(m protocolManager, router *routerpkg.Router, isStop
|
||||
},
|
||||
),
|
||||
|
||||
m.RegisterFlow("HandleRequestIBDChainBlockLocator", router,
|
||||
[]appmessage.MessageCommand{appmessage.CmdRequestIBDChainBlockLocator}, isStopping, errChan,
|
||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return blockrelay.HandleRequestIBDChainBlockLocator(m.Context(), incomingRoute, outgoingRoute)
|
||||
},
|
||||
),
|
||||
|
||||
m.RegisterFlow("HandleRequestAnticone", router,
|
||||
[]appmessage.MessageCommand{appmessage.CmdRequestAnticone}, isStopping, errChan,
|
||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return blockrelay.HandleRequestAnticone(m.Context(), incomingRoute, outgoingRoute, peer)
|
||||
},
|
||||
),
|
||||
|
||||
m.RegisterFlow("HandlePruningPointProofRequests", router,
|
||||
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointProof}, isStopping, errChan,
|
||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
@@ -1,7 +1,7 @@
|
||||
package testing
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v4/addressexchange"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v5/addressexchange"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -22,7 +22,7 @@ type TransactionsRelayContext interface {
|
||||
SharedRequestedTransactions() *flowcontext.SharedRequestedTransactions
|
||||
OnTransactionAddedToMempool()
|
||||
EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error
|
||||
IsIBDRunning() bool
|
||||
IsNearlySynced() (bool, error)
|
||||
}
|
||||
|
||||
type handleRelayedTransactionsFlow struct {
|
||||
@@ -50,7 +50,12 @@ func (flow *handleRelayedTransactionsFlow) start() error {
|
||||
return err
|
||||
}
|
||||
|
||||
if flow.IsIBDRunning() {
|
||||
isNearlySynced, err := flow.IsNearlySynced()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Transaction relay is disabled if the node is out of sync and thus not mining
|
||||
if !isNearlySynced {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -3,7 +3,7 @@ package transactionrelay_test
|
||||
import (
|
||||
"errors"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v4/transactionrelay"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v5/transactionrelay"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
@@ -47,8 +47,8 @@ func (m *mocTransactionsRelayContext) EnqueueTransactionIDsForPropagation(transa
|
||||
func (m *mocTransactionsRelayContext) OnTransactionAddedToMempool() {
|
||||
}
|
||||
|
||||
func (m *mocTransactionsRelayContext) IsIBDRunning() bool {
|
||||
return false
|
||||
func (m *mocTransactionsRelayContext) IsNearlySynced() (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// TestHandleRelayedTransactionsNotFound tests the flow of HandleRelayedTransactions when the peer doesn't
|
||||
@@ -2,7 +2,7 @@ package transactionrelay_test
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v4/transactionrelay"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v5/transactionrelay"
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
@@ -100,6 +100,11 @@ func (m *Manager) SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler flowconte
|
||||
m.context.SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler)
|
||||
}
|
||||
|
||||
// SetOnNewBlockTemplateHandler sets the onNewBlockTemplate handler
|
||||
func (m *Manager) SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler flowcontext.OnNewBlockTemplateHandler) {
|
||||
m.context.SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler)
|
||||
}
|
||||
|
||||
// SetOnPruningPointUTXOSetOverrideHandler sets the OnPruningPointUTXOSetOverride handler
|
||||
func (m *Manager) SetOnPruningPointUTXOSetOverrideHandler(onPruningPointUTXOSetOverrideHandler flowcontext.OnPruningPointUTXOSetOverrideHandler) {
|
||||
m.context.SetOnPruningPointUTXOSetOverrideHandler(onPruningPointUTXOSetOverrideHandler)
|
||||
@@ -113,7 +118,7 @@ func (m *Manager) SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMemp
|
||||
// ShouldMine returns whether it's ok to use block template from this node
|
||||
// for mining purposes.
|
||||
func (m *Manager) ShouldMine() (bool, error) {
|
||||
return m.context.ShouldMine()
|
||||
return m.context.IsNearlySynced()
|
||||
}
|
||||
|
||||
// IsIBDRunning returns true if IBD is currently marked as running
|
||||
|
||||
@@ -3,7 +3,7 @@ package protocol
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/ready"
|
||||
v4 "github.com/kaspanet/kaspad/app/protocol/flows/v4"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/v5"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
@@ -76,8 +76,8 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
|
||||
var flows []*common.Flow
|
||||
log.Infof("Registering p2p flows for peer %s for protocol version %d", peer, peer.ProtocolVersion())
|
||||
switch peer.ProtocolVersion() {
|
||||
case 4:
|
||||
flows = v4.Register(m, router, errChan, &isStopping)
|
||||
case 5:
|
||||
flows = v5.Register(m, router, errChan, &isStopping)
|
||||
default:
|
||||
panic(errors.Errorf("no way to handle protocol version %d", peer.ProtocolVersion()))
|
||||
}
|
||||
|
||||
@@ -68,7 +68,7 @@ func (m *Manager) NotifyBlockAddedToDAG(block *externalapi.DomainBlock, virtualC
|
||||
|
||||
// NotifyVirtualChange notifies the manager that the virtual block has been changed.
|
||||
func (m *Manager) NotifyVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error {
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyBlockAddedToDAG")
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualChange")
|
||||
defer onEnd()
|
||||
|
||||
if m.context.Config.UTXOIndex {
|
||||
@@ -96,6 +96,13 @@ func (m *Manager) NotifyVirtualChange(virtualChangeSet *externalapi.VirtualChang
|
||||
return nil
|
||||
}
|
||||
|
||||
// NotifyNewBlockTemplate notifies the manager that a new
|
||||
// block template is available for miners
|
||||
func (m *Manager) NotifyNewBlockTemplate() error {
|
||||
notification := appmessage.NewNewBlockTemplateNotificationMessage()
|
||||
return m.context.NotificationManager.NotifyNewBlockTemplate(notification)
|
||||
}
|
||||
|
||||
// NotifyPruningPointUTXOSetOverride notifies the manager whenever the UTXO index
|
||||
// resets due to pruning point change via IBD.
|
||||
func (m *Manager) NotifyPruningPointUTXOSetOverride() error {
|
||||
|
||||
@@ -48,6 +48,7 @@ var handlers = map[appmessage.MessageCommand]handler{
|
||||
appmessage.CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage: rpchandlers.HandleStopNotifyingPruningPointUTXOSetOverrideRequest,
|
||||
appmessage.CmdEstimateNetworkHashesPerSecondRequestMessage: rpchandlers.HandleEstimateNetworkHashesPerSecond,
|
||||
appmessage.CmdNotifyVirtualDaaScoreChangedRequestMessage: rpchandlers.HandleNotifyVirtualDaaScoreChanged,
|
||||
appmessage.CmdNotifyNewBlockTemplateRequestMessage: rpchandlers.HandleNotifyNewBlockTemplate,
|
||||
}
|
||||
|
||||
func (m *Manager) routerInitializer(router *router.Router, netConnection *netadapter.NetConnection) {
|
||||
|
||||
@@ -32,6 +32,7 @@ type NotificationListener struct {
|
||||
propagateVirtualSelectedParentBlueScoreChangedNotifications bool
|
||||
propagateVirtualDaaScoreChangedNotifications bool
|
||||
propagatePruningPointUTXOSetOverrideNotifications bool
|
||||
propagateNewBlockTemplateNotifications bool
|
||||
|
||||
propagateUTXOsChangedNotificationAddresses map[utxoindex.ScriptPublicKeyString]*UTXOsChangedNotificationAddress
|
||||
}
|
||||
@@ -201,6 +202,25 @@ func (nm *NotificationManager) NotifyVirtualDaaScoreChanged(
|
||||
return nil
|
||||
}
|
||||
|
||||
// NotifyNewBlockTemplate notifies the notification manager that a new
|
||||
// block template is available for miners
|
||||
func (nm *NotificationManager) NotifyNewBlockTemplate(
|
||||
notification *appmessage.NewBlockTemplateNotificationMessage) error {
|
||||
|
||||
nm.RLock()
|
||||
defer nm.RUnlock()
|
||||
|
||||
for router, listener := range nm.listeners {
|
||||
if listener.propagateNewBlockTemplateNotifications {
|
||||
err := router.OutgoingRoute().Enqueue(notification)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NotifyPruningPointUTXOSetOverride notifies the notification manager that the UTXO index
|
||||
// reset due to pruning point change via IBD.
|
||||
func (nm *NotificationManager) NotifyPruningPointUTXOSetOverride() error {
|
||||
@@ -226,6 +246,7 @@ func newNotificationListener() *NotificationListener {
|
||||
propagateFinalityConflictResolvedNotifications: false,
|
||||
propagateUTXOsChangedNotifications: false,
|
||||
propagateVirtualSelectedParentBlueScoreChangedNotifications: false,
|
||||
propagateNewBlockTemplateNotifications: false,
|
||||
propagatePruningPointUTXOSetOverrideNotifications: false,
|
||||
}
|
||||
}
|
||||
@@ -334,6 +355,12 @@ func (nl *NotificationListener) PropagateVirtualDaaScoreChangedNotifications() {
|
||||
nl.propagateVirtualDaaScoreChangedNotifications = true
|
||||
}
|
||||
|
||||
// PropagateNewBlockTemplateNotifications instructs the listener to send
|
||||
// new block template notifications to the remote listener
|
||||
func (nl *NotificationListener) PropagateNewBlockTemplateNotifications() {
|
||||
nl.propagateNewBlockTemplateNotifications = true
|
||||
}
|
||||
|
||||
// PropagatePruningPointUTXOSetOverrideNotifications instructs the listener to send pruning point UTXO set override notifications
|
||||
// to the remote listener.
|
||||
func (nl *NotificationListener) PropagatePruningPointUTXOSetOverrideNotifications() {
|
||||
|
||||
@@ -4,9 +4,11 @@ import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/transactionhelper"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/version"
|
||||
)
|
||||
|
||||
// HandleGetBlockTemplate handles the respectively named RPC command
|
||||
@@ -15,7 +17,7 @@ func HandleGetBlockTemplate(context *rpccontext.Context, _ *router.Router, reque
|
||||
|
||||
payAddress, err := util.DecodeAddress(getBlockTemplateRequest.PayAddress, context.Config.ActiveNetParams.Prefix)
|
||||
if err != nil {
|
||||
errorMessage := &appmessage.GetBlockResponseMessage{}
|
||||
errorMessage := &appmessage.GetBlockTemplateResponseMessage{}
|
||||
errorMessage.Error = appmessage.RPCErrorf("Could not decode address: %s", err)
|
||||
return errorMessage, nil
|
||||
}
|
||||
@@ -25,12 +27,18 @@ func HandleGetBlockTemplate(context *rpccontext.Context, _ *router.Router, reque
|
||||
return nil, err
|
||||
}
|
||||
|
||||
coinbaseData := &externalapi.DomainCoinbaseData{ScriptPublicKey: scriptPublicKey}
|
||||
|
||||
coinbaseData := &externalapi.DomainCoinbaseData{ScriptPublicKey: scriptPublicKey, ExtraData: []byte(version.Version() + "/" + getBlockTemplateRequest.ExtraData)}
|
||||
templateBlock, err := context.Domain.MiningManager().GetBlockTemplate(coinbaseData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if uint64(len(templateBlock.Transactions[transactionhelper.CoinbaseTransactionIndex].Payload)) > context.Config.NetParams().MaxCoinbasePayloadLength {
|
||||
errorMessage := &appmessage.GetBlockTemplateResponseMessage{}
|
||||
errorMessage.Error = appmessage.RPCErrorf("Coinbase payload is above max length (%d). Try to shorten the extra data.", context.Config.NetParams().MaxCoinbasePayloadLength)
|
||||
return errorMessage, nil
|
||||
}
|
||||
|
||||
rpcBlock := appmessage.DomainBlockToRPCBlock(templateBlock)
|
||||
|
||||
isSynced, err := context.ProtocolManager.ShouldMine()
|
||||
|
||||
@@ -31,7 +31,7 @@ func (d fakeDomain) StagingConsensus() externalapi.Consensus {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (d fakeDomain) InitStagingConsensus() error {
|
||||
func (d fakeDomain) InitStagingConsensusWithoutGenesis() error {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
|
||||
19
app/rpc/rpchandlers/notify_new_block_template.go
Normal file
@@ -0,0 +1,19 @@
package rpchandlers

import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// HandleNotifyNewBlockTemplate handles the respectively named RPC command
func HandleNotifyNewBlockTemplate(context *rpccontext.Context, router *router.Router, _ appmessage.Message) (appmessage.Message, error) {
listener, err := context.NotificationManager.Listener(router)
if err != nil {
return nil, err
}
listener.PropagateNewBlockTemplateNotifications()

response := appmessage.NewNotifyNewBlockTemplateResponseMessage()
return response, nil
}
|
||||
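As a usage sketch of the new notification path, a client can register for new-block-template notifications and refresh its template whenever one fires; this mirrors the kaspaminer change further down in this diff, while the RPC address, pay address and extra data below are illustrative assumptions:

package main

import (
	"log"

	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/infrastructure/network/rpcclient"
)

func main() {
	client, err := rpcclient.NewRPCClient("localhost:16110") // illustrative address
	if err != nil {
		log.Fatal(err)
	}

	templateRefresh := make(chan struct{}, 1)
	err = client.RegisterForNewBlockTemplateNotifications(func(_ *appmessage.NewBlockTemplateNotificationMessage) {
		// Coalesce bursts of notifications into a single pending refresh.
		select {
		case templateRefresh <- struct{}{}:
		default:
		}
	})
	if err != nil {
		log.Fatal(err)
	}

	payAddress := "kaspa:example-pay-address" // illustrative, not a valid address
	for range templateRefresh {
		// Fetch a fresh template; the extra data string is illustrative.
		_, err := client.GetBlockTemplate(payAddress, "example-miner")
		if err != nil {
			log.Println(err)
		}
	}
}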
@@ -1,6 +1,7 @@
|
||||
package rpchandlers
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
|
||||
@@ -58,6 +59,12 @@ func HandleSubmitBlock(context *rpccontext.Context, _ *router.Router, request ap
|
||||
return nil, err
|
||||
}
|
||||
|
||||
jsonBytes, _ := json.MarshalIndent(submitBlockRequest.Block.Header, "", " ")
|
||||
if jsonBytes != nil {
|
||||
log.Warnf("The RPC submitted block triggered a rule/protocol error (%s), printing "+
|
||||
"the full header for debug purposes: \n%s", err, string(jsonBytes))
|
||||
}
|
||||
|
||||
return &appmessage.SubmitBlockResponseMessage{
|
||||
Error: appmessage.RPCErrorf("Block rejected. Reason: %s", err),
|
||||
RejectReason: appmessage.RejectReasonBlockInvalid,
|
||||
|
||||
@@ -5,9 +5,8 @@ FLAGS=$@
|
||||
go version
|
||||
|
||||
go get $FLAGS -t -d ./...
|
||||
# This is to bypass a go bug: https://github.com/golang/go/issues/27643
|
||||
GO111MODULE=off go get $FLAGS golang.org/x/lint/golint \
|
||||
honnef.co/go/tools/cmd/staticcheck
|
||||
GO111MODULE=off go get $FLAGS golang.org/x/lint/golint
|
||||
go install $FLAGS honnef.co/go/tools/cmd/staticcheck@latest
|
||||
|
||||
test -z "$(go fmt ./...)"
|
||||
|
||||
|
||||
@@ -1,3 +1,75 @@
Kaspad v0.12.0 - 2022-04-14
===========================
Breaking changes:
Hard-fork at DAA score 14687583 (estimated to be on 28/04 16:38 UTC) which includes:
* Using a separate depth from the finality depth for merge set calculations (#2013)
* Not counting the header size as part of the block mass (#2013)
* Increasing block version to 1 (#2013)
* Removing the limit on the amount of KAS that can be sent in one transaction (#2013)

Bug fixes:
* Making a workaround for the UTXO diff child bug (#2020)
* Use cosigner index 0 for read only wallets (#2014)

Non-breaking changes:
* Adding a "sweep" command to `kaspawallet` (#2018)
* Use `blue work` heuristic to skip irrelevant relay blocks
* Kaspawallet daemon: Add Send and Sign commands (#2016)

Kaspad v0.11.17 - 2022-04-06
===========================
* Decrement estimatedHeaderUpperBound from mempool's MaxBlockMass (#2009)

Kaspad v0.11.16 - 2022-04-05
===========================
* Don't skip wallet address with different cosigner index (#2007)

Kaspad v0.11.15 - 2022-04-05
===========================
* Add support for auto-compound in `kaspawallet send` (#1951)
* Unite reachability stores (#1963, #1993, #2001)
* Add names to nameless routes (#1986)
* Optimize the miner-kaspad flow and latency (#1988)
* Upgrade to go 1.18 (#1992)
* Add package name to kaspawalletd .proto file (#1991)
* Block template cache (#1994)
* Add extra data to GetBlockTemplate request (#1995, #1997)
* New definition for "out of sync" (#1996)
* Remove v4 p2p version (#1998)
* Remove increase pagefile from deploy.yaml (#2000)
* Cache the pruning point anticone (#2002)
* Add DB compaction after the deletion of a DB prefix (#2003)
* Fixed a bug in staging of pruning point by index (#2005)
* Clean up debug log level by moving many frequent logs to trace level (#2004)

Kaspad v0.11.14 - 2022-03-20
===========================
* Fix a bug in the new p2p v5 IBD chain negotiation (#1981)

Kaspad v0.11.13 - 2022-03-16
===========================
* Display progress of IBD process in Kaspad logs (#1938, #1939, #1949, #1977)
* Optimize DB writes during fresh IBD (#1937)
* Add AllowConnectionToDifferentVersions flag to kaspactl (#1940)
* Drop support for p2p v3 (#1942)
* Various transaction processing fixes and workarounds (#1943, #1946, #1971, #1974)
* Make kaspawallet store the utxos sorted by amount (#1947)
* Implement a `parse` sub command in the kaspawallet (#1953)
* Set MaxBlockLevels for non-mainnet networks to 250 (#1952)
* Add cache to DAA block window (#1948)
* kaspactl: string slice parser for GetUtxosByAddresses (#1955, first contribution by @icook)
* Add MergeSet and IsChainBlock to RPC (#1961)
* Ignore transaction invs during IBD (#1960)
* Optimize validation of expected header pruning point (#1962)
* Fix a bug in bounded merge depth validation (#1966)
* Don't relay blocks in virtual anticone (#1970)
* Add version to block template to allow tracking of miner's kaspad version (#1967)
* New p2p version: v5 (#1969)
* Fix IBD shared past negotiation to be non-quadratic also in the worst case (#1969, p2p v5)
* Send pruning point anticone in batches (#1973, p2p v5)
* Clean up log output mistakes and try to be clearer to the user (#1976, #1978)
* Apply avoiding-IBD logic from patch10 to p2p v4 IBD handling (#1979)

Kaspad v0.11.11 - 2022-01-27
===========================
* Fix for rare consensus bug regarding DAA window order. The bug only affected IBD from scratch and only today (#1934)
|
||||
|
||||
@@ -4,7 +4,7 @@ kaspactl is an RPC client for kaspad
|
||||
|
||||
## Requirements
|
||||
|
||||
Go 1.16 or later.
|
||||
Go 1.18 or later.
|
||||
|
||||
## Installation
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# -- multistage docker build: stage #1: build stage
|
||||
FROM golang:1.16-alpine AS build
|
||||
FROM golang:1.18-alpine AS build
|
||||
|
||||
RUN mkdir -p /go/src/github.com/kaspanet/kaspad
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@ Kaspaminer is a CPU-based miner for kaspad
|
||||
|
||||
## Requirements
|
||||
|
||||
Go 1.16 or later.
|
||||
Go 1.18 or later.
|
||||
|
||||
## Installation
|
||||
|
||||
|
||||
@@ -13,8 +13,8 @@ const minerTimeout = 10 * time.Second
|
||||
type minerClient struct {
|
||||
*rpcclient.RPCClient
|
||||
|
||||
cfg *configFlags
|
||||
blockAddedNotificationChan chan struct{}
|
||||
cfg *configFlags
|
||||
newBlockTemplateNotificationChan chan struct{}
|
||||
}
|
||||
|
||||
func (mc *minerClient) connect() error {
|
||||
@@ -30,14 +30,14 @@ func (mc *minerClient) connect() error {
|
||||
mc.SetTimeout(minerTimeout)
|
||||
mc.SetLogger(backendLog, logger.LevelTrace)
|
||||
|
||||
err = mc.RegisterForBlockAddedNotifications(func(_ *appmessage.BlockAddedNotificationMessage) {
|
||||
err = mc.RegisterForNewBlockTemplateNotifications(func(_ *appmessage.NewBlockTemplateNotificationMessage) {
|
||||
select {
|
||||
case mc.blockAddedNotificationChan <- struct{}{}:
|
||||
case mc.newBlockTemplateNotificationChan <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error requesting block-added notifications")
|
||||
return errors.Wrapf(err, "error requesting new-block-template notifications")
|
||||
}
|
||||
|
||||
log.Infof("Connected to %s", rpcAddress)
|
||||
@@ -47,8 +47,8 @@ func (mc *minerClient) connect() error {
|
||||
|
||||
func newMinerClient(cfg *configFlags) (*minerClient, error) {
|
||||
minerClient := &minerClient{
|
||||
cfg: cfg,
|
||||
blockAddedNotificationChan: make(chan struct{}),
|
||||
cfg: cfg,
|
||||
newBlockTemplateNotificationChan: make(chan struct{}),
|
||||
}
|
||||
|
||||
err := minerClient.connect()
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# -- multistage docker build: stage #1: build stage
|
||||
FROM golang:1.16-alpine AS build
|
||||
FROM golang:1.18-alpine AS build
|
||||
|
||||
RUN mkdir -p /go/src/github.com/kaspanet/kaspad
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@ package main
|
||||
|
||||
import (
|
||||
nativeerrors "errors"
|
||||
"github.com/kaspanet/kaspad/version"
|
||||
"math/rand"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
@@ -187,7 +188,7 @@ func getBlockForMining(mineWhenNotSynced bool) (*externalapi.DomainBlock, *pow.S
|
||||
|
||||
func templatesLoop(client *minerClient, miningAddr util.Address, errChan chan error) {
|
||||
getBlockTemplate := func() {
|
||||
template, err := client.GetBlockTemplate(miningAddr.String())
|
||||
template, err := client.GetBlockTemplate(miningAddr.String(), "kaspaminer-"+version.Version())
|
||||
if nativeerrors.Is(err, router.ErrTimeout) {
|
||||
log.Warnf("Got timeout while requesting block template from %s: %s", client.Address(), err)
|
||||
reconnectErr := client.Reconnect()
|
||||
@@ -217,7 +218,7 @@ func templatesLoop(client *minerClient, miningAddr util.Address, errChan chan er
|
||||
ticker := time.NewTicker(tickerTime)
|
||||
for {
|
||||
select {
|
||||
case <-client.blockAddedNotificationChan:
|
||||
case <-client.newBlockTemplateNotificationChan:
|
||||
getBlockTemplate()
|
||||
ticker.Reset(tickerTime)
|
||||
case <-ticker.C:
|
||||
|
||||
@@ -3,19 +3,12 @@ package main
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/client"
|
||||
"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/pb"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
|
||||
"github.com/kaspanet/kaspad/cmd/kaspawallet/utils"
|
||||
)
|
||||
|
||||
func formatKas(amount uint64) string {
|
||||
res := " "
|
||||
if amount > 0 {
|
||||
res = fmt.Sprintf("%19.8f", float64(amount)/constants.SompiPerKaspa)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func balance(conf *balanceConfig) error {
|
||||
daemonClient, tearDown, err := client.Connect(conf.DaemonAddress)
|
||||
if err != nil {
|
||||
@@ -39,12 +32,12 @@ func balance(conf *balanceConfig) error {
|
||||
println("Address Available Pending")
|
||||
println("-----------------------------------------------------------------------------------------------------------")
|
||||
for _, addressBalance := range response.AddressBalances {
|
||||
fmt.Printf("%s %s %s\n", addressBalance.Address, formatKas(addressBalance.Available), formatKas(addressBalance.Pending))
|
||||
fmt.Printf("%s %s %s\n", addressBalance.Address, utils.FormatKas(addressBalance.Available), utils.FormatKas(addressBalance.Pending))
|
||||
}
|
||||
println("-----------------------------------------------------------------------------------------------------------")
|
||||
print(" ")
|
||||
}
|
||||
fmt.Printf("Total balance, KAS %s %s%s\n", formatKas(response.Available), formatKas(response.Pending), pendingSuffix)
|
||||
fmt.Printf("Total balance, KAS %s %s%s\n", utils.FormatKas(response.Available), utils.FormatKas(response.Pending), pendingSuffix)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -2,13 +2,13 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
|
||||
"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/client"
|
||||
"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/pb"
|
||||
"github.com/pkg/errors"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func broadcast(conf *broadcastConfig) error {
|
||||
@@ -21,34 +21,36 @@ func broadcast(conf *broadcastConfig) error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), daemonTimeout)
|
||||
defer cancel()
|
||||
|
||||
if conf.Transaction == "" && conf.TransactionFile == "" {
|
||||
if conf.Transactions == "" && conf.TransactionsFile == "" {
|
||||
return errors.Errorf("Either --transaction or --transaction-file is required")
|
||||
}
|
||||
if conf.Transaction != "" && conf.TransactionFile != "" {
|
||||
if conf.Transactions != "" && conf.TransactionsFile != "" {
|
||||
return errors.Errorf("Both --transaction and --transaction-file cannot be passed at the same time")
|
||||
}
|
||||
|
||||
transactionHex := conf.Transaction
|
||||
if conf.TransactionFile != "" {
|
||||
transactionHexBytes, err := ioutil.ReadFile(conf.TransactionFile)
|
||||
transactionsHex := conf.Transactions
|
||||
if conf.TransactionsFile != "" {
|
||||
transactionHexBytes, err := ioutil.ReadFile(conf.TransactionsFile)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Could not read hex from %s", conf.TransactionFile)
|
||||
return errors.Wrapf(err, "Could not read hex from %s", conf.TransactionsFile)
|
||||
}
|
||||
transactionHex = strings.TrimSpace(string(transactionHexBytes))
|
||||
transactionsHex = strings.TrimSpace(string(transactionHexBytes))
|
||||
}
|
||||
|
||||
transaction, err := hex.DecodeString(transactionHex)
|
||||
transactions, err := decodeTransactionsFromHex(transactionsHex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
response, err := daemonClient.Broadcast(ctx, &pb.BroadcastRequest{Transaction: transaction})
|
||||
response, err := daemonClient.Broadcast(ctx, &pb.BroadcastRequest{Transactions: transactions})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("Transaction was sent successfully")
|
||||
fmt.Printf("Transaction ID: \t%s\n", response.TxID)
|
||||
fmt.Println("Transactions were sent successfully")
|
||||
fmt.Println("Transaction ID(s): ")
|
||||
for _, txID := range response.TxIDs {
|
||||
fmt.Printf("\t%s\n", txID)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
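A sketch of driving the same daemon API programmatically: connect to kaspawalletd, then broadcast already-signed transactions through the repeated transactions field introduced above. The Connect helper, BroadcastRequest field and TxIDs response follow the diff; the address, timeout and the origin of the signed transactions are illustrative assumptions:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/client"
	"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/pb"
)

// broadcastSigned sends already-signed serialized transactions to the wallet daemon
// and prints the resulting transaction IDs, mirroring the broadcast command above.
func broadcastSigned(signedTransactions [][]byte) error {
	daemonClient, tearDown, err := client.Connect("localhost:8082") // default daemon address
	if err != nil {
		return err
	}
	defer tearDown()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) // illustrative timeout
	defer cancel()

	response, err := daemonClient.Broadcast(ctx, &pb.BroadcastRequest{Transactions: signedTransactions})
	if err != nil {
		return err
	}
	for _, txID := range response.TxIDs {
		fmt.Printf("\t%s\n", txID)
	}
	return nil
}

func main() {}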
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/kaspanet/kaspad/infrastructure/config"
|
||||
"github.com/pkg/errors"
|
||||
"os"
|
||||
|
||||
"github.com/jessevdk/go-flags"
|
||||
)
|
||||
@@ -12,6 +13,7 @@ const (
|
||||
createSubCmd = "create"
|
||||
balanceSubCmd = "balance"
|
||||
sendSubCmd = "send"
|
||||
sweepSubCmd = "sweep"
|
||||
createUnsignedTransactionSubCmd = "create-unsigned-transaction"
|
||||
signSubCmd = "sign"
|
||||
broadcastSubCmd = "broadcast"
|
||||
@@ -58,6 +60,12 @@ type sendConfig struct {
|
||||
config.NetworkFlags
|
||||
}
|
||||
|
||||
type sweepConfig struct {
|
||||
PrivateKey string `long:"private-key" short:"k" description:"Private key in hex format"`
|
||||
DaemonAddress string `long:"daemonaddress" short:"d" description:"Wallet daemon server to connect to (default: localhost:8082)"`
|
||||
config.NetworkFlags
|
||||
}
|
||||
|
||||
type createUnsignedTransactionConfig struct {
|
||||
DaemonAddress string `long:"daemonaddress" short:"d" description:"Wallet daemon server to connect to (default: localhost:8082)"`
|
||||
ToAddress string `long:"to-address" short:"t" description:"The public address to send Kaspa to" required:"true"`
|
||||
@@ -68,15 +76,15 @@ type createUnsignedTransactionConfig struct {
|
||||
type signConfig struct {
|
||||
KeysFile string `long:"keys-file" short:"f" description:"Keys file location (default: ~/.kaspawallet/keys.json (*nix), %USERPROFILE%\\AppData\\Local\\Kaspawallet\\key.json (Windows))"`
|
||||
Password string `long:"password" short:"p" description:"Wallet password"`
|
||||
Transaction string `long:"transaction" short:"t" description:"The unsigned transaction to sign on (encoded in hex)"`
|
||||
TransactionFile string `long:"transaction-file" short:"F" description:"The file containing the unsigned transaction to sign on (encoded in hex)"`
|
||||
Transaction string `long:"transaction" short:"t" description:"The unsigned transaction(s) to sign on (encoded in hex)"`
|
||||
TransactionFile string `long:"transaction-file" short:"F" description:"The file containing the unsigned transaction(s) to sign on (encoded in hex)"`
|
||||
config.NetworkFlags
|
||||
}
|
||||
|
||||
type broadcastConfig struct {
|
||||
DaemonAddress string `long:"daemonaddress" short:"d" description:"Wallet daemon server to connect to (default: localhost:8082)"`
|
||||
Transaction string `long:"transaction" short:"t" description:"The signed transaction to broadcast (encoded in hex)"`
|
||||
TransactionFile string `long:"transaction-file" short:"F" description:"The file containing the unsigned transaction to sign on (encoded in hex)"`
|
||||
DaemonAddress string `long:"daemonaddress" short:"d" description:"Wallet daemon server to connect to (default: localhost:8082)"`
|
||||
Transactions string `long:"transaction" short:"t" description:"The signed transaction to broadcast (encoded in hex)"`
|
||||
TransactionsFile string `long:"transaction-file" short:"F" description:"The file containing the unsigned transaction to sign on (encoded in hex)"`
|
||||
config.NetworkFlags
|
||||
}
|
||||
|
||||
@@ -129,6 +137,10 @@ func parseCommandLine() (subCommand string, config interface{}) {
|
||||
parser.AddCommand(sendSubCmd, "Sends a Kaspa transaction to a public address",
|
||||
"Sends a Kaspa transaction to a public address", sendConf)
|
||||
|
||||
sweepConf := &sweepConfig{DaemonAddress: defaultListen}
|
||||
parser.AddCommand(sweepSubCmd, "Sends all funds associated with the given private key, to a new address of the wallet",
|
||||
"Sends all funds associated with the private key, to a given change address of the wallet", sweepConf)
|
||||
|
||||
createUnsignedTransactionConf := &createUnsignedTransactionConfig{DaemonAddress: defaultListen}
|
||||
parser.AddCommand(createUnsignedTransactionSubCmd, "Create an unsigned Kaspa transaction",
|
||||
"Create an unsigned Kaspa transaction", createUnsignedTransactionConf)
|
||||
@@ -198,6 +210,13 @@ func parseCommandLine() (subCommand string, config interface{}) {
|
||||
printErrorAndExit(err)
|
||||
}
|
||||
config = sendConf
|
||||
case sweepSubCmd:
|
||||
combineNetworkFlags(&sweepConf.NetworkFlags, &cfg.NetworkFlags)
|
||||
err := sweepConf.ResolveNetwork(parser)
|
||||
if err != nil {
|
||||
printErrorAndExit(err)
|
||||
}
|
||||
config = sweepConf
|
||||
case createUnsignedTransactionSubCmd:
|
||||
combineNetworkFlags(&createUnsignedTransactionConf.NetworkFlags, &cfg.NetworkFlags)
|
||||
err := createUnsignedTransactionConf.ResolveNetwork(parser)
|
||||
|
||||
@@ -50,9 +50,13 @@ func create(conf *createConfig) error {
|
||||
extendedPublicKeys = append(extendedPublicKeys, string(extendedPublicKey))
|
||||
}
|
||||
|
||||
cosignerIndex, err := libkaspawallet.MinimumCosignerIndex(signerExtendedPublicKeys, extendedPublicKeys)
|
||||
if err != nil {
|
||||
return err
|
||||
// For a read only wallet the cosigner index is 0
|
||||
cosignerIndex := uint32(0)
|
||||
if len(signerExtendedPublicKeys) > 0 {
|
||||
cosignerIndex, err = libkaspawallet.MinimumCosignerIndex(signerExtendedPublicKeys, extendedPublicKeys)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
file := keys.File{
|
||||
|
||||
@@ -2,8 +2,8 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
|
||||
"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/client"
|
||||
"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/pb"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
|
||||
@@ -20,7 +20,7 @@ func createUnsignedTransaction(conf *createUnsignedTransactionConfig) error {
|
||||
defer cancel()
|
||||
|
||||
sendAmountSompi := uint64(conf.SendAmount * constants.SompiPerKaspa)
|
||||
response, err := daemonClient.CreateUnsignedTransaction(ctx, &pb.CreateUnsignedTransactionRequest{
|
||||
response, err := daemonClient.CreateUnsignedTransactions(ctx, &pb.CreateUnsignedTransactionsRequest{
|
||||
Address: conf.ToAddress,
|
||||
Amount: sendAmountSompi,
|
||||
})
|
||||
@@ -29,6 +29,6 @@ func createUnsignedTransaction(conf *createUnsignedTransactionConfig) error {
|
||||
}
|
||||
|
||||
fmt.Println("Created unsigned transaction")
|
||||
fmt.Println(hex.EncodeToString(response.UnsignedTransaction))
|
||||
fmt.Println(encodeTransactionsToHex(response.UnsignedTransactions))
|
||||
return nil
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,14 +1,20 @@
|
||||
syntax = "proto3";
|
||||
|
||||
option go_package = "github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/pb";
|
||||
package kaspawalletd;
|
||||
|
||||
service kaspawalletd {
|
||||
rpc GetBalance (GetBalanceRequest) returns (GetBalanceResponse) {}
|
||||
rpc CreateUnsignedTransaction (CreateUnsignedTransactionRequest) returns (CreateUnsignedTransactionResponse) {}
|
||||
rpc GetExternalSpendableUTXOs (GetExternalSpendableUTXOsRequest) returns (GetExternalSpendableUTXOsResponse) {}
|
||||
rpc CreateUnsignedTransactions (CreateUnsignedTransactionsRequest) returns (CreateUnsignedTransactionsResponse) {}
|
||||
rpc ShowAddresses (ShowAddressesRequest) returns (ShowAddressesResponse) {}
|
||||
rpc NewAddress (NewAddressRequest) returns (NewAddressResponse) {}
|
||||
rpc Shutdown (ShutdownRequest) returns (ShutdownResponse) {}
|
||||
rpc Broadcast (BroadcastRequest) returns (BroadcastResponse) {}
|
||||
// Since SendRequest contains a password - this command should only be used on a trusted or secure connection
|
||||
rpc Send(SendRequest) returns (SendResponse) {}
|
||||
// Since SignRequest contains a password - this command should only be used on a trusted or secure connection
|
||||
rpc Sign(SignRequest) returns (SignResponse) {}
|
||||
}
|
||||
|
||||
message GetBalanceRequest {
|
||||
@@ -21,18 +27,18 @@ message GetBalanceResponse {
|
||||
}
|
||||
|
||||
message AddressBalances {
|
||||
string address = 1;
|
||||
uint64 available = 2;
|
||||
uint64 pending = 3;
|
||||
string address = 1;
|
||||
uint64 available = 2;
|
||||
uint64 pending = 3;
|
||||
}
|
||||
|
||||
message CreateUnsignedTransactionRequest {
|
||||
message CreateUnsignedTransactionsRequest {
|
||||
string address = 1;
|
||||
uint64 amount = 2;
|
||||
}
|
||||
|
||||
message CreateUnsignedTransactionResponse {
|
||||
bytes unsignedTransaction = 1;
|
||||
message CreateUnsignedTransactionsResponse {
|
||||
repeated bytes unsignedTransactions = 1;
|
||||
}
|
||||
|
||||
message ShowAddressesRequest {
|
||||
@@ -50,11 +56,12 @@ message NewAddressResponse {
|
||||
}
|
||||
|
||||
message BroadcastRequest {
|
||||
bytes transaction = 1;
|
||||
bool isDomain = 1;
|
||||
repeated bytes transactions = 2;
|
||||
}
|
||||
|
||||
message BroadcastResponse {
|
||||
string txID = 1;
|
||||
repeated string txIDs = 1;
|
||||
}
|
||||
|
||||
message ShutdownRequest {
|
||||
@@ -62,3 +69,54 @@ message ShutdownRequest {
|
||||
|
||||
message ShutdownResponse {
|
||||
}
|
||||
|
||||
message Outpoint {
|
||||
string transactionId = 1;
|
||||
uint32 index = 2;
|
||||
}
|
||||
|
||||
message UtxosByAddressesEntry {
|
||||
string address = 1;
|
||||
Outpoint outpoint = 2;
|
||||
UtxoEntry utxoEntry = 3;
|
||||
}
|
||||
|
||||
message ScriptPublicKey {
|
||||
uint32 version = 1;
|
||||
string scriptPublicKey = 2;
|
||||
}
|
||||
|
||||
message UtxoEntry {
|
||||
uint64 amount = 1;
|
||||
ScriptPublicKey scriptPublicKey = 2;
|
||||
uint64 blockDaaScore = 3;
|
||||
bool isCoinbase = 4;
|
||||
}
|
||||
|
||||
message GetExternalSpendableUTXOsRequest{
|
||||
string address = 1;
|
||||
}
|
||||
|
||||
message GetExternalSpendableUTXOsResponse{
|
||||
repeated UtxosByAddressesEntry Entries = 1;
|
||||
}
|
||||
// Since SendRequest contains a password - this command should only be used on a trusted or secure connection
|
||||
message SendRequest{
|
||||
string toAddress = 1;
|
||||
uint64 amount = 2;
|
||||
string password = 3;
|
||||
}
|
||||
|
||||
message SendResponse{
|
||||
repeated string txIDs = 1;
|
||||
}
|
||||
|
||||
// Since SignRequest contains a password - this command should only be used on a trusted or secure connection
|
||||
message SignRequest{
|
||||
repeated bytes unsignedTransactions = 1;
|
||||
string password = 2;
|
||||
}
|
||||
|
||||
message SignResponse{
|
||||
repeated bytes signedTransactions = 1;
|
||||
}
|
||||
|
||||
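Taken together, the new daemon API is meant to be driven in three steps: build unsigned transactions, sign them, then broadcast the signed bytes. The sketch below shows that flow against the generated client, assuming the daemon listens on localhost:8082 and using an invented address, amount and password purely for illustration; it is not part of this change set.

package main

import (
	"context"
	"log"

	"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/pb"
	"google.golang.org/grpc"
)

func main() {
	// Assumed daemon address; error handling is kept minimal for brevity.
	conn, err := grpc.Dial("localhost:8082", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	daemon := pb.NewKaspawalletdClient(conn)
	ctx := context.Background()

	// Step 1: build one or more unsigned transactions for the payment.
	created, err := daemon.CreateUnsignedTransactions(ctx, &pb.CreateUnsignedTransactionsRequest{
		Address: "kaspa:exampleaddress", // hypothetical recipient
		Amount:  100000000,              // amount in sompi
	})
	if err != nil {
		log.Fatal(err)
	}

	// Step 2: sign them. The password travels in the request, so this should
	// only ever be done over a trusted or secure connection.
	signed, err := daemon.Sign(ctx, &pb.SignRequest{
		UnsignedTransactions: created.UnsignedTransactions,
		Password:             "hypothetical-password",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Step 3: broadcast. IsDomain stays false because these are
	// partially-signed-transaction bytes, not serialized domain transactions.
	broadcasted, err := daemon.Broadcast(ctx, &pb.BroadcastRequest{
		IsDomain:     false,
		Transactions: signed.SignedTransactions,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("transaction IDs:", broadcasted.TxIDs)
}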
@@ -1,4 +1,8 @@
|
||||
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
|
||||
// versions:
|
||||
// - protoc-gen-go-grpc v1.2.0
|
||||
// - protoc v3.17.2
|
||||
// source: kaspawalletd.proto
|
||||
|
||||
package pb
|
||||
|
||||
@@ -19,11 +23,16 @@ const _ = grpc.SupportPackageIsVersion7
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
|
||||
type KaspawalletdClient interface {
|
||||
GetBalance(ctx context.Context, in *GetBalanceRequest, opts ...grpc.CallOption) (*GetBalanceResponse, error)
|
||||
CreateUnsignedTransaction(ctx context.Context, in *CreateUnsignedTransactionRequest, opts ...grpc.CallOption) (*CreateUnsignedTransactionResponse, error)
|
||||
GetExternalSpendableUTXOs(ctx context.Context, in *GetExternalSpendableUTXOsRequest, opts ...grpc.CallOption) (*GetExternalSpendableUTXOsResponse, error)
|
||||
CreateUnsignedTransactions(ctx context.Context, in *CreateUnsignedTransactionsRequest, opts ...grpc.CallOption) (*CreateUnsignedTransactionsResponse, error)
|
||||
ShowAddresses(ctx context.Context, in *ShowAddressesRequest, opts ...grpc.CallOption) (*ShowAddressesResponse, error)
|
||||
NewAddress(ctx context.Context, in *NewAddressRequest, opts ...grpc.CallOption) (*NewAddressResponse, error)
|
||||
Shutdown(ctx context.Context, in *ShutdownRequest, opts ...grpc.CallOption) (*ShutdownResponse, error)
|
||||
Broadcast(ctx context.Context, in *BroadcastRequest, opts ...grpc.CallOption) (*BroadcastResponse, error)
|
||||
// Since SendRequest contains a password - this command should only be used on a trusted or secure connection
|
||||
Send(ctx context.Context, in *SendRequest, opts ...grpc.CallOption) (*SendResponse, error)
|
||||
// Since SignRequest contains a password - this command should only be used on a trusted or secure connection
|
||||
Sign(ctx context.Context, in *SignRequest, opts ...grpc.CallOption) (*SignResponse, error)
|
||||
}
|
||||
|
||||
type kaspawalletdClient struct {
|
||||
@@ -36,16 +45,25 @@ func NewKaspawalletdClient(cc grpc.ClientConnInterface) KaspawalletdClient {
|
||||
|
||||
func (c *kaspawalletdClient) GetBalance(ctx context.Context, in *GetBalanceRequest, opts ...grpc.CallOption) (*GetBalanceResponse, error) {
|
||||
out := new(GetBalanceResponse)
|
||||
err := c.cc.Invoke(ctx, "/kaspawalletd/GetBalance", in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, "/kaspawalletd.kaspawalletd/GetBalance", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *kaspawalletdClient) CreateUnsignedTransaction(ctx context.Context, in *CreateUnsignedTransactionRequest, opts ...grpc.CallOption) (*CreateUnsignedTransactionResponse, error) {
|
||||
out := new(CreateUnsignedTransactionResponse)
|
||||
err := c.cc.Invoke(ctx, "/kaspawalletd/CreateUnsignedTransaction", in, out, opts...)
|
||||
func (c *kaspawalletdClient) GetExternalSpendableUTXOs(ctx context.Context, in *GetExternalSpendableUTXOsRequest, opts ...grpc.CallOption) (*GetExternalSpendableUTXOsResponse, error) {
|
||||
out := new(GetExternalSpendableUTXOsResponse)
|
||||
err := c.cc.Invoke(ctx, "/kaspawalletd.kaspawalletd/GetExternalSpendableUTXOs", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *kaspawalletdClient) CreateUnsignedTransactions(ctx context.Context, in *CreateUnsignedTransactionsRequest, opts ...grpc.CallOption) (*CreateUnsignedTransactionsResponse, error) {
|
||||
out := new(CreateUnsignedTransactionsResponse)
|
||||
err := c.cc.Invoke(ctx, "/kaspawalletd.kaspawalletd/CreateUnsignedTransactions", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -54,7 +72,7 @@ func (c *kaspawalletdClient) CreateUnsignedTransaction(ctx context.Context, in *
|
||||
|
||||
func (c *kaspawalletdClient) ShowAddresses(ctx context.Context, in *ShowAddressesRequest, opts ...grpc.CallOption) (*ShowAddressesResponse, error) {
|
||||
out := new(ShowAddressesResponse)
|
||||
err := c.cc.Invoke(ctx, "/kaspawalletd/ShowAddresses", in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, "/kaspawalletd.kaspawalletd/ShowAddresses", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -63,7 +81,7 @@ func (c *kaspawalletdClient) ShowAddresses(ctx context.Context, in *ShowAddresse
|
||||
|
||||
func (c *kaspawalletdClient) NewAddress(ctx context.Context, in *NewAddressRequest, opts ...grpc.CallOption) (*NewAddressResponse, error) {
|
||||
out := new(NewAddressResponse)
|
||||
err := c.cc.Invoke(ctx, "/kaspawalletd/NewAddress", in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, "/kaspawalletd.kaspawalletd/NewAddress", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -72,7 +90,7 @@ func (c *kaspawalletdClient) NewAddress(ctx context.Context, in *NewAddressReque
|
||||
|
||||
func (c *kaspawalletdClient) Shutdown(ctx context.Context, in *ShutdownRequest, opts ...grpc.CallOption) (*ShutdownResponse, error) {
|
||||
out := new(ShutdownResponse)
|
||||
err := c.cc.Invoke(ctx, "/kaspawalletd/Shutdown", in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, "/kaspawalletd.kaspawalletd/Shutdown", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -81,7 +99,25 @@ func (c *kaspawalletdClient) Shutdown(ctx context.Context, in *ShutdownRequest,
|
||||
|
||||
func (c *kaspawalletdClient) Broadcast(ctx context.Context, in *BroadcastRequest, opts ...grpc.CallOption) (*BroadcastResponse, error) {
|
||||
out := new(BroadcastResponse)
|
||||
err := c.cc.Invoke(ctx, "/kaspawalletd/Broadcast", in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, "/kaspawalletd.kaspawalletd/Broadcast", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *kaspawalletdClient) Send(ctx context.Context, in *SendRequest, opts ...grpc.CallOption) (*SendResponse, error) {
|
||||
out := new(SendResponse)
|
||||
err := c.cc.Invoke(ctx, "/kaspawalletd.kaspawalletd/Send", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *kaspawalletdClient) Sign(ctx context.Context, in *SignRequest, opts ...grpc.CallOption) (*SignResponse, error) {
|
||||
out := new(SignResponse)
|
||||
err := c.cc.Invoke(ctx, "/kaspawalletd.kaspawalletd/Sign", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -93,11 +129,16 @@ func (c *kaspawalletdClient) Broadcast(ctx context.Context, in *BroadcastRequest
|
||||
// for forward compatibility
|
||||
type KaspawalletdServer interface {
|
||||
GetBalance(context.Context, *GetBalanceRequest) (*GetBalanceResponse, error)
|
||||
CreateUnsignedTransaction(context.Context, *CreateUnsignedTransactionRequest) (*CreateUnsignedTransactionResponse, error)
|
||||
GetExternalSpendableUTXOs(context.Context, *GetExternalSpendableUTXOsRequest) (*GetExternalSpendableUTXOsResponse, error)
|
||||
CreateUnsignedTransactions(context.Context, *CreateUnsignedTransactionsRequest) (*CreateUnsignedTransactionsResponse, error)
|
||||
ShowAddresses(context.Context, *ShowAddressesRequest) (*ShowAddressesResponse, error)
|
||||
NewAddress(context.Context, *NewAddressRequest) (*NewAddressResponse, error)
|
||||
Shutdown(context.Context, *ShutdownRequest) (*ShutdownResponse, error)
|
||||
Broadcast(context.Context, *BroadcastRequest) (*BroadcastResponse, error)
|
||||
// Since SendRequest contains a password - this command should only be used on a trusted or secure connection
|
||||
Send(context.Context, *SendRequest) (*SendResponse, error)
|
||||
// Since SignRequest contains a password - this command should only be used on a trusted or secure connection
|
||||
Sign(context.Context, *SignRequest) (*SignResponse, error)
|
||||
mustEmbedUnimplementedKaspawalletdServer()
|
||||
}
|
||||
|
||||
@@ -108,8 +149,11 @@ type UnimplementedKaspawalletdServer struct {
|
||||
func (UnimplementedKaspawalletdServer) GetBalance(context.Context, *GetBalanceRequest) (*GetBalanceResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetBalance not implemented")
|
||||
}
|
||||
func (UnimplementedKaspawalletdServer) CreateUnsignedTransaction(context.Context, *CreateUnsignedTransactionRequest) (*CreateUnsignedTransactionResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method CreateUnsignedTransaction not implemented")
|
||||
func (UnimplementedKaspawalletdServer) GetExternalSpendableUTXOs(context.Context, *GetExternalSpendableUTXOsRequest) (*GetExternalSpendableUTXOsResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetExternalSpendableUTXOs not implemented")
|
||||
}
|
||||
func (UnimplementedKaspawalletdServer) CreateUnsignedTransactions(context.Context, *CreateUnsignedTransactionsRequest) (*CreateUnsignedTransactionsResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method CreateUnsignedTransactions not implemented")
|
||||
}
|
||||
func (UnimplementedKaspawalletdServer) ShowAddresses(context.Context, *ShowAddressesRequest) (*ShowAddressesResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ShowAddresses not implemented")
|
||||
@@ -123,6 +167,12 @@ func (UnimplementedKaspawalletdServer) Shutdown(context.Context, *ShutdownReques
|
||||
func (UnimplementedKaspawalletdServer) Broadcast(context.Context, *BroadcastRequest) (*BroadcastResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Broadcast not implemented")
|
||||
}
|
||||
func (UnimplementedKaspawalletdServer) Send(context.Context, *SendRequest) (*SendResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Send not implemented")
|
||||
}
|
||||
func (UnimplementedKaspawalletdServer) Sign(context.Context, *SignRequest) (*SignResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Sign not implemented")
|
||||
}
|
||||
func (UnimplementedKaspawalletdServer) mustEmbedUnimplementedKaspawalletdServer() {}
|
||||
|
||||
// UnsafeKaspawalletdServer may be embedded to opt out of forward compatibility for this service.
|
||||
@@ -146,7 +196,7 @@ func _Kaspawalletd_GetBalance_Handler(srv interface{}, ctx context.Context, dec
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/kaspawalletd/GetBalance",
|
||||
FullMethod: "/kaspawalletd.kaspawalletd/GetBalance",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(KaspawalletdServer).GetBalance(ctx, req.(*GetBalanceRequest))
|
||||
@@ -154,20 +204,38 @@ func _Kaspawalletd_GetBalance_Handler(srv interface{}, ctx context.Context, dec
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Kaspawalletd_CreateUnsignedTransaction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(CreateUnsignedTransactionRequest)
|
||||
func _Kaspawalletd_GetExternalSpendableUTXOs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetExternalSpendableUTXOsRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(KaspawalletdServer).CreateUnsignedTransaction(ctx, in)
|
||||
return srv.(KaspawalletdServer).GetExternalSpendableUTXOs(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/kaspawalletd/CreateUnsignedTransaction",
|
||||
FullMethod: "/kaspawalletd.kaspawalletd/GetExternalSpendableUTXOs",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(KaspawalletdServer).CreateUnsignedTransaction(ctx, req.(*CreateUnsignedTransactionRequest))
|
||||
return srv.(KaspawalletdServer).GetExternalSpendableUTXOs(ctx, req.(*GetExternalSpendableUTXOsRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Kaspawalletd_CreateUnsignedTransactions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(CreateUnsignedTransactionsRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(KaspawalletdServer).CreateUnsignedTransactions(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/kaspawalletd.kaspawalletd/CreateUnsignedTransactions",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(KaspawalletdServer).CreateUnsignedTransactions(ctx, req.(*CreateUnsignedTransactionsRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
@@ -182,7 +250,7 @@ func _Kaspawalletd_ShowAddresses_Handler(srv interface{}, ctx context.Context, d
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/kaspawalletd/ShowAddresses",
|
||||
FullMethod: "/kaspawalletd.kaspawalletd/ShowAddresses",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(KaspawalletdServer).ShowAddresses(ctx, req.(*ShowAddressesRequest))
|
||||
@@ -200,7 +268,7 @@ func _Kaspawalletd_NewAddress_Handler(srv interface{}, ctx context.Context, dec
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/kaspawalletd/NewAddress",
|
||||
FullMethod: "/kaspawalletd.kaspawalletd/NewAddress",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(KaspawalletdServer).NewAddress(ctx, req.(*NewAddressRequest))
|
||||
@@ -218,7 +286,7 @@ func _Kaspawalletd_Shutdown_Handler(srv interface{}, ctx context.Context, dec fu
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/kaspawalletd/Shutdown",
|
||||
FullMethod: "/kaspawalletd.kaspawalletd/Shutdown",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(KaspawalletdServer).Shutdown(ctx, req.(*ShutdownRequest))
|
||||
@@ -236,7 +304,7 @@ func _Kaspawalletd_Broadcast_Handler(srv interface{}, ctx context.Context, dec f
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/kaspawalletd/Broadcast",
|
||||
FullMethod: "/kaspawalletd.kaspawalletd/Broadcast",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(KaspawalletdServer).Broadcast(ctx, req.(*BroadcastRequest))
|
||||
@@ -244,11 +312,47 @@ func _Kaspawalletd_Broadcast_Handler(srv interface{}, ctx context.Context, dec f
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Kaspawalletd_Send_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(SendRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(KaspawalletdServer).Send(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/kaspawalletd.kaspawalletd/Send",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(KaspawalletdServer).Send(ctx, req.(*SendRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Kaspawalletd_Sign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(SignRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(KaspawalletdServer).Sign(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/kaspawalletd.kaspawalletd/Sign",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(KaspawalletdServer).Sign(ctx, req.(*SignRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
// Kaspawalletd_ServiceDesc is the grpc.ServiceDesc for Kaspawalletd service.
|
||||
// It's only intended for direct use with grpc.RegisterService,
|
||||
// and not to be introspected or modified (even as a copy)
|
||||
var Kaspawalletd_ServiceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "kaspawalletd",
|
||||
ServiceName: "kaspawalletd.kaspawalletd",
|
||||
HandlerType: (*KaspawalletdServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
@@ -256,8 +360,12 @@ var Kaspawalletd_ServiceDesc = grpc.ServiceDesc{
|
||||
Handler: _Kaspawalletd_GetBalance_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "CreateUnsignedTransaction",
|
||||
Handler: _Kaspawalletd_CreateUnsignedTransaction_Handler,
|
||||
MethodName: "GetExternalSpendableUTXOs",
|
||||
Handler: _Kaspawalletd_GetExternalSpendableUTXOs_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "CreateUnsignedTransactions",
|
||||
Handler: _Kaspawalletd_CreateUnsignedTransactions_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "ShowAddresses",
|
||||
@@ -275,6 +383,14 @@ var Kaspawalletd_ServiceDesc = grpc.ServiceDesc{
|
||||
MethodName: "Broadcast",
|
||||
Handler: _Kaspawalletd_Broadcast_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "Send",
|
||||
Handler: _Kaspawalletd_Send_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "Sign",
|
||||
Handler: _Kaspawalletd_Sign_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "kaspawalletd.proto",
|
||||
|
||||
@@ -10,15 +10,15 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func (s *server) changeAddress() (util.Address, error) {
|
||||
func (s *server) changeAddress() (util.Address, *walletAddress, error) {
|
||||
err := s.keysFile.SetLastUsedInternalIndex(s.keysFile.LastUsedInternalIndex() + 1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
err = s.keysFile.Save()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
walletAddr := &walletAddress{
|
||||
@@ -27,7 +27,11 @@ func (s *server) changeAddress() (util.Address, error) {
|
||||
keyChain: libkaspawallet.InternalKeychain,
|
||||
}
|
||||
path := s.walletAddressPath(walletAddr)
|
||||
return libkaspawallet.Address(s.params, s.keysFile.ExtendedPublicKeys, s.keysFile.MinimumSignatures, path, s.keysFile.ECDSA)
|
||||
address, err := libkaspawallet.Address(s.params, s.keysFile.ExtendedPublicKeys, s.keysFile.MinimumSignatures, path, s.keysFile.ECDSA)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return address, walletAddr, nil
|
||||
}
|
||||
|
||||
func (s *server) ShowAddresses(_ context.Context, request *pb.ShowAddressesRequest) (*pb.ShowAddressesResponse, error) {
|
||||
|
||||
@@ -6,23 +6,48 @@ import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/pb"
|
||||
"github.com/kaspanet/kaspad/cmd/kaspawallet/libkaspawallet"
|
||||
"github.com/kaspanet/kaspad/cmd/kaspawallet/libkaspawallet/serialization"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/rpcclient"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func (s *server) Broadcast(_ context.Context, request *pb.BroadcastRequest) (*pb.BroadcastResponse, error) {
|
||||
tx, err := libkaspawallet.ExtractTransaction(request.Transaction, s.keysFile.ECDSA)
|
||||
txIDs, err := s.broadcast(request.Transactions, request.IsDomain)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
txID, err := sendTransaction(s.rpcClient, tx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return &pb.BroadcastResponse{TxIDs: txIDs}, nil
|
||||
}
|
||||
|
||||
func (s *server) broadcast(transactions [][]byte, isDomain bool) ([]string, error) {
|
||||
|
||||
txIDs := make([]string, len(transactions))
|
||||
var tx *externalapi.DomainTransaction
|
||||
var err error
|
||||
|
||||
for i, transaction := range transactions {
|
||||
|
||||
if isDomain {
|
||||
tx, err = serialization.DeserializeDomainTransaction(transaction)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else if !isDomain { //default in proto3 is false
|
||||
tx, err = libkaspawallet.ExtractTransaction(transaction, s.keysFile.ECDSA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
txIDs[i], err = sendTransaction(s.rpcClient, tx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &pb.BroadcastResponse{TxID: txID}, nil
|
||||
return txIDs, nil
|
||||
}
|
||||
|
||||
func sendTransaction(client *rpcclient.RPCClient, tx *externalapi.DomainTransaction) (string, error) {
|
||||
|
||||
@@ -10,10 +10,23 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func (s *server) CreateUnsignedTransaction(_ context.Context, request *pb.CreateUnsignedTransactionRequest) (*pb.CreateUnsignedTransactionResponse, error) {
|
||||
// TODO: Implement a better fee estimation mechanism
|
||||
const feePerInput = 10000
|
||||
|
||||
func (s *server) CreateUnsignedTransactions(_ context.Context, request *pb.CreateUnsignedTransactionsRequest) (
|
||||
*pb.CreateUnsignedTransactionsResponse, error) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
unsignedTransactions, err := s.createUnsignedTransactions(request.Address, request.Amount)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &pb.CreateUnsignedTransactionsResponse{UnsignedTransactions: unsignedTransactions}, nil
|
||||
}
|
||||
|
||||
func (s *server) createUnsignedTransactions(address string, amount uint64) ([][]byte, error) {
|
||||
if !s.isSynced() {
|
||||
return nil, errors.New("server is not synced")
|
||||
}
|
||||
@@ -23,19 +36,17 @@ func (s *server) CreateUnsignedTransaction(_ context.Context, request *pb.Create
|
||||
return nil, err
|
||||
}
|
||||
|
||||
toAddress, err := util.DecodeAddress(request.Address, s.params.Prefix)
|
||||
toAddress, err := util.DecodeAddress(address, s.params.Prefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO: Implement a better fee estimation mechanism
|
||||
const feePerInput = 10000
|
||||
selectedUTXOs, changeSompi, err := s.selectUTXOs(request.Amount, feePerInput)
|
||||
selectedUTXOs, changeSompi, err := s.selectUTXOs(amount, feePerInput)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
changeAddress, err := s.changeAddress()
|
||||
changeAddress, changeWalletAddress, err := s.changeAddress()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -44,7 +55,7 @@ func (s *server) CreateUnsignedTransaction(_ context.Context, request *pb.Create
|
||||
s.keysFile.MinimumSignatures,
|
||||
[]*libkaspawallet.Payment{{
|
||||
Address: toAddress,
|
||||
Amount: request.Amount,
|
||||
Amount: amount,
|
||||
}, {
|
||||
Address: changeAddress,
|
||||
Amount: changeSompi,
|
||||
@@ -53,7 +64,11 @@ func (s *server) CreateUnsignedTransaction(_ context.Context, request *pb.Create
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &pb.CreateUnsignedTransactionResponse{UnsignedTransaction: unsignedTransaction}, nil
|
||||
unsignedTransactions, err := s.maybeAutoCompoundTransaction(unsignedTransaction, toAddress, changeAddress, changeWalletAddress)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return unsignedTransactions, nil
|
||||
}
|
||||
|
||||
func (s *server) selectUTXOs(spendAmount uint64, feePerInput uint64) (
|
||||
|
||||
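The fee model here is intentionally simple: a flat feePerInput of 10,000 sompi per consumed input, with whatever remains after the payment and the fee going to the freshly derived change address. A small worked example of that arithmetic, with invented UTXO values that are not taken from this diff:

func exampleChangeCalculation() uint64 {
	const feePerInput = 10000                        // sompi, matching the constant above
	utxoValues := []uint64{150_000_000, 150_000_000} // two selected UTXOs (illustrative)
	sendAmount := uint64(200_000_000)                // payment amount in sompi

	total := uint64(0)
	for _, value := range utxoValues {
		total += value
	}
	fee := uint64(len(utxoValues)) * feePerInput // 2 inputs * 10,000 = 20,000 sompi
	// 300,000,000 - 200,000,000 - 20,000 = 99,980,000 sompi paid to the change address
	return total - sendAmount - fee
}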
63 cmd/kaspawallet/daemon/server/external_spendable_utxos.go (new file)
@@ -0,0 +1,63 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/pb"
|
||||
"github.com/kaspanet/kaspad/cmd/kaspawallet/libkaspawallet"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
)
|
||||
|
||||
func (s *server) GetExternalSpendableUTXOs(_ context.Context, request *pb.GetExternalSpendableUTXOsRequest) (*pb.GetExternalSpendableUTXOsResponse, error) {
|
||||
s.lock.RLock()
|
||||
defer s.lock.RUnlock()
|
||||
|
||||
_, err := util.DecodeAddress(request.Address, s.params.Prefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
externalUTXOs, err := s.rpcClient.GetUTXOsByAddresses([]string{request.Address})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
selectedUTXOs, err := s.selectExternalSpendableUTXOs(externalUTXOs, request.Address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &pb.GetExternalSpendableUTXOsResponse{
|
||||
Entries: selectedUTXOs,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *server) selectExternalSpendableUTXOs(externalUTXOs *appmessage.GetUTXOsByAddressesResponseMessage, address string) ([]*pb.UtxosByAddressesEntry, error) {
|
||||
|
||||
dagInfo, err := s.rpcClient.GetBlockDAGInfo()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
daaScore := dagInfo.VirtualDAAScore
|
||||
maturity := s.params.BlockCoinbaseMaturity
|
||||
|
||||
// We don't preallocate the slice since, with unspendable UTXOs filtered out, we don't know its final size up front
|
||||
var selectedExternalUtxos []*pb.UtxosByAddressesEntry
|
||||
|
||||
for _, entry := range externalUTXOs.Entries {
|
||||
if !isExternalUTXOSpendable(entry, daaScore, maturity) {
|
||||
continue
|
||||
}
|
||||
selectedExternalUtxos = append(selectedExternalUtxos, libkaspawallet.AppMessageUTXOToKaspawalletdUTXO(entry))
|
||||
}
|
||||
|
||||
return selectedExternalUtxos, nil
|
||||
}
|
||||
|
||||
func isExternalUTXOSpendable(entry *appmessage.UTXOsByAddressesEntry, virtualDAAScore uint64, coinbaseMaturity uint64) bool {
|
||||
if !entry.UTXOEntry.IsCoinbase {
|
||||
return true
|
||||
} else if entry.UTXOEntry.Amount <= feePerInput {
|
||||
return false
|
||||
}
|
||||
return entry.UTXOEntry.BlockDAAScore+coinbaseMaturity < virtualDAAScore
|
||||
}
|
||||
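To make the maturity check above concrete: a coinbase UTXO is only reported as spendable once enough DAA score has accumulated past the block that created it. A minimal sketch with invented numbers, assuming a coinbase maturity of 100 (the real value comes from the network parameters):

func exampleMaturityCheck() bool {
	const coinbaseMaturity = 100     // assumed BlockCoinbaseMaturity value
	blockDAAScore := uint64(1_000)   // DAA score of the block that created the coinbase UTXO
	virtualDAAScore := uint64(1_050) // current virtual DAA score

	// 1,000 + 100 = 1,100 is not below 1,050, so the UTXO is still immature and
	// is skipped; once the virtual DAA score passes 1,100 it becomes spendable.
	return blockDAAScore+coinbaseMaturity < virtualDAAScore
}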
25 cmd/kaspawallet/daemon/server/send.go (new file)
@@ -0,0 +1,25 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/pb"
|
||||
)
|
||||
|
||||
func (s *server) Send(_ context.Context, request *pb.SendRequest) (*pb.SendResponse, error) {
|
||||
unsignedTransactions, err := s.createUnsignedTransactions(request.ToAddress, request.Amount)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
signedTransactions, err := s.signTransactions(unsignedTransactions, request.Password)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
txIDs, err := s.broadcast(signedTransactions, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &pb.SendResponse{TxIDs: txIDs}, nil
|
||||
}
|
||||
@@ -7,6 +7,8 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/util/txmass"
|
||||
|
||||
"github.com/kaspanet/kaspad/util/profiling"
|
||||
|
||||
"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/pb"
|
||||
@@ -32,6 +34,7 @@ type server struct {
|
||||
keysFile *keys.File
|
||||
shutdown chan struct{}
|
||||
addressSet walletAddressSet
|
||||
txMassCalculator *txmass.Calculator
|
||||
}
|
||||
|
||||
// Start starts the kaspawalletd server
|
||||
@@ -69,6 +72,7 @@ func Start(params *dagconfig.Params, listen, rpcServer string, keysFilePath stri
|
||||
keysFile: keysFile,
|
||||
shutdown: make(chan struct{}),
|
||||
addressSet: make(walletAddressSet),
|
||||
txMassCalculator: txmass.NewCalculator(params.MassPerTxByte, params.MassPerScriptPubKeyByte, params.MassPerSigOp),
|
||||
}
|
||||
|
||||
spawn("serverInstance.sync", func() {
|
||||
|
||||
36 cmd/kaspawallet/daemon/server/sign.go (new file)
@@ -0,0 +1,36 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/kaspanet/kaspad/cmd/kaspawallet/libkaspawallet"
|
||||
|
||||
"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/pb"
|
||||
)
|
||||
|
||||
func (s *server) Sign(_ context.Context, request *pb.SignRequest) (*pb.SignResponse, error) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
signedTransactions, err := s.signTransactions(request.UnsignedTransactions, request.Password)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &pb.SignResponse{SignedTransactions: signedTransactions}, nil
|
||||
}
|
||||
|
||||
func (s *server) signTransactions(unsignedTransactions [][]byte, password string) ([][]byte, error) {
|
||||
mnemonics, err := s.keysFile.DecryptMnemonics(password)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
signedTransactions := make([][]byte, len(unsignedTransactions))
|
||||
for i, unsignedTransaction := range unsignedTransactions {
|
||||
signedTransaction, err := libkaspawallet.Sign(s.params, mnemonics, unsignedTransaction, s.keysFile.ECDSA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
signedTransactions[i] = signedTransaction
|
||||
}
|
||||
return signedTransactions, nil
|
||||
}
|
||||
278 cmd/kaspawallet/daemon/server/split_transaction.go (new file)
@@ -0,0 +1,278 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/go-secp256k1"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kaspanet/kaspad/cmd/kaspawallet/libkaspawallet"
|
||||
"github.com/kaspanet/kaspad/cmd/kaspawallet/libkaspawallet/serialization"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
|
||||
"github.com/kaspanet/kaspad/domain/miningmanager/mempool"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
)
|
||||
|
||||
// maybeAutoCompoundTransaction checks if a transaction's mass is higher than what is allowed for a standard
// transaction.
// If it is - the transaction is split into multiple transactions, each with a portion of the inputs and a single output
// into a change address.
// An additional `mergeTransaction` is generated - which merges the outputs of the above splits into a single output
// paying to the original transaction's payee.
|
||||
func (s *server) maybeAutoCompoundTransaction(transactionBytes []byte, toAddress util.Address,
|
||||
changeAddress util.Address, changeWalletAddress *walletAddress) ([][]byte, error) {
|
||||
transaction, err := serialization.DeserializePartiallySignedTransaction(transactionBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
splitTransactions, err := s.maybeSplitTransaction(transaction, changeAddress)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(splitTransactions) > 1 {
|
||||
mergeTransaction, err := s.mergeTransaction(splitTransactions, transaction, toAddress, changeAddress, changeWalletAddress)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
splitTransactions = append(splitTransactions, mergeTransaction)
|
||||
}
|
||||
|
||||
splitTransactionsBytes := make([][]byte, len(splitTransactions))
|
||||
for i, splitTransaction := range splitTransactions {
|
||||
splitTransactionsBytes[i], err = serialization.SerializePartiallySignedTransaction(splitTransaction)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return splitTransactionsBytes, nil
|
||||
}
|
||||
|
||||
func (s *server) mergeTransaction(
|
||||
splitTransactions []*serialization.PartiallySignedTransaction,
|
||||
originalTransaction *serialization.PartiallySignedTransaction,
|
||||
toAddress util.Address,
|
||||
changeAddress util.Address,
|
||||
changeWalletAddress *walletAddress,
|
||||
) (*serialization.PartiallySignedTransaction, error) {
|
||||
numOutputs := len(originalTransaction.Tx.Outputs)
|
||||
if numOutputs > 2 || numOutputs == 0 {
|
||||
// This is a sanity check to make sure originalTransaction has either 1 or 2 outputs:
|
||||
// 1. For the payment itself
|
||||
// 2. (optional) for change
|
||||
return nil, errors.Errorf("original transaction has %d outputs, while 1 or 2 are expected",
|
||||
len(originalTransaction.Tx.Outputs))
|
||||
}
|
||||
|
||||
totalValue := uint64(0)
|
||||
sentValue := originalTransaction.Tx.Outputs[0].Value
|
||||
utxos := make([]*libkaspawallet.UTXO, len(splitTransactions))
|
||||
for i, splitTransaction := range splitTransactions {
|
||||
output := splitTransaction.Tx.Outputs[0]
|
||||
utxos[i] = &libkaspawallet.UTXO{
|
||||
Outpoint: &externalapi.DomainOutpoint{
|
||||
TransactionID: *consensushashing.TransactionID(splitTransaction.Tx),
|
||||
Index: 0,
|
||||
},
|
||||
UTXOEntry: utxo.NewUTXOEntry(output.Value, output.ScriptPublicKey, false, constants.UnacceptedDAAScore),
|
||||
DerivationPath: s.walletAddressPath(changeWalletAddress),
|
||||
}
|
||||
totalValue += output.Value
|
||||
totalValue -= feePerInput
|
||||
}
|
||||
|
||||
if totalValue < sentValue {
|
||||
// Sometimes the fees of the compound transactions leave less value available from the selected
// UTXOs than the amount being sent; in such cases, find additional UTXOs and use them.
|
||||
additionalUTXOs, totalValueAdded, err := s.moreUTXOsForMergeTransaction(utxos, sentValue-totalValue)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
utxos = append(utxos, additionalUTXOs...)
|
||||
totalValue += totalValueAdded
|
||||
}
|
||||
|
||||
payments := []*libkaspawallet.Payment{{
|
||||
Address: toAddress,
|
||||
Amount: sentValue,
|
||||
}}
|
||||
if totalValue > sentValue {
|
||||
payments = append(payments, &libkaspawallet.Payment{
|
||||
Address: changeAddress,
|
||||
Amount: totalValue - sentValue,
|
||||
})
|
||||
}
|
||||
|
||||
mergeTransactionBytes, err := libkaspawallet.CreateUnsignedTransaction(s.keysFile.ExtendedPublicKeys,
|
||||
s.keysFile.MinimumSignatures, payments, utxos)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return serialization.DeserializePartiallySignedTransaction(mergeTransactionBytes)
|
||||
}
|
||||
|
||||
func (s *server) maybeSplitTransaction(transaction *serialization.PartiallySignedTransaction,
|
||||
changeAddress util.Address) ([]*serialization.PartiallySignedTransaction, error) {
|
||||
|
||||
transactionMass, err := s.estimateMassAfterSignatures(transaction)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if transactionMass < mempool.MaximumStandardTransactionMass {
|
||||
return []*serialization.PartiallySignedTransaction{transaction}, nil
|
||||
}
|
||||
|
||||
splitCount, inputCountPerSplit, err := s.splitAndInputPerSplitCounts(transaction, transactionMass, changeAddress)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
splitTransactions := make([]*serialization.PartiallySignedTransaction, splitCount)
|
||||
for i := 0; i < splitCount; i++ {
|
||||
startIndex := i * inputCountPerSplit
|
||||
endIndex := startIndex + inputCountPerSplit
|
||||
var err error
|
||||
splitTransactions[i], err = s.createSplitTransaction(transaction, changeAddress, startIndex, endIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return splitTransactions, nil
|
||||
}
|
||||
|
||||
// splitAndInputPerSplitCounts calculates the number of splits to create, and the number of inputs to assign per split.
|
||||
func (s *server) splitAndInputPerSplitCounts(transaction *serialization.PartiallySignedTransaction, transactionMass uint64,
|
||||
changeAddress util.Address) (splitCount, inputsPerSplitCount int, err error) {
|
||||
|
||||
// Create a dummy transaction which is a clone of the original transaction, but without inputs,
|
||||
// to calculate how much mass all the inputs have
|
||||
transactionWithoutInputs := transaction.Tx.Clone()
|
||||
transactionWithoutInputs.Inputs = []*externalapi.DomainTransactionInput{}
|
||||
massWithoutInputs := s.txMassCalculator.CalculateTransactionMass(transactionWithoutInputs)
|
||||
|
||||
massOfAllInputs := transactionMass - massWithoutInputs
|
||||
|
||||
// Since the transaction was generated by kaspawallet, we assume all inputs have the same number of signatures, and
|
||||
// thus - the same mass.
|
||||
inputCount := len(transaction.Tx.Inputs)
|
||||
massPerInput := massOfAllInputs / uint64(inputCount)
|
||||
if massOfAllInputs%uint64(inputCount) > 0 {
|
||||
massPerInput++
|
||||
}
|
||||
|
||||
// Create another dummy transaction, this time one similar to the split transactions we wish to generate,
|
||||
// but with 0 inputs, to calculate how much mass is available for inputs in the split transactions
|
||||
splitTransactionWithoutInputs, err := s.createSplitTransaction(transaction, changeAddress, 0, 0)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
massForEverythingExceptInputsInSplitTransaction :=
|
||||
s.txMassCalculator.CalculateTransactionMass(splitTransactionWithoutInputs.Tx)
|
||||
massForInputsInSplitTransaction := mempool.MaximumStandardTransactionMass - massForEverythingExceptInputsInSplitTransaction
|
||||
|
||||
inputsPerSplitCount = int(massForInputsInSplitTransaction / massPerInput)
|
||||
splitCount = inputCount / inputsPerSplitCount
|
||||
if inputCount%inputsPerSplitCount > 0 {
|
||||
splitCount++
|
||||
}
|
||||
|
||||
return splitCount, inputsPerSplitCount, nil
|
||||
}
|
||||
|
||||
func (s *server) createSplitTransaction(transaction *serialization.PartiallySignedTransaction,
|
||||
changeAddress util.Address, startIndex int, endIndex int) (*serialization.PartiallySignedTransaction, error) {
|
||||
|
||||
selectedUTXOs := make([]*libkaspawallet.UTXO, 0, endIndex-startIndex)
|
||||
totalSompi := uint64(0)
|
||||
|
||||
for i := startIndex; i < endIndex && i < len(transaction.PartiallySignedInputs); i++ {
|
||||
partiallySignedInput := transaction.PartiallySignedInputs[i]
|
||||
selectedUTXOs = append(selectedUTXOs, &libkaspawallet.UTXO{
|
||||
Outpoint: &transaction.Tx.Inputs[i].PreviousOutpoint,
|
||||
UTXOEntry: utxo.NewUTXOEntry(
|
||||
partiallySignedInput.PrevOutput.Value, partiallySignedInput.PrevOutput.ScriptPublicKey,
|
||||
false, constants.UnacceptedDAAScore),
|
||||
DerivationPath: partiallySignedInput.DerivationPath,
|
||||
})
|
||||
|
||||
totalSompi += selectedUTXOs[i-startIndex].UTXOEntry.Amount()
|
||||
totalSompi -= feePerInput
|
||||
}
|
||||
unsignedTransactionBytes, err := libkaspawallet.CreateUnsignedTransaction(s.keysFile.ExtendedPublicKeys,
|
||||
s.keysFile.MinimumSignatures,
|
||||
[]*libkaspawallet.Payment{{
|
||||
Address: changeAddress,
|
||||
Amount: totalSompi,
|
||||
}}, selectedUTXOs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return serialization.DeserializePartiallySignedTransaction(unsignedTransactionBytes)
|
||||
}
|
||||
|
||||
func (s *server) estimateMassAfterSignatures(transaction *serialization.PartiallySignedTransaction) (uint64, error) {
|
||||
transaction = transaction.Clone()
|
||||
var signatureSize uint64
|
||||
if s.keysFile.ECDSA {
|
||||
signatureSize = secp256k1.SerializedECDSASignatureSize
|
||||
} else {
|
||||
signatureSize = secp256k1.SerializedSchnorrSignatureSize
|
||||
}
|
||||
|
||||
for i, input := range transaction.PartiallySignedInputs {
|
||||
for j, pubKeyPair := range input.PubKeySignaturePairs {
|
||||
if uint32(j) >= s.keysFile.MinimumSignatures {
|
||||
break
|
||||
}
|
||||
pubKeyPair.Signature = make([]byte, signatureSize+1) // +1 for SigHashType
|
||||
}
|
||||
transaction.Tx.Inputs[i].SigOpCount = byte(len(input.PubKeySignaturePairs))
|
||||
}
|
||||
|
||||
transactionWithSignatures, err := libkaspawallet.ExtractTransactionDeserialized(transaction, s.keysFile.ECDSA)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return s.txMassCalculator.CalculateTransactionMass(transactionWithSignatures), nil
|
||||
}
|
||||
|
||||
func (s *server) moreUTXOsForMergeTransaction(alreadySelectedUTXOs []*libkaspawallet.UTXO, requiredAmount uint64) (
|
||||
additionalUTXOs []*libkaspawallet.UTXO, totalValueAdded uint64, err error) {
|
||||
|
||||
dagInfo, err := s.rpcClient.GetBlockDAGInfo()
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
alreadySelectedUTXOsMap := make(map[externalapi.DomainOutpoint]struct{}, len(alreadySelectedUTXOs))
|
||||
for _, alreadySelectedUTXO := range alreadySelectedUTXOs {
|
||||
alreadySelectedUTXOsMap[*alreadySelectedUTXO.Outpoint] = struct{}{}
|
||||
}
|
||||
|
||||
for _, utxo := range s.utxosSortedByAmount {
|
||||
if _, ok := alreadySelectedUTXOsMap[*utxo.Outpoint]; ok {
|
||||
continue
|
||||
}
|
||||
if !isUTXOSpendable(utxo, dagInfo.VirtualDAAScore, s.params.BlockCoinbaseMaturity) {
|
||||
continue
|
||||
}
|
||||
additionalUTXOs = append(additionalUTXOs, &libkaspawallet.UTXO{
|
||||
Outpoint: utxo.Outpoint,
|
||||
UTXOEntry: utxo.UTXOEntry,
|
||||
DerivationPath: s.walletAddressPath(utxo.address)})
|
||||
totalValueAdded += utxo.UTXOEntry.Amount() - feePerInput
|
||||
if totalValueAdded >= requiredAmount {
|
||||
break
|
||||
}
|
||||
}
|
||||
if totalValueAdded < requiredAmount {
|
||||
return nil, 0, errors.Errorf("Insufficient funds for merge transaction")
|
||||
}
|
||||
|
||||
return additionalUTXOs, totalValueAdded, nil
|
||||
}
|
||||
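As a rough illustration of the arithmetic in splitAndInputPerSplitCounts: the mass budget of a standard transaction, minus the fixed overhead of a split transaction, is divided by the rounded-up per-input mass to get the inputs per split, and the input count is then divided by that (rounding up) to get the number of splits. Every number below is invented for the example; only the formulas mirror the code:

func exampleSplitCounts() (splitCount, inputsPerSplit int) {
	const maxStandardMass = 100_000 // assumed mempool.MaximumStandardTransactionMass

	inputCount := 1_000
	transactionMass := 1_210_000
	massWithoutInputs := 10_000
	massOfAllInputs := transactionMass - massWithoutInputs // 1,200,000
	massPerInput := massOfAllInputs / inputCount           // 1,200 (divides evenly in this example)

	splitOverheadMass := 2_000                           // mass of a split transaction with zero inputs
	massForInputs := maxStandardMass - splitOverheadMass // 98,000
	inputsPerSplit = massForInputs / massPerInput        // 81
	splitCount = inputCount / inputsPerSplit             // 12
	if inputCount%inputsPerSplit > 0 {
		splitCount++ // 13 split transactions, plus one merge transaction appended afterwards
	}
	return splitCount, inputsPerSplit
}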
152 cmd/kaspawallet/daemon/server/split_transaction_test.go (new file)
@@ -0,0 +1,152 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/cmd/kaspawallet/libkaspawallet/serialization"
|
||||
|
||||
"github.com/kaspanet/kaspad/cmd/kaspawallet/keys"
|
||||
"github.com/kaspanet/kaspad/util/txmass"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/dagconfig"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
|
||||
|
||||
"github.com/kaspanet/kaspad/cmd/kaspawallet/libkaspawallet"
|
||||
"github.com/kaspanet/kaspad/domain/consensus"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
|
||||
)
|
||||
|
||||
func TestEstimateMassAfterSignatures(t *testing.T) {
|
||||
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
|
||||
unsignedTransactionBytes, mnemonics, params, teardown := testEstimateMassIncreaseForSignaturesSetUp(t, consensusConfig)
|
||||
defer teardown(false)
|
||||
|
||||
serverInstance := &server{
|
||||
params: params,
|
||||
keysFile: &keys.File{MinimumSignatures: 2},
|
||||
shutdown: make(chan struct{}),
|
||||
addressSet: make(walletAddressSet),
|
||||
txMassCalculator: txmass.NewCalculator(params.MassPerTxByte, params.MassPerScriptPubKeyByte, params.MassPerSigOp),
|
||||
}
|
||||
|
||||
unsignedTransaction, err := serialization.DeserializePartiallySignedTransaction(unsignedTransactionBytes)
|
||||
if err != nil {
|
||||
t.Fatalf("Error deserializing unsignedTransaction: %s", err)
|
||||
}
|
||||
|
||||
estimatedMassAfterSignatures, err := serverInstance.estimateMassAfterSignatures(unsignedTransaction)
|
||||
if err != nil {
|
||||
t.Fatalf("Error from estimateMassAfterSignatures: %s", err)
|
||||
}
|
||||
|
||||
signedTxStep1Bytes, err := libkaspawallet.Sign(params, mnemonics[:1], unsignedTransactionBytes, false)
|
||||
if err != nil {
|
||||
t.Fatalf("Sign: %+v", err)
|
||||
}
|
||||
|
||||
signedTxStep2Bytes, err := libkaspawallet.Sign(params, mnemonics[1:2], signedTxStep1Bytes, false)
|
||||
if err != nil {
|
||||
t.Fatalf("Sign: %+v", err)
|
||||
}
|
||||
|
||||
extractedSignedTx, err := libkaspawallet.ExtractTransaction(signedTxStep2Bytes, false)
|
||||
if err != nil {
|
||||
t.Fatalf("ExtractTransaction: %+v", err)
|
||||
}
|
||||
|
||||
actualMassAfterSignatures := serverInstance.txMassCalculator.CalculateTransactionMass(extractedSignedTx)
|
||||
|
||||
if estimatedMassAfterSignatures != actualMassAfterSignatures {
|
||||
t.Errorf("Estimated mass after signatures: %d but actually got %d",
|
||||
estimatedMassAfterSignatures, actualMassAfterSignatures)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func testEstimateMassIncreaseForSignaturesSetUp(t *testing.T, consensusConfig *consensus.Config) (
|
||||
[]byte, []string, *dagconfig.Params, func(keepDataDir bool)) {
|
||||
|
||||
consensusConfig.BlockCoinbaseMaturity = 0
|
||||
params := &consensusConfig.Params
|
||||
|
||||
tc, teardown, err := consensus.NewFactory().NewTestConsensus(consensusConfig, "TestMultisig")
|
||||
if err != nil {
|
||||
t.Fatalf("Error setting up tc: %+v", err)
|
||||
}
|
||||
|
||||
const numKeys = 3
|
||||
mnemonics := make([]string, numKeys)
|
||||
publicKeys := make([]string, numKeys)
|
||||
for i := 0; i < numKeys; i++ {
|
||||
var err error
|
||||
mnemonics[i], err = libkaspawallet.CreateMnemonic()
|
||||
if err != nil {
|
||||
t.Fatalf("CreateMnemonic: %+v", err)
|
||||
}
|
||||
|
||||
publicKeys[i], err = libkaspawallet.MasterPublicKeyFromMnemonic(&consensusConfig.Params, mnemonics[i], true)
|
||||
if err != nil {
|
||||
t.Fatalf("MasterPublicKeyFromMnemonic: %+v", err)
|
||||
}
|
||||
}
|
||||
|
||||
const minimumSignatures = 2
|
||||
path := "m/1/2/3"
|
||||
address, err := libkaspawallet.Address(params, publicKeys, minimumSignatures, path, false)
|
||||
if err != nil {
|
||||
t.Fatalf("Address: %+v", err)
|
||||
}
|
||||
|
||||
scriptPublicKey, err := txscript.PayToAddrScript(address)
|
||||
if err != nil {
|
||||
t.Fatalf("PayToAddrScript: %+v", err)
|
||||
}
|
||||
|
||||
coinbaseData := &externalapi.DomainCoinbaseData{
|
||||
ScriptPublicKey: scriptPublicKey,
|
||||
ExtraData: nil,
|
||||
}
|
||||
|
||||
fundingBlockHash, _, err := tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, coinbaseData, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("AddBlock: %+v", err)
|
||||
}
|
||||
|
||||
block1Hash, _, err := tc.AddBlock([]*externalapi.DomainHash{fundingBlockHash}, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("AddBlock: %+v", err)
|
||||
}
|
||||
|
||||
block1, err := tc.GetBlock(block1Hash)
|
||||
if err != nil {
|
||||
t.Fatalf("GetBlock: %+v", err)
|
||||
}
|
||||
|
||||
block1Tx := block1.Transactions[0]
|
||||
block1TxOut := block1Tx.Outputs[0]
|
||||
selectedUTXOs := []*libkaspawallet.UTXO{
|
||||
{
|
||||
Outpoint: &externalapi.DomainOutpoint{
|
||||
TransactionID: *consensushashing.TransactionID(block1.Transactions[0]),
|
||||
Index: 0,
|
||||
},
|
||||
UTXOEntry: utxo.NewUTXOEntry(block1TxOut.Value, block1TxOut.ScriptPublicKey, true, 0),
|
||||
DerivationPath: path,
|
||||
},
|
||||
}
|
||||
|
||||
unsignedTransaction, err := libkaspawallet.CreateUnsignedTransaction(publicKeys, minimumSignatures,
|
||||
[]*libkaspawallet.Payment{{
|
||||
Address: address,
|
||||
Amount: 10,
|
||||
}}, selectedUTXOs)
|
||||
if err != nil {
|
||||
t.Fatalf("CreateUnsignedTransactions: %+v", err)
|
||||
}
|
||||
|
||||
return unsignedTransaction, mnemonics, params, teardown
|
||||
}
|
||||
@@ -159,10 +159,6 @@ func (s *server) updateAddressesAndLastUsedIndexes(requestedAddressSet walletAdd
|
||||
continue
|
||||
}
|
||||
|
||||
if walletAddress.cosignerIndex != s.keysFile.CosignerIndex {
|
||||
continue
|
||||
}
|
||||
|
||||
s.addressSet[entry.Address] = walletAddress
|
||||
|
||||
if walletAddress.keyChain == libkaspawallet.ExternalKeychain {
|
||||
|
||||
@@ -3,10 +3,11 @@ package main
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/kaspanet/kaspad/cmd/kaspawallet/keys"
|
||||
"github.com/kaspanet/kaspad/cmd/kaspawallet/libkaspawallet"
|
||||
"github.com/kaspanet/kaspad/cmd/kaspawallet/utils"
|
||||
"os"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -24,6 +25,9 @@ func dumpUnencryptedData(conf *dumpUnencryptedDataConfig) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(conf.Password) == 0 {
|
||||
conf.Password = keys.GetPassword("Password:")
|
||||
}
|
||||
mnemonics, err := keysFile.DecryptMnemonics(conf.Password)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -5,12 +5,13 @@ import (
|
||||
"crypto/rand"
|
||||
"crypto/subtle"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/kaspanet/kaspad/cmd/kaspawallet/libkaspawallet"
|
||||
"github.com/kaspanet/kaspad/cmd/kaspawallet/utils"
|
||||
"github.com/kaspanet/kaspad/domain/dagconfig"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/tyler-smith/go-bip39"
|
||||
"os"
|
||||
)
|
||||
|
||||
// CreateMnemonics generates `numKeys` number of mnemonics.
|
||||
@@ -52,8 +53,8 @@ func encryptedMnemonicExtendedPublicKeyPairs(params *dagconfig.Params, mnemonics
|
||||
password := []byte(cmdLinePassword)
|
||||
if len(password) == 0 {
|
||||
|
||||
password = getPassword("Enter password for the key file:")
|
||||
confirmPassword := getPassword("Confirm password:")
|
||||
password = []byte(GetPassword("Enter password for the key file:"))
|
||||
confirmPassword := []byte(GetPassword("Confirm password:"))
|
||||
|
||||
if subtle.ConstantTimeCompare(password, confirmPassword) != 1 {
|
||||
return nil, nil, errors.New("Passwords are not identical")
|
||||
|
||||
@@ -2,14 +2,15 @@ package keys
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"golang.org/x/term"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/term"
|
||||
)
|
||||
|
||||
// getPassword was adapted from https://gist.github.com/jlinoff/e8e26b4ffa38d379c7f1891fd174a6d0#file-getpassword2-go
|
||||
func getPassword(prompt string) []byte {
|
||||
// GetPassword was adapted from https://gist.github.com/jlinoff/e8e26b4ffa38d379c7f1891fd174a6d0#file-getpassword2-go
|
||||
func GetPassword(prompt string) string {
|
||||
// Get the initial state of the terminal.
|
||||
initialTermState, e1 := term.GetState(int(syscall.Stdin))
|
||||
if e1 != nil {
|
||||
@@ -37,5 +38,5 @@ func getPassword(prompt string) []byte {
|
||||
// Stop looking for ^C on the channel.
|
||||
signal.Stop(c)
|
||||
|
||||
return p
|
||||
return string(p)
|
||||
}
|
||||
|
||||
@@ -206,16 +206,13 @@ func (d *File) LastUsedInternalIndex() uint32 {
|
||||
|
||||
// DecryptMnemonics asks the user to enter the password for the private keys and
|
||||
// returns the decrypted private keys.
|
||||
func (d *File) DecryptMnemonics(cmdLinePassword string) ([]string, error) {
|
||||
password := []byte(cmdLinePassword)
|
||||
if len(password) == 0 {
|
||||
password = getPassword("Password:")
|
||||
}
|
||||
func (d *File) DecryptMnemonics(password string) ([]string, error) {
|
||||
passwordBytes := []byte(password)
|
||||
|
||||
var numThreads uint8
|
||||
if len(d.EncryptedMnemonics) > 0 {
|
||||
var err error
|
||||
numThreads, err = d.numThreads(password)
|
||||
numThreads, err = d.numThreads(passwordBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -224,7 +221,7 @@ func (d *File) DecryptMnemonics(cmdLinePassword string) ([]string, error) {
|
||||
privateKeys := make([]string, len(d.EncryptedMnemonics))
|
||||
for i, encryptedPrivateKey := range d.EncryptedMnemonics {
|
||||
var err error
|
||||
privateKeys[i], err = decryptMnemonic(numThreads, encryptedPrivateKey, password)
|
||||
privateKeys[i], err = decryptMnemonic(numThreads, encryptedPrivateKey, passwordBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
61 cmd/kaspawallet/libkaspawallet/converters.go (new file)
@@ -0,0 +1,61 @@
|
||||
package libkaspawallet
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/pb"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/transactionid"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
|
||||
)
|
||||
|
||||
// KaspawalletdUTXOsTolibkaspawalletUTXOs converts a []*pb.UtxosByAddressesEntry to a []*libkaspawallet.UTXO
|
||||
func KaspawalletdUTXOsTolibkaspawalletUTXOs(kaspawalletdUtxoEntires []*pb.UtxosByAddressesEntry) ([]*UTXO, error) {
|
||||
UTXOs := make([]*UTXO, len(kaspawalletdUtxoEntires))
|
||||
for i, entry := range kaspawalletdUtxoEntires {
|
||||
script, err := hex.DecodeString(entry.UtxoEntry.ScriptPublicKey.ScriptPublicKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
transactionID, err := transactionid.FromString(entry.Outpoint.TransactionId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
UTXOs[i] = &UTXO{
|
||||
UTXOEntry: utxo.NewUTXOEntry(
|
||||
entry.UtxoEntry.Amount,
|
||||
&externalapi.ScriptPublicKey{
|
||||
Script: script,
|
||||
Version: uint16(entry.UtxoEntry.ScriptPublicKey.Version),
|
||||
},
|
||||
entry.UtxoEntry.IsCoinbase,
|
||||
entry.UtxoEntry.BlockDaaScore,
|
||||
),
|
||||
Outpoint: &externalapi.DomainOutpoint{
|
||||
TransactionID: *transactionID,
|
||||
Index: entry.Outpoint.Index,
|
||||
},
|
||||
}
|
||||
}
|
||||
return UTXOs, nil
|
||||
}
|
||||
|
||||
// AppMessageUTXOToKaspawalletdUTXO converts an appmessage.UTXOsByAddressesEntry to a pb.UtxosByAddressesEntry
|
||||
func AppMessageUTXOToKaspawalletdUTXO(appUTXOsByAddressesEntry *appmessage.UTXOsByAddressesEntry) *pb.UtxosByAddressesEntry {
|
||||
return &pb.UtxosByAddressesEntry{
|
||||
Outpoint: &pb.Outpoint{
|
||||
TransactionId: appUTXOsByAddressesEntry.Outpoint.TransactionID,
|
||||
Index: appUTXOsByAddressesEntry.Outpoint.Index,
|
||||
},
|
||||
UtxoEntry: &pb.UtxoEntry{
|
||||
Amount: appUTXOsByAddressesEntry.UTXOEntry.Amount,
|
||||
ScriptPublicKey: &pb.ScriptPublicKey{
|
||||
Version: uint32(appUTXOsByAddressesEntry.UTXOEntry.ScriptPublicKey.Version),
|
||||
ScriptPublicKey: appUTXOsByAddressesEntry.UTXOEntry.ScriptPublicKey.Script,
|
||||
},
|
||||
BlockDaaScore: appUTXOsByAddressesEntry.UTXOEntry.BlockDAAScore,
|
||||
IsCoinbase: appUTXOsByAddressesEntry.UTXOEntry.IsCoinbase,
|
||||
},
|
||||
}
|
||||
}
|
||||
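A hedged usage sketch for the new converter: it builds one pb.UtxosByAddressesEntry by hand (every value below is a made-up placeholder, including the all-zero transaction ID and the trivial script hex) and converts it into wallet UTXOs using the field names shown above:

	entries := []*pb.UtxosByAddressesEntry{{
		Outpoint: &pb.Outpoint{
			TransactionId: "0000000000000000000000000000000000000000000000000000000000000000", // placeholder ID
			Index:         0,
		},
		UtxoEntry: &pb.UtxoEntry{
			Amount:          100000000, // 1 KAS in sompi, illustrative only
			ScriptPublicKey: &pb.ScriptPublicKey{Version: 0, ScriptPublicKey: "00"}, // placeholder script hex
			BlockDaaScore:   1000,
			IsCoinbase:      false,
		},
	}}
	utxos, err := libkaspawallet.KaspawalletdUTXOsTolibkaspawalletUTXOs(entries)
	if err != nil {
		return err // malformed hex in the script or transaction ID surfaces here
	}
	// utxos[0].Outpoint and utxos[0].UTXOEntry now carry the same data in domain types.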
@@ -1,13 +1,12 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.12.3
// protoc-gen-go v1.28.0
// protoc v3.17.2
// source: wallet.proto

package protoserialization

import (
proto "github.com/golang/protobuf/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
@@ -21,10 +20,6 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4

type PartiallySignedTransaction struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -3,11 +3,11 @@ package serialization
import (
"math"

"github.com/golang/protobuf/proto"
"github.com/kaspanet/kaspad/cmd/kaspawallet/libkaspawallet/serialization/protoserialization"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/subnetworks"
"github.com/pkg/errors"
"google.golang.org/protobuf/proto"
)

// PartiallySignedTransaction is a type that is intended
@@ -34,6 +34,44 @@ type PubKeySignaturePair struct {
Signature []byte
}

// Clone creates a deep-clone of this PartiallySignedTransaction
func (pst *PartiallySignedTransaction) Clone() *PartiallySignedTransaction {
clone := &PartiallySignedTransaction{
Tx: pst.Tx.Clone(),
PartiallySignedInputs: make([]*PartiallySignedInput, len(pst.PartiallySignedInputs)),
}
for i, partiallySignedInput := range pst.PartiallySignedInputs {
clone.PartiallySignedInputs[i] = partiallySignedInput.Clone()
}
return clone
}

// Clone creates a deep-clone of this PartiallySignedInput
func (psi PartiallySignedInput) Clone() *PartiallySignedInput {
clone := &PartiallySignedInput{
PrevOutput: psi.PrevOutput.Clone(),
MinimumSignatures: psi.MinimumSignatures,
PubKeySignaturePairs: make([]*PubKeySignaturePair, len(psi.PubKeySignaturePairs)),
DerivationPath: psi.DerivationPath,
}
for i, pubKeySignaturePair := range psi.PubKeySignaturePairs {
clone.PubKeySignaturePairs[i] = pubKeySignaturePair.Clone()
}
return clone
}

// Clone creates a deep-clone of this PubKeySignaturePair
func (psp PubKeySignaturePair) Clone() *PubKeySignaturePair {
clone := &PubKeySignaturePair{
ExtendedPublicKey: psp.ExtendedPublicKey,
}
if psp.Signature != nil {
clone.Signature = make([]byte, len(psp.Signature))
copy(clone.Signature, psp.Signature)
}
return clone
}

// DeserializePartiallySignedTransaction deserializes a byte slice into PartiallySignedTransaction.
func DeserializePartiallySignedTransaction(serializedPartiallySignedTransaction []byte) (*PartiallySignedTransaction, error) {
protoPartiallySignedTransaction := &protoserialization.PartiallySignedTransaction{}
@@ -50,6 +88,22 @@ func SerializePartiallySignedTransaction(partiallySignedTransaction *PartiallySi
return proto.Marshal(partiallySignedTransactionToProto(partiallySignedTransaction))
}

//DeserializeDomainTransaction Deserialize a Transaction to an *externalapi.DomainTransaction
func DeserializeDomainTransaction(serializedTransactionMessage []byte) (*externalapi.DomainTransaction, error) {
protoTransactionMessage := &protoserialization.TransactionMessage{}
err := proto.Unmarshal(serializedTransactionMessage, protoTransactionMessage)
if err != nil {
return nil, err
}

return transactionFromProto(protoTransactionMessage)
}

// SerializeDomainTransaction Serialize a *externalapi.DomainTransaction
func SerializeDomainTransaction(tx *externalapi.DomainTransaction) ([]byte, error) {
return proto.Marshal(transactionToProto(tx))
}

func partiallySignedTransactionFromProto(protoPartiallySignedTransaction *protoserialization.PartiallySignedTransaction) (*PartiallySignedTransaction, error) {
tx, err := transactionFromProto(protoPartiallySignedTransaction.Tx)
if err != nil {
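SerializeDomainTransaction and DeserializeDomainTransaction give the wallet a way to move a bare transaction across process boundaries without wrapping it in a PartiallySignedTransaction. A minimal round-trip sketch, assuming tx is a populated *externalapi.DomainTransaction:

	serialized, err := serialization.SerializeDomainTransaction(tx)
	if err != nil {
		return err
	}
	deserialized, err := serialization.DeserializeDomainTransaction(serialized)
	if err != nil {
		return err
	}
	// If the round trip preserved every field, tx and deserialized should
	// describe the same transaction (e.g. identical transaction IDs).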
@@ -40,7 +40,6 @@ func Sign(params *dagconfig.Params, mnemonics []string, serializedPSTx []byte, e
return nil, err
}
}

return serialization.SerializePartiallySignedTransaction(partiallySignedTransaction)
}

@@ -159,6 +159,7 @@ func createUnsignedTransaction(
Tx: domainTransaction,
PartiallySignedInputs: partiallySignedInputs,
}, nil

}

// IsTransactionFullySigned returns whether the transaction is fully signed and ready to broadcast.
@@ -194,10 +195,14 @@ func ExtractTransaction(partiallySignedTransactionBytes []byte, ecdsa bool) (*ex
return nil, err
}

return extractTransaction(partiallySignedTransaction, ecdsa)
return ExtractTransactionDeserialized(partiallySignedTransaction, ecdsa)
}

func extractTransaction(partiallySignedTransaction *serialization.PartiallySignedTransaction, ecdsa bool) (*externalapi.DomainTransaction, error) {
// ExtractTransactionDeserialized does the same thing ExtractTransaction does, only receives the PartiallySignedTransaction
// in an already deserialized format
func ExtractTransactionDeserialized(partiallySignedTransaction *serialization.PartiallySignedTransaction, ecdsa bool) (
*externalapi.DomainTransaction, error) {

for i, input := range partiallySignedTransaction.PartiallySignedInputs {
isMultisig := len(input.PubKeySignaturePairs) > 1
scriptBuilder := txscript.NewScriptBuilder()
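ExtractTransaction keeps its previous behavior and now delegates to the exported ExtractTransactionDeserialized, which helps callers that already hold a deserialized PartiallySignedTransaction. A hedged sketch of the two equivalent call paths (pstxBytes is assumed to be serialized partially-signed-transaction bytes):

	// From serialized bytes, as before.
	domainTx, err := libkaspawallet.ExtractTransaction(pstxBytes, false)
	if err != nil {
		return err
	}

	// From an already-deserialized value, skipping the extra decode step.
	pstx, err := serialization.DeserializePartiallySignedTransaction(pstxBytes)
	if err != nil {
		return err
	}
	domainTxAgain, err := libkaspawallet.ExtractTransactionDeserialized(pstx, false)
	if err != nil {
		return err
	}
	// domainTx and domainTxAgain represent the same extracted transaction.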
@@ -2,6 +2,13 @@ package libkaspawallet_test

import (
"fmt"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"github.com/pkg/errors"
"strings"
"testing"

"github.com/kaspanet/kaspad/cmd/kaspawallet/libkaspawallet"
"github.com/kaspanet/kaspad/domain/consensus"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
@@ -10,8 +17,6 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
"github.com/kaspanet/kaspad/util"
"strings"
"testing"
)

func forSchnorrAndECDSA(t *testing.T, testFunc func(t *testing.T, ecdsa bool)) {
@@ -106,7 +111,7 @@ func TestMultisig(t *testing.T) {
Amount: 10,
}}, selectedUTXOs)
if err != nil {
t.Fatalf("CreateUnsignedTransaction: %+v", err)
t.Fatalf("CreateUnsignedTransactions: %+v", err)
}

isFullySigned, err := libkaspawallet.IsTransactionFullySigned(unsignedTransaction)
@@ -267,7 +272,7 @@ func TestP2PK(t *testing.T) {
Amount: 10,
}}, selectedUTXOs)
if err != nil {
t.Fatalf("CreateUnsignedTransaction: %+v", err)
t.Fatalf("CreateUnsignedTransactions: %+v", err)
}

isFullySigned, err := libkaspawallet.IsTransactionFullySigned(unsignedTransaction)
@@ -309,3 +314,239 @@ func TestP2PK(t *testing.T) {
})
})
}

func TestMaxSompi(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
params := &consensusConfig.Params
cfg := *consensusConfig
cfg.BlockCoinbaseMaturity = 0
cfg.PreDeflationaryPhaseBaseSubsidy = 20e6 * constants.SompiPerKaspa
cfg.HF1DAAScore = cfg.GenesisBlock.Header.DAAScore() + 10
tc, teardown, err := consensus.NewFactory().NewTestConsensus(&cfg, "TestMaxSompi")
if err != nil {
t.Fatalf("Error setting up tc: %+v", err)
}
defer teardown(false)

const numKeys = 1
mnemonics := make([]string, numKeys)
publicKeys := make([]string, numKeys)
for i := 0; i < numKeys; i++ {
var err error
mnemonics[i], err = libkaspawallet.CreateMnemonic()
if err != nil {
t.Fatalf("CreateMnemonic: %+v", err)
}

publicKeys[i], err = libkaspawallet.MasterPublicKeyFromMnemonic(&cfg.Params, mnemonics[i], false)
if err != nil {
t.Fatalf("MasterPublicKeyFromMnemonic: %+v", err)
}
}

const minimumSignatures = 1
path := "m/1/2/3"
address, err := libkaspawallet.Address(params, publicKeys, minimumSignatures, path, false)
if err != nil {
t.Fatalf("Address: %+v", err)
}

scriptPublicKey, err := txscript.PayToAddrScript(address)
if err != nil {
t.Fatalf("PayToAddrScript: %+v", err)
}

coinbaseData := &externalapi.DomainCoinbaseData{
ScriptPublicKey: scriptPublicKey,
ExtraData: nil,
}

fundingBlock1Hash, _, err := tc.AddBlock([]*externalapi.DomainHash{cfg.GenesisHash}, coinbaseData, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}

fundingBlock2Hash, _, err := tc.AddBlock([]*externalapi.DomainHash{fundingBlock1Hash}, coinbaseData, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}

fundingBlock3Hash, _, err := tc.AddBlock([]*externalapi.DomainHash{fundingBlock2Hash}, coinbaseData, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}

fundingBlock4Hash, _, err := tc.AddBlock([]*externalapi.DomainHash{fundingBlock3Hash}, coinbaseData, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}

fundingBlock2, err := tc.GetBlock(fundingBlock2Hash)
if err != nil {
t.Fatalf("GetBlock: %+v", err)
}

fundingBlock3, err := tc.GetBlock(fundingBlock3Hash)
if err != nil {
t.Fatalf("GetBlock: %+v", err)
}

fundingBlock4, err := tc.GetBlock(fundingBlock4Hash)
if err != nil {
t.Fatalf("GetBlock: %+v", err)
}

block1Hash, _, err := tc.AddBlock([]*externalapi.DomainHash{fundingBlock4Hash}, nil, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}

block1, err := tc.GetBlock(block1Hash)
if err != nil {
t.Fatalf("GetBlock: %+v", err)
}

txOut1 := fundingBlock2.Transactions[0].Outputs[0]
txOut2 := fundingBlock3.Transactions[0].Outputs[0]
txOut3 := fundingBlock4.Transactions[0].Outputs[0]
txOut4 := block1.Transactions[0].Outputs[0]
selectedUTXOsForTxWithLargeInputAmount := []*libkaspawallet.UTXO{
{
Outpoint: &externalapi.DomainOutpoint{
TransactionID: *consensushashing.TransactionID(fundingBlock2.Transactions[0]),
Index: 0,
},
UTXOEntry: utxo.NewUTXOEntry(txOut1.Value, txOut1.ScriptPublicKey, true, 0),
DerivationPath: path,
},
{
Outpoint: &externalapi.DomainOutpoint{
TransactionID: *consensushashing.TransactionID(fundingBlock3.Transactions[0]),
Index: 0,
},
UTXOEntry: utxo.NewUTXOEntry(txOut2.Value, txOut2.ScriptPublicKey, true, 0),
DerivationPath: path,
},
}

unsignedTxWithLargeInputAmount, err := libkaspawallet.CreateUnsignedTransaction(publicKeys, minimumSignatures,
[]*libkaspawallet.Payment{{
Address: address,
Amount: 10,
}}, selectedUTXOsForTxWithLargeInputAmount)
if err != nil {
t.Fatalf("CreateUnsignedTransactions: %+v", err)
}

signedTxWithLargeInputAmount, err := libkaspawallet.Sign(params, mnemonics, unsignedTxWithLargeInputAmount, false)
if err != nil {
t.Fatalf("Sign: %+v", err)
}

txWithLargeInputAmount, err := libkaspawallet.ExtractTransaction(signedTxWithLargeInputAmount, false)
if err != nil {
t.Fatalf("ExtractTransaction: %+v", err)
}

_, virtualChangeSet, err := tc.AddBlock([]*externalapi.DomainHash{block1Hash}, nil, []*externalapi.DomainTransaction{txWithLargeInputAmount})
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}

addedUTXO1 := &externalapi.DomainOutpoint{
TransactionID: *consensushashing.TransactionID(txWithLargeInputAmount),
Index: 0,
}
if virtualChangeSet.VirtualUTXODiff.ToAdd().Contains(addedUTXO1) {
t.Fatalf("Transaction was accepted in the DAG")
}

selectedUTXOsForTxWithLargeInputAndOutputAmount := []*libkaspawallet.UTXO{
{
Outpoint: &externalapi.DomainOutpoint{
TransactionID: *consensushashing.TransactionID(fundingBlock4.Transactions[0]),
Index: 0,
},
UTXOEntry: utxo.NewUTXOEntry(txOut3.Value, txOut3.ScriptPublicKey, true, 0),
DerivationPath: path,
},
{
Outpoint: &externalapi.DomainOutpoint{
TransactionID: *consensushashing.TransactionID(block1.Transactions[0]),
Index: 0,
},
UTXOEntry: utxo.NewUTXOEntry(txOut4.Value, txOut4.ScriptPublicKey, true, 0),
DerivationPath: path,
},
}

unsignedTxWithLargeInputAndOutputAmount, err := libkaspawallet.CreateUnsignedTransaction(publicKeys, minimumSignatures,
[]*libkaspawallet.Payment{{
Address: address,
Amount: 22e6 * constants.SompiPerKaspa,
}}, selectedUTXOsForTxWithLargeInputAndOutputAmount)
if err != nil {
t.Fatalf("CreateUnsignedTransactions: %+v", err)
}

signedTxWithLargeInputAndOutputAmount, err := libkaspawallet.Sign(params, mnemonics, unsignedTxWithLargeInputAndOutputAmount, false)
if err != nil {
t.Fatalf("Sign: %+v", err)
}

txWithLargeInputAndOutputAmount, err := libkaspawallet.ExtractTransaction(signedTxWithLargeInputAndOutputAmount, false)
if err != nil {
t.Fatalf("ExtractTransaction: %+v", err)
}

_, _, err = tc.AddBlock([]*externalapi.DomainHash{block1Hash}, nil, []*externalapi.DomainTransaction{txWithLargeInputAndOutputAmount})
if !errors.Is(err, ruleerrors.ErrBadTxOutValue) {
t.Fatalf("AddBlock: %+v", err)
}

tip := block1Hash
for {
tip, _, err = tc.AddBlock([]*externalapi.DomainHash{tip}, nil, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}

selectedTip, err := tc.GetVirtualSelectedParent()
if err != nil {
t.Fatalf("GetVirtualDAAScore: %+v", err)
}

daaScore, err := tc.DAABlocksStore().DAAScore(tc.DatabaseContext(), model.NewStagingArea(), selectedTip)
if err != nil {
t.Fatalf("DAAScore: %+v", err)
}

if daaScore >= cfg.HF1DAAScore {
break
}
}

tip, virtualChangeSet, err = tc.AddBlock([]*externalapi.DomainHash{tip}, nil, []*externalapi.DomainTransaction{txWithLargeInputAndOutputAmount})
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}

addedUTXO2 := &externalapi.DomainOutpoint{
TransactionID: *consensushashing.TransactionID(txWithLargeInputAndOutputAmount),
Index: 0,
}

if !virtualChangeSet.VirtualUTXODiff.ToAdd().Contains(addedUTXO2) {
t.Fatalf("txWithLargeInputAndOutputAmount weren't accepted in the DAG")
}

_, virtualChangeSet, err = tc.AddBlock([]*externalapi.DomainHash{tip}, nil, []*externalapi.DomainTransaction{txWithLargeInputAmount})
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}

if !virtualChangeSet.VirtualUTXODiff.ToAdd().Contains(addedUTXO1) {
t.Fatalf("txWithLargeInputAmount wasn't accepted in the DAG")
}
})
}
@@ -29,6 +29,8 @@ func main() {
err = dumpUnencryptedData(config.(*dumpUnencryptedDataConfig))
case startDaemonSubCmd:
err = startDaemon(config.(*startDaemonConfig))
case sweepSubCmd:
err = sweep(config.(*sweepConfig))
default:
err = errors.Errorf("Unknown sub-command '%s'\n", subCmd)
}
@@ -3,6 +3,7 @@ package main
import (
"context"
"fmt"

"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/client"
"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/pb"
"github.com/kaspanet/kaspad/cmd/kaspawallet/keys"
@@ -31,35 +32,45 @@ func send(conf *sendConfig) error {
defer cancel()

sendAmountSompi := uint64(conf.SendAmount * constants.SompiPerKaspa)
createUnsignedTransactionResponse, err := daemonClient.CreateUnsignedTransaction(ctx, &pb.CreateUnsignedTransactionRequest{
Address: conf.ToAddress,
Amount: sendAmountSompi,
})
createUnsignedTransactionsResponse, err :=
daemonClient.CreateUnsignedTransactions(ctx, &pb.CreateUnsignedTransactionsRequest{
Address: conf.ToAddress,
Amount: sendAmountSompi,
})
if err != nil {
return err
}

if len(conf.Password) == 0 {
conf.Password = keys.GetPassword("Password:")
}
mnemonics, err := keysFile.DecryptMnemonics(conf.Password)
if err != nil {
return err
}

signedTransaction, err := libkaspawallet.Sign(conf.NetParams(), mnemonics, createUnsignedTransactionResponse.UnsignedTransaction, keysFile.ECDSA)
signedTransactions := make([][]byte, len(createUnsignedTransactionsResponse.UnsignedTransactions))
for i, unsignedTransaction := range createUnsignedTransactionsResponse.UnsignedTransactions {
signedTransaction, err := libkaspawallet.Sign(conf.NetParams(), mnemonics, unsignedTransaction, keysFile.ECDSA)
if err != nil {
return err
}
signedTransactions[i] = signedTransaction
}

if len(signedTransactions) > 1 {
fmt.Printf("Broadcasting %d transactions\n", len(signedTransactions))
}

response, err := daemonClient.Broadcast(ctx, &pb.BroadcastRequest{Transactions: signedTransactions})
if err != nil {
return err
}

ctx2, cancel2 := context.WithTimeout(context.Background(), daemonTimeout)
defer cancel2()
broadcastResponse, err := daemonClient.Broadcast(ctx2, &pb.BroadcastRequest{
Transaction: signedTransaction,
})
if err != nil {
return err
fmt.Println("Transactions were sent successfully")
fmt.Println("Transaction ID(s): ")
for _, txID := range response.TxIDs {
fmt.Printf("\t%s\n", txID)
}

fmt.Println("Transaction was sent successfully")
fmt.Printf("Transaction ID: \t%s\n", broadcastResponse.TxID)

return nil
}
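Because the old and new lines are interleaved above, here is the new send path condensed into one hedged sketch: every unsigned transaction returned by CreateUnsignedTransactions is signed individually and the whole batch goes out in a single BroadcastRequest (daemonClient, ctx, conf, keysFile and mnemonics are assumed to be set up exactly as in the hunk above):

	signedTransactions := make([][]byte, len(createUnsignedTransactionsResponse.UnsignedTransactions))
	for i, unsignedTransaction := range createUnsignedTransactionsResponse.UnsignedTransactions {
		signed, err := libkaspawallet.Sign(conf.NetParams(), mnemonics, unsignedTransaction, keysFile.ECDSA)
		if err != nil {
			return err
		}
		signedTransactions[i] = signed
	}
	response, err := daemonClient.Broadcast(ctx, &pb.BroadcastRequest{Transactions: signedTransactions})
	if err != nil {
		return err
	}
	for _, txID := range response.TxIDs {
		fmt.Printf("\t%s\n", txID) // one ID per broadcast transaction
	}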
@@ -1,21 +1,16 @@
package main

import (
"encoding/hex"
"fmt"
"io/ioutil"
"strings"

"github.com/kaspanet/kaspad/cmd/kaspawallet/keys"
"github.com/kaspanet/kaspad/cmd/kaspawallet/libkaspawallet"
"github.com/pkg/errors"
"io/ioutil"
"strings"
)

func sign(conf *signConfig) error {
keysFile, err := keys.ReadKeysFile(conf.NetParams(), conf.KeysFile)
if err != nil {
return err
}

if conf.Transaction == "" && conf.TransactionFile == "" {
return errors.Errorf("Either --transaction or --transaction-file is required")
}
@@ -23,41 +18,60 @@ func sign(conf *signConfig) error {
return errors.Errorf("Both --transaction and --transaction-file cannot be passed at the same time")
}

transactionHex := conf.Transaction
if conf.TransactionFile != "" {
transactionHexBytes, err := ioutil.ReadFile(conf.TransactionFile)
if err != nil {
return errors.Wrapf(err, "Could not read hex from %s", conf.TransactionFile)
}
transactionHex = strings.TrimSpace(string(transactionHexBytes))
}

partiallySignedTransaction, err := hex.DecodeString(transactionHex)
keysFile, err := keys.ReadKeysFile(conf.NetParams(), conf.KeysFile)
if err != nil {
return err
}

if len(conf.Password) == 0 {
conf.Password = keys.GetPassword("Password:")
}
privateKeys, err := keysFile.DecryptMnemonics(conf.Password)
if err != nil {
return err
}

updatedPartiallySignedTransaction, err := libkaspawallet.Sign(conf.NetParams(), privateKeys, partiallySignedTransaction, keysFile.ECDSA)
transactionsHex := conf.Transaction
if conf.TransactionFile != "" {
transactionHexBytes, err := ioutil.ReadFile(conf.TransactionFile)
if err != nil {
return errors.Wrapf(err, "Could not read hex from %s", conf.TransactionFile)
}
transactionsHex = strings.TrimSpace(string(transactionHexBytes))
}
partiallySignedTransactions, err := decodeTransactionsFromHex(transactionsHex)
if err != nil {
return err
}

isFullySigned, err := libkaspawallet.IsTransactionFullySigned(updatedPartiallySignedTransaction)
if err != nil {
return err
updatedPartiallySignedTransactions := make([][]byte, len(partiallySignedTransactions))
for i, partiallySignedTransaction := range partiallySignedTransactions {
updatedPartiallySignedTransactions[i], err =
libkaspawallet.Sign(conf.NetParams(), privateKeys, partiallySignedTransaction, keysFile.ECDSA)
if err != nil {
return err
}
}

if isFullySigned {
areAllTransactionsFullySigned := true
for _, updatedPartiallySignedTransaction := range updatedPartiallySignedTransactions {
// This is somewhat redundant to check all transactions, but we do that just-in-case
isFullySigned, err := libkaspawallet.IsTransactionFullySigned(updatedPartiallySignedTransaction)
if err != nil {
return err
}
if !isFullySigned {
areAllTransactionsFullySigned = false
}
}

if areAllTransactionsFullySigned {
fmt.Println("The transaction is signed and ready to broadcast")
} else {
fmt.Println("Successfully signed transaction")
}

fmt.Printf("Transaction: %x\n", updatedPartiallySignedTransaction)
fmt.Println("Transaction: ")
fmt.Println(encodeTransactionsToHex(updatedPartiallySignedTransactions))
return nil
}
242
cmd/kaspawallet/sweep.go
Normal file
@@ -0,0 +1,242 @@
package main

import (
"context"
"encoding/hex"
"fmt"

"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/client"
"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/pb"
"github.com/kaspanet/kaspad/cmd/kaspawallet/libkaspawallet"
"github.com/kaspanet/kaspad/cmd/kaspawallet/libkaspawallet/serialization"
"github.com/kaspanet/kaspad/cmd/kaspawallet/utils"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"github.com/kaspanet/kaspad/domain/consensus/utils/subnetworks"
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/domain/miningmanager/mempool"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/txmass"
"github.com/pkg/errors"
)

const feePerInput = 10000

func sweep(conf *sweepConfig) error {

privateKeyBytes, err := hex.DecodeString(conf.PrivateKey)
if err != nil {
return err
}

publicKeybytes, err := libkaspawallet.PublicKeyFromPrivateKey(privateKeyBytes)
if err != nil {
return err
}

addressPubKey, err := util.NewAddressPublicKey(publicKeybytes, conf.NetParams().Prefix)
if err != nil {
return err
}

address, err := util.DecodeAddress(addressPubKey.String(), conf.NetParams().Prefix)
if err != nil {
return err
}

daemonClient, tearDown, err := client.Connect(conf.DaemonAddress)
if err != nil {
return err
}
defer tearDown()

ctx, cancel := context.WithTimeout(context.Background(), daemonTimeout)
defer cancel()

getExternalSpendableUTXOsResponse, err := daemonClient.GetExternalSpendableUTXOs(ctx, &pb.GetExternalSpendableUTXOsRequest{
Address: address.String(),
})
if err != nil {
return err
}

UTXOs, err := libkaspawallet.KaspawalletdUTXOsTolibkaspawalletUTXOs(getExternalSpendableUTXOsResponse.Entries)
if err != nil {
return err
}

paymentAmount := uint64(0)

if len(UTXOs) == 0 {
return errors.Errorf("Could not find any spendable UTXOs in %s", addressPubKey)
}

for _, UTXO := range UTXOs {
paymentAmount = paymentAmount + UTXO.UTXOEntry.Amount()
}

newAddressResponse, err := daemonClient.NewAddress(ctx, &pb.NewAddressRequest{})
if err != nil {
return err
}

toAddress, err := util.DecodeAddress(newAddressResponse.Address, conf.ActiveNetParams.Prefix)
if err != nil {
return err
}

splitTransactions, err := createSplitTransactionsWithSchnorrPrivteKey(conf.NetParams(), UTXOs, toAddress, feePerInput)
if err != nil {
return err
}

serializedSplitTransactions, err := signWithSchnorrPrivateKey(conf.NetParams(), privateKeyBytes, splitTransactions)
if err != nil {
return err
}

fmt.Println("\nSweeping...")
fmt.Println("\tFrom:\t", addressPubKey)
fmt.Println("\tTo:\t", toAddress)

response, err := daemonClient.Broadcast(ctx, &pb.BroadcastRequest{
IsDomain: true,
Transactions: serializedSplitTransactions,
})
if err != nil {
return err
}

totalExtracted := uint64(0)

fmt.Println("\nTransaction ID(s):")
for i, txID := range response.TxIDs {
fmt.Printf("\t%s\n", txID)
fmt.Println("\tSwept:\t", utils.FormatKas(splitTransactions[i].Outputs[0].Value), " KAS")
totalExtracted = totalExtracted + splitTransactions[i].Outputs[0].Value
}

fmt.Println("\nTotal Funds swept (including transaction fees):")
fmt.Println("\t", utils.FormatKas(totalExtracted), " KAS")

return nil
}

func newDummyTransaction() *externalapi.DomainTransaction {
return &externalapi.DomainTransaction{
Version: constants.MaxTransactionVersion,
Inputs: make([]*externalapi.DomainTransactionInput, 0), //we create empty inputs
LockTime: 0,
Outputs: make([]*externalapi.DomainTransactionOutput, 1), // we should always have 1 output to the toAdress
SubnetworkID: subnetworks.SubnetworkIDNative,
Gas: 0,
Payload: nil,
}
}

func createSplitTransactionsWithSchnorrPrivteKey(
params *dagconfig.Params,
selectedUTXOs []*libkaspawallet.UTXO,
toAddress util.Address,
feePerInput int) ([]*externalapi.DomainTransaction, error) {

var splitTransactions []*externalapi.DomainTransaction

extraMass := uint64(7000) // Account for future signatures.

massCalculater := txmass.NewCalculator(params.MassPerTxByte, params.MassPerScriptPubKeyByte, params.MassPerSigOp)

scriptPublicKey, err := txscript.PayToAddrScript(toAddress)
if err != nil {
return nil, err
}

totalSplitAmount := uint64(0)

lastValidTx := newDummyTransaction()
currentTx := newDummyTransaction() //i.e. the tested tx

//loop through utxos commit segments that don't violate max mass
for i, currentUTXO := range selectedUTXOs {

totalSplitAmount = totalSplitAmount + currentUTXO.UTXOEntry.Amount()

currentTx.Inputs = append(
currentTx.Inputs,
&externalapi.DomainTransactionInput{
PreviousOutpoint: *currentUTXO.Outpoint,
UTXOEntry: utxo.NewUTXOEntry(
currentUTXO.UTXOEntry.Amount(),
currentUTXO.UTXOEntry.ScriptPublicKey(),
false,
constants.UnacceptedDAAScore,
),
SigOpCount: 1,
},
)

currentTx.Outputs[0] = &externalapi.DomainTransactionOutput{
Value: totalSplitAmount - uint64(len(currentTx.Inputs)*feePerInput),
ScriptPublicKey: scriptPublicKey,
}

if massCalculater.CalculateTransactionMass(currentTx)+extraMass >= mempool.MaximumStandardTransactionMass {

//in this loop we assume a transaction with one input and one output cannot violate max transaction mass, hence a sanity check.
if len(currentTx.Inputs) == 1 {
return nil, errors.Errorf("transaction with one input and one output violates transaction mass")
}

splitTransactions = append(splitTransactions, lastValidTx)
totalSplitAmount = 0
lastValidTx = newDummyTransaction()
currentTx = newDummyTransaction()
continue
}

//Special case, end of inputs, with no violation, where we can assign currentTX to split and break
if i == len(selectedUTXOs)-1 {
splitTransactions = append(splitTransactions, currentTx)
break

}

lastValidTx = currentTx.Clone()
currentTx.Outputs = make([]*externalapi.DomainTransactionOutput, 1)

}
return splitTransactions, nil
}

func signWithSchnorrPrivateKey(params *dagconfig.Params, privateKeyBytes []byte, domainTransactions []*externalapi.DomainTransaction) ([][]byte, error) {

schnorrkeyPair, err := secp256k1.DeserializeSchnorrPrivateKeyFromSlice(privateKeyBytes)
if err != nil {
return nil, err
}

serializedDomainTransactions := make([][]byte, len(domainTransactions))

for i1, domainTransaction := range domainTransactions {

sighashReusedValues := &consensushashing.SighashReusedValues{}

for i2, input := range domainTransaction.Inputs {
signature, err := txscript.SignatureScript(domainTransaction, i2, consensushashing.SigHashAll, schnorrkeyPair, sighashReusedValues)
if err != nil {
return nil, err
}
input.SignatureScript = signature
}
serializedDomainTransactions[i1], err = serialization.SerializeDomainTransaction(domainTransaction)
if err != nil {
return nil, err
}
}

return serializedDomainTransactions, nil
}
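The split loop above charges a flat feePerInput for every consumed UTXO and closes a transaction whenever the projected mass, plus the 7000-gram allowance for future signatures, would reach mempool.MaximumStandardTransactionMass. A small worked sketch of the output-value arithmetic only, with made-up input amounts:

	// Illustrative amounts in sompi; not taken from the code above.
	inputAmounts := []uint64{150000, 230000, 95000}
	const feePerInput = 10000

	totalSplitAmount := uint64(0)
	for i, amount := range inputAmounts {
		totalSplitAmount += amount
		numInputs := uint64(i + 1)
		// Mirrors: Value: totalSplitAmount - uint64(len(currentTx.Inputs)*feePerInput)
		outputValue := totalSplitAmount - numInputs*feePerInput
		fmt.Printf("after input %d the single output pays %d sompi\n", numInputs, outputValue)
	}
	// With all three inputs the output pays 475000 - 30000 = 445000 sompi.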
33
cmd/kaspawallet/transactions_hex_encoding.go
Normal file
@@ -0,0 +1,33 @@
package main

import (
"encoding/hex"
"strings"
)

// hexTransactionsSeparator is used to mark the end of one transaction and the beginning of the next one.
// We use a separator that is not in the hex alphabet, but which will not split selection with a double click
const hexTransactionsSeparator = "_"

func encodeTransactionsToHex(transactions [][]byte) string {
transactionsInHex := make([]string, len(transactions))
for i, transaction := range transactions {
transactionsInHex[i] = hex.EncodeToString(transaction)
}
return strings.Join(transactionsInHex, hexTransactionsSeparator)
}

func decodeTransactionsFromHex(transactionsHex string) ([][]byte, error) {
splitTransactionsHexes := strings.Split(transactionsHex, hexTransactionsSeparator)
transactions := make([][]byte, len(splitTransactionsHexes))

var err error
for i, transactionHex := range splitTransactionsHexes {
transactions[i], err = hex.DecodeString(transactionHex)
if err != nil {
return nil, err
}
}

return transactions, nil
}
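Since "_" is not a hex digit, joining and splitting are lossless. A small round-trip sketch (it would have to live in the same main package, because both helpers are unexported):

	original := [][]byte{{0x01, 0x02}, {0xab, 0xcd, 0xef}}
	encoded := encodeTransactionsToHex(original) // "0102_abcdef"
	decoded, err := decodeTransactionsFromHex(encoded)
	if err != nil {
		return err
	}
	// decoded holds the same two byte slices as original.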
16
cmd/kaspawallet/utils/format_kas.go
Normal file
@@ -0,0 +1,16 @@
package utils

import (
"fmt"

"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
)

// FormatKas takes the amount of sompis as uint64, and returns amount of KAS with 8 decimal places
func FormatKas(amount uint64) string {
res := " "
if amount > 0 {
res = fmt.Sprintf("%19.8f", float64(amount)/constants.SompiPerKaspa)
}
return res
}
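FormatKas renders a sompi amount as KAS with 8 decimal places, right-aligned in a 19-character field so the amounts line up in the sweep output; a zero amount comes back as a single space. A brief usage sketch:

	fmt.Println(utils.FormatKas(123456789)) // 1.23456789, padded on the left to width 19
	fmt.Println(utils.FormatKas(0))         // a single space, since zero is special-cased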
@@ -1,5 +1,5 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.16-alpine AS build
FROM golang:1.18-alpine AS build

RUN mkdir -p /go/src/github.com/kaspanet/kaspad
Some files were not shown because too many files have changed in this diff.