Compare commits

...

34 Commits

Author SHA1 Message Date
msutton
242513c5a2 Transaction hashing outputs to be used as reference for rust imp 2022-09-06 14:42:40 +03:00
Ori Newman
f6d46fd23f Update changelog.txt for v0.12.5 (#2130)
* Update changelog.txt for v0.12.5

* Update version and add #2131 to changelog.txt
2022-08-28 13:53:33 +03:00
Svarog
2a7e03e232 Kaspawallet.send(): Make separate context for Broadcast, to prolong timeout (#2131)
* Kaspawallet.send(): Make separate context for Broadcast, to prolong timeout

* Amend comment

Co-authored-by: Michael Sutton <mikisiton2@gmail.com>
2022-08-28 13:17:58 +03:00
Ori Newman
3286a7d010 Call update pruning point if required on resolve virtual (#2129)
* Call UpdatePruningPointIfRequired when resolving virtual

* Don't sanity check pruning point UTXO set if it's genesis

* Add UpdatePruningPointIfRequired on init
2022-08-24 13:32:39 +03:00
Ori Newman
aabbc741d7 Add UseExistingChangeAddress option to the wallet (#2127) 2022-08-21 17:12:05 +03:00
Elichai Turkel
20b7ab89f9 Add tests for hash writers (#2120)
Co-authored-by: Michael Sutton <mikisiton2@gmail.com>
2022-08-21 09:08:20 +03:00
Ori Newman
10f1e7e3f4 Replace daglabs's dnsseeder with Wolfie's (#2119)
Co-authored-by: Michael Sutton <mikisiton2@gmail.com>
2022-08-21 03:10:59 +03:00
Ori Newman
d941c73701 Change testnet dnsseeder (#2126)
Co-authored-by: Michael Sutton <mikisiton2@gmail.com>
2022-08-21 02:32:40 +03:00
Michael Sutton
3f80638c86 Add missing locks to notification listener modifications (#2124) 2022-08-21 01:26:03 +03:00
Michael Sutton
266ec6c270 Calculate pruning point utxo set from acceptance data (#2123)
* Calc new pruning point utxo diff through chain acceptance data

* Fix daa score to chain block
2022-08-17 15:56:53 +03:00
Michael Sutton
9ee409afaa Fix RPC client memory/goroutine leak (#2122)
* Showcase the RPC client memory leak

* Fixes an RPC client goroutine leak by properly closing the underlying connection
2022-08-09 16:30:24 +03:00
Michael Sutton
715cb3b1ac Fix a subtle lock sync issue in consensus insert block (#2121)
* Manage the locks more tightly and fix a slight and rare sync issue

* Extract virtualResolveChunk constant
2022-08-02 10:33:39 +03:00
D-Stacks
eb693c4a86 Mempool: Retrieve stable state of the mempool. Optimize get mempool entries by addresses (#2111)
* fix mempool accessing, rewrite get_mempool_entries_by_addresses

* fix counter, add verbose

* fmt

* addresses as string

* Define error in case utxoEntry is missing.

* fix error variable to string

* stop tests from failing (see in code comment)

* access both pools in the same state via parameters

* get rid of todo message

* fmt - very important!

* perf: scriptpublickey in mempool, no txscript.

* address review

* fmt fix

* mixed up isOrphan bool, pass tests now

* do map preallocation in mempoolbyaddresses

* no preallocation for orphanpool sending.

Co-authored-by: Ori Newman <orinewman1@gmail.com>
2022-07-26 23:06:08 +03:00
Aleoami
7a61c637b0 Add RPC timeout parameter to wallet daemon (#2104)
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2022-07-21 13:55:12 +03:00
Michael Sutton
c7bd84ef9d Bump version and changelog for v0.12.4 (#2118)
* Bump version and changelog
2022-07-17 03:07:51 +03:00
Svarog
b26b9f6c4b Implement multi-layer auto-compound (#2115)
Co-authored-by: Michael Sutton <mikisiton2@gmail.com>
2022-07-15 14:12:34 +03:00
Michael Sutton
1c9bb54cc2 Crucial fix for the UTXO difference mechanism (#2114)
* Illustrate the bug through prints

* Change consensus API to a single ResolveVirtual call

* nil changeset is not expected when err=nil

* Fixes a deep bug in the resolve virtual process

* Be more defensive at resolving virtual when adding a block

* When finally resolved, set virtual parents properly

* Return nil changeset when nothing happened

* Make sure the block at the split point is reversed to new chain as well

* bump to version 0.12.4

* Avoid locking consensus twice in the common case of adding block with updateVirtual=true

* check err

* Parents must be picked first before set as virtual parents

* Keep the flag for tracking virtual state, since tip sorting perf is high with many tips

* Improve and clarify resolve virtual tests

* Addressing minor review comments

* Fixed a bug in the reported virtual changeset, and modified the test to verify it

* Addressing review comments
2022-07-15 12:35:20 +03:00
Michael Sutton
b9093d59eb Bump version and add change log (#2110) 2022-06-29 16:24:56 +03:00
D-Stacks
18d000f625 Logger: change log level for "Couldn't find UTXO entry" to debug (and ignore message on orphan transactions) (#2108)
* Logger: change log level for "Couldn't find UTXO entry" to debug

* ignore error for orphans

* continue also if orphans

* check if blocks exist

* safe rpc mode

* limit window size

* verify block status depending on context

* allow a 2 factor gap in expected mergeset size

Co-authored-by: msutton <mikisiton2@gmail.com>
2022-06-29 15:54:40 +03:00
Ori Newman
c5aade7e7f Update changelog to v0.12.2 (#2091) 2022-06-17 18:24:21 +03:00
Ori Newman
d4b741fd7c Bump version to v0.12.2 (#2086) 2022-06-16 12:54:38 +03:00
D-Stacks
74a4f927e9 RPC: include orphans into mempool entries (#2046)
* RPC: include orphans into mempool entries

* no need for + 1

* give request option to choose mempool pool(s)

* add to wallet, fix bg

* use orphanpool rpc to test for orphans

* fix fmt

* fix crash when querying orphan pool in get_mempool_entries

* pass the tests, fix fromAppMessage bug

* Update config_test.go

don't think this is needed

* needed for tests to pass

* inverse to transactionpoolfilter, cut down code to two ifs.

* fmt

* update test to true false, forgot one includetransactionpool renaming

* update and simplify get_mempool_entry handler

* comment outdated

* i think we usually use make instead of var.

* Fix some leftovers of includeTransactionPool

Co-authored-by: Ori Newman <orinewman1@gmail.com>
2022-06-15 23:46:44 +03:00
biospb
847aafc91f Fix RPC connections counting (#2026)
* Fix RPC connections counting

* show incoming connections count

* Use the flag RPCMaxClients instead of the const RPCMaxInboundConnections

* Add grpc server name to log message

Co-authored-by: Michael Sutton <mikisiton2@gmail.com>
2022-06-15 22:49:36 +03:00
Aleoami
c87e541570 Clarify wallet message concerning a wallet daemon sync state (#2045)
* updated and clarified the wallet daemon synchronization state log message

* Update address.go

* Update create_unsigned_transaction.go

* Update external_spendable_utxos.go

* Update sync.go

Co-authored-by: Michael Sutton <mikisiton2@gmail.com>
2022-06-15 16:05:01 +03:00
Aleoami
2ea1c4f922 Change the way the miner executable reports execution errors (closes issue #1677) (#2048)
* fix changed the way the miner executable reports execution errors

* Update main.go

* Update main.go

* Update main.go

* Update main.go

* Rolled back

Co-authored-by: Ori Newman <orinewman1@gmail.com>
Co-authored-by: Michael Sutton <mikisiton2@gmail.com>
2022-06-15 14:05:05 +03:00
Aleoami
5e9c28b77b fix the confusing term in the getHashrate() (#2081)
Co-authored-by: Michael Sutton <mikisiton2@gmail.com>
2022-06-15 03:24:10 +03:00
Michael Sutton
d957a6d93a Fix UTXO diff child error (#2084)
* Avoid creating the chain iterator if high hash is actually low hash

* Always use iterator in nextPruningPointAndCandidateByBlockHash

* Initial failing test

* Minimal failing test + some comments

* go lint

* Add simpler tests with two different errors

* Missed some error checks

* Minor

* A workaround patch for preventing the missing utxo child diff bug

* Make sure we fully resolve virtual

* Move ResolveVirtualWithMaxParam to test consensus

* Mark virtual not updated and loop in batches

* Refactor: remove VirtualChangeSet from functions return values

* Remove workaround comments

* If block has no body, virtual is still considered updated

* Remove special error ErrReverseUTXODiffsUTXODiffChildNotFound

Co-authored-by: Ori Newman <orinewman1@gmail.com>
2022-06-15 02:52:14 +03:00
Michael Sutton
b2648aa5bd Fix not in selected chain crash (#2082)
* Avoid creating the chain iterator if high hash is actually low hash

* Always use iterator in nextPruningPointAndCandidateByBlockHash

Co-authored-by: Ori Newman <orinewman1@gmail.com>
2022-06-15 02:32:47 +03:00
D-Stacks
3908f274ae [Ready] - RPC & UtxoIndex: keep track of, query and test circulating supply. (#2070)
* start

* pass tests, (cannot get before put!) and clean up.

* add rpc call.

* create and pass tests, fix bugs.  fully implement rpc

* As always fmt

* remove old test

* clean up proto comment

* put the logger back in place.

* revert back to 10 sec limit.

* migration, change utxoChanged removal to whole utxoEntryPair, add methods to update circulating supply, initialize circulating supply from reset.

* Update utxoindex.go

* ad

* rename to max, change comment

* one more total to max

Co-authored-by: Ori Newman <orinewman1@gmail.com>
2022-06-05 16:42:08 +03:00
Aleoami
fa7ea121ff fix kaspawallet help messages, clarify sweep command help string (#2067)
* fix kaspawallet help messages, clarify sweep command help string

* fix sweep cmd help string phrasing

Co-authored-by: Ori Newman <orinewman1@gmail.com>
2022-06-05 15:39:56 +03:00
biospb
24848da895 Wallet parse/send/create commands improvement (#2024)
* Fix wallet parsing of multi tx data

* Output wallet msgs to stderr when creating/signing tx

* Fix go fmt

Co-authored-by: Ori Newman <orinewman1@gmail.com>
2022-06-03 19:06:51 +03:00
Michael Sutton
b200b77541 Use chunks for GetBlocksAcceptanceData calls in order to avoid blocking consensus for too long (#2075) 2022-06-03 18:25:02 +03:00
Michael Sutton
d50ad0667c Unite multiple GetBlockAcceptanceData consensus calls to one (#2074)
* Unite multiple `GetBlockAcceptanceData` consensus calls to one

* Variable rename
2022-06-01 12:19:21 +03:00
Ori Newman
5cea285960 Update many-small-chains-and-one-big-chain DAG to not fail merge depth limit (#2072)
Co-authored-by: Michael Sutton <mikisiton2@gmail.com>
2022-05-31 16:13:43 +03:00
135 changed files with 3537 additions and 1451 deletions

View File

@@ -161,6 +161,8 @@ const (
CmdNewBlockTemplateNotificationMessage
CmdGetMempoolEntriesByAddressesRequestMessage
CmdGetMempoolEntriesByAddressesResponseMessage
CmdGetCoinSupplyRequestMessage
CmdGetCoinSupplyResponseMessage
)
// ProtocolMessageCommandToString maps all MessageCommands to their string representation
@@ -294,8 +296,10 @@ var RPCMessageCommandToString = map[MessageCommand]string{
CmdNotifyNewBlockTemplateRequestMessage: "NotifyNewBlockTemplateRequest",
CmdNotifyNewBlockTemplateResponseMessage: "NotifyNewBlockTemplateResponse",
CmdNewBlockTemplateNotificationMessage: "NewBlockTemplateNotification",
CmdGetMempoolEntriesByAddressesRequestMessage: "CmdGetMempoolEntriesByAddressesRequest",
CmdGetMempoolEntriesByAddressesResponseMessage: "CmdGetMempoolEntriesByAddressesResponse",
CmdGetMempoolEntriesByAddressesRequestMessage: "GetMempoolEntriesByAddressesRequest",
CmdGetMempoolEntriesByAddressesResponseMessage: "GetMempoolEntriesByAddressesResponse",
CmdGetCoinSupplyRequestMessage: "GetCoinSupplyRequest",
CmdGetCoinSupplyResponseMessage: "GetCoinSupplyResponse",
}
// Message is an interface that describes a kaspa message. A type that

View File

@@ -0,0 +1,40 @@
package appmessage
// GetCoinSupplyRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetCoinSupplyRequestMessage struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *GetCoinSupplyRequestMessage) Command() MessageCommand {
return CmdGetCoinSupplyRequestMessage
}
// NewGetCoinSupplyRequestMessage returns a instance of the message
func NewGetCoinSupplyRequestMessage() *GetCoinSupplyRequestMessage {
return &GetCoinSupplyRequestMessage{}
}
// GetCoinSupplyResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetCoinSupplyResponseMessage struct {
baseMessage
MaxSompi uint64
CirculatingSompi uint64
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *GetCoinSupplyResponseMessage) Command() MessageCommand {
return CmdGetCoinSupplyResponseMessage
}
// NewGetCoinSupplyResponseMessage returns a instance of the message
func NewGetCoinSupplyResponseMessage(maxSompi uint64, circulatingSompi uint64) *GetCoinSupplyResponseMessage {
return &GetCoinSupplyResponseMessage{
MaxSompi: maxSompi,
CirculatingSompi: circulatingSompi,
}
}
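
The new GetCoinSupply appmessage pair carries MaxSompi and CirculatingSompi. Below is a minimal sketch, using only the constructors and fields added above, of how a caller might turn a response into a mined-supply percentage; the numeric figures are hypothetical stand-ins for a real RPC round trip.

package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/app/appmessage"
)

func main() {
	// Hypothetical figures standing in for a real GetCoinSupply RPC response.
	response := appmessage.NewGetCoinSupplyResponseMessage(2100000000000000, 1200000000000000)
	percentMined := float64(response.CirculatingSompi) / float64(response.MaxSompi) * 100
	fmt.Printf("circulating: %d sompi (%.2f%% of max)\n", response.CirculatingSompi, percentMined)
}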

View File

@@ -4,6 +4,8 @@ package appmessage
// its respective RPC message
type GetMempoolEntriesRequestMessage struct {
baseMessage
IncludeOrphanPool bool
FilterTransactionPool bool
}
// Command returns the protocol command string for the message
@@ -12,8 +14,11 @@ func (msg *GetMempoolEntriesRequestMessage) Command() MessageCommand {
}
// NewGetMempoolEntriesRequestMessage returns a instance of the message
func NewGetMempoolEntriesRequestMessage() *GetMempoolEntriesRequestMessage {
return &GetMempoolEntriesRequestMessage{}
func NewGetMempoolEntriesRequestMessage(includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntriesRequestMessage {
return &GetMempoolEntriesRequestMessage{
IncludeOrphanPool: includeOrphanPool,
FilterTransactionPool: filterTransactionPool,
}
}
// GetMempoolEntriesResponseMessage is an appmessage corresponding to
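
A short usage sketch for the extended constructor follows. Under the assumed flag semantics, which match how HandleGetMempoolEntries reads these flags further down in this changeset, IncludeOrphanPool=true pulls orphan-pool entries in as well, while FilterTransactionPool=false keeps regular transaction-pool entries in the result.

package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/app/appmessage"
)

func main() {
	// Ask for both pools: include orphans, do not filter the transaction pool out.
	request := appmessage.NewGetMempoolEntriesRequestMessage(true, false)
	fmt.Println(request.IncludeOrphanPool, request.FilterTransactionPool) // true false
}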

View File

@@ -11,7 +11,9 @@ type MempoolEntryByAddress struct {
// its respective RPC message
type GetMempoolEntriesByAddressesRequestMessage struct {
baseMessage
Addresses []string
Addresses []string
IncludeOrphanPool bool
FilterTransactionPool bool
}
// Command returns the protocol command string for the message
@@ -20,9 +22,11 @@ func (msg *GetMempoolEntriesByAddressesRequestMessage) Command() MessageCommand
}
// NewGetMempoolEntriesByAddressesRequestMessage returns a instance of the message
func NewGetMempoolEntriesByAddressesRequestMessage(addresses []string) *GetMempoolEntriesByAddressesRequestMessage {
func NewGetMempoolEntriesByAddressesRequestMessage(addresses []string, includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntriesByAddressesRequestMessage {
return &GetMempoolEntriesByAddressesRequestMessage{
Addresses: addresses,
Addresses: addresses,
IncludeOrphanPool: includeOrphanPool,
FilterTransactionPool: filterTransactionPool,
}
}

View File

@@ -4,7 +4,9 @@ package appmessage
// its respective RPC message
type GetMempoolEntryRequestMessage struct {
baseMessage
TxID string
TxID string
IncludeOrphanPool bool
FilterTransactionPool bool
}
// Command returns the protocol command string for the message
@@ -13,8 +15,12 @@ func (msg *GetMempoolEntryRequestMessage) Command() MessageCommand {
}
// NewGetMempoolEntryRequestMessage returns a instance of the message
func NewGetMempoolEntryRequestMessage(txID string) *GetMempoolEntryRequestMessage {
return &GetMempoolEntryRequestMessage{TxID: txID}
func NewGetMempoolEntryRequestMessage(txID string, includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntryRequestMessage {
return &GetMempoolEntryRequestMessage{
TxID: txID,
IncludeOrphanPool: includeOrphanPool,
FilterTransactionPool: filterTransactionPool,
}
}
// GetMempoolEntryResponseMessage is an appmessage corresponding to
@@ -30,6 +36,7 @@ type GetMempoolEntryResponseMessage struct {
type MempoolEntry struct {
Fee uint64
Transaction *RPCTransaction
IsOrphan bool
}
// Command returns the protocol command string for the message
@@ -38,11 +45,12 @@ func (msg *GetMempoolEntryResponseMessage) Command() MessageCommand {
}
// NewGetMempoolEntryResponseMessage returns a instance of the message
func NewGetMempoolEntryResponseMessage(fee uint64, transaction *RPCTransaction) *GetMempoolEntryResponseMessage {
func NewGetMempoolEntryResponseMessage(fee uint64, transaction *RPCTransaction, isOrphan bool) *GetMempoolEntryResponseMessage {
return &GetMempoolEntryResponseMessage{
Entry: &MempoolEntry{
Fee: fee,
Transaction: transaction,
IsOrphan: isOrphan,
},
}
}
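
Similarly for single-entry lookups: a sketch constructing the extended request and reading the new IsOrphan flag off the response. The transaction ID is a placeholder and the nil RPCTransaction is purely for illustration.

package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/app/appmessage"
)

func main() {
	// Placeholder transaction ID; search both pools without filtering the
	// transaction pool out, matching the extended constructor signature.
	request := appmessage.NewGetMempoolEntryRequestMessage("placeholder-txid", true, false)

	// A response as the new constructor would build it for an orphan entry;
	// the nil RPCTransaction stands in for a populated one.
	response := appmessage.NewGetMempoolEntryResponseMessage(1000, nil, true)
	fmt.Println(request.TxID, response.Entry.Fee, response.Entry.IsOrphan)
}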

View File

@@ -107,7 +107,7 @@ func (f *FlowContext) AddBlock(block *externalapi.DomainBlock) error {
return protocolerrors.Errorf(false, "cannot add header only block")
}
_, err := f.Domain().Consensus().ValidateAndInsertBlock(block, true)
err := f.Domain().Consensus().ValidateAndInsertBlock(block, true)
if err != nil {
if errors.As(err, &ruleerrors.RuleError{}) {
log.Warnf("Validation failed for block %s: %s", consensushashing.BlockHash(block), err)

View File

@@ -141,7 +141,7 @@ func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (bool, er
}
delete(f.orphans, orphanHash)
_, err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock, true)
err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock, true)
if err != nil {
if errors.As(err, &ruleerrors.RuleError{}) {
log.Warnf("Validation failed for orphan block %s: %s", orphanHash, err)

View File

@@ -5,7 +5,6 @@ import (
"github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
@@ -34,7 +33,7 @@ func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *
if err != nil {
return err
}
if !blockInfo.Exists {
if !blockInfo.HasHeader() {
return protocolerrors.Errorf(true, "received IBDBlockLocator "+
"with an unknown targetHash %s", targetHash)
}
@@ -47,7 +46,7 @@ func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *
}
// The IBD block locator is checking only existing blocks with bodies.
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
if !blockInfo.HasBody() {
continue
}

View File

@@ -4,7 +4,6 @@ import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)
@@ -32,7 +31,7 @@ func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute
if err != nil {
return err
}
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
if !blockInfo.HasBody() {
return protocolerrors.Errorf(true, "block %s not found (v5)", hash)
}
block, err := context.Domain().Consensus().GetBlock(hash)

View File

@@ -5,7 +5,6 @@ import (
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)
@@ -33,7 +32,7 @@ func HandleRelayBlockRequests(context RelayBlockRequestsContext, incomingRoute *
if err != nil {
return err
}
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
if !blockInfo.HasBody() {
return protocolerrors.Errorf(true, "block %s not found", hash)
}
block, err := context.Domain().Consensus().GetBlock(hash)

View File

@@ -321,7 +321,7 @@ func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock,
func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, error) {
blockHash := consensushashing.BlockHash(block)
_, err := flow.Domain().Consensus().ValidateAndInsertBlock(block, true)
err := flow.Domain().Consensus().ValidateAndInsertBlock(block, true)
if err != nil {
if !errors.As(err, &ruleerrors.RuleError{}) {
return nil, errors.Wrapf(err, "failed to process block %s", blockHash)

View File

@@ -47,9 +47,9 @@ func (flow *handleRequestAnticoneFlow) start() error {
// GetAnticone is expected to be called by the syncee for getting the anticone of the header selected tip
// intersected by past of relayed block, and is thus expected to be bounded by mergeset limit since
// we relay blocks only if they enter virtual's mergeset. We add 2 for a small margin error.
// we relay blocks only if they enter virtual's mergeset. We add a 2 factor for possible sync gaps.
blockHashes, err := flow.Domain().Consensus().GetAnticone(blockHash, contextHash,
flow.Config().ActiveNetParams.MergeSetSizeLimit+2)
flow.Config().ActiveNetParams.MergeSetSizeLimit*2)
if err != nil {
return protocolerrors.Wrap(true, err, "Failed querying anticone")
}
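
For scale, a tiny illustration of the changed bound, assuming a hypothetical merge set size limit of 180: the old code allowed a +2 margin, the new code allows a full 2x factor for possible sync gaps.

package main

import "fmt"

func main() {
	const mergeSetSizeLimit = 180 // hypothetical value, for illustration only
	fmt.Println(mergeSetSizeLimit+2, mergeSetSizeLimit*2) // old bound 182, new bound 360
}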

View File

@@ -46,7 +46,25 @@ func (flow *handleRequestHeadersFlow) start() error {
}
log.Debugf("Received requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash)
isLowSelectedAncestorOfHigh, err := flow.Domain().Consensus().IsInSelectedParentChainOf(lowHash, highHash)
consensus := flow.Domain().Consensus()
lowHashInfo, err := consensus.GetBlockInfo(lowHash)
if err != nil {
return err
}
if !lowHashInfo.HasHeader() {
return protocolerrors.Errorf(true, "Block %s does not exist", lowHash)
}
highHashInfo, err := consensus.GetBlockInfo(highHash)
if err != nil {
return err
}
if !highHashInfo.HasHeader() {
return protocolerrors.Errorf(true, "Block %s does not exist", highHash)
}
isLowSelectedAncestorOfHigh, err := consensus.IsInSelectedParentChainOf(lowHash, highHash)
if err != nil {
return err
}
@@ -62,7 +80,7 @@ func (flow *handleRequestHeadersFlow) start() error {
// in order to avoid locking the consensus for too long
// maxBlocks MUST be >= MergeSetSizeLimit + 1
const maxBlocks = 1 << 10
blockHashes, _, err := flow.Domain().Consensus().GetHashesBetween(lowHash, highHash, maxBlocks)
blockHashes, _, err := consensus.GetHashesBetween(lowHash, highHash, maxBlocks)
if err != nil {
return err
}
@@ -70,7 +88,7 @@ func (flow *handleRequestHeadersFlow) start() error {
blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
for i, blockHash := range blockHashes {
blockHeader, err := flow.Domain().Consensus().GetBlockHeader(blockHash)
blockHeader, err := consensus.GetBlockHeader(blockHash)
if err != nil {
return err
}

View File

@@ -488,7 +488,7 @@ func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlo
log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
return nil
}
_, err = consensus.ValidateAndInsertBlock(block, false)
err = consensus.ValidateAndInsertBlock(block, false)
if err != nil {
if !errors.As(err, &ruleerrors.RuleError{}) {
return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
@@ -654,7 +654,7 @@ func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHa
return err
}
_, err = flow.Domain().Consensus().ValidateAndInsertBlock(block, false)
err = flow.Domain().Consensus().ValidateAndInsertBlock(block, false)
if err != nil {
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
log.Debugf("Skipping IBD Block %s as it has already been added to the DAG", blockHash)
@@ -686,37 +686,28 @@ func (flow *handleIBDFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock
}
func (flow *handleIBDFlow) resolveVirtual(estimatedVirtualDAAScoreTarget uint64) error {
virtualDAAScoreStart, err := flow.Domain().Consensus().GetVirtualDAAScore()
err := flow.Domain().Consensus().ResolveVirtual(func(virtualDAAScoreStart uint64, virtualDAAScore uint64) {
var percents int
if estimatedVirtualDAAScoreTarget-virtualDAAScoreStart <= 0 {
percents = 100
} else {
percents = int(float64(virtualDAAScore-virtualDAAScoreStart) / float64(estimatedVirtualDAAScoreTarget-virtualDAAScoreStart) * 100)
}
if percents < 0 {
percents = 0
} else if percents > 100 {
percents = 100
}
log.Infof("Resolving virtual. Estimated progress: %d%%", percents)
})
if err != nil {
return err
}
for i := 0; ; i++ {
if i%10 == 0 {
virtualDAAScore, err := flow.Domain().Consensus().GetVirtualDAAScore()
if err != nil {
return err
}
var percents int
if estimatedVirtualDAAScoreTarget-virtualDAAScoreStart <= 0 {
percents = 100
} else {
percents = int(float64(virtualDAAScore-virtualDAAScoreStart) / float64(estimatedVirtualDAAScoreTarget-virtualDAAScoreStart) * 100)
}
log.Infof("Resolving virtual. Estimated progress: %d%%", percents)
}
_, isCompletelyResolved, err := flow.Domain().Consensus().ResolveVirtual()
if err != nil {
return err
}
if isCompletelyResolved {
log.Infof("Resolved virtual")
err = flow.OnNewBlockTemplate()
if err != nil {
return err
}
return nil
}
log.Infof("Resolved virtual")
err = flow.OnNewBlockTemplate()
if err != nil {
return err
}
return nil
}
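
The new resolveVirtual progress callback reports completion as a clamped percentage of DAA-score progress. A standalone sketch of that arithmetic; the function name and example scores are illustrative, not from the diff.

package main

import "fmt"

// progressPercent mirrors the percentage arithmetic in the new resolveVirtual
// progress callback: linear progress between the starting and target virtual
// DAA scores, with the integer result clamped to the 0-100 range.
func progressPercent(start, current, target uint64) int {
	if target <= start {
		return 100
	}
	percents := int(float64(current-start) / float64(target-start) * 100)
	if percents < 0 {
		return 0
	}
	if percents > 100 {
		return 100
	}
	return percents
}

func main() {
	// Hypothetical DAA scores: halfway from 1,000 toward a target of 3,000.
	fmt.Println(progressPercent(1000, 2000, 3000)) // 50
}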

View File

@@ -280,7 +280,7 @@ func (flow *handleIBDFlow) processBlockWithTrustedData(
blockWithTrustedData.GHOSTDAGData = append(blockWithTrustedData.GHOSTDAGData, appmessage.GHOSTDAGHashPairToDomainGHOSTDAGHashPair(data.GHOSTDAGData[index]))
}
_, err := consensus.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
err := consensus.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
return err
}

View File

@@ -102,7 +102,7 @@ func (flow *handleRelayedTransactionsFlow) requestInvTransactions(
func (flow *handleRelayedTransactionsFlow) isKnownTransaction(txID *externalapi.DomainTransactionID) bool {
// Ask the transaction memory pool if the transaction is known
// to it in any form (main pool or orphan).
if _, ok := flow.Domain().MiningManager().GetTransaction(txID); ok {
if _, _, ok := flow.Domain().MiningManager().GetTransaction(txID, true, true); ok {
return true
}

View File

@@ -30,7 +30,7 @@ func (flow *handleRequestedTransactionsFlow) start() error {
}
for _, transactionID := range msgRequestTransactions.IDs {
tx, ok := flow.Domain().MiningManager().GetTransaction(transactionID)
tx, _, ok := flow.Domain().MiningManager().GetTransaction(transactionID, true, false)
if !ok {
msgTransactionNotFound := appmessage.NewMsgTransactionNotFound(transactionID)
@@ -40,7 +40,6 @@ func (flow *handleRequestedTransactionsFlow) start() error {
}
continue
}
err := flow.outgoingRoute.Enqueue(appmessage.DomainTransactionToMsgTx(tx))
if err != nil {
return err

View File

@@ -223,18 +223,9 @@ func (m *Manager) notifyVirtualSelectedParentChainChanged(virtualChangeSet *exte
onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualSelectedParentChainChanged")
defer onEnd()
listenersThatPropagateSelectedParentChanged :=
m.context.NotificationManager.AllListenersThatPropagateVirtualSelectedParentChainChanged()
if len(listenersThatPropagateSelectedParentChanged) > 0 {
// Generating acceptedTransactionIDs is a heavy operation, so we check if it's needed by any listener.
includeAcceptedTransactionIDs := false
for _, listener := range listenersThatPropagateSelectedParentChanged {
if listener.IncludeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications() {
includeAcceptedTransactionIDs = true
break
}
}
hasListeners, includeAcceptedTransactionIDs := m.context.NotificationManager.HasListenersThatPropagateVirtualSelectedParentChainChanged()
if hasListeners {
notification, err := m.context.ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(
virtualChangeSet.VirtualSelectedParentChainChanges, includeAcceptedTransactionIDs)
if err != nil {

View File

@@ -49,6 +49,7 @@ var handlers = map[appmessage.MessageCommand]handler{
appmessage.CmdEstimateNetworkHashesPerSecondRequestMessage: rpchandlers.HandleEstimateNetworkHashesPerSecond,
appmessage.CmdNotifyVirtualDaaScoreChangedRequestMessage: rpchandlers.HandleNotifyVirtualDaaScoreChanged,
appmessage.CmdNotifyNewBlockTemplateRequestMessage: rpchandlers.HandleNotifyNewBlockTemplate,
appmessage.CmdGetCoinSupplyRequestMessage: rpchandlers.HandleGetCoinSupply,
appmessage.CmdGetMempoolEntriesByAddressesRequestMessage: rpchandlers.HandleGetMempoolEntriesByAddresses,
}

View File

@@ -40,24 +40,39 @@ func (ctx *Context) getAndConvertAcceptedTransactionIDs(selectedParentChainChang
acceptedTransactionIDs := make([]*appmessage.AcceptedTransactionIDs, len(selectedParentChainChanges.Added))
for i, addedChainBlock := range selectedParentChainChanges.Added {
blockAcceptanceData, err := ctx.Domain.Consensus().GetBlockAcceptanceData(addedChainBlock)
const chunk = 1000
position := 0
for position < len(selectedParentChainChanges.Added) {
var chainBlocksChunk []*externalapi.DomainHash
if position+chunk > len(selectedParentChainChanges.Added) {
chainBlocksChunk = selectedParentChainChanges.Added[position:]
} else {
chainBlocksChunk = selectedParentChainChanges.Added[position : position+chunk]
}
// We use chunks in order to avoid blocking consensus for too long
chainBlocksAcceptanceData, err := ctx.Domain.Consensus().GetBlocksAcceptanceData(chainBlocksChunk)
if err != nil {
return nil, err
}
acceptedTransactionIDs[i] = &appmessage.AcceptedTransactionIDs{
AcceptingBlockHash: addedChainBlock.String(),
AcceptedTransactionIDs: nil,
}
for _, blockAcceptanceData := range blockAcceptanceData {
for _, transactionAcceptanceData := range blockAcceptanceData.TransactionAcceptanceData {
if transactionAcceptanceData.IsAccepted {
acceptedTransactionIDs[i].AcceptedTransactionIDs =
append(acceptedTransactionIDs[i].AcceptedTransactionIDs,
consensushashing.TransactionID(transactionAcceptanceData.Transaction).String())
for i, addedChainBlock := range chainBlocksChunk {
chainBlockAcceptanceData := chainBlocksAcceptanceData[i]
acceptedTransactionIDs[position+i] = &appmessage.AcceptedTransactionIDs{
AcceptingBlockHash: addedChainBlock.String(),
AcceptedTransactionIDs: nil,
}
for _, blockAcceptanceData := range chainBlockAcceptanceData {
for _, transactionAcceptanceData := range blockAcceptanceData.TransactionAcceptanceData {
if transactionAcceptanceData.IsAccepted {
acceptedTransactionIDs[position+i].AcceptedTransactionIDs =
append(acceptedTransactionIDs[position+i].AcceptedTransactionIDs,
consensushashing.TransactionID(transactionAcceptanceData.Transaction).String())
}
}
}
}
position += chunk
}
return acceptedTransactionIDs, nil
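
getAndConvertAcceptedTransactionIDs now walks selectedParentChainChanges.Added in fixed-size chunks so that each GetBlocksAcceptanceData call holds consensus only briefly. A generic, self-contained sketch of that slicing pattern; the chunks helper and the string slice are illustrative only.

package main

import "fmt"

// chunks splits a slice into consecutive pieces of at most chunkSize elements,
// the same slicing pattern the diff applies to selectedParentChainChanges.Added
// before each GetBlocksAcceptanceData call.
func chunks(items []string, chunkSize int) [][]string {
	var result [][]string
	for position := 0; position < len(items); position += chunkSize {
		end := position + chunkSize
		if end > len(items) {
			end = len(items)
		}
		result = append(result, items[position:end])
	}
	return result
}

func main() {
	blocks := []string{"a", "b", "c", "d", "e"}
	fmt.Println(chunks(blocks, 2)) // [[a b] [c d] [e]]
}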

View File

@@ -5,6 +5,7 @@ import (
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
"github.com/kaspanet/kaspad/app/appmessage"
@@ -141,16 +142,29 @@ func (nm *NotificationManager) NotifyVirtualSelectedParentChainChanged(
return nil
}
// AllListenersThatPropagateVirtualSelectedParentChainChanged returns true if there's any listener that is
// subscribed to VirtualSelectedParentChainChanged notifications.
func (nm *NotificationManager) AllListenersThatPropagateVirtualSelectedParentChainChanged() []*NotificationListener {
var listenersThatPropagate []*NotificationListener
// HasListenersThatPropagateVirtualSelectedParentChainChanged returns whether there's any listener that is
// subscribed to VirtualSelectedParentChainChanged notifications as well as checks if any such listener requested
// to include AcceptedTransactionIDs.
func (nm *NotificationManager) HasListenersThatPropagateVirtualSelectedParentChainChanged() (hasListeners, hasListenersThatRequireAcceptedTransactionIDs bool) {
nm.RLock()
defer nm.RUnlock()
hasListeners = false
hasListenersThatRequireAcceptedTransactionIDs = false
for _, listener := range nm.listeners {
if listener.propagateVirtualSelectedParentChainChangedNotifications {
listenersThatPropagate = append(listenersThatPropagate, listener)
hasListeners = true
// Generating acceptedTransactionIDs is a heavy operation, so we check if it's needed by any listener.
if listener.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications {
hasListenersThatRequireAcceptedTransactionIDs = true
break
}
}
}
return listenersThatPropagate
return hasListeners, hasListenersThatRequireAcceptedTransactionIDs
}
// NotifyFinalityConflict notifies the notification manager that there's a finality conflict in the DAG
@@ -337,7 +351,11 @@ func (nl *NotificationListener) PropagateFinalityConflictResolvedNotifications()
// to the remote listener for the given addresses. Subsequent calls instruct the listener to
// send UTXOs changed notifications for those addresses along with the old ones. Duplicate addresses
// are ignored.
func (nl *NotificationListener) PropagateUTXOsChangedNotifications(addresses []*UTXOsChangedNotificationAddress) {
func (nm *NotificationManager) PropagateUTXOsChangedNotifications(nl *NotificationListener, addresses []*UTXOsChangedNotificationAddress) {
// Apply a write-lock since the internal listener address map is modified
nm.Lock()
defer nm.Unlock()
if !nl.propagateUTXOsChangedNotifications {
nl.propagateUTXOsChangedNotifications = true
nl.propagateUTXOsChangedNotificationAddresses =
@@ -352,7 +370,11 @@ func (nl *NotificationListener) PropagateUTXOsChangedNotifications(addresses []*
// StopPropagatingUTXOsChangedNotifications instructs the listener to stop sending UTXOs
// changed notifications to the remote listener for the given addresses. Addresses for which
// notifications are not currently sent are ignored.
func (nl *NotificationListener) StopPropagatingUTXOsChangedNotifications(addresses []*UTXOsChangedNotificationAddress) {
func (nm *NotificationManager) StopPropagatingUTXOsChangedNotifications(nl *NotificationListener, addresses []*UTXOsChangedNotificationAddress) {
// Apply a write-lock since the internal listener address map is modified
nm.Lock()
defer nm.Unlock()
if !nl.propagateUTXOsChangedNotifications {
return
}
@@ -378,9 +400,9 @@ func (nl *NotificationListener) convertUTXOChangesToUTXOsChangedNotification(
notification.Added = append(notification.Added, utxosByAddressesEntries...)
}
}
for scriptPublicKeyString, removedOutpoints := range utxoChanges.Removed {
for scriptPublicKeyString, removedPairs := range utxoChanges.Removed {
if listenerAddress, ok := nl.propagateUTXOsChangedNotificationAddresses[scriptPublicKeyString]; ok {
utxosByAddressesEntries := convertUTXOOutpointsToUTXOsByAddressesEntries(listenerAddress.Address, removedOutpoints)
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, removedPairs)
notification.Removed = append(notification.Removed, utxosByAddressesEntries...)
}
}
@@ -391,8 +413,8 @@ func (nl *NotificationListener) convertUTXOChangesToUTXOsChangedNotification(
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, addedPairs)
notification.Added = append(notification.Added, utxosByAddressesEntries...)
}
if removedOutpoints, ok := utxoChanges.Removed[listenerScriptPublicKeyString]; ok {
utxosByAddressesEntries := convertUTXOOutpointsToUTXOsByAddressesEntries(listenerAddress.Address, removedOutpoints)
if removedPairs, ok := utxoChanges.Removed[listenerScriptPublicKeyString]; ok {
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, removedPairs)
notification.Removed = append(notification.Removed, utxosByAddressesEntries...)
}
}
@@ -406,13 +428,13 @@ func (nl *NotificationListener) convertUTXOChangesToUTXOsChangedNotification(
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(addressString, addedPairs)
notification.Added = append(notification.Added, utxosByAddressesEntries...)
}
for scriptPublicKeyString, removedOutpoints := range utxoChanges.Removed {
for scriptPublicKeyString, removedPAirs := range utxoChanges.Removed {
addressString, err := nl.scriptPubKeyStringToAddressString(scriptPublicKeyString)
if err != nil {
return nil, err
}
utxosByAddressesEntries := convertUTXOOutpointsToUTXOsByAddressesEntries(addressString, removedOutpoints)
utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(addressString, removedPAirs)
notification.Removed = append(notification.Removed, utxosByAddressesEntries...)
}
}
@@ -421,7 +443,7 @@ func (nl *NotificationListener) convertUTXOChangesToUTXOsChangedNotification(
}
func (nl *NotificationListener) scriptPubKeyStringToAddressString(scriptPublicKeyString utxoindex.ScriptPublicKeyString) (string, error) {
scriptPubKey := utxoindex.ConvertStringToScriptPublicKey(scriptPublicKeyString)
scriptPubKey := externalapi.NewScriptPublicKeyFromString(string(scriptPublicKeyString))
// ignore error because it is often returned when the script is of unknown type
scriptType, address, err := txscript.ExtractScriptPubKeyAddress(scriptPubKey, nl.params)

View File

@@ -32,22 +32,6 @@ func ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(address string, pair
return utxosByAddressesEntries
}
// convertUTXOOutpointsToUTXOsByAddressesEntries converts
// UTXOOutpoints to a slice of UTXOsByAddressesEntry
func convertUTXOOutpointsToUTXOsByAddressesEntries(address string, outpoints utxoindex.UTXOOutpoints) []*appmessage.UTXOsByAddressesEntry {
utxosByAddressesEntries := make([]*appmessage.UTXOsByAddressesEntry, 0, len(outpoints))
for outpoint := range outpoints {
utxosByAddressesEntries = append(utxosByAddressesEntries, &appmessage.UTXOsByAddressesEntry{
Address: address,
Outpoint: &appmessage.RPCOutpoint{
TransactionID: outpoint.TransactionID.String(),
Index: outpoint.Index,
},
})
}
return utxosByAddressesEntries
}
// ConvertAddressStringsToUTXOsChangedNotificationAddresses converts address strings
// to UTXOsChangedNotificationAddresses
func (ctx *Context) ConvertAddressStringsToUTXOsChangedNotificationAddresses(
@@ -63,7 +47,7 @@ func (ctx *Context) ConvertAddressStringsToUTXOsChangedNotificationAddresses(
if err != nil {
return nil, errors.Errorf("Could not create a scriptPublicKey for address '%s': %s", addressString, err)
}
scriptPublicKeyString := utxoindex.ConvertScriptPublicKeyToString(scriptPublicKey)
scriptPublicKeyString := utxoindex.ScriptPublicKeyString(scriptPublicKey.String())
addresses[i] = &UTXOsChangedNotificationAddress{
Address: addressString,
ScriptPublicKeyString: scriptPublicKeyString,

View File

@@ -9,6 +9,14 @@ import (
// HandleAddPeer handles the respectively named RPC command
func HandleAddPeer(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
if context.Config.SafeRPC {
log.Warn("AddPeer RPC command called while node in safe RPC mode -- ignoring.")
response := appmessage.NewAddPeerResponseMessage()
response.Error =
appmessage.RPCErrorf("AddPeer RPC command called while node in safe RPC mode")
return response, nil
}
AddPeerRequest := request.(*appmessage.AddPeerRequestMessage)
address, err := network.NormalizeAddress(AddPeerRequest.Address, context.Config.ActiveNetParams.DefaultPort)
if err != nil {

View File

@@ -9,6 +9,14 @@ import (
// HandleBan handles the respectively named RPC command
func HandleBan(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
if context.Config.SafeRPC {
log.Warn("Ban RPC command called while node in safe RPC mode -- ignoring.")
response := appmessage.NewBanResponseMessage()
response.Error =
appmessage.RPCErrorf("Ban RPC command called while node in safe RPC mode")
return response, nil
}
banRequest := request.(*appmessage.BanRequestMessage)
ip := net.ParseIP(banRequest.IP)
if ip == nil {

View File

@@ -27,6 +27,27 @@ func HandleEstimateNetworkHashesPerSecond(
}
}
if context.Config.SafeRPC {
const windowSizeLimit = 10000
if windowSize > windowSizeLimit {
response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{}
response.Error =
appmessage.RPCErrorf(
"Requested window size %d is larger than max allowed in RPC safe mode (%d)",
windowSize, windowSizeLimit)
return response, nil
}
}
if uint64(windowSize) > context.Config.ActiveNetParams.PruningDepth() {
response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{}
response.Error =
appmessage.RPCErrorf(
"Requested window size %d is larger than pruning point depth %d",
windowSize, context.Config.ActiveNetParams.PruningDepth())
return response, nil
}
networkHashesPerSecond, err := context.Domain.Consensus().EstimateNetworkHashesPerSecond(startHash, windowSize)
if err != nil {
response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{}

View File

@@ -0,0 +1,29 @@
package rpchandlers
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// HandleGetCoinSupply handles the respectively named RPC command
func HandleGetCoinSupply(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
if !context.Config.UTXOIndex {
errorMessage := &appmessage.GetCoinSupplyResponseMessage{}
errorMessage.Error = appmessage.RPCErrorf("Method unavailable when kaspad is run without --utxoindex")
return errorMessage, nil
}
circulatingSompiSupply, err := context.UTXOIndex.GetCirculatingSompiSupply()
if err != nil {
return nil, err
}
response := appmessage.NewGetCoinSupplyResponseMessage(
constants.MaxSompi,
circulatingSompiSupply,
)
return response, nil
}
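
The handler returns raw sompi amounts. A small sketch of converting the circulating figure to KAS for display, assuming the conventional ratio of 100,000,000 sompi per KAS; the constant is defined locally here and the supply value is hypothetical.

package main

import "fmt"

// sompiPerKaspa is defined locally for this sketch and assumes the conventional
// ratio of 100,000,000 sompi per KAS.
const sompiPerKaspa = 100_000_000

func main() {
	// Hypothetical value standing in for GetCoinSupplyResponseMessage.CirculatingSompi.
	circulatingSompi := uint64(12_345_678_900_000_000)
	fmt.Printf("circulating supply: %.8f KAS\n", float64(circulatingSompi)/float64(sompiPerKaspa))
}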

View File

@@ -16,7 +16,7 @@ func HandleGetInfo(context *rpccontext.Context, _ *router.Router, _ appmessage.M
response := appmessage.NewGetInfoResponseMessage(
context.NetAdapter.ID().String(),
uint64(context.Domain.MiningManager().TransactionCount()),
uint64(context.Domain.MiningManager().TransactionCount(true, false)),
version.Version(),
context.Config.UTXOIndex,
context.ProtocolManager.Context().HasPeers() && isNearlySynced,

View File

@@ -7,19 +7,40 @@ import (
)
// HandleGetMempoolEntries handles the respectively named RPC command
func HandleGetMempoolEntries(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
transactions := context.Domain.MiningManager().AllTransactions()
entries := make([]*appmessage.MempoolEntry, 0, len(transactions))
for _, transaction := range transactions {
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
if err != nil {
return nil, err
func HandleGetMempoolEntries(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
getMempoolEntriesRequest := request.(*appmessage.GetMempoolEntriesRequestMessage)
entries := make([]*appmessage.MempoolEntry, 0)
transactionPoolTransactions, orphanPoolTransactions := context.Domain.MiningManager().AllTransactions(!getMempoolEntriesRequest.FilterTransactionPool, getMempoolEntriesRequest.IncludeOrphanPool)
if !getMempoolEntriesRequest.FilterTransactionPool {
for _, transaction := range transactionPoolTransactions {
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
if err != nil {
return nil, err
}
entries = append(entries, &appmessage.MempoolEntry{
Fee: transaction.Fee,
Transaction: rpcTransaction,
IsOrphan: false,
})
}
}
if getMempoolEntriesRequest.IncludeOrphanPool {
for _, transaction := range orphanPoolTransactions {
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
if err != nil {
return nil, err
}
entries = append(entries, &appmessage.MempoolEntry{
Fee: transaction.Fee,
Transaction: rpcTransaction,
IsOrphan: true,
})
}
entries = append(entries, &appmessage.MempoolEntry{
Fee: transaction.Fee,
Transaction: rpcTransaction,
})
}
return appmessage.NewGetMempoolEntriesResponseMessage(entries), nil

View File

@@ -3,8 +3,8 @@ package rpchandlers
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util"
)
@@ -12,15 +12,20 @@ import (
// HandleGetMempoolEntriesByAddresses handles the respectively named RPC command
func HandleGetMempoolEntriesByAddresses(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
transactions := context.Domain.MiningManager().AllTransactions()
getMempoolEntriesByAddressesRequest := request.(*appmessage.GetMempoolEntriesByAddressesRequestMessage)
mempoolEntriesByAddresses := make([]*appmessage.MempoolEntryByAddress, 0)
sendingInTransactionPool, receivingInTransactionPool, sendingInOrphanPool, receivingInOrphanPool, err := context.Domain.MiningManager().GetTransactionsByAddresses(!getMempoolEntriesByAddressesRequest.FilterTransactionPool, getMempoolEntriesByAddressesRequest.IncludeOrphanPool)
if err != nil {
return nil, err
}
for _, addressString := range getMempoolEntriesByAddressesRequest.Addresses {
_, err := util.DecodeAddress(addressString, context.Config.ActiveNetParams.Prefix)
address, err := util.DecodeAddress(addressString, context.Config.NetParams().Prefix)
if err != nil {
errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{}
errorMessage := &appmessage.GetMempoolEntriesByAddressesResponseMessage{}
errorMessage.Error = appmessage.RPCErrorf("Could not decode address '%s': %s", addressString, err)
return errorMessage, nil
}
@@ -28,68 +33,88 @@ func HandleGetMempoolEntriesByAddresses(context *rpccontext.Context, _ *router.R
sending := make([]*appmessage.MempoolEntry, 0)
receiving := make([]*appmessage.MempoolEntry, 0)
for _, transaction := range transactions {
scriptPublicKey, err := txscript.PayToAddrScript(address)
if err != nil {
errorMessage := &appmessage.GetMempoolEntriesByAddressesResponseMessage{}
errorMessage.Error = appmessage.RPCErrorf("Could not extract scriptPublicKey from address '%s': %s", addressString, err)
return errorMessage, nil
}
for i, input := range transaction.Inputs {
// TODO: Fix this
if input.UTXOEntry == nil {
log.Errorf("Couldn't find UTXO entry for input %d in mempool transaction %s. This is a bug and should be fixed.", i, consensushashing.TransactionID(transaction))
continue
}
if !getMempoolEntriesByAddressesRequest.FilterTransactionPool {
_, transactionSendingAddress, err := txscript.ExtractScriptPubKeyAddress(
input.UTXOEntry.ScriptPublicKey(),
context.Config.ActiveNetParams)
if transaction, found := sendingInTransactionPool[scriptPublicKey.String()]; found {
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
if err != nil {
return nil, err
}
if addressString == transactionSendingAddress.String() {
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
sending = append(
sending,
&appmessage.MempoolEntry{
Fee: transaction.Fee,
Transaction: rpcTransaction,
},
)
break //one input is enough
}
sending = append(sending, &appmessage.MempoolEntry{
Fee: transaction.Fee,
Transaction: rpcTransaction,
IsOrphan: false,
},
)
}
for _, output := range transaction.Outputs {
_, transactionReceivingAddress, err := txscript.ExtractScriptPubKeyAddress(
output.ScriptPublicKey,
context.Config.ActiveNetParams,
)
if transaction, found := receivingInTransactionPool[scriptPublicKey.String()]; found {
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
if err != nil {
return nil, err
}
if addressString == transactionReceivingAddress.String() {
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
receiving = append(
receiving,
&appmessage.MempoolEntry{
Fee: transaction.Fee,
Transaction: rpcTransaction,
},
)
break //one output is enough
}
}
//Only append mempoolEntriesByAddress, if at least 1 mempoolEntry for the address is found.
//This mimics the behaviour of GetUtxosByAddresses RPC call.
if len(sending) > 0 || len(receiving) > 0 {
mempoolEntriesByAddresses = append(
mempoolEntriesByAddresses,
&appmessage.MempoolEntryByAddress{
Address: addressString,
Sending: sending,
Receiving: receiving,
},
receiving = append(receiving, &appmessage.MempoolEntry{
Fee: transaction.Fee,
Transaction: rpcTransaction,
IsOrphan: false,
},
)
}
}
if getMempoolEntriesByAddressesRequest.IncludeOrphanPool {
if transaction, found := sendingInOrphanPool[scriptPublicKey.String()]; found {
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
if err != nil {
return nil, err
}
sending = append(sending, &appmessage.MempoolEntry{
Fee: transaction.Fee,
Transaction: rpcTransaction,
IsOrphan: true,
},
)
}
if transaction, found := receivingInOrphanPool[scriptPublicKey.String()]; found {
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
if err != nil {
return nil, err
}
receiving = append(receiving, &appmessage.MempoolEntry{
Fee: transaction.Fee,
Transaction: rpcTransaction,
IsOrphan: true,
},
)
}
}
if len(sending) > 0 || len(receiving) > 0 {
mempoolEntriesByAddresses = append(
mempoolEntriesByAddresses,
&appmessage.MempoolEntryByAddress{
Address: address.String(),
Sending: sending,
Receiving: receiving,
},
)
}
}

View File

@@ -3,12 +3,18 @@ package rpchandlers
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/transactionid"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// HandleGetMempoolEntry handles the respectively named RPC command
func HandleGetMempoolEntry(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
transaction := &externalapi.DomainTransaction{}
var found bool
var isOrphan bool
getMempoolEntryRequest := request.(*appmessage.GetMempoolEntryRequestMessage)
transactionID, err := transactionid.FromString(getMempoolEntryRequest.TxID)
@@ -18,17 +24,18 @@ func HandleGetMempoolEntry(context *rpccontext.Context, _ *router.Router, reques
return errorMessage, nil
}
transaction, ok := context.Domain.MiningManager().GetTransaction(transactionID)
if !ok {
mempoolTransaction, isOrphan, found := context.Domain.MiningManager().GetTransaction(transactionID, !getMempoolEntryRequest.FilterTransactionPool, getMempoolEntryRequest.IncludeOrphanPool)
if !found {
errorMessage := &appmessage.GetMempoolEntryResponseMessage{}
errorMessage.Error = appmessage.RPCErrorf("Transaction %s was not found", transactionID)
return errorMessage, nil
}
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(mempoolTransaction)
err = context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
if err != nil {
return nil, err
}
return appmessage.NewGetMempoolEntryResponseMessage(transaction.Fee, rpcTransaction), nil
return appmessage.NewGetMempoolEntryResponseMessage(transaction.Fee, rpcTransaction, isOrphan), nil
}

View File

@@ -26,7 +26,7 @@ func HandleNotifyUTXOsChanged(context *rpccontext.Context, router *router.Router
if err != nil {
return nil, err
}
listener.PropagateUTXOsChangedNotifications(addresses)
context.NotificationManager.PropagateUTXOsChangedNotifications(listener, addresses)
response := appmessage.NewNotifyUTXOsChangedResponseMessage()
return response, nil

View File

@@ -8,6 +8,14 @@ import (
// HandleResolveFinalityConflict handles the respectively named RPC command
func HandleResolveFinalityConflict(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
if context.Config.SafeRPC {
log.Warn("ResolveFinalityConflict RPC command called while node in safe RPC mode -- ignoring.")
response := &appmessage.ResolveFinalityConflictResponseMessage{}
response.Error =
appmessage.RPCErrorf("ResolveFinalityConflict RPC command called while node in safe RPC mode")
return response, nil
}
response := &appmessage.ResolveFinalityConflictResponseMessage{}
response.Error = appmessage.RPCErrorf("not implemented")
return response, nil

View File

@@ -12,6 +12,14 @@ const pauseBeforeShutDown = time.Second
// HandleShutDown handles the respectively named RPC command
func HandleShutDown(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
if context.Config.SafeRPC {
log.Warn("ShutDown RPC command called while node in safe RPC mode -- ignoring.")
response := appmessage.NewShutDownResponseMessage()
response.Error =
appmessage.RPCErrorf("ShutDown RPC command called while node in safe RPC mode")
return response, nil
}
log.Warn("ShutDown RPC called.")
// Wait a second before shutting down, to allow time to return the response to the caller

View File

@@ -26,7 +26,7 @@ func HandleStopNotifyingUTXOsChanged(context *rpccontext.Context, router *router
if err != nil {
return nil, err
}
listener.StopPropagatingUTXOsChangedNotifications(addresses)
context.NotificationManager.StopPropagatingUTXOsChangedNotifications(listener, addresses)
response := appmessage.NewStopNotifyingUTXOsChangedResponseMessage()
return response, nil

View File

@@ -9,6 +9,14 @@ import (
// HandleUnban handles the respectively named RPC command
func HandleUnban(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
if context.Config.SafeRPC {
log.Warn("Unban RPC command called while node in safe RPC mode -- ignoring.")
response := appmessage.NewUnbanResponseMessage()
response.Error =
appmessage.RPCErrorf("Unban RPC command called while node in safe RPC mode")
return response, nil
}
unbanRequest := request.(*appmessage.UnbanRequestMessage)
ip := net.ParseIP(unbanRequest.IP)
if ip == nil {

View File

@@ -1,3 +1,56 @@
Kaspad v0.12.5 - 2022-08-28
===========================
* Add tests for hash writers (#2120)
* Replace daglabs's dnsseeder with Wolfie's (#2119)
* Change testnet dnsseeder (#2126)
* Add RPC timeout parameter to wallet daemon (#2104)
Wallet new features:
* Add UseExistingChangeAddress option to the wallet (#2127)
Bug fixes:
* Call update pruning point if required on resolve virtual or startup (#2129)
* Add missing locks to notification listener modifications (#2124)
* Calculate pruning point utxo set from acceptance data (#2123)
* Fix RPC client memory/goroutine leak (#2122)
* Fix a subtle lock sync issue in consensus insert block (#2121)
* Mempool: Retrieve stable state of the mempool. Optimize get mempool entries by addresses (#2111)
* Kaspawallet.send(): Make separate context for Broadcast, to prolong timeout (#2131)
Kaspad v0.12.4 - 2022-07-17
===========================
* Crucial fix for the UTXO difference mechanism (#2114)
* Implement multi-layer auto-compound (#2115)
Kaspad v0.12.3 - 2022-06-29
===========================
* Fixes a few bugs which can lead to node crashes or out-of-memory errors
Kaspad v0.12.2 - 2022-06-17
===========================
* Clarify wallet message concerning a wallet daemon sync state (#2045)
* Change the way the miner executable reports execution errors (closes issue #1677) (#2048)
* Fix kaspawallet help messages, clarify sweep command help string (#2067)
* Wallet parse/send/create commands improvement (#2024)
* Use chunks for `GetBlocksAcceptanceData` calls in order to avoid blocking consensus for too long (#2075)
* Unite multiple `GetBlockAcceptanceData` consensus calls to one (#2074)
* Update many-small-chains-and-one-big-chain DAG to not fail merge depth limit (#2072)
RPC API Changes:
* RPC: include orphans into mempool entries (#2046)
* RPC & UtxoIndex: keep track of, query and test circulating supply. (#2070)
Bug Fixes:
* Fix RPC connections counting (#2026)
* Fix UTXO diff child error (#2084)
* Fix `not in selected chain` crash (#2082)
Kaspad v0.12.1 - 2022-05-31
===========================

View File

@@ -37,6 +37,7 @@ var commandTypes = []reflect.Type{
reflect.TypeOf(protowire.KaspadMessage_GetUtxosByAddressesRequest{}),
reflect.TypeOf(protowire.KaspadMessage_GetBalanceByAddressRequest{}),
reflect.TypeOf(protowire.KaspadMessage_GetCoinSupplyRequest{}),
reflect.TypeOf(protowire.KaspadMessage_BanRequest{}),
reflect.TypeOf(protowire.KaspadMessage_UnbanRequest{}),

View File

@@ -23,8 +23,7 @@ func main() {
cfg, err := parseConfig()
if err != nil {
fmt.Fprintf(os.Stderr, "Error parsing command-line arguments: %s\n", err)
os.Exit(1)
printErrorAndExit(errors.Errorf("Error parsing command-line arguments: %s", err))
}
defer backendLog.Close()
@@ -44,7 +43,7 @@ func main() {
miningAddr, err := util.DecodeAddress(cfg.MiningAddr, cfg.ActiveNetParams.Prefix)
if err != nil {
panic(errors.Wrap(err, "error decoding mining address"))
printErrorAndExit(errors.Errorf("Error decoding mining address: %s", err))
}
doneChan := make(chan struct{})
@@ -61,3 +60,8 @@ func main() {
case <-interrupt:
}
}
func printErrorAndExit(err error) {
fmt.Fprintf(os.Stderr, "%+v\n", err)
os.Exit(1)
}

View File

@@ -46,32 +46,34 @@ type createConfig struct {
}
type balanceConfig struct {
DaemonAddress string `long:"daemonaddress" short:"d" description:"Wallet daemon server to connect to (default: localhost:8082)"`
DaemonAddress string `long:"daemonaddress" short:"d" description:"Wallet daemon server to connect to"`
Verbose bool `long:"verbose" short:"v" description:"Verbose: show addresses with balance"`
config.NetworkFlags
}
type sendConfig struct {
KeysFile string `long:"keys-file" short:"f" description:"Keys file location (default: ~/.kaspawallet/keys.json (*nix), %USERPROFILE%\\AppData\\Local\\Kaspawallet\\key.json (Windows))"`
Password string `long:"password" short:"p" description:"Wallet password"`
DaemonAddress string `long:"daemonaddress" short:"d" description:"Wallet daemon server to connect to (default: localhost:8082)"`
ToAddress string `long:"to-address" short:"t" description:"The public address to send Kaspa to" required:"true"`
FromAddresses []string `long:"from-address" short:"a" description:"Specific public address to send Kaspa from. Use multiple times to accept several addresses" required:"false"`
SendAmount float64 `long:"send-amount" short:"v" description:"An amount to send in Kaspa (e.g. 1234.12345678)" required:"true"`
KeysFile string `long:"keys-file" short:"f" description:"Keys file location (default: ~/.kaspawallet/keys.json (*nix), %USERPROFILE%\\AppData\\Local\\Kaspawallet\\key.json (Windows))"`
Password string `long:"password" short:"p" description:"Wallet password"`
DaemonAddress string `long:"daemonaddress" short:"d" description:"Wallet daemon server to connect to"`
ToAddress string `long:"to-address" short:"t" description:"The public address to send Kaspa to" required:"true"`
FromAddresses []string `long:"from-address" short:"a" description:"Specific public address to send Kaspa from. Use multiple times to accept several addresses" required:"false"`
SendAmount float64 `long:"send-amount" short:"v" description:"An amount to send in Kaspa (e.g. 1234.12345678)" required:"true"`
UseExistingChangeAddress bool `long:"use-existing-change-address" short:"u" description:"Will use an existing change address (in case no change address was ever used, it will use a new one)"`
config.NetworkFlags
}
type sweepConfig struct {
PrivateKey string `long:"private-key" short:"k" description:"Private key in hex format"`
DaemonAddress string `long:"daemonaddress" short:"d" description:"Wallet daemon server to connect to (default: localhost:8082)"`
DaemonAddress string `long:"daemonaddress" short:"d" description:"Wallet daemon server to connect to"`
config.NetworkFlags
}
type createUnsignedTransactionConfig struct {
DaemonAddress string `long:"daemonaddress" short:"d" description:"Wallet daemon server to connect to (default: localhost:8082)"`
ToAddress string `long:"to-address" short:"t" description:"The public address to send Kaspa to" required:"true"`
FromAddresses []string `long:"from-address" short:"a" description:"Specific public address to send Kaspa from. Use multiple times to accept several addresses" required:"false"`
SendAmount float64 `long:"send-amount" short:"v" description:"An amount to send in Kaspa (e.g. 1234.12345678)" required:"true"`
DaemonAddress string `long:"daemonaddress" short:"d" description:"Wallet daemon server to connect to"`
ToAddress string `long:"to-address" short:"t" description:"The public address to send Kaspa to" required:"true"`
FromAddresses []string `long:"from-address" short:"a" description:"Specific public address to send Kaspa from. Use multiple times to accept several addresses" required:"false"`
SendAmount float64 `long:"send-amount" short:"v" description:"An amount to send in Kaspa (e.g. 1234.12345678)" required:"true"`
UseExistingChangeAddress bool `long:"use-existing-change-address" short:"u" description:"Will use an existing change address (in case no change address was ever used, it will use a new one)"`
config.NetworkFlags
}
@@ -84,7 +86,7 @@ type signConfig struct {
}
type broadcastConfig struct {
DaemonAddress string `long:"daemonaddress" short:"d" description:"Wallet daemon server to connect to (default: localhost:8082)"`
DaemonAddress string `long:"daemonaddress" short:"d" description:"Wallet daemon server to connect to"`
Transactions string `long:"transaction" short:"t" description:"The signed transaction to broadcast (encoded in hex)"`
TransactionsFile string `long:"transaction-file" short:"F" description:"The file containing the unsigned transaction to sign on (encoded in hex)"`
config.NetworkFlags
@@ -98,12 +100,12 @@ type parseConfig struct {
}
type showAddressesConfig struct {
DaemonAddress string `long:"daemonaddress" short:"d" description:"Wallet daemon server to connect to (default: localhost:8082)"`
DaemonAddress string `long:"daemonaddress" short:"d" description:"Wallet daemon server to connect to"`
config.NetworkFlags
}
type newAddressConfig struct {
DaemonAddress string `long:"daemonaddress" short:"d" description:"Wallet daemon server to connect to (default: localhost:8082)"`
DaemonAddress string `long:"daemonaddress" short:"d" description:"Wallet daemon server to connect to"`
config.NetworkFlags
}
@@ -111,7 +113,8 @@ type startDaemonConfig struct {
KeysFile string `long:"keys-file" short:"f" description:"Keys file location (default: ~/.kaspawallet/keys.json (*nix), %USERPROFILE%\\AppData\\Local\\Kaspawallet\\key.json (Windows))"`
Password string `long:"password" short:"p" description:"Wallet password"`
RPCServer string `long:"rpcserver" short:"s" description:"RPC server to connect to"`
Listen string `short:"l" long:"listen" description:"Address to listen on (default: 0.0.0.0:8082)"`
Listen string `long:"listen" short:"l" description:"Address to listen on (default: 0.0.0.0:8082)"`
Timeout uint32 `long:"wait-timeout" short:"w" description:"Timeout for RPC calls, in seconds (default: 30)"`
Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
config.NetworkFlags
}
@@ -140,8 +143,10 @@ func parseCommandLine() (subCommand string, config interface{}) {
"Sends a Kaspa transaction to a public address", sendConf)
sweepConf := &sweepConfig{DaemonAddress: defaultListen}
parser.AddCommand(sweepSubCmd, "Sends all funds associated with the given private key, to a new address of the wallet",
"Sends all funds associated with the private key, to a given change address of the wallet", sweepConf)
parser.AddCommand(sweepSubCmd, "Sends all funds associated with the given schnorr private key to a new address of the current wallet",
"Sends all funds associated with the given schnorr private key to a newly created external (i.e. not a change) address of the "+
"keyfile that is under the daemon's control. Can be used with a private key generated with the genkeypair utility "+
"to send funds to your main wallet.", sweepConf)
createUnsignedTransactionConf := &createUnsignedTransactionConfig{DaemonAddress: defaultListen}
parser.AddCommand(createUnsignedTransactionSubCmd, "Create an unsigned Kaspa transaction",
@@ -179,7 +184,6 @@ func parseCommandLine() (subCommand string, config interface{}) {
parser.AddCommand(startDaemonSubCmd, "Start the wallet daemon", "Start the wallet daemon", startDaemonConf)
_, err := parser.Parse()
if err != nil {
var flagsErr *flags.Error
if ok := errors.As(err, &flagsErr); ok && flagsErr.Type == flags.ErrHelp {

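For reference, a minimal standalone sketch of how a boolean go-flags option like the new use-existing-change-address switch behaves once declared on these config structs. The struct name and the argument list below are illustrative only; the import path assumes the jessevdk/go-flags library the wallet already uses for flags.Error and flags.ErrHelp.

package main

import (
	"fmt"

	"github.com/jessevdk/go-flags"
)

// exampleSendConfig mimics, in reduced form, the sendConfig struct above.
type exampleSendConfig struct {
	UseExistingChangeAddress bool `long:"use-existing-change-address" short:"u" description:"Reuse an existing change address"`
}

func main() {
	cfg := &exampleSendConfig{}
	// Passing --use-existing-change-address (or -u) flips the bool to true;
	// omitting it leaves the zero value false, preserving the old behaviour.
	if _, err := flags.ParseArgs(cfg, []string{"--use-existing-change-address"}); err != nil {
		panic(err)
	}
	fmt.Println(cfg.UseExistingChangeAddress) // true
}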
View File

@@ -3,6 +3,7 @@ package main
import (
"context"
"fmt"
"os"
"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/client"
"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/pb"
@@ -21,15 +22,17 @@ func createUnsignedTransaction(conf *createUnsignedTransactionConfig) error {
sendAmountSompi := uint64(conf.SendAmount * constants.SompiPerKaspa)
response, err := daemonClient.CreateUnsignedTransactions(ctx, &pb.CreateUnsignedTransactionsRequest{
From: conf.FromAddresses,
Address: conf.ToAddress,
Amount: sendAmountSompi,
From: conf.FromAddresses,
Address: conf.ToAddress,
Amount: sendAmountSompi,
UseExistingChangeAddress: conf.UseExistingChangeAddress,
})
if err != nil {
return err
}
fmt.Println("Created unsigned transaction")
fmt.Fprintln(os.Stderr, "Created unsigned transaction")
fmt.Println(encodeTransactionsToHex(response.UnsignedTransactions))
return nil
}

View File

@@ -1,12 +1,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.0
// protoc v3.17.2
// protoc-gen-go v1.25.0
// protoc v3.12.3
// source: kaspawalletd.proto
package pb
import (
proto "github.com/golang/protobuf/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
@@ -20,6 +21,10 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
type GetBalanceRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -189,9 +194,10 @@ type CreateUnsignedTransactionsRequest struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
Amount uint64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"`
From []string `protobuf:"bytes,3,rep,name=from,proto3" json:"from,omitempty"`
Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
Amount uint64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"`
From []string `protobuf:"bytes,3,rep,name=from,proto3" json:"from,omitempty"`
UseExistingChangeAddress bool `protobuf:"varint,4,opt,name=useExistingChangeAddress,proto3" json:"useExistingChangeAddress,omitempty"`
}
func (x *CreateUnsignedTransactionsRequest) Reset() {
@@ -247,6 +253,13 @@ func (x *CreateUnsignedTransactionsRequest) GetFrom() []string {
return nil
}
func (x *CreateUnsignedTransactionsRequest) GetUseExistingChangeAddress() bool {
if x != nil {
return x.UseExistingChangeAddress
}
return false
}
type CreateUnsignedTransactionsResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -986,10 +999,11 @@ type SendRequest struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
ToAddress string `protobuf:"bytes,1,opt,name=toAddress,proto3" json:"toAddress,omitempty"`
Amount uint64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"`
Password string `protobuf:"bytes,3,opt,name=password,proto3" json:"password,omitempty"`
From []string `protobuf:"bytes,4,rep,name=from,proto3" json:"from,omitempty"`
ToAddress string `protobuf:"bytes,1,opt,name=toAddress,proto3" json:"toAddress,omitempty"`
Amount uint64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"`
Password string `protobuf:"bytes,3,opt,name=password,proto3" json:"password,omitempty"`
From []string `protobuf:"bytes,4,rep,name=from,proto3" json:"from,omitempty"`
UseExistingChangeAddress bool `protobuf:"varint,5,opt,name=useExistingChangeAddress,proto3" json:"useExistingChangeAddress,omitempty"`
}
func (x *SendRequest) Reset() {
@@ -1052,6 +1066,13 @@ func (x *SendRequest) GetFrom() []string {
return nil
}
func (x *SendRequest) GetUseExistingChangeAddress() bool {
if x != nil {
return x.UseExistingChangeAddress
}
return false
}
type SendResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -1224,155 +1245,162 @@ var file_kaspawalletd_proto_rawDesc = []byte{
0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52,
0x09, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x65,
0x6e, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x70, 0x65, 0x6e,
0x64, 0x69, 0x6e, 0x67, 0x22, 0x69, 0x0a, 0x21, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x6e,
0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f,
0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64,
0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72,
0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20,
0x01, 0x28, 0x04, 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x66,
0x72, 0x6f, 0x6d, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x66, 0x72, 0x6f, 0x6d, 0x22,
0x58, 0x0a, 0x22, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x6e, 0x73, 0x69, 0x67, 0x6e, 0x65,
0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x14, 0x75, 0x6e, 0x73, 0x69, 0x67, 0x6e, 0x65,
0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20,
0x03, 0x28, 0x0c, 0x52, 0x14, 0x75, 0x6e, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x72, 0x61,
0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x16, 0x0a, 0x14, 0x53, 0x68, 0x6f,
0x77, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x22, 0x31, 0x0a, 0x15, 0x53, 0x68, 0x6f, 0x77, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64,
0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64,
0x72, 0x65, 0x73, 0x73, 0x22, 0x13, 0x0a, 0x11, 0x4e, 0x65, 0x77, 0x41, 0x64, 0x64, 0x72, 0x65,
0x73, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x2e, 0x0a, 0x12, 0x4e, 0x65, 0x77,
0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x52, 0x0a, 0x10, 0x42, 0x72, 0x6f,
0x61, 0x64, 0x63, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a,
0x08, 0x69, 0x73, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52,
0x08, 0x69, 0x73, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x74, 0x72, 0x61,
0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52,
0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x29, 0x0a,
0x11, 0x42, 0x72, 0x6f, 0x61, 0x64, 0x63, 0x61, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x78, 0x49, 0x44, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
0x09, 0x52, 0x05, 0x74, 0x78, 0x49, 0x44, 0x73, 0x22, 0x11, 0x0a, 0x0f, 0x53, 0x68, 0x75, 0x74,
0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x12, 0x0a, 0x10, 0x53,
0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
0x46, 0x0a, 0x08, 0x4f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x74,
0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49,
0x64, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d,
0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x22, 0x9c, 0x01, 0x0a, 0x15, 0x55, 0x74, 0x78, 0x6f,
0x73, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72,
0x79, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x32, 0x0a, 0x08, 0x6f,
0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e,
0x6b, 0x61, 0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x4f, 0x75, 0x74,
0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12,
0x35, 0x0a, 0x09, 0x75, 0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74,
0x64, 0x2e, 0x55, 0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x75, 0x74, 0x78,
0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x55, 0x0a, 0x0f, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74,
0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72,
0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73,
0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62,
0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x63,
0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x22, 0xb2, 0x01,
0x0a, 0x09, 0x55, 0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x61,
0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x61, 0x6d, 0x6f,
0x75, 0x6e, 0x74, 0x12, 0x47, 0x0a, 0x0f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62,
0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6b,
0x61, 0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x53, 0x63, 0x72, 0x69,
0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x0f, 0x73, 0x63, 0x72,
0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x0d,
0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x44, 0x61, 0x61, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x03, 0x20,
0x01, 0x28, 0x04, 0x52, 0x0d, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x44, 0x61, 0x61, 0x53, 0x63, 0x6f,
0x72, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x73, 0x43, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65,
0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x43, 0x6f, 0x69, 0x6e, 0x62, 0x61,
0x73, 0x65, 0x22, 0x3c, 0x0a, 0x20, 0x47, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61,
0x6c, 0x53, 0x70, 0x65, 0x6e, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x55, 0x54, 0x58, 0x4f, 0x73, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73,
0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
0x22, 0x62, 0x0a, 0x21, 0x47, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x53,
0x70, 0x65, 0x6e, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x55, 0x54, 0x58, 0x4f, 0x73, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x07, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73,
0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x77, 0x61,
0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x55, 0x74, 0x78, 0x6f, 0x73, 0x42, 0x79, 0x41, 0x64, 0x64,
0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x45, 0x6e, 0x74,
0x72, 0x69, 0x65, 0x73, 0x22, 0x73, 0x0a, 0x0b, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x6f, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x6f, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73,
0x73, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
0x04, 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73,
0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73,
0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72, 0x6f, 0x6d, 0x18, 0x04, 0x20,
0x03, 0x28, 0x09, 0x52, 0x04, 0x66, 0x72, 0x6f, 0x6d, 0x22, 0x24, 0x0a, 0x0c, 0x53, 0x65, 0x6e,
0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x78, 0x49,
0x44, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x74, 0x78, 0x49, 0x44, 0x73, 0x22,
0x5d, 0x0a, 0x0b, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x32,
0x0a, 0x14, 0x75, 0x6e, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61,
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x14, 0x75, 0x6e,
0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f,
0x6e, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02,
0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x3e,
0x0a, 0x0c, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e,
0x0a, 0x12, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74,
0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x12, 0x73, 0x69, 0x67, 0x6e,
0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0xb3,
0x06, 0x0a, 0x0c, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x12,
0x51, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x1f, 0x2e,
0x6b, 0x61, 0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x47, 0x65, 0x74,
0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20,
0x2e, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x47, 0x65,
0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x22, 0x00, 0x12, 0x7e, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61,
0x6c, 0x53, 0x70, 0x65, 0x6e, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x55, 0x54, 0x58, 0x4f, 0x73, 0x12,
0x2e, 0x2e, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x47,
0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x53, 0x70, 0x65, 0x6e, 0x64, 0x61,
0x62, 0x6c, 0x65, 0x55, 0x54, 0x58, 0x4f, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x2f, 0x2e, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x47,
0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x53, 0x70, 0x65, 0x6e, 0x64, 0x61,
0x62, 0x6c, 0x65, 0x55, 0x54, 0x58, 0x4f, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x22, 0x00, 0x12, 0x81, 0x01, 0x0a, 0x1a, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x6e, 0x73,
0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
0x73, 0x12, 0x2f, 0x2e, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64,
0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x6e, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54,
0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x1a, 0x30, 0x2e, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74,
0x64, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x6e, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64,
0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0d, 0x53, 0x68, 0x6f, 0x77, 0x41, 0x64,
0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x22, 0x2e, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x77,
0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x41, 0x64, 0x64, 0x72, 0x65,
0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6b, 0x61,
0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x41,
0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x22, 0x00, 0x12, 0x51, 0x0a, 0x0a, 0x4e, 0x65, 0x77, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
0x12, 0x1f, 0x2e, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e,
0x4e, 0x65, 0x77, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x1a, 0x20, 0x2e, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64,
0x2e, 0x4e, 0x65, 0x77, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x08, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77,
0x6e, 0x12, 0x1d, 0x2e, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64,
0x2e, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x1e, 0x2e, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e,
0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x22, 0x00, 0x12, 0x4e, 0x0a, 0x09, 0x42, 0x72, 0x6f, 0x61, 0x64, 0x63, 0x61, 0x73, 0x74, 0x12,
0x1e, 0x2e, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x42,
0x72, 0x6f, 0x61, 0x64, 0x63, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x1f, 0x2e, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x42,
0x72, 0x6f, 0x61, 0x64, 0x63, 0x61, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x22, 0x00, 0x12, 0x3f, 0x0a, 0x04, 0x53, 0x65, 0x6e, 0x64, 0x12, 0x19, 0x2e, 0x6b, 0x61, 0x73,
0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x77, 0x61, 0x6c,
0x6c, 0x65, 0x74, 0x64, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x04, 0x53, 0x69, 0x67, 0x6e, 0x12, 0x19, 0x2e, 0x6b, 0x61,
0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x77, 0x61,
0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x22, 0x00, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
0x6f, 0x6d, 0x2f, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x6e, 0x65, 0x74, 0x2f, 0x6b, 0x61, 0x73, 0x70,
0x61, 0x64, 0x2f, 0x63, 0x6d, 0x64, 0x2f, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c,
0x65, 0x74, 0x2f, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
0x64, 0x69, 0x6e, 0x67, 0x22, 0xa5, 0x01, 0x0a, 0x21, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55,
0x6e, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69,
0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64,
0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64,
0x72, 0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02,
0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04,
0x66, 0x72, 0x6f, 0x6d, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x66, 0x72, 0x6f, 0x6d,
0x12, 0x3a, 0x0a, 0x18, 0x75, 0x73, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x43,
0x68, 0x61, 0x6e, 0x67, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01,
0x28, 0x08, 0x52, 0x18, 0x75, 0x73, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x43,
0x68, 0x61, 0x6e, 0x67, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x58, 0x0a, 0x22,
0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x6e, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x72,
0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x12, 0x32, 0x0a, 0x14, 0x75, 0x6e, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x72,
0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c,
0x52, 0x14, 0x75, 0x6e, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61,
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x16, 0x0a, 0x14, 0x53, 0x68, 0x6f, 0x77, 0x41, 0x64,
0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x31,
0x0a, 0x15, 0x53, 0x68, 0x6f, 0x77, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65,
0x73, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73,
0x73, 0x22, 0x13, 0x0a, 0x11, 0x4e, 0x65, 0x77, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x2e, 0x0a, 0x12, 0x4e, 0x65, 0x77, 0x41, 0x64, 0x64,
0x72, 0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07,
0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61,
0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x52, 0x0a, 0x10, 0x42, 0x72, 0x6f, 0x61, 0x64, 0x63,
0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x73,
0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x69, 0x73,
0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61,
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x74, 0x72,
0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x29, 0x0a, 0x11, 0x42, 0x72,
0x6f, 0x61, 0x64, 0x63, 0x61, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
0x14, 0x0a, 0x05, 0x74, 0x78, 0x49, 0x44, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05,
0x74, 0x78, 0x49, 0x44, 0x73, 0x22, 0x11, 0x0a, 0x0f, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77,
0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x12, 0x0a, 0x10, 0x53, 0x68, 0x75, 0x74,
0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x46, 0x0a, 0x08,
0x4f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x6e,
0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x14,
0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x69,
0x6e, 0x64, 0x65, 0x78, 0x22, 0x9c, 0x01, 0x0a, 0x15, 0x55, 0x74, 0x78, 0x6f, 0x73, 0x42, 0x79,
0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x18,
0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x32, 0x0a, 0x08, 0x6f, 0x75, 0x74, 0x70,
0x6f, 0x69, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6b, 0x61, 0x73,
0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x6f, 0x69,
0x6e, 0x74, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x35, 0x0a, 0x09,
0x75, 0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x17, 0x2e, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x55,
0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x75, 0x74, 0x78, 0x6f, 0x45, 0x6e,
0x74, 0x72, 0x79, 0x22, 0x55, 0x0a, 0x0f, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62,
0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
0x12, 0x28, 0x0a, 0x0f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63,
0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x63, 0x72, 0x69, 0x70,
0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x22, 0xb2, 0x01, 0x0a, 0x09, 0x55,
0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75,
0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74,
0x12, 0x47, 0x0a, 0x0f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63,
0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6b, 0x61, 0x73, 0x70,
0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50,
0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x0f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x0d, 0x62, 0x6c, 0x6f,
0x63, 0x6b, 0x44, 0x61, 0x61, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04,
0x52, 0x0d, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x44, 0x61, 0x61, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x12,
0x1e, 0x0a, 0x0a, 0x69, 0x73, 0x43, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x18, 0x04, 0x20,
0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x43, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x22,
0x3c, 0x0a, 0x20, 0x47, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x53, 0x70,
0x65, 0x6e, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x55, 0x54, 0x58, 0x4f, 0x73, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x62, 0x0a,
0x21, 0x47, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x53, 0x70, 0x65, 0x6e,
0x64, 0x61, 0x62, 0x6c, 0x65, 0x55, 0x54, 0x58, 0x4f, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x12, 0x3d, 0x0a, 0x07, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20,
0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65,
0x74, 0x64, 0x2e, 0x55, 0x74, 0x78, 0x6f, 0x73, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73,
0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65,
0x73, 0x22, 0xaf, 0x01, 0x0a, 0x0b, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x6f, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x6f, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12,
0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52,
0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77,
0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77,
0x6f, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72, 0x6f, 0x6d, 0x18, 0x04, 0x20, 0x03, 0x28,
0x09, 0x52, 0x04, 0x66, 0x72, 0x6f, 0x6d, 0x12, 0x3a, 0x0a, 0x18, 0x75, 0x73, 0x65, 0x45, 0x78,
0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x41, 0x64, 0x64, 0x72,
0x65, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x75, 0x73, 0x65, 0x45, 0x78,
0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x41, 0x64, 0x64, 0x72,
0x65, 0x73, 0x73, 0x22, 0x24, 0x0a, 0x0c, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x78, 0x49, 0x44, 0x73, 0x18, 0x01, 0x20, 0x03,
0x28, 0x09, 0x52, 0x05, 0x74, 0x78, 0x49, 0x44, 0x73, 0x22, 0x5d, 0x0a, 0x0b, 0x53, 0x69, 0x67,
0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x32, 0x0a, 0x14, 0x75, 0x6e, 0x73, 0x69,
0x67, 0x6e, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73,
0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x14, 0x75, 0x6e, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64,
0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1a, 0x0a, 0x08,
0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x3e, 0x0a, 0x0c, 0x53, 0x69, 0x67, 0x6e,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x73, 0x69, 0x67, 0x6e,
0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01,
0x20, 0x03, 0x28, 0x0c, 0x52, 0x12, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e,
0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0xb3, 0x06, 0x0a, 0x0c, 0x6b, 0x61, 0x73,
0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x12, 0x51, 0x0a, 0x0a, 0x47, 0x65, 0x74,
0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x1f, 0x2e, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x77,
0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63,
0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6b, 0x61, 0x73, 0x70, 0x61,
0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e,
0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7e, 0x0a, 0x19,
0x47, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x53, 0x70, 0x65, 0x6e, 0x64,
0x61, 0x62, 0x6c, 0x65, 0x55, 0x54, 0x58, 0x4f, 0x73, 0x12, 0x2e, 0x2e, 0x6b, 0x61, 0x73, 0x70,
0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65,
0x72, 0x6e, 0x61, 0x6c, 0x53, 0x70, 0x65, 0x6e, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x55, 0x54, 0x58,
0x4f, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x6b, 0x61, 0x73, 0x70,
0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65,
0x72, 0x6e, 0x61, 0x6c, 0x53, 0x70, 0x65, 0x6e, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x55, 0x54, 0x58,
0x4f, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x81, 0x01, 0x0a,
0x1a, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x6e, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54,
0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2f, 0x2e, 0x6b, 0x61,
0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74,
0x65, 0x55, 0x6e, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63,
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x6b,
0x61, 0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x43, 0x72, 0x65, 0x61,
0x74, 0x65, 0x55, 0x6e, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61,
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
0x12, 0x5a, 0x0a, 0x0d, 0x53, 0x68, 0x6f, 0x77, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65,
0x73, 0x12, 0x22, 0x2e, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64,
0x2e, 0x53, 0x68, 0x6f, 0x77, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x77, 0x61, 0x6c,
0x6c, 0x65, 0x74, 0x64, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0a,
0x4e, 0x65, 0x77, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1f, 0x2e, 0x6b, 0x61, 0x73,
0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x4e, 0x65, 0x77, 0x41, 0x64, 0x64,
0x72, 0x65, 0x73, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6b, 0x61,
0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x4e, 0x65, 0x77, 0x41, 0x64,
0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
0x4b, 0x0a, 0x08, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x12, 0x1d, 0x2e, 0x6b, 0x61,
0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x53, 0x68, 0x75, 0x74, 0x64,
0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6b, 0x61, 0x73,
0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f,
0x77, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x09,
0x42, 0x72, 0x6f, 0x61, 0x64, 0x63, 0x61, 0x73, 0x74, 0x12, 0x1e, 0x2e, 0x6b, 0x61, 0x73, 0x70,
0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x42, 0x72, 0x6f, 0x61, 0x64, 0x63, 0x61,
0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6b, 0x61, 0x73, 0x70,
0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x42, 0x72, 0x6f, 0x61, 0x64, 0x63, 0x61,
0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x04,
0x53, 0x65, 0x6e, 0x64, 0x12, 0x19, 0x2e, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c,
0x65, 0x74, 0x64, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x1a, 0x2e, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x53,
0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a,
0x04, 0x53, 0x69, 0x67, 0x6e, 0x12, 0x19, 0x2e, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x77, 0x61, 0x6c,
0x6c, 0x65, 0x74, 0x64, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x1a, 0x2e, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e,
0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x36,
0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6b, 0x61, 0x73,
0x70, 0x61, 0x6e, 0x65, 0x74, 0x2f, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x64, 0x2f, 0x63, 0x6d, 0x64,
0x2f, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x2f, 0x64, 0x61, 0x65,
0x6d, 0x6f, 0x6e, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (

View File

@@ -36,6 +36,7 @@ message CreateUnsignedTransactionsRequest {
string address = 1;
uint64 amount = 2;
repeated string from = 3;
bool useExistingChangeAddress = 4;
}
message CreateUnsignedTransactionsResponse {
@@ -57,8 +58,8 @@ message NewAddressResponse {
}
message BroadcastRequest {
bool isDomain = 1;
repeated bytes transactions = 2;
bool isDomain = 1;
repeated bytes transactions = 2;
}
message BroadcastResponse {
@@ -107,6 +108,7 @@ message SendRequest{
uint64 amount = 2;
string password = 3;
repeated string from = 4;
bool useExistingChangeAddress = 5;
}
message SendResponse{

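A rough client-side sketch of how the extended SendRequest might be populated through the generated pb package. The Connect helper signature, the daemon address, and all values below are assumptions for illustration, not taken from this diff.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/client"
	"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/pb"
)

func main() {
	// Assumption: client.Connect returns a pb.KaspawalletdClient plus a
	// teardown callback, as the wallet subcommands use it.
	daemonClient, tearDown, err := client.Connect("localhost:8082")
	if err != nil {
		panic(err)
	}
	defer tearDown()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	// The new field rides on the existing message; leaving it false keeps the
	// previous behaviour of deriving a fresh change address.
	response, err := daemonClient.Send(ctx, &pb.SendRequest{
		ToAddress:                "kaspa:placeholder-destination-address",
		Amount:                   100000000, // 1 KAS in sompi
		Password:                 "wallet-password",
		UseExistingChangeAddress: true,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("transaction IDs:", response.TxIDs)
}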
View File

@@ -1,8 +1,4 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc v3.17.2
// source: kaspawalletd.proto
package pb
@@ -15,8 +11,7 @@ import (
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
const _ = grpc.SupportPackageIsVersion6
// KaspawalletdClient is the client API for Kaspawalletd service.
//
@@ -146,44 +141,37 @@ type KaspawalletdServer interface {
type UnimplementedKaspawalletdServer struct {
}
func (UnimplementedKaspawalletdServer) GetBalance(context.Context, *GetBalanceRequest) (*GetBalanceResponse, error) {
func (*UnimplementedKaspawalletdServer) GetBalance(context.Context, *GetBalanceRequest) (*GetBalanceResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetBalance not implemented")
}
func (UnimplementedKaspawalletdServer) GetExternalSpendableUTXOs(context.Context, *GetExternalSpendableUTXOsRequest) (*GetExternalSpendableUTXOsResponse, error) {
func (*UnimplementedKaspawalletdServer) GetExternalSpendableUTXOs(context.Context, *GetExternalSpendableUTXOsRequest) (*GetExternalSpendableUTXOsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetExternalSpendableUTXOs not implemented")
}
func (UnimplementedKaspawalletdServer) CreateUnsignedTransactions(context.Context, *CreateUnsignedTransactionsRequest) (*CreateUnsignedTransactionsResponse, error) {
func (*UnimplementedKaspawalletdServer) CreateUnsignedTransactions(context.Context, *CreateUnsignedTransactionsRequest) (*CreateUnsignedTransactionsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method CreateUnsignedTransactions not implemented")
}
func (UnimplementedKaspawalletdServer) ShowAddresses(context.Context, *ShowAddressesRequest) (*ShowAddressesResponse, error) {
func (*UnimplementedKaspawalletdServer) ShowAddresses(context.Context, *ShowAddressesRequest) (*ShowAddressesResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ShowAddresses not implemented")
}
func (UnimplementedKaspawalletdServer) NewAddress(context.Context, *NewAddressRequest) (*NewAddressResponse, error) {
func (*UnimplementedKaspawalletdServer) NewAddress(context.Context, *NewAddressRequest) (*NewAddressResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method NewAddress not implemented")
}
func (UnimplementedKaspawalletdServer) Shutdown(context.Context, *ShutdownRequest) (*ShutdownResponse, error) {
func (*UnimplementedKaspawalletdServer) Shutdown(context.Context, *ShutdownRequest) (*ShutdownResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Shutdown not implemented")
}
func (UnimplementedKaspawalletdServer) Broadcast(context.Context, *BroadcastRequest) (*BroadcastResponse, error) {
func (*UnimplementedKaspawalletdServer) Broadcast(context.Context, *BroadcastRequest) (*BroadcastResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Broadcast not implemented")
}
func (UnimplementedKaspawalletdServer) Send(context.Context, *SendRequest) (*SendResponse, error) {
func (*UnimplementedKaspawalletdServer) Send(context.Context, *SendRequest) (*SendResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Send not implemented")
}
func (UnimplementedKaspawalletdServer) Sign(context.Context, *SignRequest) (*SignResponse, error) {
func (*UnimplementedKaspawalletdServer) Sign(context.Context, *SignRequest) (*SignResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Sign not implemented")
}
func (UnimplementedKaspawalletdServer) mustEmbedUnimplementedKaspawalletdServer() {}
func (*UnimplementedKaspawalletdServer) mustEmbedUnimplementedKaspawalletdServer() {}
// UnsafeKaspawalletdServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to KaspawalletdServer will
// result in compilation errors.
type UnsafeKaspawalletdServer interface {
mustEmbedUnimplementedKaspawalletdServer()
}
func RegisterKaspawalletdServer(s grpc.ServiceRegistrar, srv KaspawalletdServer) {
s.RegisterService(&Kaspawalletd_ServiceDesc, srv)
func RegisterKaspawalletdServer(s *grpc.Server, srv KaspawalletdServer) {
s.RegisterService(&_Kaspawalletd_serviceDesc, srv)
}
func _Kaspawalletd_GetBalance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
@@ -348,10 +336,7 @@ func _Kaspawalletd_Sign_Handler(srv interface{}, ctx context.Context, dec func(i
return interceptor(ctx, in, info, handler)
}
// Kaspawalletd_ServiceDesc is the grpc.ServiceDesc for Kaspawalletd service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Kaspawalletd_ServiceDesc = grpc.ServiceDesc{
var _Kaspawalletd_serviceDesc = grpc.ServiceDesc{
ServiceName: "kaspawalletd.kaspawalletd",
HandlerType: (*KaspawalletdServer)(nil),
Methods: []grpc.MethodDesc{

View File

@@ -10,19 +10,24 @@ import (
"github.com/pkg/errors"
)
func (s *server) changeAddress() (util.Address, *walletAddress, error) {
err := s.keysFile.SetLastUsedInternalIndex(s.keysFile.LastUsedInternalIndex() + 1)
if err != nil {
return nil, nil, err
}
func (s *server) changeAddress(useFirst bool) (util.Address, *walletAddress, error) {
internalIndex := uint32(0)
if !useFirst {
err := s.keysFile.SetLastUsedInternalIndex(s.keysFile.LastUsedInternalIndex() + 1)
if err != nil {
return nil, nil, err
}
err = s.keysFile.Save()
if err != nil {
return nil, nil, err
err = s.keysFile.Save()
if err != nil {
return nil, nil, err
}
internalIndex = s.keysFile.LastUsedInternalIndex()
}
walletAddr := &walletAddress{
index: s.keysFile.LastUsedInternalIndex(),
index: internalIndex,
cosignerIndex: s.keysFile.CosignerIndex,
keyChain: libkaspawallet.InternalKeychain,
}
@@ -39,10 +44,10 @@ func (s *server) ShowAddresses(_ context.Context, request *pb.ShowAddressesReque
defer s.lock.Unlock()
if !s.isSynced() {
return nil, errors.New("server is not synced")
return nil, errors.Errorf("wallet daemon is not synced yet, %s", s.formatSyncStateReport())
}
addresses := make([]string, 0)
addresses := make([]string, s.keysFile.LastUsedExternalIndex())
for i := uint32(1); i <= s.keysFile.LastUsedExternalIndex(); i++ {
walletAddr := &walletAddress{
index: i,
@@ -54,7 +59,7 @@ func (s *server) ShowAddresses(_ context.Context, request *pb.ShowAddressesReque
if err != nil {
return nil, err
}
addresses = append(addresses, address.String())
addresses[i-1] = address.String()
}
return &pb.ShowAddressesResponse{Address: addresses}, nil
@@ -65,7 +70,7 @@ func (s *server) NewAddress(_ context.Context, request *pb.NewAddressRequest) (*
defer s.lock.Unlock()
if !s.isSynced() {
return nil, errors.New("server is not synced")
return nil, errors.Errorf("wallet daemon is not synced yet, %s", s.formatSyncStateReport())
}
err := s.keysFile.SetLastUsedExternalIndex(s.keysFile.LastUsedExternalIndex() + 1)

View File

@@ -3,24 +3,26 @@ package server
import (
"context"
"fmt"
"time"
"github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/pb"
"github.com/kaspanet/kaspad/cmd/kaspawallet/libkaspawallet"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
"golang.org/x/exp/slices"
"time"
)
// TODO: Implement a better fee estimation mechanism
const feePerInput = 10000
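To make the flat per-input fee above concrete, a tiny self-contained calculation; the 3-input transaction and the 1 KAS = 10^8 sompi conversion are the only values added here.

package main

import "fmt"

const (
	feePerInput   = 10000     // sompi per input, matching the constant above
	sompiPerKaspa = 100000000 // 1 KAS = 10^8 sompi
)

func main() {
	inputs := uint64(3) // example transaction spending three UTXOs
	feeSompi := inputs * feePerInput
	fmt.Printf("fee for %d inputs: %d sompi (%.8f KAS)\n",
		inputs, feeSompi, float64(feeSompi)/float64(sompiPerKaspa))
	// Prints: fee for 3 inputs: 30000 sompi (0.00030000 KAS)
}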
func (s *server) CreateUnsignedTransactions(_ context.Context, request *pb.CreateUnsignedTransactionsRequest) (
*pb.CreateUnsignedTransactionsResponse, error) {
*pb.CreateUnsignedTransactionsResponse, error,
) {
s.lock.Lock()
defer s.lock.Unlock()
unsignedTransactions, err := s.createUnsignedTransactions(request.Address, request.Amount, request.From)
unsignedTransactions, err := s.createUnsignedTransactions(request.Address, request.Amount, request.From, request.UseExistingChangeAddress)
if err != nil {
return nil, err
}
@@ -28,17 +30,19 @@ func (s *server) CreateUnsignedTransactions(_ context.Context, request *pb.Creat
return &pb.CreateUnsignedTransactionsResponse{UnsignedTransactions: unsignedTransactions}, nil
}
func (s *server) createUnsignedTransactions(address string, amount uint64, fromAddressesString []string) ([][]byte, error) {
func (s *server) createUnsignedTransactions(address string, amount uint64, fromAddressesString []string, useExistingChangeAddress bool) ([][]byte, error) {
if !s.isSynced() {
return nil, errors.New("server is not synced")
return nil, errors.Errorf("wallet daemon is not synced yet, %s", s.formatSyncStateReport())
}
err := s.refreshUTXOs()
// make sure address string is correct before proceeding to a
// potentially long UTXO refreshment operation
toAddress, err := util.DecodeAddress(address, s.params.Prefix)
if err != nil {
return nil, err
}
toAddress, err := util.DecodeAddress(address, s.params.Prefix)
err = s.refreshUTXOs()
if err != nil {
return nil, err
}
@@ -57,7 +61,7 @@ func (s *server) createUnsignedTransactions(address string, amount uint64, fromA
return nil, err
}
changeAddress, changeWalletAddress, err := s.changeAddress()
changeAddress, changeWalletAddress, err := s.changeAddress(useExistingChangeAddress)
if err != nil {
return nil, err
}
@@ -87,8 +91,8 @@ func (s *server) createUnsignedTransactions(address string, amount uint64, fromA
}
func (s *server) selectUTXOs(spendAmount uint64, feePerInput uint64, fromAddresses []*walletAddress) (
selectedUTXOs []*libkaspawallet.UTXO, changeSompi uint64, err error) {
selectedUTXOs []*libkaspawallet.UTXO, changeSompi uint64, err error,
) {
selectedUTXOs = []*libkaspawallet.UTXO{}
totalValue := uint64(0)

View File

@@ -31,7 +31,6 @@ func (s *server) GetExternalSpendableUTXOs(_ context.Context, request *pb.GetExt
}
func (s *server) selectExternalSpendableUTXOs(externalUTXOs *appmessage.GetUTXOsByAddressesResponseMessage, address string) ([]*pb.UtxosByAddressesEntry, error) {
dagInfo, err := s.rpcClient.GetBlockDAGInfo()
if err != nil {
return nil, err

View File

@@ -1,15 +1,26 @@
package server
import (
"time"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/infrastructure/network/rpcclient"
)
func connectToRPC(params *dagconfig.Params, rpcServer string) (*rpcclient.RPCClient, error) {
func connectToRPC(params *dagconfig.Params, rpcServer string, timeout uint32) (*rpcclient.RPCClient, error) {
rpcAddress, err := params.NormalizeRPCServerAddress(rpcServer)
if err != nil {
return nil, err
}
return rpcclient.NewRPCClient(rpcAddress)
rpcClient, err := rpcclient.NewRPCClient(rpcAddress)
if err != nil {
return nil, err
}
if timeout != 0 {
rpcClient.SetTimeout(time.Duration(timeout) * time.Second)
}
return rpcClient, err
}
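As a quick standalone illustration of the timeout behaviour introduced here, using only the rpcclient calls that already appear in this hunk; the address and the 30-second value are placeholders, and the connection will of course fail unless a node is listening there.

package main

import (
	"time"

	"github.com/kaspanet/kaspad/infrastructure/network/rpcclient"
)

func main() {
	rpcClient, err := rpcclient.NewRPCClient("localhost:16110") // placeholder node address
	if err != nil {
		panic(err)
	}
	// Mirrors the daemon's --wait-timeout flag; 0 keeps the client's default.
	timeoutSeconds := uint32(30)
	if timeoutSeconds != 0 {
		rpcClient.SetTimeout(time.Duration(timeoutSeconds) * time.Second)
	}
}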

View File

@@ -10,7 +10,7 @@ func (s *server) Send(_ context.Context, request *pb.SendRequest) (*pb.SendRespo
s.lock.Lock()
defer s.lock.Unlock()
unsignedTransactions, err := s.createUnsignedTransactions(request.ToAddress, request.Amount, request.From)
unsignedTransactions, err := s.createUnsignedTransactions(request.ToAddress, request.Amount, request.From, request.UseExistingChangeAddress)
if err != nil {
return nil, err
}

View File

@@ -45,7 +45,7 @@ type server struct {
}
// Start starts the kaspawalletd server
func Start(params *dagconfig.Params, listen, rpcServer string, keysFilePath string, profile string) error {
func Start(params *dagconfig.Params, listen, rpcServer string, keysFilePath string, profile string, timeout uint32) error {
initLog(defaultLogFile, defaultErrLogFile)
defer panics.HandlePanic(log, "MAIN", nil)
@@ -62,7 +62,7 @@ func Start(params *dagconfig.Params, listen, rpcServer string, keysFilePath stri
log.Infof("Listening to TCP on %s", listen)
log.Infof("Connecting to a node at %s...", rpcServer)
rpcClient, err := connectToRPC(params, rpcServer)
rpcClient, err := connectToRPC(params, rpcServer, timeout)
if err != nil {
return (errors.Wrapf(err, "Error connecting to RPC server %s", rpcServer))
}

View File

@@ -27,18 +27,10 @@ func (s *server) maybeAutoCompoundTransaction(transactionBytes []byte, toAddress
return nil, err
}
splitTransactions, err := s.maybeSplitTransaction(transaction, changeAddress)
splitTransactions, err := s.maybeSplitAndMergeTransaction(transaction, toAddress, changeAddress, changeWalletAddress)
if err != nil {
return nil, err
}
if len(splitTransactions) > 1 {
mergeTransaction, err := s.mergeTransaction(splitTransactions, transaction, toAddress, changeAddress, changeWalletAddress)
if err != nil {
return nil, err
}
splitTransactions = append(splitTransactions, mergeTransaction)
}
splitTransactionsBytes := make([][]byte, len(splitTransactions))
for i, splitTransaction := range splitTransactions {
splitTransactionsBytes[i], err = serialization.SerializePartiallySignedTransaction(splitTransaction)
@@ -113,8 +105,8 @@ func (s *server) mergeTransaction(
return serialization.DeserializePartiallySignedTransaction(mergeTransactionBytes)
}
func (s *server) maybeSplitTransaction(transaction *serialization.PartiallySignedTransaction,
changeAddress util.Address) ([]*serialization.PartiallySignedTransaction, error) {
func (s *server) maybeSplitAndMergeTransaction(transaction *serialization.PartiallySignedTransaction, toAddress util.Address,
changeAddress util.Address, changeWalletAddress *walletAddress) ([]*serialization.PartiallySignedTransaction, error) {
transactionMass, err := s.estimateMassAfterSignatures(transaction)
if err != nil {
@@ -141,6 +133,20 @@ func (s *server) maybeSplitTransaction(transaction *serialization.PartiallySigne
}
}
if len(splitTransactions) > 1 {
mergeTransaction, err := s.mergeTransaction(splitTransactions, transaction, toAddress, changeAddress, changeWalletAddress)
if err != nil {
return nil, err
}
// Recursion will be 2-3 iterations deep even in the rarest cases, so it is considered safe.
splitMergeTransaction, err := s.maybeSplitAndMergeTransaction(mergeTransaction, toAddress, changeAddress, changeWalletAddress)
if err != nil {
return nil, err
}
splitTransactions = append(splitTransactions, splitMergeTransaction...)
}
return splitTransactions, nil
}
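A back-of-the-envelope sketch of why that recursion stays shallow: if each split transaction can hold roughly k inputs before hitting the mass limit, every merge round divides the input count by about k, so the depth grows only logarithmically. The input count and per-split capacity below are invented for illustration.

package main

import "fmt"

// mergeRounds counts how many split-and-merge rounds are needed until a single
// transaction fits, assuming each split transaction carries at most
// maxInputsPerSplit inputs (an illustrative stand-in for the real mass limit).
func mergeRounds(inputs, maxInputsPerSplit int) int {
	rounds := 0
	for inputs > maxInputsPerSplit {
		// Ceil division: this round's split transactions become the inputs of
		// the next round's merge transaction.
		inputs = (inputs + maxInputsPerSplit - 1) / maxInputsPerSplit
		rounds++
	}
	return rounds
}

func main() {
	// Even a hypothetical wallet with 10,000 spendable UTXOs and ~50 inputs
	// per split transaction settles after two merge rounds.
	fmt.Println(mergeRounds(10000, 50)) // 2
}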

View File

@@ -1,6 +1,7 @@
package server
import (
"fmt"
"sort"
"time"
@@ -101,10 +102,14 @@ func (s *server) collectFarAddresses() error {
return nil
}
func (s *server) maxUsedIndex() uint32 {
func (s *server) maxUsedIndexWithLock() uint32 {
s.lock.RLock()
defer s.lock.RUnlock()
return s.maxUsedIndex()
}
func (s *server) maxUsedIndex() uint32 {
maxUsedIndex := s.keysFile.LastUsedExternalIndex()
if s.keysFile.LastUsedInternalIndex() > maxUsedIndex {
maxUsedIndex = s.keysFile.LastUsedInternalIndex()
@@ -122,10 +127,11 @@ func (s *server) collectRecentAddresses() error {
maxUsedIndex := uint32(0)
for ; index < maxUsedIndex+numIndexesToQueryForRecentAddresses; index += numIndexesToQueryForRecentAddresses {
err := s.collectAddressesWithLock(index, index+numIndexesToQueryForRecentAddresses)
if err != nil {
return err
}
maxUsedIndex = s.maxUsedIndex()
maxUsedIndex = s.maxUsedIndexWithLock()
s.updateSyncingProgressLog(index, maxUsedIndex)
}
@@ -261,7 +267,7 @@ func (s *server) refreshUTXOs() error {
// and not in consensus, and between the calls its spending transaction will be
// added to consensus and removed from the mempool, so `getUTXOsByAddressesResponse`
// will include an obsolete output.
mempoolEntriesByAddresses, err := s.rpcClient.GetMempoolEntriesByAddresses(s.addressSet.strings())
mempoolEntriesByAddresses, err := s.rpcClient.GetMempoolEntriesByAddresses(s.addressSet.strings(), true, true)
if err != nil {
return err
}
@@ -275,7 +281,18 @@ func (s *server) refreshUTXOs() error {
}
func (s *server) isSynced() bool {
return s.nextSyncStartIndex > s.keysFile.LastUsedInternalIndex() && s.nextSyncStartIndex > s.keysFile.LastUsedExternalIndex()
return s.nextSyncStartIndex > s.maxUsedIndex()
}
func (s *server) formatSyncStateReport() string {
maxUsedIndex := s.maxUsedIndex()
if s.nextSyncStartIndex > maxUsedIndex {
maxUsedIndex = s.nextSyncStartIndex
}
return fmt.Sprintf("scanned %d out of %d addresses (%.2f%%)",
s.nextSyncStartIndex, maxUsedIndex, float64(s.nextSyncStartIndex)*100.0/float64(maxUsedIndex))
}
func (s *server) updateSyncingProgressLog(currProcessedAddresses, currMaxUsedAddresses uint32) {

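A minimal sketch (not part of this diff) of the WithLock wrapper convention introduced above, where maxUsedIndexWithLock takes the read lock and delegates to a lock-free maxUsedIndex for callers that already hold the lock; the struct fields here are illustrative stand-ins for the wallet server's key file:

package main

import (
	"fmt"
	"sync"
)

type server struct {
	lock         sync.RWMutex
	lastExternal uint32
	lastInternal uint32
}

// maxUsedIndexWithLock is safe to call from any goroutine.
func (s *server) maxUsedIndexWithLock() uint32 {
	s.lock.RLock()
	defer s.lock.RUnlock()
	return s.maxUsedIndex()
}

// maxUsedIndex must only be called while s.lock is already held.
func (s *server) maxUsedIndex() uint32 {
	if s.lastInternal > s.lastExternal {
		return s.lastInternal
	}
	return s.lastExternal
}

func main() {
	s := &server{lastExternal: 7, lastInternal: 12}
	fmt.Println(s.maxUsedIndexWithLock()) // 12
}
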
View File

@@ -29,55 +29,57 @@ func parse(conf *parseConfig) error {
transactionHex = strings.TrimSpace(string(transactionHexBytes))
}
transaction, err := hex.DecodeString(transactionHex)
transactions, err := decodeTransactionsFromHex(transactionHex)
if err != nil {
return err
}
for i, transaction := range transactions {
partiallySignedTransaction, err := serialization.DeserializePartiallySignedTransaction(transaction)
if err != nil {
return err
}
fmt.Printf("Transaction ID: \t%s\n", consensushashing.TransactionID(partiallySignedTransaction.Tx))
fmt.Println()
allInputSompi := uint64(0)
for index, input := range partiallySignedTransaction.Tx.Inputs {
partiallySignedInput := partiallySignedTransaction.PartiallySignedInputs[index]
if conf.Verbose {
fmt.Printf("Input %d: \tOutpoint: %s:%d \tAmount: %.2f Kaspa\n", index, input.PreviousOutpoint.TransactionID,
input.PreviousOutpoint.Index, float64(partiallySignedInput.PrevOutput.Value)/float64(constants.SompiPerKaspa))
}
allInputSompi += partiallySignedInput.PrevOutput.Value
}
if conf.Verbose {
fmt.Println()
}
allOutputSompi := uint64(0)
for index, output := range partiallySignedTransaction.Tx.Outputs {
scriptPublicKeyType, scriptPublicKeyAddress, err := txscript.ExtractScriptPubKeyAddress(output.ScriptPublicKey, conf.ActiveNetParams)
partiallySignedTransaction, err := serialization.DeserializePartiallySignedTransaction(transaction)
if err != nil {
return err
}
addressString := scriptPublicKeyAddress.EncodeAddress()
if scriptPublicKeyType == txscript.NonStandardTy {
scriptPublicKeyHex := hex.EncodeToString(output.ScriptPublicKey.Script)
addressString = fmt.Sprintf("<Non-standard transaction script public key: %s>", scriptPublicKeyHex)
fmt.Printf("Transaction #%d ID: \t%s\n", i+1, consensushashing.TransactionID(partiallySignedTransaction.Tx))
fmt.Println()
allInputSompi := uint64(0)
for index, input := range partiallySignedTransaction.Tx.Inputs {
partiallySignedInput := partiallySignedTransaction.PartiallySignedInputs[index]
if conf.Verbose {
fmt.Printf("Input %d: \tOutpoint: %s:%d \tAmount: %.2f Kaspa\n", index, input.PreviousOutpoint.TransactionID,
input.PreviousOutpoint.Index, float64(partiallySignedInput.PrevOutput.Value)/float64(constants.SompiPerKaspa))
}
allInputSompi += partiallySignedInput.PrevOutput.Value
}
if conf.Verbose {
fmt.Println()
}
fmt.Printf("Output %d: \tRecipient: %s \tAmount: %.2f Kaspa\n",
index, addressString, float64(output.Value)/float64(constants.SompiPerKaspa))
allOutputSompi := uint64(0)
for index, output := range partiallySignedTransaction.Tx.Outputs {
scriptPublicKeyType, scriptPublicKeyAddress, err := txscript.ExtractScriptPubKeyAddress(output.ScriptPublicKey, conf.ActiveNetParams)
if err != nil {
return err
}
allOutputSompi += output.Value
addressString := scriptPublicKeyAddress.EncodeAddress()
if scriptPublicKeyType == txscript.NonStandardTy {
scriptPublicKeyHex := hex.EncodeToString(output.ScriptPublicKey.Script)
addressString = fmt.Sprintf("<Non-standard transaction script public key: %s>", scriptPublicKeyHex)
}
fmt.Printf("Output %d: \tRecipient: %s \tAmount: %.2f Kaspa\n",
index, addressString, float64(output.Value)/float64(constants.SompiPerKaspa))
allOutputSompi += output.Value
}
fmt.Println()
fmt.Printf("Fee:\t%d Sompi\n\n", allInputSompi-allOutputSompi)
}
fmt.Println()
fmt.Printf("Fee:\t%d Sompi\n", allInputSompi-allOutputSompi)
return nil
}
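
For readers following the fee line printed per transaction, a small standalone sketch of the arithmetic (fee = total input sompi minus total output sompi); the struct here is a hypothetical simplification of the wallet's partially signed transaction:

package main

import "fmt"

const sompiPerKaspa = 100_000_000

type tx struct {
	inputValues  []uint64 // previous-output values, in sompi
	outputValues []uint64 // output values, in sompi
}

// fee returns the implicit fee of a transaction: inputs minus outputs.
func fee(t tx) uint64 {
	var in, out uint64
	for _, v := range t.inputValues {
		in += v
	}
	for _, v := range t.outputValues {
		out += v
	}
	return in - out
}

func main() {
	t := tx{
		inputValues:  []uint64{2 * sompiPerKaspa},
		outputValues: []uint64{1 * sompiPerKaspa, 99_990_000},
	}
	fmt.Printf("Amount: %.2f Kaspa\n", float64(t.outputValues[0])/float64(sompiPerKaspa))
	fmt.Printf("Fee:\t%d Sompi\n", fee(t))
}
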

View File

@@ -35,9 +35,10 @@ func send(conf *sendConfig) error {
createUnsignedTransactionsResponse, err :=
daemonClient.CreateUnsignedTransactions(ctx, &pb.CreateUnsignedTransactionsRequest{
From: conf.FromAddresses,
Address: conf.ToAddress,
Amount: sendAmountSompi,
From: conf.FromAddresses,
Address: conf.ToAddress,
Amount: sendAmountSompi,
UseExistingChangeAddress: conf.UseExistingChangeAddress,
})
if err != nil {
return err
@@ -64,7 +65,12 @@ func send(conf *sendConfig) error {
fmt.Printf("Broadcasting %d transactions\n", len(signedTransactions))
}
response, err := daemonClient.Broadcast(ctx, &pb.BroadcastRequest{Transactions: signedTransactions})
// Since we waited for user input when getting the password, which could take an unbounded amount of time -
// create a new context for broadcast, to reset the timeout.
broadcastCtx, broadcastCancel := context.WithTimeout(context.Background(), daemonTimeout)
defer broadcastCancel()
response, err := daemonClient.Broadcast(broadcastCtx, &pb.BroadcastRequest{Transactions: signedTransactions})
if err != nil {
return err
}
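
A compact sketch of the context-reset pattern added here, assuming a placeholder daemonTimeout and a stubbed broadcast call; the point is that the deadline is measured from after the password prompt rather than from the original request:

package main

import (
	"context"
	"fmt"
	"time"
)

const daemonTimeout = 2 * time.Minute // placeholder; the real value comes from wallet configuration

func broadcast(ctx context.Context) error {
	// stand-in for daemonClient.Broadcast(ctx, ...)
	select {
	case <-time.After(10 * time.Millisecond):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	// Imagine the original request context was created before prompting for a
	// password and may already be near (or past) its deadline. Broadcasting
	// therefore gets a fresh context so the timeout restarts from this point.
	broadcastCtx, cancel := context.WithTimeout(context.Background(), daemonTimeout)
	defer cancel()

	if err := broadcast(broadcastCtx); err != nil {
		fmt.Println("broadcast failed:", err)
		return
	}
	fmt.Println("broadcast OK")
}
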

View File

@@ -3,6 +3,7 @@ package main
import (
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/kaspanet/kaspad/cmd/kaspawallet/keys"
@@ -66,12 +67,11 @@ func sign(conf *signConfig) error {
}
if areAllTransactionsFullySigned {
fmt.Println("The transaction is signed and ready to broadcast")
fmt.Fprintln(os.Stderr, "The transaction is signed and ready to broadcast")
} else {
fmt.Println("Successfully signed transaction")
fmt.Fprintln(os.Stderr, "Successfully signed transaction")
}
fmt.Println("Transaction: ")
fmt.Println(encodeTransactionsToHex(updatedPartiallySignedTransactions))
return nil
}
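
The move to os.Stderr keeps human-readable status off stdout; a tiny sketch of the resulting stream separation (the hex value is a placeholder):

package main

import (
	"fmt"
	"os"
)

func main() {
	// Human-readable status goes to stderr...
	fmt.Fprintln(os.Stderr, "The transaction is signed and ready to broadcast")
	// ...while only the machine-readable payload goes to stdout, so the
	// transaction hex can be piped or redirected cleanly.
	fmt.Println("0100ab...") // placeholder transaction hex
}
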

View File

@@ -3,5 +3,5 @@ package main
import "github.com/kaspanet/kaspad/cmd/kaspawallet/daemon/server"
func startDaemon(conf *startDaemonConfig) error {
return server.Start(conf.NetParams(), conf.Listen, conf.RPCServer, conf.KeysFile, conf.Profile)
return server.Start(conf.NetParams(), conf.Listen, conf.RPCServer, conf.KeysFile, conf.Profile, conf.Timeout)
}

View File

@@ -61,17 +61,24 @@ type consensus struct {
blocksWithTrustedDataDAAWindowStore model.BlocksWithTrustedDataDAAWindowStore
consensusEventsChan chan externalapi.ConsensusEvent
virtualNotUpdated bool
}
func (s *consensus) ValidateAndInsertBlockWithTrustedData(block *externalapi.BlockWithTrustedData, validateUTXO bool) (*externalapi.VirtualChangeSet, error) {
// In order to prevent a situation where the consensus lock is held for too long, we
// release the lock each time we resolve 100 blocks.
// Note: `virtualResolveChunk` should be smaller than `params.FinalityDuration` in order to avoid a situation
// where UpdatePruningPointByVirtual skips a pruning point.
const virtualResolveChunk = 100
func (s *consensus) ValidateAndInsertBlockWithTrustedData(block *externalapi.BlockWithTrustedData, validateUTXO bool) error {
s.lock.Lock()
defer s.lock.Unlock()
virtualChangeSet, _, err := s.blockProcessor.ValidateAndInsertBlockWithTrustedData(block, validateUTXO)
_, _, err := s.blockProcessor.ValidateAndInsertBlockWithTrustedData(block, validateUTXO)
if err != nil {
return nil, err
return err
}
return virtualChangeSet, nil
return nil
}
// Init initializes consensus
@@ -193,21 +200,73 @@ func (s *consensus) BuildBlockTemplate(coinbaseData *externalapi.DomainCoinbaseD
// ValidateAndInsertBlock validates the given block and, if valid, applies it
// to the current state
func (s *consensus) ValidateAndInsertBlock(block *externalapi.DomainBlock, shouldValidateAgainstUTXO bool) (*externalapi.VirtualChangeSet, error) {
func (s *consensus) ValidateAndInsertBlock(block *externalapi.DomainBlock, updateVirtual bool) error {
if updateVirtual {
s.lock.Lock()
if s.virtualNotUpdated {
// We enter the loop in locked state
for {
_, isCompletelyResolved, err := s.resolveVirtualChunkNoLock(virtualResolveChunk)
if err != nil {
s.lock.Unlock()
return err
}
if isCompletelyResolved {
// Make sure we enter the block insertion function w/o releasing the lock.
// Otherwise, we might actually enter it in `s.virtualNotUpdated == true` state
_, err = s.validateAndInsertBlockNoLock(block, updateVirtual)
// Finally, unlock for the last iteration and return
s.lock.Unlock()
if err != nil {
return err
}
return nil
}
// Unlock to allow other threads to enter consensus
s.lock.Unlock()
// Lock for the next iteration
s.lock.Lock()
}
}
_, err := s.validateAndInsertBlockNoLock(block, updateVirtual)
s.lock.Unlock()
if err != nil {
return err
}
return nil
}
return s.validateAndInsertBlockWithLock(block, updateVirtual)
}
func (s *consensus) validateAndInsertBlockWithLock(block *externalapi.DomainBlock, updateVirtual bool) error {
s.lock.Lock()
defer s.lock.Unlock()
virtualChangeSet, blockStatus, err := s.blockProcessor.ValidateAndInsertBlock(block, shouldValidateAgainstUTXO)
_, err := s.validateAndInsertBlockNoLock(block, updateVirtual)
if err != nil {
return err
}
return nil
}
func (s *consensus) validateAndInsertBlockNoLock(block *externalapi.DomainBlock, updateVirtual bool) (*externalapi.VirtualChangeSet, error) {
virtualChangeSet, blockStatus, err := s.blockProcessor.ValidateAndInsertBlock(block, updateVirtual)
if err != nil {
return nil, err
}
// If the block has a body and yet the virtual was not updated, signify that the virtual is in a non-updated state
if !updateVirtual && blockStatus != externalapi.StatusHeaderOnly {
s.virtualNotUpdated = true
}
err = s.sendBlockAddedEvent(block, blockStatus)
if err != nil {
return nil, err
}
err = s.sendVirtualChangedEvent(virtualChangeSet, shouldValidateAgainstUTXO)
err = s.sendVirtualChangedEvent(virtualChangeSet, updateVirtual)
if err != nil {
return nil, err
}
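
A stripped-down sketch of the lock discipline in the loop above: resolve the virtual in bounded chunks, release the consensus lock between chunks, and perform the final chunk plus the block insertion without releasing the lock in between. The types and the resolve step are stand-ins, not the real consensus implementation:

package main

import (
	"fmt"
	"sync"
)

const virtualResolveChunk = 100 // mirrors the constant introduced in the diff

type consensusSketch struct {
	lock              sync.Mutex
	virtualNotUpdated bool
	pending           int // blocks still awaiting virtual resolution (stand-in)
}

// resolveChunkNoLock resolves up to max blocks; the caller must hold the lock.
func (c *consensusSketch) resolveChunkNoLock(max int) (completelyResolved bool) {
	if c.pending > max {
		c.pending -= max
		return false
	}
	c.pending = 0
	c.virtualNotUpdated = false
	return true
}

func (c *consensusSketch) insertBlockNoLock() {
	// stand-in for blockProcessor.ValidateAndInsertBlock
}

func (c *consensusSketch) validateAndInsertBlock() {
	c.lock.Lock()
	if c.virtualNotUpdated {
		for {
			if c.resolveChunkNoLock(virtualResolveChunk) {
				// Insert while still holding the lock, so we never observe
				// virtualNotUpdated flipping back between resolve and insert.
				c.insertBlockNoLock()
				c.lock.Unlock()
				return
			}
			// Release between chunks so other consensus callers are not starved.
			c.lock.Unlock()
			c.lock.Lock()
		}
	}
	c.insertBlockNoLock()
	c.lock.Unlock()
}

func main() {
	c := &consensusSketch{virtualNotUpdated: true, pending: 350}
	c.validateAndInsertBlock()
	fmt.Println("virtual fully resolved:", !c.virtualNotUpdated)
}
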
@@ -230,7 +289,7 @@ func (s *consensus) sendBlockAddedEvent(block *externalapi.DomainBlock, blockSta
}
func (s *consensus) sendVirtualChangedEvent(virtualChangeSet *externalapi.VirtualChangeSet, wasVirtualUpdated bool) error {
if !wasVirtualUpdated || s.consensusEventsChan == nil {
if !wasVirtualUpdated || s.consensusEventsChan == nil || virtualChangeSet == nil {
return nil
}
@@ -426,6 +485,30 @@ func (s *consensus) GetBlockAcceptanceData(blockHash *externalapi.DomainHash) (e
return s.acceptanceDataStore.Get(s.databaseContext, stagingArea, blockHash)
}
func (s *consensus) GetBlocksAcceptanceData(blockHashes []*externalapi.DomainHash) ([]externalapi.AcceptanceData, error) {
s.lock.Lock()
defer s.lock.Unlock()
stagingArea := model.NewStagingArea()
blocksAcceptanceData := make([]externalapi.AcceptanceData, len(blockHashes))
for i, blockHash := range blockHashes {
err := s.validateBlockHashExists(stagingArea, blockHash)
if err != nil {
return nil, err
}
acceptanceData, err := s.acceptanceDataStore.Get(s.databaseContext, stagingArea, blockHash)
if err != nil {
return nil, err
}
blocksAcceptanceData[i] = acceptanceData
}
return blocksAcceptanceData, nil
}
func (s *consensus) GetHashesBetween(lowHash, highHash *externalapi.DomainHash, maxBlocks uint64) (
hashes []*externalapi.DomainHash, actualHighHash *externalapi.DomainHash, err error) {
@@ -837,18 +920,46 @@ func (s *consensus) PopulateMass(transaction *externalapi.DomainTransaction) {
s.transactionValidator.PopulateMass(transaction)
}
func (s *consensus) ResolveVirtual() (*externalapi.VirtualChangeSet, bool, error) {
func (s *consensus) ResolveVirtual(progressReportCallback func(uint64, uint64)) error {
virtualDAAScoreStart, err := s.GetVirtualDAAScore()
if err != nil {
return err
}
for i := 0; ; i++ {
if i%10 == 0 && progressReportCallback != nil {
virtualDAAScore, err := s.GetVirtualDAAScore()
if err != nil {
return err
}
progressReportCallback(virtualDAAScoreStart, virtualDAAScore)
}
_, isCompletelyResolved, err := s.resolveVirtualChunkWithLock(virtualResolveChunk)
if err != nil {
return err
}
if isCompletelyResolved {
break
}
}
return nil
}
func (s *consensus) resolveVirtualChunkWithLock(maxBlocksToResolve uint64) (*externalapi.VirtualChangeSet, bool, error) {
s.lock.Lock()
defer s.lock.Unlock()
// In order to prevent a situation that the consensus lock is held for too much time, we
// release the lock each time resolve 100 blocks.
// Note: maxBlocksToResolve should be smaller than finality interval in order to avoid a situation
// where UpdatePruningPointByVirtual skips a pruning point.
virtualChangeSet, isCompletelyResolved, err := s.consensusStateManager.ResolveVirtual(100)
return s.resolveVirtualChunkNoLock(maxBlocksToResolve)
}
func (s *consensus) resolveVirtualChunkNoLock(maxBlocksToResolve uint64) (*externalapi.VirtualChangeSet, bool, error) {
virtualChangeSet, isCompletelyResolved, err := s.consensusStateManager.ResolveVirtual(maxBlocksToResolve)
if err != nil {
return nil, false, err
}
s.virtualNotUpdated = !isCompletelyResolved
stagingArea := model.NewStagingArea()
err = s.pruningManager.UpdatePruningPointByVirtual(stagingArea)
@@ -861,6 +972,11 @@ func (s *consensus) ResolveVirtual() (*externalapi.VirtualChangeSet, bool, error
return nil, false, err
}
err = s.pruningManager.UpdatePruningPointIfRequired()
if err != nil {
return nil, false, err
}
err = s.sendVirtualChangedEvent(virtualChangeSet, true)
if err != nil {
return nil, false, err

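And a sketch of the new ResolveVirtual flow as a whole, with simulated DAA scores: loop over lock-bounded chunks until resolution completes, invoking the optional progress callback every ten iterations as the diff does:

package main

import "fmt"

// resolveVirtual mimics the chunked loop in the diff: resolve bounded chunks
// until done, reporting progress through the optional callback every 10 rounds.
func resolveVirtual(totalBlocks, chunk uint64, progress func(startScore, currentScore uint64)) {
	startScore := uint64(1000) // stand-in for the virtual DAA score when resolution starts
	resolved := uint64(0)
	for i := 0; ; i++ {
		if i%10 == 0 && progress != nil {
			progress(startScore, startScore+resolved) // stand-in for the current virtual DAA score
		}
		if totalBlocks-resolved <= chunk {
			resolved = totalBlocks
			break // the equivalent of isCompletelyResolved == true
		}
		resolved += chunk
	}
}

func main() {
	resolveVirtual(1050, 100, func(start, current uint64) {
		fmt.Printf("resolving virtual: started at DAA score %d, currently at %d\n", start, current)
	})
	fmt.Println("virtual completely resolved")
}
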
View File

@@ -27,7 +27,7 @@ func TestConsensus_GetBlockInfo(t *testing.T) {
newHeader := invalidBlock.Header.ToMutable()
newHeader.SetTimeInMilliseconds(0)
invalidBlock.Header = newHeader.ToImmutable()
_, err = consensus.ValidateAndInsertBlock(invalidBlock, true)
err = consensus.ValidateAndInsertBlock(invalidBlock, true)
if !errors.Is(err, ruleerrors.ErrTimeTooOld) {
t.Fatalf("Expected block to be invalid with err: %v, instead found: %v", ruleerrors.ErrTimeTooOld, err)
}
@@ -55,7 +55,7 @@ func TestConsensus_GetBlockInfo(t *testing.T) {
t.Fatalf("consensus.BuildBlock with an empty coinbase shouldn't fail: %v", err)
}
_, err = consensus.ValidateAndInsertBlock(validBlock, true)
err = consensus.ValidateAndInsertBlock(validBlock, true)
if err != nil {
t.Fatalf("consensus.ValidateAndInsertBlock with a block straight from consensus.BuildBlock should not fail: %v", err)
}

View File

@@ -515,6 +515,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
blocksWithTrustedDataDAAWindowStore: daaWindowStore,
consensusEventsChan: consensusEventsChan,
virtualNotUpdated: true,
}
if isOldReachabilityInitialized {
@@ -539,6 +540,8 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
return nil, false, err
}
// If the virtual moved before shutdown but the pruning point hasn't, we
// move it if needed.
stagingArea := model.NewStagingArea()
err = pruningManager.UpdatePruningPointByVirtual(stagingArea)
if err != nil {
@@ -550,6 +553,11 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
return nil, false, err
}
err = pruningManager.UpdatePruningPointIfRequired()
if err != nil {
return nil, false, err
}
return c, false, nil
}

View File

@@ -33,7 +33,7 @@ func TestFinality(t *testing.T) {
return nil, err
}
_, err = consensus.ValidateAndInsertBlock(block, true)
err = consensus.ValidateAndInsertBlock(block, true)
if err != nil {
return nil, err
}
@@ -201,7 +201,7 @@ func TestBoundedMergeDepth(t *testing.T) {
return nil, false // for some reason go doesn't recognize that t.Fatalf never returns
}
_, err = consensus.ValidateAndInsertBlock(block, true)
err = consensus.ValidateAndInsertBlock(block, true)
if err == nil {
return block, false
} else if errors.Is(err, ruleerrors.ErrViolatingBoundedMergeDepth) {
@@ -213,7 +213,7 @@ func TestBoundedMergeDepth(t *testing.T) {
}
processBlock := func(consensus testapi.TestConsensus, block *externalapi.DomainBlock, name string) {
_, err := consensus.ValidateAndInsertBlock(block, true)
err := consensus.ValidateAndInsertBlock(block, true)
if err != nil {
t.Fatalf("TestBoundedMergeDepth: %s got unexpected error from ProcessBlock: %+v", name, err)
@@ -225,7 +225,7 @@ func TestBoundedMergeDepth(t *testing.T) {
if err != nil {
t.Fatalf("TestBoundedMergeDepth: Failed building block: %+v", err)
}
_, err = consensus.ValidateAndInsertBlock(block, true)
err = consensus.ValidateAndInsertBlock(block, true)
if err != nil {
t.Fatalf("TestBoundedMergeDepth: Failed Inserting block to consensus: %v", err)
}
@@ -268,7 +268,7 @@ func TestBoundedMergeDepth(t *testing.T) {
t.Fatalf("GetBlockHeader: %+v", err)
}
_, err = tcSyncee.ValidateAndInsertBlock(block, true)
err = tcSyncee.ValidateAndInsertBlock(block, true)
if err != nil {
t.Fatalf("ValidateAndInsertBlock %d: %+v", i, err)
}
@@ -556,7 +556,7 @@ func TestFinalityResolveVirtual(t *testing.T) {
block.Header = mutableHeader.ToImmutable()
}
_, err = tcAttacker.ValidateAndInsertBlock(block, true)
err = tcAttacker.ValidateAndInsertBlock(block, true)
if err != nil {
panic(err)
}
@@ -583,24 +583,19 @@ func TestFinalityResolveVirtual(t *testing.T) {
t.Logf("Side chain tip (%s) blue score %d", sideChainTipHash, sideChainTipGHOSTDAGData.BlueScore())
for _, block := range sideChain {
_, err := tc.ValidateAndInsertBlock(block, false)
err := tc.ValidateAndInsertBlock(block, false)
if err != nil {
panic(err)
}
}
for i := 0; ; i++ {
_, isCompletelyResolved, err := tc.ResolveVirtual()
if err != nil {
panic(err)
}
if isCompletelyResolved {
t.Log("Resolved virtual")
break
}
err = tc.ResolveVirtual(nil)
if err != nil {
panic(err)
}
t.Log("Resolved virtual")
sideChainTipGHOSTDAGData, err = tc.GHOSTDAGDataStore().Get(tc.DatabaseContext(), stagingArea, sideChainTipHash, false)
if err != nil {
panic(err)

View File

@@ -13,6 +13,16 @@ type BlockInfo struct {
MergeSetReds []*DomainHash
}
// HasHeader returns whether the block exists and has a valid header
func (bi *BlockInfo) HasHeader() bool {
return bi.Exists && bi.BlockStatus != StatusInvalid
}
// HasBody returns whether the block exists and has a valid body
func (bi *BlockInfo) HasBody() bool {
return bi.Exists && bi.BlockStatus != StatusInvalid && bi.BlockStatus != StatusHeaderOnly
}
// Clone returns a clone of BlockInfo
func (bi *BlockInfo) Clone() *BlockInfo {
return &BlockInfo{

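A short, self-contained illustration of the two new predicates, using hypothetical status constants that mirror (but are not) the externalapi block statuses:

package main

import "fmt"

type blockStatus int

const (
	statusInvalid blockStatus = iota
	statusHeaderOnly
	statusUTXOValid
)

type blockInfo struct {
	exists bool
	status blockStatus
}

// hasHeader: the block is known and not invalid (it may still be header-only).
func (bi blockInfo) hasHeader() bool { return bi.exists && bi.status != statusInvalid }

// hasBody: the block is known, not invalid, and not header-only.
func (bi blockInfo) hasBody() bool {
	return bi.exists && bi.status != statusInvalid && bi.status != statusHeaderOnly
}

func main() {
	headerOnly := blockInfo{exists: true, status: statusHeaderOnly}
	full := blockInfo{exists: true, status: statusUTXOValid}
	fmt.Println(headerOnly.hasHeader(), headerOnly.hasBody()) // true false
	fmt.Println(full.hasHeader(), full.hasBody())             // true true
}
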
View File

@@ -5,8 +5,8 @@ type Consensus interface {
Init(skipAddingGenesis bool) error
BuildBlock(coinbaseData *DomainCoinbaseData, transactions []*DomainTransaction) (*DomainBlock, error)
BuildBlockTemplate(coinbaseData *DomainCoinbaseData, transactions []*DomainTransaction) (*DomainBlockTemplate, error)
ValidateAndInsertBlock(block *DomainBlock, shouldValidateAgainstUTXO bool) (*VirtualChangeSet, error)
ValidateAndInsertBlockWithTrustedData(block *BlockWithTrustedData, validateUTXO bool) (*VirtualChangeSet, error)
ValidateAndInsertBlock(block *DomainBlock, updateVirtual bool) error
ValidateAndInsertBlockWithTrustedData(block *BlockWithTrustedData, validateUTXO bool) error
ValidateTransactionAndPopulateWithConsensusData(transaction *DomainTransaction) error
ImportPruningPoints(pruningPoints []BlockHeader) error
BuildPruningPointProof() (*PruningPointProof, error)
@@ -19,6 +19,7 @@ type Consensus interface {
GetBlockInfo(blockHash *DomainHash) (*BlockInfo, error)
GetBlockRelations(blockHash *DomainHash) (parents []*DomainHash, children []*DomainHash, err error)
GetBlockAcceptanceData(blockHash *DomainHash) (AcceptanceData, error)
GetBlocksAcceptanceData(blockHashes []*DomainHash) ([]AcceptanceData, error)
GetHashesBetween(lowHash, highHash *DomainHash, maxBlocks uint64) (hashes []*DomainHash, actualHighHash *DomainHash, err error)
GetAnticone(blockHash, contextHash *DomainHash, maxBlocks uint64) (hashes []*DomainHash, err error)
@@ -47,7 +48,7 @@ type Consensus interface {
Anticone(blockHash *DomainHash) ([]*DomainHash, error)
EstimateNetworkHashesPerSecond(startHash *DomainHash, windowSize int) (uint64, error)
PopulateMass(transaction *DomainTransaction)
ResolveVirtual() (*VirtualChangeSet, bool, error)
ResolveVirtual(progressReportCallback func(uint64, uint64)) error
BlockDAAWindowHashes(blockHash *DomainHash) ([]*DomainHash, error)
TrustedDataDataDAAHeader(trustedBlockHash, daaBlockHash *DomainHash, daaBlockWindowIndex uint64) (*TrustedDataDataDAAHeader, error)
TrustedBlockAssociatedGHOSTDAGDataBlockHashes(blockHash *DomainHash) ([]*DomainHash, error)

View File

@@ -2,6 +2,7 @@ package externalapi
import (
"bytes"
"encoding/binary"
"fmt"
"github.com/pkg/errors"
@@ -242,6 +243,23 @@ func (spk *ScriptPublicKey) Equal(other *ScriptPublicKey) bool {
return bytes.Equal(spk.Script, other.Script)
}
// String stringifies a ScriptPublicKey.
func (spk *ScriptPublicKey) String() string {
var versionBytes = make([]byte, 2) // uint16
binary.LittleEndian.PutUint16(versionBytes, spk.Version)
versionString := string(versionBytes)
scriptString := string(spk.Script)
return versionString + scriptString
}
// NewScriptPublicKeyFromString converts the given string to a scriptPublicKey
func NewScriptPublicKeyFromString(ScriptPublicKeyString string) *ScriptPublicKey {
bytes := []byte(ScriptPublicKeyString)
version := binary.LittleEndian.Uint16(bytes[:2])
script := bytes[2:]
return &ScriptPublicKey{Script: script, Version: version}
}
// DomainTransactionOutput represents a Kaspad transaction output
type DomainTransactionOutput struct {
Value uint64

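A self-contained round-trip sketch of the string encoding added here, two little-endian version bytes followed by the raw script bytes; one plausible motivation is using the result as a map key, though that is an assumption rather than something stated in this diff:

package main

import (
	"encoding/binary"
	"fmt"
)

type scriptPublicKey struct {
	script  []byte
	version uint16
}

// toString packs the version as 2 little-endian bytes followed by the script.
func (spk scriptPublicKey) toString() string {
	versionBytes := make([]byte, 2)
	binary.LittleEndian.PutUint16(versionBytes, spk.version)
	return string(versionBytes) + string(spk.script)
}

// fromString reverses toString.
func fromString(s string) scriptPublicKey {
	b := []byte(s)
	return scriptPublicKey{version: binary.LittleEndian.Uint16(b[:2]), script: b[2:]}
}

func main() {
	original := scriptPublicKey{script: []byte{0xaa, 0xbb, 0xcc}, version: 1}
	decoded := fromString(original.toString())
	fmt.Println(decoded.version, decoded.script) // 1 [170 187 204]
}
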
View File

@@ -5,7 +5,7 @@ package externalapi
// score of the block that accepts the tx, its public key script, and how
// much it pays.
type UTXOEntry interface {
Amount() uint64
Amount() uint64 // Utxo amount in Sompis
ScriptPublicKey() *ScriptPublicKey // The public key script for the output.
BlockDAAScore() uint64 // Daa score of the block accepting the tx.
IsCoinbase() bool

View File

@@ -47,6 +47,9 @@ type TestConsensus interface {
AddUTXOInvalidBlock(parentHashes []*externalapi.DomainHash) (*externalapi.DomainHash,
*externalapi.VirtualChangeSet, error)
UpdatePruningPointByVirtual() error
ResolveVirtualWithMaxParam(maxBlocksToResolve uint64) (*externalapi.VirtualChangeSet, bool, error)
MineJSON(r io.Reader, blockType MineJSONBlockType) (tips []*externalapi.DomainHash, err error)
ToJSON(w io.Writer) error

View File

@@ -6,7 +6,6 @@ import (
"fmt"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/processes/consensusstatemanager"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/multiset"
@@ -165,12 +164,7 @@ func (bp *blockProcessor) validateAndInsertBlock(stagingArea *model.StagingArea,
if reversalData != nil {
err = bp.consensusStateManager.ReverseUTXODiffs(blockHash, reversalData)
// It's still not known what causes this error, but we can ignore it and not reverse the UTXO diffs
// and harm performance in some cases.
// TODO: Investigate why this error happens in the first place, and remove the workaround.
if errors.Is(err, consensusstatemanager.ErrReverseUTXODiffsUTXODiffChildNotFound) {
log.Errorf("Could not reverse UTXO diffs while resolving virtual: %s", err)
} else if err != nil {
if err != nil {
return nil, externalapi.StatusInvalid, err
}
}

View File

@@ -79,7 +79,7 @@ func TestBlockStatus(t *testing.T) {
disqualifiedBlock.Header.PruningPoint(),
)
_, err = tc.ValidateAndInsertBlock(disqualifiedBlock, true)
err = tc.ValidateAndInsertBlock(disqualifiedBlock, true)
if err != nil {
t.Fatalf("ValidateAndInsertBlock: %+v", err)
}
@@ -106,7 +106,7 @@ func TestBlockStatus(t *testing.T) {
disqualifiedBlock.Header.PruningPoint(),
)
_, err = tc.ValidateAndInsertBlock(invalidBlock, true)
err = tc.ValidateAndInsertBlock(invalidBlock, true)
if err == nil {
t.Fatalf("block is expected to be invalid")
}
@@ -139,11 +139,11 @@ func TestValidateAndInsertErrors(t *testing.T) {
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}
_, err = tc.ValidateAndInsertBlock(blockWithStatusInvalid, true)
err = tc.ValidateAndInsertBlock(blockWithStatusInvalid, true)
if err == nil {
t.Fatalf("Test ValidateAndInsertBlock: Expected an error, because the block is invalid.")
}
_, err = tc.ValidateAndInsertBlock(blockWithStatusInvalid, true)
err = tc.ValidateAndInsertBlock(blockWithStatusInvalid, true)
if err == nil || !errors.Is(err, ruleerrors.ErrKnownInvalid) {
t.Fatalf("Expected block to be invalid with err: %v, instead found: %v", ruleerrors.ErrKnownInvalid, err)
}
@@ -155,12 +155,12 @@ func TestValidateAndInsertErrors(t *testing.T) {
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}
_, err = tc.ValidateAndInsertBlock(block, true)
err = tc.ValidateAndInsertBlock(block, true)
if err != nil {
t.Fatalf("ValidateAndInsertBlock: %+v", err)
}
// resend the same block.
_, err = tc.ValidateAndInsertBlock(block, true)
err = tc.ValidateAndInsertBlock(block, true)
if err == nil || !errors.Is(err, ruleerrors.ErrDuplicateBlock) {
t.Fatalf("Expected block to be invalid with err: %v, instead found: %v", ruleerrors.ErrDuplicateBlock, err)
}
@@ -173,12 +173,12 @@ func TestValidateAndInsertErrors(t *testing.T) {
t.Fatalf("AddBlock: %+v", err)
}
onlyHeader.Transactions = []*externalapi.DomainTransaction{}
_, err = tc.ValidateAndInsertBlock(onlyHeader, true)
err = tc.ValidateAndInsertBlock(onlyHeader, true)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}
// resend the same header.
_, err = tc.ValidateAndInsertBlock(onlyHeader, true)
err = tc.ValidateAndInsertBlock(onlyHeader, true)
if err == nil || !errors.Is(err, ruleerrors.ErrDuplicateBlock) {
t.Fatalf("Expected block to be invalid with err: %v, instead found: %v", ruleerrors.ErrDuplicateBlock, err)
}

View File

@@ -26,7 +26,7 @@ func addBlock(tc testapi.TestConsensus, parentHashes []*externalapi.DomainHash,
}
blockHash := consensushashing.BlockHash(block)
_, err = tc.ValidateAndInsertBlock(block, true)
err = tc.ValidateAndInsertBlock(block, true)
if err != nil {
t.Fatalf("ValidateAndInsertBlock: %+v", err)
}
@@ -44,7 +44,7 @@ func TestValidateAndInsertImportedPruningPoint(t *testing.T) {
consensusConfig.K = 0
consensusConfig.PruningProofM = 1
syncConsensuses := func(tcSyncerRef, tcSynceeRef *testapi.TestConsensus) {
syncConsensuses := func(tcSyncerRef, tcSynceeRef *testapi.TestConsensus, updatePruningPointJustAfterImportingPruningPoint bool) {
tcSyncer, tcSyncee := *tcSyncerRef, *tcSynceeRef
pruningPointProof, err := tcSyncer.BuildPruningPointProof()
if err != nil {
@@ -133,7 +133,7 @@ func TestValidateAndInsertImportedPruningPoint(t *testing.T) {
})
}
_, err = synceeStaging.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
err = synceeStaging.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
if err != nil {
t.Fatalf("ValidateAndInsertBlockWithTrustedData: %+v", err)
}
@@ -169,7 +169,7 @@ func TestValidateAndInsertImportedPruningPoint(t *testing.T) {
t.Fatalf("GetBlockHeader: %+v", err)
}
_, err = synceeStaging.ValidateAndInsertBlock(&externalapi.DomainBlock{Header: header}, false)
err = synceeStaging.ValidateAndInsertBlock(&externalapi.DomainBlock{Header: header}, false)
if err != nil {
t.Fatalf("ValidateAndInsertBlock %d: %+v", i, err)
}
@@ -236,6 +236,13 @@ func TestValidateAndInsertImportedPruningPoint(t *testing.T) {
t.Fatalf("ValidateAndInsertImportedPruningPoint: %+v", err)
}
if updatePruningPointJustAfterImportingPruningPoint {
err = synceeStaging.UpdatePruningPointByVirtual()
if err != nil {
t.Fatal(err)
}
}
emptyCoinbase := &externalapi.DomainCoinbaseData{
ScriptPublicKey: &externalapi.ScriptPublicKey{
Script: nil,
@@ -266,7 +273,7 @@ func TestValidateAndInsertImportedPruningPoint(t *testing.T) {
t.Fatalf("GetBlock: %+v", err)
}
_, err = synceeStaging.ValidateAndInsertBlock(block, true)
err = synceeStaging.ValidateAndInsertBlock(block, true)
if err != nil {
t.Fatalf("ValidateAndInsertBlock: %+v", err)
}
@@ -292,7 +299,7 @@ func TestValidateAndInsertImportedPruningPoint(t *testing.T) {
t.Fatalf("GetBlock: %+v", err)
}
_, err = synceeStaging.ValidateAndInsertBlock(tip, true)
err = synceeStaging.ValidateAndInsertBlock(tip, true)
if err != nil {
t.Fatalf("ValidateAndInsertBlock: %+v", err)
}
@@ -339,7 +346,7 @@ func TestValidateAndInsertImportedPruningPoint(t *testing.T) {
t.Fatalf("GetBlock: %+v", err)
}
_, err = tcSyncee1.ValidateAndInsertBlock(block, true)
err = tcSyncee1.ValidateAndInsertBlock(block, true)
if err != nil {
t.Fatalf("ValidateAndInsertBlock: %+v", err)
}
@@ -386,7 +393,7 @@ func TestValidateAndInsertImportedPruningPoint(t *testing.T) {
}
tcSyncee1Ref := &tcSyncee1
syncConsensuses(&tcSyncer, tcSyncee1Ref)
syncConsensuses(&tcSyncer, tcSyncee1Ref, false)
// Test a situation where a consensus with pruned headers syncs another fresh consensus.
tcSyncee2, teardownSyncee2, err := factory.NewTestConsensus(consensusConfig, "TestValidateAndInsertPruningPointSyncee2")
@@ -395,7 +402,17 @@ func TestValidateAndInsertImportedPruningPoint(t *testing.T) {
}
defer teardownSyncee2(false)
syncConsensuses(tcSyncee1Ref, &tcSyncee2)
syncConsensuses(tcSyncee1Ref, &tcSyncee2, false)
// Check the regular sync but try to update the pruning point after the pruning point was imported. It tests a situation where the node
// was restarted before the virtual was resolved and then it calls UpdatePruningPointByVirtual on init.
tcSyncee3, teardownSyncee3, err := factory.NewTestConsensus(consensusConfig, "TestValidateAndInsertPruningPointSyncee3")
if err != nil {
t.Fatalf("Error setting up tcSyncee1: %+v", err)
}
defer teardownSyncee3(false)
syncConsensuses(&tcSyncer, &tcSyncee3, true)
})
}
@@ -461,7 +478,7 @@ func TestGetPruningPointUTXOs(t *testing.T) {
if err != nil {
t.Fatalf("Error building block above genesis: %+v", err)
}
_, err = testConsensus.ValidateAndInsertBlock(blockAboveGenesis, true)
err = testConsensus.ValidateAndInsertBlock(blockAboveGenesis, true)
if err != nil {
t.Fatalf("Error validating and inserting block above genesis: %+v", err)
}
@@ -473,7 +490,7 @@ func TestGetPruningPointUTXOs(t *testing.T) {
if err != nil {
t.Fatalf("Error building block with spendable coinbase: %+v", err)
}
_, err = testConsensus.ValidateAndInsertBlock(blockWithSpendableCoinbase, true)
err = testConsensus.ValidateAndInsertBlock(blockWithSpendableCoinbase, true)
if err != nil {
t.Fatalf("Error validating and inserting block with spendable coinbase: %+v", err)
}
@@ -512,7 +529,7 @@ func TestGetPruningPointUTXOs(t *testing.T) {
if err != nil {
t.Fatalf("Error building including block: %+v", err)
}
_, err = testConsensus.ValidateAndInsertBlock(includingBlock, true)
err = testConsensus.ValidateAndInsertBlock(includingBlock, true)
if err != nil {
t.Fatalf("Error validating and inserting including block: %+v", err)
}
@@ -523,7 +540,7 @@ func TestGetPruningPointUTXOs(t *testing.T) {
if err != nil {
t.Fatalf("Error building block: %+v", err)
}
_, err = testConsensus.ValidateAndInsertBlock(block, true)
err = testConsensus.ValidateAndInsertBlock(block, true)
if err != nil {
t.Fatalf("Error validating and inserting block: %+v", err)
}
@@ -619,7 +636,7 @@ func BenchmarkGetPruningPointUTXOs(b *testing.B) {
if err != nil {
b.Fatalf("Error building block with spendable coinbase: %+v", err)
}
_, err = testConsensus.ValidateAndInsertBlock(blockWithSpendableCoinbase, true)
err = testConsensus.ValidateAndInsertBlock(blockWithSpendableCoinbase, true)
if err != nil {
b.Fatalf("Error validating and inserting block with spendable coinbase: %+v", err)
}
@@ -657,7 +674,7 @@ func BenchmarkGetPruningPointUTXOs(b *testing.B) {
if err != nil {
b.Fatalf("Error building block: %+v", err)
}
_, err = testConsensus.ValidateAndInsertBlock(block, true)
err = testConsensus.ValidateAndInsertBlock(block, true)
if err != nil {
b.Fatalf("Error validating and inserting block: %+v", err)
}
@@ -677,7 +694,7 @@ func BenchmarkGetPruningPointUTXOs(b *testing.B) {
if err != nil {
b.Fatalf("Error building block: %+v", err)
}
_, err = testConsensus.ValidateAndInsertBlock(block, true)
err = testConsensus.ValidateAndInsertBlock(block, true)
if err != nil {
b.Fatalf("Error validating and inserting block: %+v", err)
}

View File

@@ -62,7 +62,7 @@ func TestCheckBlockIsNotPruned(t *testing.T) {
}
}
_, err = tc.ValidateAndInsertBlock(beforePruningBlock, true)
err = tc.ValidateAndInsertBlock(beforePruningBlock, true)
if !errors.Is(err, ruleerrors.ErrPrunedBlock) {
t.Fatalf("Unexpected error: %+v", err)
}
@@ -117,7 +117,7 @@ func TestCheckParentBlockBodiesExist(t *testing.T) {
}
// Add only the header of anticonePruningBlock
_, err = tc.ValidateAndInsertBlock(&externalapi.DomainBlock{
err = tc.ValidateAndInsertBlock(&externalapi.DomainBlock{
Header: anticonePruningBlock.Header,
Transactions: nil,
}, true)
@@ -143,7 +143,7 @@ func TestCheckParentBlockBodiesExist(t *testing.T) {
// Add anticonePruningBlock's body and check that it's valid to point to
// a header only block in the past of the pruning point.
_, err = tc.ValidateAndInsertBlock(anticonePruningBlock, true)
err = tc.ValidateAndInsertBlock(anticonePruningBlock, true)
if err != nil {
t.Fatalf("ValidateAndInsertBlock: %+v", err)
}
@@ -189,7 +189,7 @@ func TestIsFinalizedTransaction(t *testing.T) {
if err != nil {
t.Fatalf("Error getting block: %+v", err)
}
_, err = tc.ValidateAndInsertBlock(block, true)
err = tc.ValidateAndInsertBlock(block, true)
if err != nil {
t.Fatalf("Error Inserting block: %+v", err)
}

View File

@@ -1052,14 +1052,14 @@ func CheckBlockHashMerkleRoot(t *testing.T, tc testapi.TestConsensus, consensusC
blockWithInvalidMerkleRoot := block.Clone()
blockWithInvalidMerkleRoot.Transactions[0].Version += 1
_, err = tc.ValidateAndInsertBlock(blockWithInvalidMerkleRoot, true)
err = tc.ValidateAndInsertBlock(blockWithInvalidMerkleRoot, true)
if !errors.Is(err, ruleerrors.ErrBadMerkleRoot) {
t.Fatalf("Unexpected error: %+v", err)
}
// Check that a block with invalid merkle root is not marked as invalid
// and can be re-added with the right transactions.
_, err = tc.ValidateAndInsertBlock(block, true)
err = tc.ValidateAndInsertBlock(block, true)
if err != nil {
t.Fatalf("ValidateAndInsertBlock: %+v", err)
}

View File

@@ -34,7 +34,7 @@ func TestValidateMedianTime(t *testing.T) {
newHeader := block.Header.ToMutable()
newHeader.SetTimeInMilliseconds(blockTime)
block.Header = newHeader.ToImmutable()
_, err = tc.ValidateAndInsertBlock(block, true)
err = tc.ValidateAndInsertBlock(block, true)
if !errors.Is(err, expectedErr) {
t.Fatalf("expected error %s but got %+v", expectedErr, err)
}
@@ -127,7 +127,7 @@ func TestCheckParentsIncest(t *testing.T) {
Transactions: nil,
}
_, err = tc.ValidateAndInsertBlock(directParentsRelationBlock, true)
err = tc.ValidateAndInsertBlock(directParentsRelationBlock, true)
if !errors.Is(err, ruleerrors.ErrInvalidParentsRelation) {
t.Fatalf("unexpected error %+v", err)
}
@@ -150,7 +150,7 @@ func TestCheckParentsIncest(t *testing.T) {
Transactions: nil,
}
_, err = tc.ValidateAndInsertBlock(indirectParentsRelationBlock, true)
err = tc.ValidateAndInsertBlock(indirectParentsRelationBlock, true)
if !errors.Is(err, ruleerrors.ErrInvalidParentsRelation) {
t.Fatalf("unexpected error %+v", err)
}

View File

@@ -78,7 +78,7 @@ func CheckBlockVersion(t *testing.T, tc testapi.TestConsensus, consensusConfig *
block.Header.PruningPoint(),
)
_, err = tc.ValidateAndInsertBlock(block, true)
err = tc.ValidateAndInsertBlock(block, true)
if !errors.Is(err, ruleerrors.ErrWrongBlockVersion) {
t.Fatalf("Unexpected error: %+v", err)
}
@@ -118,7 +118,7 @@ func CheckBlockTimestampInIsolation(t *testing.T, tc testapi.TestConsensus, cfg
block.Header.PruningPoint(),
)
_, err = tc.ValidateAndInsertBlock(block, true)
err = tc.ValidateAndInsertBlock(block, true)
if !errors.Is(err, ruleerrors.ErrTimeTooMuchInTheFuture) {
t.Fatalf("Unexpected error: %+v", err)
}

View File

@@ -39,7 +39,7 @@ func TestPOW(t *testing.T) {
t.Fatal(err)
}
invalidBlockWrongPOW = solveBlockWithWrongPOW(invalidBlockWrongPOW)
_, err = tc.ValidateAndInsertBlock(invalidBlockWrongPOW, true)
err = tc.ValidateAndInsertBlock(invalidBlockWrongPOW, true)
if !errors.Is(err, ruleerrors.ErrInvalidPoW) {
t.Fatalf("Expected block to be invalid with err: %v, instead found: %v", ruleerrors.ErrInvalidPoW, err)
}
@@ -65,7 +65,7 @@ func TestPOW(t *testing.T) {
abovePowMaxBlock.Header.PruningPoint(),
)
_, err = tc.ValidateAndInsertBlock(abovePowMaxBlock, true)
err = tc.ValidateAndInsertBlock(abovePowMaxBlock, true)
if !errors.Is(err, ruleerrors.ErrTargetTooHigh) {
t.Fatalf("Unexpected error: %+v", err)
}
@@ -90,7 +90,7 @@ func TestPOW(t *testing.T) {
negativeTargetBlock.Header.PruningPoint(),
)
_, err = tc.ValidateAndInsertBlock(negativeTargetBlock, true)
err = tc.ValidateAndInsertBlock(negativeTargetBlock, true)
if !errors.Is(err, ruleerrors.ErrNegativeTarget) {
t.Fatalf("Unexpected error: %+v", err)
}
@@ -104,7 +104,7 @@ func TestPOW(t *testing.T) {
// Difficulty is too high on mainnet to actually mine.
if consensusConfig.Name != "kaspa-mainnet" {
mining.SolveBlock(validBlock, random)
_, err = tc.ValidateAndInsertBlock(validBlock, true)
err = tc.ValidateAndInsertBlock(validBlock, true)
if err != nil {
t.Fatal(err)
}
@@ -161,7 +161,7 @@ func TestCheckParentHeadersExist(t *testing.T) {
orphanBlock.Header.PruningPoint(),
)
_, err = tc.ValidateAndInsertBlock(orphanBlock, true)
err = tc.ValidateAndInsertBlock(orphanBlock, true)
errMissingParents := &ruleerrors.ErrMissingParents{}
if !errors.As(err, errMissingParents) {
t.Fatalf("Unexpected error: %+v", err)
@@ -193,7 +193,7 @@ func TestCheckParentHeadersExist(t *testing.T) {
orphanBlock.Header.PruningPoint(),
)
_, err = tc.ValidateAndInsertBlock(invalidBlock, true)
err = tc.ValidateAndInsertBlock(invalidBlock, true)
if !errors.Is(err, ruleerrors.ErrTransactionVersionIsUnknown) {
t.Fatalf("Unexpected error: %+v", err)
}
@@ -220,7 +220,7 @@ func TestCheckParentHeadersExist(t *testing.T) {
invalidBlockChild.Header.PruningPoint(),
)
_, err = tc.ValidateAndInsertBlock(invalidBlockChild, true)
err = tc.ValidateAndInsertBlock(invalidBlockChild, true)
if !errors.Is(err, ruleerrors.ErrInvalidAncestorBlock) {
t.Fatalf("Unexpected error: %+v", err)
}
@@ -284,7 +284,7 @@ func TestCheckPruningPointViolation(t *testing.T) {
blockWithPruningViolation.Header.PruningPoint(),
)
_, err = tc.ValidateAndInsertBlock(blockWithPruningViolation, true)
err = tc.ValidateAndInsertBlock(blockWithPruningViolation, true)
if !errors.Is(err, ruleerrors.ErrPruningPointViolation) {
t.Fatalf("Unexpected error: %+v", err)
}

View File

@@ -9,19 +9,18 @@ import (
"sort"
)
func (csm *consensusStateManager) ResolveVirtual(maxBlocksToResolve uint64) (*externalapi.VirtualChangeSet, bool, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "csm.ResolveVirtual")
defer onEnd()
readStagingArea := model.NewStagingArea()
tips, err := csm.consensusStateStore.Tips(readStagingArea, csm.databaseContext)
// tipsInDecreasingGHOSTDAGParentSelectionOrder returns the current DAG tips in decreasing parent selection order.
// This means that the first tip in the resulting list would be the GHOSTDAG selected parent, and if removed from the list,
// the second tip would be the selected parent, and so on.
func (csm *consensusStateManager) tipsInDecreasingGHOSTDAGParentSelectionOrder(stagingArea *model.StagingArea) ([]*externalapi.DomainHash, error) {
tips, err := csm.consensusStateStore.Tips(stagingArea, csm.databaseContext)
if err != nil {
return nil, false, err
return nil, err
}
var sortErr error
sort.Slice(tips, func(i, j int) bool {
selectedParent, err := csm.ghostdagManager.ChooseSelectedParent(readStagingArea, tips[i], tips[j])
selectedParent, err := csm.ghostdagManager.ChooseSelectedParent(stagingArea, tips[i], tips[j])
if err != nil {
sortErr = err
return false
@@ -30,16 +29,22 @@ func (csm *consensusStateManager) ResolveVirtual(maxBlocksToResolve uint64) (*ex
return selectedParent.Equal(tips[i])
})
if sortErr != nil {
return nil, false, sortErr
return nil, sortErr
}
return tips, nil
}
func (csm *consensusStateManager) findNextPendingTip(stagingArea *model.StagingArea) (*externalapi.DomainHash, externalapi.BlockStatus, error) {
orderedTips, err := csm.tipsInDecreasingGHOSTDAGParentSelectionOrder(stagingArea)
if err != nil {
return nil, externalapi.StatusInvalid, err
}
var selectedTip *externalapi.DomainHash
isCompletelyResolved := true
for _, tip := range tips {
for _, tip := range orderedTips {
log.Debugf("Resolving tip %s", tip)
isViolatingFinality, shouldNotify, err := csm.isViolatingFinality(readStagingArea, tip)
isViolatingFinality, shouldNotify, err := csm.isViolatingFinality(stagingArea, tip)
if err != nil {
return nil, false, err
return nil, externalapi.StatusInvalid, err
}
if isViolatingFinality {
@@ -50,60 +55,147 @@ func (csm *consensusStateManager) ResolveVirtual(maxBlocksToResolve uint64) (*ex
continue
}
resolveStagingArea := model.NewStagingArea()
unverifiedBlocks, err := csm.getUnverifiedChainBlocks(resolveStagingArea, tip)
status, err := csm.blockStatusStore.Get(csm.databaseContext, stagingArea, tip)
if err != nil {
return nil, false, err
return nil, externalapi.StatusInvalid, err
}
resolveTip := tip
hasMoreUnverifiedThanMax := maxBlocksToResolve != 0 && uint64(len(unverifiedBlocks)) > maxBlocksToResolve
if hasMoreUnverifiedThanMax {
resolveTip = unverifiedBlocks[uint64(len(unverifiedBlocks))-maxBlocksToResolve]
log.Debugf("Has more than %d blocks to resolve. Changing the resolve tip to %s", maxBlocksToResolve, resolveTip)
}
blockStatus, reversalData, err := csm.resolveBlockStatus(resolveStagingArea, resolveTip, true)
if err != nil {
return nil, false, err
}
if blockStatus == externalapi.StatusUTXOValid {
selectedTip = resolveTip
isCompletelyResolved = !hasMoreUnverifiedThanMax
err = staging.CommitAllChanges(csm.databaseContext, resolveStagingArea)
if err != nil {
return nil, false, err
}
if reversalData != nil {
err = csm.ReverseUTXODiffs(resolveTip, reversalData)
// It's still not known what causes this error, but we can ignore it and not reverse the UTXO diffs
// and harm performance in some cases.
// TODO: Investigate why this error happens in the first place, and remove the workaround.
if errors.Is(err, ErrReverseUTXODiffsUTXODiffChildNotFound) {
log.Errorf("Could not reverse UTXO diffs while resolving virtual: %s", err)
} else if err != nil {
return nil, false, err
}
}
break
if status == externalapi.StatusUTXOValid || status == externalapi.StatusUTXOPendingVerification {
return tip, status, nil
}
}
if selectedTip == nil {
log.Warnf("Non of the DAG tips are valid")
return nil, true, nil
return nil, externalapi.StatusInvalid, nil
}
// getGHOSTDAGLowerTips returns the set of tips which are lower in GHOSTDAG parent selection order than `pendingTip`. i.e.,
// they can be added to virtual parents but `pendingTip` will remain the virtual selected parent
func (csm *consensusStateManager) getGHOSTDAGLowerTips(stagingArea *model.StagingArea, pendingTip *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
tips, err := csm.consensusStateStore.Tips(stagingArea, csm.databaseContext)
if err != nil {
return nil, err
}
oldVirtualGHOSTDAGData, err := csm.ghostdagDataStore.Get(csm.databaseContext, readStagingArea, model.VirtualBlockHash, false)
lowerTips := []*externalapi.DomainHash{pendingTip}
for _, tip := range tips {
if tip.Equal(pendingTip) {
continue
}
selectedParent, err := csm.ghostdagManager.ChooseSelectedParent(stagingArea, tip, pendingTip)
if err != nil {
return nil, err
}
if selectedParent.Equal(pendingTip) {
lowerTips = append(lowerTips, tip)
}
}
return lowerTips, nil
}
func (csm *consensusStateManager) ResolveVirtual(maxBlocksToResolve uint64) (*externalapi.VirtualChangeSet, bool, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "csm.ResolveVirtual")
defer onEnd()
// We use a read-only staging area for some read-only actions, to avoid
// confusion with the resolve/updateVirtual staging areas below
readStagingArea := model.NewStagingArea()
pendingTip, pendingTipStatus, err := csm.findNextPendingTip(readStagingArea)
if err != nil {
return nil, false, err
}
if pendingTip == nil {
log.Warnf("None of the DAG tips are valid")
return nil, true, nil
}
previousVirtualSelectedParent, err := csm.virtualSelectedParent(readStagingArea)
if err != nil {
return nil, false, err
}
if pendingTipStatus == externalapi.StatusUTXOValid && previousVirtualSelectedParent.Equal(pendingTip) {
return nil, true, nil
}
// Resolve a chunk from the pending chain
resolveStagingArea := model.NewStagingArea()
unverifiedBlocks, err := csm.getUnverifiedChainBlocks(resolveStagingArea, pendingTip)
if err != nil {
return nil, false, err
}
// Initially set the resolve processing point to the pending tip
processingPoint := pendingTip
// Too many blocks to verify, so we only process a chunk and return
if maxBlocksToResolve != 0 && uint64(len(unverifiedBlocks)) > maxBlocksToResolve {
processingPointIndex := uint64(len(unverifiedBlocks)) - maxBlocksToResolve
processingPoint = unverifiedBlocks[processingPointIndex]
isNewVirtualSelectedParent, err := csm.isNewSelectedTip(readStagingArea, processingPoint, previousVirtualSelectedParent)
if err != nil {
return nil, false, err
}
// We must find a processing point which wins over the previous virtual selected parent
// even if we process more than `maxBlocksToResolve` for that.
// Otherwise, internal UTXO diff logic gets all messed up
for !isNewVirtualSelectedParent {
if processingPointIndex == 0 {
return nil, false, errors.Errorf(
"Expecting the pending tip %s to overcome the previous selected parent %s", pendingTip, previousVirtualSelectedParent)
}
processingPointIndex--
processingPoint = unverifiedBlocks[processingPointIndex]
isNewVirtualSelectedParent, err = csm.isNewSelectedTip(readStagingArea, processingPoint, previousVirtualSelectedParent)
if err != nil {
return nil, false, err
}
}
log.Debugf("Has more than %d blocks to resolve. Setting the resolve processing point to %s", maxBlocksToResolve, processingPoint)
}
processingPointStatus, reversalData, err := csm.resolveBlockStatus(
resolveStagingArea, processingPoint, true)
if err != nil {
return nil, false, err
}
if processingPointStatus == externalapi.StatusUTXOValid {
err = staging.CommitAllChanges(csm.databaseContext, resolveStagingArea)
if err != nil {
return nil, false, err
}
if reversalData != nil {
err = csm.ReverseUTXODiffs(processingPoint, reversalData)
if err != nil {
return nil, false, err
}
}
}
isActualTip := processingPoint.Equal(pendingTip)
isCompletelyResolved := isActualTip && processingPointStatus == externalapi.StatusUTXOValid
updateVirtualStagingArea := model.NewStagingArea()
virtualUTXODiff, err := csm.updateVirtualWithParents(updateVirtualStagingArea, []*externalapi.DomainHash{selectedTip})
virtualParents := []*externalapi.DomainHash{processingPoint}
// If `isCompletelyResolved`, set virtual correctly with all tips which have less blue work than pending
if isCompletelyResolved {
lowerTips, err := csm.getGHOSTDAGLowerTips(readStagingArea, pendingTip)
if err != nil {
return nil, false, err
}
log.Debugf("Picking virtual parents from relevant tips len: %d", len(lowerTips))
virtualParents, err = csm.pickVirtualParents(readStagingArea, lowerTips)
if err != nil {
return nil, false, err
}
log.Debugf("Picked virtual parents: %s", virtualParents)
}
virtualUTXODiff, err := csm.updateVirtualWithParents(updateVirtualStagingArea, virtualParents)
if err != nil {
return nil, false, err
}
@@ -114,12 +206,12 @@ func (csm *consensusStateManager) ResolveVirtual(maxBlocksToResolve uint64) (*ex
}
selectedParentChainChanges, err := csm.dagTraversalManager.
CalculateChainPath(readStagingArea, oldVirtualGHOSTDAGData.SelectedParent(), selectedTip)
CalculateChainPath(updateVirtualStagingArea, previousVirtualSelectedParent, processingPoint)
if err != nil {
return nil, false, err
}
virtualParents, err := csm.dagTopologyManager.Parents(readStagingArea, model.VirtualBlockHash)
virtualParentsOutcome, err := csm.dagTopologyManager.Parents(updateVirtualStagingArea, model.VirtualBlockHash)
if err != nil {
return nil, false, err
}
@@ -127,6 +219,6 @@ func (csm *consensusStateManager) ResolveVirtual(maxBlocksToResolve uint64) (*ex
return &externalapi.VirtualChangeSet{
VirtualSelectedParentChainChanges: selectedParentChainChanges,
VirtualUTXODiff: virtualUTXODiff,
VirtualParents: virtualParents,
VirtualParents: virtualParentsOutcome,
}, isCompletelyResolved, nil
}
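
A sketch of the tip ordering that tipsInDecreasingGHOSTDAGParentSelectionOrder relies on, with blue work used as a stand-in for the real ChooseSelectedParent comparison: after sorting, index 0 is the would-be selected parent, removing it promotes index 1, and so on:

package main

import (
	"fmt"
	"sort"
)

type tip struct {
	name     string
	blueWork uint64 // stand-in for the GHOSTDAG comparison key
}

// chooseSelectedParent mimics ghostdagManager.ChooseSelectedParent for two tips:
// the one with more blue work wins (ties broken by name for determinism).
func chooseSelectedParent(a, b tip) tip {
	if a.blueWork > b.blueWork || (a.blueWork == b.blueWork && a.name < b.name) {
		return a
	}
	return b
}

// orderTips sorts tips in decreasing parent-selection order: index 0 would be
// the virtual selected parent; remove it and index 1 would be selected next.
func orderTips(tips []tip) []tip {
	sort.Slice(tips, func(i, j int) bool {
		return chooseSelectedParent(tips[i], tips[j]) == tips[i]
	})
	return tips
}

func main() {
	ordered := orderTips([]tip{{"a", 5}, {"b", 9}, {"c", 7}})
	fmt.Println(ordered) // [{b 9} {c 7} {a 5}]
}
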

View File

@@ -233,7 +233,7 @@ func (csm *consensusStateManager) resolveSingleBlockStatus(stagingArea *model.St
return externalapi.StatusUTXOValid, nil, nil
}
oldSelectedTip, err := csm.selectedTip(stagingArea)
oldSelectedTip, err := csm.virtualSelectedParent(stagingArea)
if err != nil {
return 0, nil, err
}
@@ -298,7 +298,7 @@ func (csm *consensusStateManager) isNewSelectedTip(stagingArea *model.StagingAre
return blockHash.Equal(newSelectedTip), nil
}
func (csm *consensusStateManager) selectedTip(stagingArea *model.StagingArea) (*externalapi.DomainHash, error) {
func (csm *consensusStateManager) virtualSelectedParent(stagingArea *model.StagingArea) (*externalapi.DomainHash, error) {
virtualGHOSTDAGData, err := csm.ghostdagDataStore.Get(csm.databaseContext, stagingArea, model.VirtualBlockHash, false)
if err != nil {
return nil, err

View File

@@ -0,0 +1,444 @@
package consensusstatemanager_test
import (
"fmt"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/testapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"testing"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus"
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
)
func TestAddBlockBetweenResolveVirtualCalls(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestAddBlockBetweenResolveVirtualCalls")
if err != nil {
t.Fatalf("Error setting up consensus: %+v", err)
}
defer teardown(false)
hashes := []*externalapi.DomainHash{consensusConfig.GenesisHash}
// Create a chain of blocks
const initialChainLength = 10
previousBlockHash := consensusConfig.GenesisHash
for i := 0; i < initialChainLength; i++ {
previousBlockHash, _, err = tc.AddBlock([]*externalapi.DomainHash{previousBlockHash}, nil, nil)
hashes = append(hashes, previousBlockHash)
if err != nil {
t.Fatalf("Error mining block no. %d in initial chain: %+v", i, err)
}
}
// Mine a chain with more blocks, to re-organize the DAG
const reorgChainLength = initialChainLength + 1
previousBlockHash = consensusConfig.GenesisHash
for i := 0; i < reorgChainLength; i++ {
previousBlock, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{previousBlockHash}, nil, nil)
if err != nil {
t.Fatalf("Error mining block no. %d in re-org chain: %+v", i, err)
}
previousBlockHash = consensushashing.BlockHash(previousBlock)
hashes = append(hashes, previousBlockHash)
// Do not UTXO validate in order to resolve virtual later
err = tc.ValidateAndInsertBlock(previousBlock, false)
if err != nil {
t.Fatalf("Error mining block no. %d in re-org chain: %+v", i, err)
}
}
// Resolve one step
_, _, err = tc.ResolveVirtualWithMaxParam(2)
if err != nil {
t.Fatalf("Error resolving virtual in re-org chain: %+v", err)
}
emptyCoinbase := &externalapi.DomainCoinbaseData{
ScriptPublicKey: &externalapi.ScriptPublicKey{
Script: nil,
Version: 0,
},
}
// Get template based on current resolve state
blockTemplate, err := tc.BuildBlockTemplate(emptyCoinbase, nil)
if err != nil {
t.Fatalf("Error building block template during virtual resolution of reorg: %+v", err)
}
// Resolve one more step
_, isCompletelyResolved, err := tc.ResolveVirtualWithMaxParam(2)
if err != nil {
t.Fatalf("Error resolving virtual in re-org chain: %+v", err)
}
// Add the mined block (now virtual was modified)
err = tc.ValidateAndInsertBlock(blockTemplate.Block, true)
if err != nil {
t.Fatalf("Error mining block during virtual resolution of reorg: %+v", err)
}
hashes = append(hashes, consensushashing.BlockHash(blockTemplate.Block))
// Complete resolving virtual
for !isCompletelyResolved {
_, isCompletelyResolved, err = tc.ResolveVirtualWithMaxParam(2)
if err != nil {
t.Fatalf("Error resolving virtual in re-org chain: %+v", err)
}
}
verifyUtxoDiffPaths(t, tc, hashes)
})
}
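
The tests in this file repeat the same resolve-until-complete loop; a hypothetical helper (not part of the PR, and with a simplified signature that drops the VirtualChangeSet return value) could capture it like this:

package main

import "fmt"

// resolver abstracts the part of the TestConsensus API used by these tests.
type resolver interface {
	// ResolveVirtualWithMaxParam resolves up to max blocks and reports
	// whether the virtual is now completely resolved.
	ResolveVirtualWithMaxParam(max uint64) (completelyResolved bool, err error)
}

// resolveVirtualCompletely keeps resolving in chunks until done, mirroring the
// `for !isCompletelyResolved` loops repeated in the tests above.
func resolveVirtualCompletely(r resolver, chunk uint64) error {
	for {
		done, err := r.ResolveVirtualWithMaxParam(chunk)
		if err != nil {
			return err
		}
		if done {
			return nil
		}
	}
}

// fakeResolver is a stand-in used only to make this sketch runnable.
type fakeResolver struct{ pending uint64 }

func (f *fakeResolver) ResolveVirtualWithMaxParam(max uint64) (bool, error) {
	if f.pending <= max {
		f.pending = 0
		return true, nil
	}
	f.pending -= max
	return false, nil
}

func main() {
	if err := resolveVirtualCompletely(&fakeResolver{pending: 7}, 2); err != nil {
		panic(err)
	}
	fmt.Println("virtual completely resolved")
}
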
func TestAddGenesisChildAfterOneResolveVirtualCall(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestAddGenesisChildAfterOneResolveVirtualCall")
if err != nil {
t.Fatalf("Error setting up consensus: %+v", err)
}
defer teardown(false)
hashes := []*externalapi.DomainHash{consensusConfig.GenesisHash}
// Create a chain of blocks
const initialChainLength = 6
previousBlockHash := consensusConfig.GenesisHash
for i := 0; i < initialChainLength; i++ {
previousBlockHash, _, err = tc.AddBlock([]*externalapi.DomainHash{previousBlockHash}, nil, nil)
hashes = append(hashes, previousBlockHash)
if err != nil {
t.Fatalf("Error mining block no. %d in initial chain: %+v", i, err)
}
}
// Mine a chain with more blocks, to re-organize the DAG
const reorgChainLength = initialChainLength + 1
previousBlockHash = consensusConfig.GenesisHash
for i := 0; i < reorgChainLength; i++ {
previousBlock, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{previousBlockHash}, nil, nil)
if err != nil {
t.Fatalf("Error mining block no. %d in re-org chain: %+v", i, err)
}
previousBlockHash = consensushashing.BlockHash(previousBlock)
hashes = append(hashes, previousBlockHash)
// Do not UTXO validate in order to resolve virtual later
err = tc.ValidateAndInsertBlock(previousBlock, false)
if err != nil {
t.Fatalf("Error mining block no. %d in re-org chain: %+v", i, err)
}
}
// Resolve one step
_, isCompletelyResolved, err := tc.ResolveVirtualWithMaxParam(2)
if err != nil {
t.Fatalf("Error resolving virtual in re-org chain: %+v", err)
}
_, _, err = tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil)
if err != nil {
t.Fatalf("Error adding block during virtual resolution of reorg: %+v", err)
}
// Complete resolving virtual
for !isCompletelyResolved {
_, isCompletelyResolved, err = tc.ResolveVirtualWithMaxParam(2)
if err != nil {
t.Fatalf("Error resolving virtual in re-org chain: %+v", err)
}
}
verifyUtxoDiffPaths(t, tc, hashes)
})
}
func TestAddGenesisChildAfterTwoResolveVirtualCalls(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestAddGenesisChildAfterTwoResolveVirtualCalls")
if err != nil {
t.Fatalf("Error setting up consensus: %+v", err)
}
defer teardown(false)
hashes := []*externalapi.DomainHash{consensusConfig.GenesisHash}
// Create a chain of blocks
const initialChainLength = 6
previousBlockHash := consensusConfig.GenesisHash
for i := 0; i < initialChainLength; i++ {
previousBlockHash, _, err = tc.AddBlock([]*externalapi.DomainHash{previousBlockHash}, nil, nil)
hashes = append(hashes, previousBlockHash)
if err != nil {
t.Fatalf("Error mining block no. %d in initial chain: %+v", i, err)
}
}
// Mine a chain with more blocks, to re-organize the DAG
const reorgChainLength = initialChainLength + 1
previousBlockHash = consensusConfig.GenesisHash
for i := 0; i < reorgChainLength; i++ {
previousBlock, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{previousBlockHash}, nil, nil)
if err != nil {
t.Fatalf("Error mining block no. %d in re-org chain: %+v", i, err)
}
previousBlockHash = consensushashing.BlockHash(previousBlock)
hashes = append(hashes, previousBlockHash)
// Do not UTXO validate in order to resolve virtual later
err = tc.ValidateAndInsertBlock(previousBlock, false)
if err != nil {
t.Fatalf("Error mining block no. %d in re-org chain: %+v", i, err)
}
}
// Resolve one step
_, _, err = tc.ResolveVirtualWithMaxParam(2)
if err != nil {
t.Fatalf("Error resolving virtual in re-org chain: %+v", err)
}
// Resolve one more step
_, isCompletelyResolved, err := tc.ResolveVirtualWithMaxParam(2)
if err != nil {
t.Fatalf("Error resolving virtual in re-org chain: %+v", err)
}
_, _, err = tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil)
if err != nil {
t.Fatalf("Error adding block during virtual resolution of reorg: %+v", err)
}
// Complete resolving virtual
for !isCompletelyResolved {
_, isCompletelyResolved, err = tc.ResolveVirtualWithMaxParam(2)
if err != nil {
t.Fatalf("Error resolving virtual in re-org chain: %+v", err)
}
}
verifyUtxoDiffPaths(t, tc, hashes)
})
}
func TestResolveVirtualBackAndForthReorgs(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestAddGenesisChildAfterTwoResolveVirtualCalls")
if err != nil {
t.Fatalf("Error setting up consensus: %+v", err)
}
defer teardown(false)
hashes := []*externalapi.DomainHash{consensusConfig.GenesisHash}
blocks := make(map[externalapi.DomainHash]string)
blocks[*consensusConfig.GenesisHash] = "g"
blocks[*model.VirtualBlockHash] = "v"
printfDebug("%s\n\n", consensusConfig.GenesisHash)
// Create a chain of blocks
const initialChainLength = 6
previousBlockHash := consensusConfig.GenesisHash
for i := 0; i < initialChainLength; i++ {
previousBlockHash, _, err = tc.AddBlock([]*externalapi.DomainHash{previousBlockHash}, nil, nil)
blocks[*previousBlockHash] = fmt.Sprintf("A_%d", i)
hashes = append(hashes, previousBlockHash)
printfDebug("A_%d: %s\n", i, previousBlockHash)
if err != nil {
t.Fatalf("Error mining block no. %d in initial chain: %+v", i, err)
}
}
printfDebug("\n")
verifyUtxoDiffPaths(t, tc, hashes)
firstChainTip := previousBlockHash
// Mine a chain with more blocks, to re-organize the DAG
const reorgChainLength = 12 // longer than initialChainLength, to trigger a re-org
previousBlockHash = consensusConfig.GenesisHash
for i := 0; i < reorgChainLength; i++ {
previousBlock, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{previousBlockHash}, nil, nil)
if err != nil {
t.Fatalf("Error mining block no. %d in re-org chain: %+v", i, err)
}
previousBlockHash = consensushashing.BlockHash(previousBlock)
blocks[*previousBlockHash] = fmt.Sprintf("B_%d", i)
hashes = append(hashes, previousBlockHash)
printfDebug("B_%d: %s\n", i, previousBlockHash)
// Do not UTXO validate in order to resolve virtual later
err = tc.ValidateAndInsertBlock(previousBlock, false)
if err != nil {
t.Fatalf("Error mining block no. %d in re-org chain: %+v", i, err)
}
}
printfDebug("\n")
printUtxoDiffChildren(t, tc, hashes, blocks)
verifyUtxoDiffPaths(t, tc, hashes)
previousVirtualSelectedParent, err := tc.GetVirtualSelectedParent()
if err != nil {
t.Fatal(err)
}
// Resolve one step
virtualChangeSet, _, err := tc.ResolveVirtualWithMaxParam(3)
if err != nil {
printUtxoDiffChildren(t, tc, hashes, blocks)
t.Fatalf("Error resolving virtual in re-org chain: %+v", err)
}
newVirtualSelectedParent, err := tc.GetVirtualSelectedParent()
if err != nil {
t.Fatal(err)
}
// Make sure the reported change-set is compatible with actual changes.
// Checking this for one call should suffice to avoid possible bugs.
reportedPreviousVirtualSelectedParent := virtualChangeSet.VirtualSelectedParentChainChanges.Removed[0]
reportedNewVirtualSelectedParent := virtualChangeSet.VirtualSelectedParentChainChanges.
Added[len(virtualChangeSet.VirtualSelectedParentChainChanges.Added)-1]
if !previousVirtualSelectedParent.Equal(reportedPreviousVirtualSelectedParent) {
t.Fatalf("The reported changeset is incompatible with actual changes")
}
if !newVirtualSelectedParent.Equal(reportedNewVirtualSelectedParent) {
t.Fatalf("The reported changeset is incompatible with actual changes")
}
// Resolve one more step
_, isCompletelyResolved, err := tc.ResolveVirtualWithMaxParam(3)
if err != nil {
t.Fatalf("Error resolving virtual in re-org chain: %+v", err)
}
// Complete resolving virtual
for !isCompletelyResolved {
_, isCompletelyResolved, err = tc.ResolveVirtualWithMaxParam(3)
if err != nil {
t.Fatalf("Error resolving virtual in re-org chain: %+v", err)
}
}
printUtxoDiffChildren(t, tc, hashes, blocks)
verifyUtxoDiffPaths(t, tc, hashes)
// Now get the first chain back to the winning position
previousBlockHash = firstChainTip
for i := 0; i < reorgChainLength; i++ {
previousBlockHash, _, err = tc.AddBlock([]*externalapi.DomainHash{previousBlockHash}, nil, nil)
blocks[*previousBlockHash] = fmt.Sprintf("A_%d", initialChainLength+i)
hashes = append(hashes, previousBlockHash)
printfDebug("A_%d: %s\n", initialChainLength+i, previousBlockHash)
if err != nil {
t.Fatalf("Error mining block no. %d in initial chain: %+v", initialChainLength+i, err)
}
}
printfDebug("\n")
printUtxoDiffChildren(t, tc, hashes, blocks)
verifyUtxoDiffPaths(t, tc, hashes)
})
}
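// verifyUtxoDiffPathToRoot walks UTXO diff children starting from block and fails the test
// if the chain of diff children breaks before reaching utxoDiffRoot.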
func verifyUtxoDiffPathToRoot(t *testing.T, tc testapi.TestConsensus, stagingArea *model.StagingArea, block, utxoDiffRoot *externalapi.DomainHash) {
current := block
for !current.Equal(utxoDiffRoot) {
hasUTXODiffChild, err := tc.UTXODiffStore().HasUTXODiffChild(tc.DatabaseContext(), stagingArea, current)
if err != nil {
t.Fatalf("Error while reading utxo diff store: %+v", err)
}
if !hasUTXODiffChild {
t.Fatalf("%s is expected to have a UTXO diff child", current)
}
current, err = tc.UTXODiffStore().UTXODiffChild(tc.DatabaseContext(), stagingArea, current)
if err != nil {
t.Fatalf("Error while reading utxo diff store: %+v", err)
}
}
}
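// verifyUtxoDiffPaths asserts the invariant maintained by virtual resolution: the virtual
// selected parent stores an explicit UTXO diff and has no diff child, and every block that
// either has a diff child or lies on the virtual selected chain can be walked, diff child by
// diff child, up to the virtual selected parent.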
func verifyUtxoDiffPaths(t *testing.T, tc testapi.TestConsensus, hashes []*externalapi.DomainHash) {
stagingArea := model.NewStagingArea()
virtualGHOSTDAGData, err := tc.GHOSTDAGDataStore().Get(tc.DatabaseContext(), stagingArea, model.VirtualBlockHash, false)
if err != nil {
t.Fatal(err)
}
utxoDiffRoot := virtualGHOSTDAGData.SelectedParent()
hasUTXODiffChild, err := tc.UTXODiffStore().HasUTXODiffChild(tc.DatabaseContext(), stagingArea, utxoDiffRoot)
if err != nil {
t.Fatalf("Error while reading utxo diff store: %+v", err)
}
if hasUTXODiffChild {
t.Fatalf("Virtual selected parent is not expected to have an explicit diff child")
}
_, err = tc.UTXODiffStore().UTXODiff(tc.DatabaseContext(), stagingArea, utxoDiffRoot)
if err != nil {
t.Fatalf("Virtual selected parent is expected to have a utxo diff: %+v", err)
}
for _, block := range hashes {
hasUTXODiffChild, err = tc.UTXODiffStore().HasUTXODiffChild(tc.DatabaseContext(), stagingArea, block)
if err != nil {
t.Fatalf("Error while reading utxo diff store: %+v", err)
}
isOnVirtualSelectedChain, err := tc.DAGTopologyManager().IsInSelectedParentChainOf(stagingArea, block, utxoDiffRoot)
if err != nil {
t.Fatal(err)
}
// We expect a valid path to root in both cases: (i) block has a diff child, (ii) block is on the virtual selected chain
if hasUTXODiffChild || isOnVirtualSelectedChain {
verifyUtxoDiffPathToRoot(t, tc, stagingArea, block, utxoDiffRoot)
}
}
}
func printfDebug(format string, a ...any) {
// Uncomment below when debugging the test
//fmt.Printf(format, a...)
}
func printUtxoDiffChildren(t *testing.T, tc testapi.TestConsensus, hashes []*externalapi.DomainHash, blocks map[externalapi.DomainHash]string) {
printfDebug("\n===============================\nBlock\t\tDiff child\n")
stagingArea := model.NewStagingArea()
for _, block := range hashes {
hasUTXODiffChild, err := tc.UTXODiffStore().HasUTXODiffChild(tc.DatabaseContext(), stagingArea, block)
if err != nil {
t.Fatalf("Error while reading utxo diff store: %+v", err)
}
if hasUTXODiffChild {
utxoDiffChild, err := tc.UTXODiffStore().UTXODiffChild(tc.DatabaseContext(), stagingArea, block)
if err != nil {
t.Fatalf("Error while reading utxo diff store: %+v", err)
}
printfDebug("%s\t\t\t%s\n", blocks[*block], blocks[*utxoDiffChild])
} else {
printfDebug("%s\n", blocks[*block])
}
}
printfDebug("\n===============================\n")
}
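If editing the code to toggle the prints becomes tedious, printfDebug could instead be gated by an environment variable. This is only a sketch; the variable name is hypothetical and it assumes an "os" import:

func printfDebugFromEnv(format string, a ...any) {
	// KASPAD_TEST_DEBUG is a hypothetical switch for these test prints.
	if os.Getenv("KASPAD_TEST_DEBUG") != "" {
		fmt.Printf(format, a...)
	}
}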

View File

@@ -3,18 +3,10 @@ package consensusstatemanager
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/staging"
"github.com/pkg/errors"
)
// ErrReverseUTXODiffsUTXODiffChildNotFound indicates a UTXO diff child was not found while calling ReverseUTXODiffs.
// It's still not known what causes this error, but we can ignore it and not reverse the UTXO diffs
// and harm performance in some cases.
// TODO: Investigate why this error happens in the first place, and remove the workaround.
var ErrReverseUTXODiffsUTXODiffChildNotFound = errors.New("ErrReverseUTXODiffsUTXODiffChildNotFound")
func (csm *consensusStateManager) ReverseUTXODiffs(tipHash *externalapi.DomainHash,
reversalData *model.UTXODiffReversalData) error {
@@ -57,9 +49,6 @@ func (csm *consensusStateManager) ReverseUTXODiffs(tipHash *externalapi.DomainHa
currentBlockUTXODiffChild, err := csm.utxoDiffStore.UTXODiffChild(csm.databaseContext, readStagingArea, currentBlock)
if err != nil {
if database.IsNotFoundError(err) {
return errors.Wrapf(ErrReverseUTXODiffsUTXODiffChildNotFound, "UTXO diff child was not found for block %s", currentBlock)
}
return err
}
currentBlockGHOSTDAGData, err := csm.ghostdagDataStore.Get(csm.databaseContext, readStagingArea, currentBlock, false)
@@ -67,12 +56,6 @@ func (csm *consensusStateManager) ReverseUTXODiffs(tipHash *externalapi.DomainHa
return err
}
// We stop reversing when current's UTXODiffChild is not current's SelectedParent
if !currentBlockGHOSTDAGData.SelectedParent().Equal(currentBlockUTXODiffChild) {
log.Debugf("Block %s's UTXODiffChild is not it's selected parent - finish reversing", currentBlock)
break
}
currentUTXODiff := previousUTXODiff.Reversed()
// retrieve current utxoDiff for Bi, to be used by next block
@@ -86,6 +69,12 @@ func (csm *consensusStateManager) ReverseUTXODiffs(tipHash *externalapi.DomainHa
return err
}
// We stop reversing when current's UTXODiffChild is not current's SelectedParent
if !currentBlockGHOSTDAGData.SelectedParent().Equal(currentBlockUTXODiffChild) {
log.Debugf("Block %s's UTXODiffChild is not it's selected parent - finish reversing", currentBlock)
break
}
previousBlock = currentBlock
previousBlockGHOSTDAGData = currentBlockGHOSTDAGData

View File

@@ -110,7 +110,7 @@ func (csm *consensusStateManager) updateSelectedTipUTXODiff(
onEnd := logger.LogAndMeasureExecutionTime(log, "updateSelectedTipUTXODiff")
defer onEnd()
selectedTip, err := csm.selectedTip(stagingArea)
selectedTip, err := csm.virtualSelectedParent(stagingArea)
if err != nil {
return err
}

View File

@@ -84,7 +84,7 @@ func TestConsensusStateManager_pickVirtualParents(t *testing.T) {
if err != nil {
t.Fatalf("Failed building a block: %v", err)
}
_, err = tc.ValidateAndInsertBlock(block, true)
err = tc.ValidateAndInsertBlock(block, true)
if err != nil {
t.Fatalf("Failed Inserting block to tc: %v", err)
}

View File

@@ -65,7 +65,7 @@ func TestDifficulty(t *testing.T) {
newHeader := block.Header.ToMutable()
newHeader.SetTimeInMilliseconds(blockTime)
block.Header = newHeader.ToImmutable()
_, err = tc.ValidateAndInsertBlock(block, true)
err = tc.ValidateAndInsertBlock(block, true)
if err != nil {
t.Fatalf("ValidateAndInsertBlock: %+v", err)
}

View File

@@ -34,7 +34,7 @@ func TestPastMedianTime(t *testing.T) {
newHeader := block.Header.ToMutable()
newHeader.SetTimeInMilliseconds(blockTime)
block.Header = newHeader.ToImmutable()
_, err = tc.ValidateAndInsertBlock(block, true)
err = tc.ValidateAndInsertBlock(block, true)
if err != nil {
t.Fatalf("ValidateAndInsertBlock: %+v", err)
}

View File

@@ -191,6 +191,46 @@ func (pm *pruningManager) UpdatePruningPointByVirtual(stagingArea *model.Staging
return nil
}
type blockIteratorFromOneBlock struct {
done, isClosed bool
hash *externalapi.DomainHash
}
func (b *blockIteratorFromOneBlock) First() bool {
if b.isClosed {
panic("Tried using a closed blockIteratorFromOneBlock")
}
b.done = false
return true
}
func (b *blockIteratorFromOneBlock) Next() bool {
if b.isClosed {
panic("Tried using a closed blockIteratorFromOneBlock")
}
b.done = true
return false
}
func (b *blockIteratorFromOneBlock) Get() (*externalapi.DomainHash, error) {
if b.isClosed {
panic("Tried using a closed blockIteratorFromOneBlock")
}
return b.hash, nil
}
func (b *blockIteratorFromOneBlock) Close() error {
if b.isClosed {
panic("Tried using a closed blockIteratorFromOneBlock")
}
b.isClosed = true
return nil
}
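blockIteratorFromOneBlock lets the degenerate single-block case below reuse the standard model.BlockIterator idiom. A caller-side sketch of that idiom (illustrative, not repository code):

func collectIteratorHashes(iterator model.BlockIterator) ([]*externalapi.DomainHash, error) {
	defer iterator.Close()
	var hashes []*externalapi.DomainHash
	for ok := iterator.First(); ok; ok = iterator.Next() {
		hash, err := iterator.Get()
		if err != nil {
			return nil, err
		}
		hashes = append(hashes, hash)
	}
	return hashes, nil
}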
func (pm *pruningManager) nextPruningPointAndCandidateByBlockHash(stagingArea *model.StagingArea,
blockHash, suggestedLowHash *externalapi.DomainHash) (*externalapi.DomainHash, *externalapi.DomainHash, error) {
@@ -222,12 +262,12 @@ func (pm *pruningManager) nextPruningPointAndCandidateByBlockHash(stagingArea *m
}
}
ghostdagData, err := pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, blockHash, false)
currentPruningPoint, err := pm.pruningStore.PruningPoint(pm.databaseContext, stagingArea)
if err != nil {
return nil, nil, err
}
currentPruningPoint, err := pm.pruningStore.PruningPoint(pm.databaseContext, stagingArea)
ghostdagData, err := pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, blockHash, false)
if err != nil {
return nil, nil, err
}
@@ -240,9 +280,14 @@ func (pm *pruningManager) nextPruningPointAndCandidateByBlockHash(stagingArea *m
// We iterate until the selected parent of the given block, in order to allow a situation where the given block hash
// belongs to the virtual. This shouldn't change anything since the max blue score difference between a block and its
// selected parent is K, and K << pm.pruningDepth.
iterator, err := pm.dagTraversalManager.SelectedChildIterator(stagingArea, ghostdagData.SelectedParent(), lowHash, true)
if err != nil {
return nil, nil, err
var iterator model.BlockIterator
if blockHash.Equal(lowHash) {
iterator = &blockIteratorFromOneBlock{hash: lowHash}
} else {
iterator, err = pm.dagTraversalManager.SelectedChildIterator(stagingArea, ghostdagData.SelectedParent(), lowHash, true)
if err != nil {
return nil, nil, err
}
}
defer iterator.Close()
@@ -727,78 +772,41 @@ func (pm *pruningManager) calculateDiffBetweenPreviousAndCurrentPruningPoints(st
if err != nil {
return nil, err
}
currentPruningGhostDAG, err := pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, currentPruningHash, false)
utxoDiff := utxo.NewMutableUTXODiff()
iterator, err := pm.dagTraversalManager.SelectedChildIterator(stagingArea, currentPruningHash, previousPruningHash, false)
if err != nil {
return nil, err
}
previousPruningGhostDAG, err := pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, previousPruningHash, false)
if err != nil {
return nil, err
defer iterator.Close()
for ok := iterator.First(); ok; ok = iterator.Next() {
child, err := iterator.Get()
if err != nil {
return nil, err
}
chainBlockAcceptanceData, err := pm.acceptanceDataStore.Get(pm.databaseContext, stagingArea, child)
if err != nil {
return nil, err
}
chainBlockHeader, err := pm.blockHeaderStore.BlockHeader(pm.databaseContext, stagingArea, child)
if err != nil {
return nil, err
}
for _, blockAcceptanceData := range chainBlockAcceptanceData {
for _, transactionAcceptanceData := range blockAcceptanceData.TransactionAcceptanceData {
if transactionAcceptanceData.IsAccepted {
err = utxoDiff.AddTransaction(transactionAcceptanceData.Transaction, chainBlockHeader.DAAScore())
if err != nil {
return nil, err
}
}
}
}
}
currentPruningCurrentDiffChild := currentPruningHash
previousPruningCurrentDiffChild := previousPruningHash
// We need to use BlueWork because it's the only thing that's monotonic in the whole DAG
// We use the BlueWork to know which point is currently lower on the DAG so we can keep climbing its children,
// that way we keep climbing on the lowest point until they both reach the exact same descendant
currentPruningCurrentDiffChildBlueWork := currentPruningGhostDAG.BlueWork()
previousPruningCurrentDiffChildBlueWork := previousPruningGhostDAG.BlueWork()
var diffHashesFromPrevious []*externalapi.DomainHash
var diffHashesFromCurrent []*externalapi.DomainHash
for {
// if currentPruningCurrentDiffChildBlueWork > previousPruningCurrentDiffChildBlueWork
if currentPruningCurrentDiffChildBlueWork.Cmp(previousPruningCurrentDiffChildBlueWork) == 1 {
diffHashesFromPrevious = append(diffHashesFromPrevious, previousPruningCurrentDiffChild)
previousPruningCurrentDiffChild, err = pm.utxoDiffStore.UTXODiffChild(pm.databaseContext, stagingArea, previousPruningCurrentDiffChild)
if err != nil {
return nil, err
}
diffChildGhostDag, err := pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, previousPruningCurrentDiffChild, false)
if err != nil {
return nil, err
}
previousPruningCurrentDiffChildBlueWork = diffChildGhostDag.BlueWork()
} else if currentPruningCurrentDiffChild.Equal(previousPruningCurrentDiffChild) {
break
} else {
diffHashesFromCurrent = append(diffHashesFromCurrent, currentPruningCurrentDiffChild)
currentPruningCurrentDiffChild, err = pm.utxoDiffStore.UTXODiffChild(pm.databaseContext, stagingArea, currentPruningCurrentDiffChild)
if err != nil {
return nil, err
}
diffChildGhostDag, err := pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, currentPruningCurrentDiffChild, false)
if err != nil {
return nil, err
}
currentPruningCurrentDiffChildBlueWork = diffChildGhostDag.BlueWork()
}
}
// The order in which we apply the diffs should be from top to bottom, but we traversed from bottom to top
// so we apply the diffs in reverse order.
oldDiff := utxo.NewMutableUTXODiff()
for i := len(diffHashesFromPrevious) - 1; i >= 0; i-- {
utxoDiff, err := pm.utxoDiffStore.UTXODiff(pm.databaseContext, stagingArea, diffHashesFromPrevious[i])
if err != nil {
return nil, err
}
err = oldDiff.WithDiffInPlace(utxoDiff)
if err != nil {
return nil, err
}
}
newDiff := utxo.NewMutableUTXODiff()
for i := len(diffHashesFromCurrent) - 1; i >= 0; i-- {
utxoDiff, err := pm.utxoDiffStore.UTXODiff(pm.databaseContext, stagingArea, diffHashesFromCurrent[i])
if err != nil {
return nil, err
}
err = newDiff.WithDiffInPlace(utxoDiff)
if err != nil {
return nil, err
}
}
return oldDiff.DiffFrom(newDiff.ToImmutable())
return utxoDiff.ToImmutable(), nil
}
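The rewritten calculation above folds every transaction accepted along the selected chain between the previous and current pruning points into a single UTXO diff, instead of climbing diff children by blue work. The core fold, stripped of the store lookups, looks roughly like this (a sketch; the helper and the use of the MutableUTXODiff interface as a parameter type are assumptions drawn from the surrounding code):

func addAcceptedTransactionsToDiff(utxoDiff externalapi.MutableUTXODiff,
	acceptanceData externalapi.AcceptanceData, daaScore uint64) error {

	for _, blockAcceptanceData := range acceptanceData {
		for _, transactionAcceptanceData := range blockAcceptanceData.TransactionAcceptanceData {
			if !transactionAcceptanceData.IsAccepted {
				continue
			}
			err := utxoDiff.AddTransaction(transactionAcceptanceData.Transaction, daaScore)
			if err != nil {
				return err
			}
		}
	}
	return nil
}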
// finalityScore is the number of finality intervals passed since
@@ -892,7 +900,7 @@ func (pm *pruningManager) updatePruningPoint() error {
if err != nil {
return err
}
if pm.shouldSanityCheckPruningUTXOSet {
if pm.shouldSanityCheckPruningUTXOSet && !pruningPoint.Equal(pm.genesisHash) {
err = pm.validateUTXOSetFitsCommitment(stagingArea, pruningPoint)
if err != nil {
return err

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
"github.com/kaspanet/kaspad/util/staging"
"io"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
@@ -60,7 +61,7 @@ func (tc *testConsensus) AddBlock(parentHashes []*externalapi.DomainHash, coinba
return nil, nil, err
}
virtualChangeSet, _, err := tc.blockProcessor.ValidateAndInsertBlock(block, true)
virtualChangeSet, err := tc.validateAndInsertBlockNoLock(block, true)
if err != nil {
return nil, nil, err
}
@@ -80,7 +81,7 @@ func (tc *testConsensus) AddUTXOInvalidHeader(parentHashes []*externalapi.Domain
return nil, nil, err
}
virtualChangeSet, _, err := tc.blockProcessor.ValidateAndInsertBlock(&externalapi.DomainBlock{
virtualChangeSet, err := tc.validateAndInsertBlockNoLock(&externalapi.DomainBlock{
Header: header,
Transactions: nil,
}, true)
@@ -103,7 +104,7 @@ func (tc *testConsensus) AddUTXOInvalidBlock(parentHashes []*externalapi.DomainH
return nil, nil, err
}
virtualChangeSet, _, err := tc.blockProcessor.ValidateAndInsertBlock(block, true)
virtualChangeSet, err := tc.validateAndInsertBlockNoLock(block, true)
if err != nil {
return nil, nil, err
}
@@ -111,6 +112,13 @@ func (tc *testConsensus) AddUTXOInvalidBlock(parentHashes []*externalapi.DomainH
return consensushashing.BlockHash(block), virtualChangeSet, nil
}
func (tc *testConsensus) ResolveVirtualWithMaxParam(maxBlocksToResolve uint64) (*externalapi.VirtualChangeSet, bool, error) {
tc.lock.Lock()
defer tc.lock.Unlock()
return tc.resolveVirtualChunkNoLock(maxBlocksToResolve)
}
// jsonBlock is a json representation of a block in mine format
type jsonBlock struct {
ID string `json:"id"`
@@ -258,3 +266,16 @@ func (tc *testConsensus) BuildHeaderWithParents(parentHashes []*externalapi.Doma
return tc.testBlockBuilder.BuildUTXOInvalidHeader(parentHashes)
}
func (tc *testConsensus) UpdatePruningPointByVirtual() error {
tc.lock.Lock()
defer tc.lock.Unlock()
stagingArea := model.NewStagingArea()
err := tc.pruningManager.UpdatePruningPointByVirtual(stagingArea)
if err != nil {
return err
}
return staging.CommitAllChanges(tc.databaseContext, stagingArea)
}

View File

@@ -318,7 +318,7 @@ func TestCheckLockTimeVerifyConditionedByAbsoluteTime(t *testing.T) {
blockHeader := tipBlock.Header.ToMutable()
blockHeader.SetTimeInMilliseconds(timeStampBlockE + i*1000)
tipBlock.Header = blockHeader.ToImmutable()
_, err = testConsensus.ValidateAndInsertBlock(tipBlock, true)
err = testConsensus.ValidateAndInsertBlock(tipBlock, true)
if err != nil {
t.Fatalf("Error validating and inserting tip block: %v", err)
}
@@ -439,7 +439,7 @@ func TestCheckLockTimeVerifyConditionedByAbsoluteTimeWithWrongLockTime(t *testin
blockHeader := tipBlock.Header.ToMutable()
blockHeader.SetTimeInMilliseconds(timeStampBlockE + i*1000)
tipBlock.Header = blockHeader.ToImmutable()
_, err = testConsensus.ValidateAndInsertBlock(tipBlock, true)
err = testConsensus.ValidateAndInsertBlock(tipBlock, true)
if err != nil {
t.Fatalf("Error validating and inserting tip block: %v", err)
}

View File

@@ -0,0 +1,102 @@
package consensushashing
import (
"fmt"
"testing"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/subnetworks"
)
func TestTransactionHash(t *testing.T) {
tx := externalapi.DomainTransaction{0, []*externalapi.DomainTransactionInput{}, []*externalapi.DomainTransactionOutput{}, 0,
externalapi.DomainSubnetworkID{}, 0, []byte{}, 0, 0,
nil}
id := TransactionID(&tx)
fmt.Printf("%s\n", id)
tx_hash := TransactionHash(&tx)
fmt.Printf("%s\n\n", tx_hash)
inputs := []*externalapi.DomainTransactionInput{&externalapi.DomainTransactionInput{
PreviousOutpoint: externalapi.DomainOutpoint{
TransactionID: externalapi.DomainTransactionID{},
Index: 2,
},
SignatureScript: []byte{1, 2},
Sequence: 7,
SigOpCount: 5,
UTXOEntry: nil,
}}
tx = externalapi.DomainTransaction{1, inputs, []*externalapi.DomainTransactionOutput{}, 0,
externalapi.DomainSubnetworkID{}, 0, []byte{}, 0, 0,
nil}
id = TransactionID(&tx)
fmt.Printf("%s\n", id)
tx_hash = TransactionHash(&tx)
fmt.Printf("%s\n\n", tx_hash)
outputs := []*externalapi.DomainTransactionOutput{&externalapi.DomainTransactionOutput{
Value: 1564,
ScriptPublicKey: &externalapi.ScriptPublicKey{
Script: []byte{1, 2, 3, 4, 5},
Version: 7,
},
}}
tx = externalapi.DomainTransaction{1, inputs, outputs, 0,
externalapi.DomainSubnetworkID{}, 0, []byte{}, 0, 0,
nil}
id = TransactionID(&tx)
fmt.Printf("%s\n", id)
tx_hash = TransactionHash(&tx)
fmt.Printf("%s\n\n", tx_hash)
tx = externalapi.DomainTransaction{2, inputs, outputs, 54,
externalapi.DomainSubnetworkID{}, 3, []byte{}, 4, 7,
nil}
id = TransactionID(&tx)
fmt.Printf("%s\n", id)
tx_hash = TransactionHash(&tx)
fmt.Printf("%s\n\n", tx_hash)
transactionId, err := externalapi.NewDomainHashFromString("59b3d6dc6cdc660c389c3fdb5704c48c598d279cdf1bab54182db586a4c95dd5")
if err != nil {
t.Fatalf("%s", err)
}
inputs = []*externalapi.DomainTransactionInput{&externalapi.DomainTransactionInput{
PreviousOutpoint: externalapi.DomainOutpoint{
TransactionID: externalapi.DomainTransactionID(*transactionId),
Index: 2,
},
SignatureScript: []byte{1, 2},
Sequence: 7,
SigOpCount: 5,
UTXOEntry: nil,
}}
tx = externalapi.DomainTransaction{2, inputs, outputs, 54,
externalapi.DomainSubnetworkID{}, 3, []byte{}, 4, 7,
nil}
id = TransactionID(&tx)
fmt.Printf("%s\n", id)
tx_hash = TransactionHash(&tx)
fmt.Printf("%s\n\n", tx_hash)
tx = externalapi.DomainTransaction{2, inputs, outputs, 54,
subnetworks.SubnetworkIDCoinbase, 3, []byte{}, 4, 7,
nil}
id = TransactionID(&tx)
fmt.Printf("%s\n", id)
tx_hash = TransactionHash(&tx)
fmt.Printf("%s\n\n", tx_hash)
tx = externalapi.DomainTransaction{2, inputs, outputs, 54,
subnetworks.SubnetworkIDRegistry, 3, []byte{}, 4, 7,
nil}
id = TransactionID(&tx)
fmt.Printf("%s\n", id)
tx_hash = TransactionHash(&tx)
fmt.Printf("%s\n\n", tx_hash)
}
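The prints above emit transaction IDs and hashes for a fixed set of test cases. Once expected values are pinned down, the same cases could be tightened into assertions along these lines (a sketch; the expected strings would be the recorded values):

func checkTransactionHashes(t *testing.T, tx *externalapi.DomainTransaction, expectedID, expectedHash string) {
	if got := TransactionID(tx).String(); got != expectedID {
		t.Fatalf("TransactionID: expected %s, got %s", expectedID, got)
	}
	if got := TransactionHash(tx).String(); got != expectedHash {
		t.Fatalf("TransactionHash: expected %s, got %s", expectedHash, got)
	}
}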

View File

@@ -1,10 +1,80 @@
package hashes
import (
"fmt"
"math/rand"
"testing"
)
func TestNewBlockHash(t *testing.T) {
datas := [][]byte{
{},
{1},
{5, 199, 126, 44, 71, 32, 82, 139, 122, 217, 43, 48, 52, 112, 40, 209, 180, 83, 139, 231, 72, 48, 136, 48, 168, 226, 133, 7, 60, 4, 160, 205},
{42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42},
{0, 0, 0, 0, 0, 0, 0, 0},
}
tests := []struct {
hasher HashWriter
expected []string
}{
{NewTransactionHashWriter(), []string{
"50272a9e37c728026f93d0eda6ab4467f627338b879076483c88d291193cb3bf",
"f9bf7e04c712621a0f4bb75d763f9ef5f73af6c438fd15b80744393bc96398ad",
"8e791f3edcc92b71b8de2778efbc4666ee5bd146acbe8723a55bca26b022b0e0",
"a6dab1a3088548c62d13a082fa28e870fdbbe51adcd8c364e2ea37e473c04d81",
"3b79b78b967233843ad30f707b165eb3d6a91af8338076be8755c46a963c3d1d",
}},
{NewTransactionIDWriter(), []string{
"e5f65efda0894d2b0590c2e9e46e9acc03032f505a1522f5e8c78c5ec70b1d9c",
"aea52cf5e5a13da13a52dd69abd636eb1b0f86e58bc1dda6b17886b94593415a",
"a50a2f87bdce075740189e9e23907ae22b5addbd875ccb70c116811b1fa5fb18",
"0db7a485f7013a346a8f7f5caf73d52ca3c3b5ee101ad8753adedd4235b7236b",
"2afc9c855854b0a6e94a722c3451d0cdfc8c11748b78ef65b9786f87b48d0d07",
}},
{NewTransactionSigningHashWriter(), []string{
"34c75037ad62740d4b3228f88f844f7901c07bfacd55a045be518eabc15e52ce",
"8523b0471bcbea04575ccaa635eef9f9114f2890bda54367e5ff8caa3878bf82",
"a51c49d9eb3d13f9de16e1aa8d1ff17668d55633ce00f36a643ac714b0fb137f",
"487f199ef74c3e893e85bd37770e6334575a2d4d113b2e10474593c49807de93",
"6392adc33a8e24e9a0a0c4c5f07f9c1cc958ad40c16d7a9a276e374cebb4e32b",
}},
{NewTransactionSigningHashECDSAWriter(), []string{
"b31ad1fbbe41b0e2a90e07c84708b38ba581f0c0e9185416913a04fb6d342027",
"c43e1f75ea9df6379b56a95074c2b6289ed8c5a01fff2d49d9d44ad5575c164b",
"49085f99fa0084b5436663f757a5916b1e4290c3321707fb76921ed4e47844ec",
"3f887e866428de813c1d0463b14eef3ca1363c8187e917dda1eee0ec5996490b",
"56de89a8c75f0fee2de61b11ab05d0d42e29ed50879467cf128dd80800a52ada",
}},
{NewBlockHashWriter(), []string{
"a80b6aa20f20b15ebabe2b1949527f78a257594a732e774de637d85e6973a768",
"5643023add641f9421187b8c9aa3c6c73227d5ec34131c61a08d35b43e7e4b65",
"4dc3bf72045431e46f8839a7d390898f27c887fddd8637149bfb70f732f04334",
"15d7648e69023dca65c949a61ea166192049f449c604523494813873b19918a7",
"3ac41af8385ea5d902ce6d47f509b7accc9c631f1d57a719d777874467f6d877",
}},
{NewMerkleBranchHashWriter(), []string{
"4de3617db456d01248173f17ec58196e92fbd994b636476db4b875ed2ec84054",
"5737cd8b6fca5a30c19a491323a14e6b7021641cb3f8875f10c7a2eafd3cf43f",
"a49eeda61cc75e0a8e5915829752fe0ad97620d6d32de7c9883595b0810ca33e",
"28f33681dcff1313674e07dacc2d74c3089f6d8cea7a4f8792a71fd870988ee5",
"2d53a43a42020a5091c125230bcd8a4cf0eeb188333e68325d4bce58a1c75ca3",
}},
}
for _, testVector := range tests {
hasher := testVector.hasher
for i, data := range datas {
hasher.InfallibleWrite(data)
res := hasher.Finalize().String()
if res != testVector.expected[i] {
panic(fmt.Sprintf("expected: %s, got: %s", testVector.expected[i], res))
}
}
}
}
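All of the writers above follow the same two-step HashWriter pattern exercised by the loop: feed bytes with InfallibleWrite, then take the digest with Finalize. A minimal usage sketch:

func hashOfChunks(chunks ...[]byte) string {
	writer := NewBlockHashWriter()
	for _, chunk := range chunks {
		writer.InfallibleWrite(chunk)
	}
	return writer.Finalize().String()
}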
func BenchmarkNewBlockHashWriterSmall(b *testing.B) {
r := rand.New(rand.NewSource(0))
var someBytes [32]byte

View File

@@ -214,7 +214,8 @@ var MainnetParams = Params{
RPCPort: "16110",
DefaultPort: "16111",
DNSSeeds: []string{
"mainnet-dnsseed.daglabs-dev.com",
// This DNS seeder is run by Wolfie
"mainnet-dnsseed.kas.pa",
// This DNS seeder is run by Denis Mashkevich
"mainnet-dnsseed-1.kaspanet.org",
// This DNS seeder is run by Denis Mashkevich
@@ -296,7 +297,7 @@ var TestnetParams = Params{
Net: appmessage.Testnet,
RPCPort: "16210",
DefaultPort: "16211",
DNSSeeds: []string{"testnet-9-dnsseed.daglabs-dev.com"},
DNSSeeds: []string{"testnet-10-dnsseed.kas.pa"},
// DAG parameters
GenesisBlock: &testnetGenesisBlock,

View File

@@ -57,7 +57,7 @@ func TestCreateStagingConsensus(t *testing.T) {
},
},
}
_, err = domainInstance.StagingConsensus().ValidateAndInsertBlockWithTrustedData(genesisWithTrustedData, true)
err = domainInstance.StagingConsensus().ValidateAndInsertBlockWithTrustedData(genesisWithTrustedData, true)
if err != nil {
t.Fatalf("ValidateAndInsertBlockWithTrustedData: %+v", err)
}
@@ -74,7 +74,7 @@ func TestCreateStagingConsensus(t *testing.T) {
t.Fatalf("BuildBlock: %+v", err)
}
_, err = domainInstance.StagingConsensus().ValidateAndInsertBlock(block, true)
err = domainInstance.StagingConsensus().ValidateAndInsertBlock(block, true)
if err != nil {
t.Fatalf("ValidateAndInsertBlock: %+v", err)
}
@@ -120,7 +120,7 @@ func TestCreateStagingConsensus(t *testing.T) {
}
addGenesisToStagingConsensus()
_, err = domainInstance.StagingConsensus().ValidateAndInsertBlock(block, true)
err = domainInstance.StagingConsensus().ValidateAndInsertBlock(block, true)
if err != nil {
t.Fatalf("ValidateAndInsertBlock: %+v", err)
}

View File

@@ -106,7 +106,7 @@ func syncConsensuses(syncer, syncee externalapi.Consensus) error {
})
}
_, err = syncee.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
err = syncee.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
if err != nil {
return err
}
@@ -161,7 +161,7 @@ func syncConsensuses(syncer, syncee externalapi.Consensus) error {
return err
}
_, err = syncee.ValidateAndInsertBlock(block, false)
err = syncee.ValidateAndInsertBlock(block, false)
if err != nil {
return err
}
@@ -214,34 +214,19 @@ func syncConsensuses(syncer, syncee externalapi.Consensus) error {
return err
}
virtualDAAScoreStart, err := syncee.GetVirtualDAAScore()
err = syncer.ResolveVirtual(func(virtualDAAScoreStart uint64, virtualDAAScore uint64) {
if estimatedVirtualDAAScoreTarget-virtualDAAScoreStart <= 0 {
percents = 100
} else {
percents = int(float64(virtualDAAScore-virtualDAAScoreStart) / float64(estimatedVirtualDAAScoreTarget-virtualDAAScoreStart) * 100)
}
log.Infof("Resolving virtual. Estimated progress: %d%%", percents)
})
if err != nil {
return err
}
percents = 0
for i := 0; ; i++ {
if i%10 == 0 {
virtualDAAScore, err := syncee.GetVirtualDAAScore()
if err != nil {
return err
}
newPercents := int(float64(virtualDAAScore-virtualDAAScoreStart) / float64(estimatedVirtualDAAScoreTarget-virtualDAAScoreStart) * 100)
if newPercents > percents {
percents = newPercents
log.Infof("Resolving virtual. Estimated progress: %d%%", percents)
}
}
_, isCompletelyResolved, err := syncee.ResolveVirtual()
if err != nil {
return err
}
if isCompletelyResolved {
log.Infof("Resolved virtual")
break
}
}
log.Infof("Resolved virtual")
return nil
}

View File

@@ -1,9 +1,10 @@
package mempool
import (
"github.com/kaspanet/kaspad/domain/consensusreference"
"sync"
"github.com/kaspanet/kaspad/domain/consensusreference"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
miningmanagermodel "github.com/kaspanet/kaspad/domain/miningmanager/model"
)
@@ -42,25 +43,89 @@ func (mp *mempool) ValidateAndInsertTransaction(transaction *externalapi.DomainT
return mp.validateAndInsertTransaction(transaction, isHighPriority, allowOrphan)
}
func (mp *mempool) GetTransaction(transactionID *externalapi.DomainTransactionID) (*externalapi.DomainTransaction, bool) {
func (mp *mempool) GetTransaction(transactionID *externalapi.DomainTransactionID,
includeTransactionPool bool,
includeOrphanPool bool) (
transaction *externalapi.DomainTransaction,
isOrphan bool,
found bool) {
mp.mtx.RLock()
defer mp.mtx.RUnlock()
return mp.transactionsPool.getTransaction(transactionID)
var transactionFound bool
isOrphan = false
if includeTransactionPool {
transaction, transactionFound = mp.transactionsPool.getTransaction(transactionID, true)
}
if !transactionFound && includeOrphanPool {
transaction, transactionFound = mp.orphansPool.getOrphanTransaction(transactionID)
isOrphan = true
}
return transaction, isOrphan, transactionFound
}
func (mp *mempool) AllTransactions() []*externalapi.DomainTransaction {
func (mp *mempool) GetTransactionsByAddresses(includeTransactionPool bool, includeOrphanPool bool) (
sendingInTransactionPool map[string]*externalapi.DomainTransaction,
receivingInTransactionPool map[string]*externalapi.DomainTransaction,
sendingInOrphanPool map[string]*externalapi.DomainTransaction,
receivingInOrphanPool map[string]*externalapi.DomainTransaction,
err error) {
mp.mtx.RLock()
defer mp.mtx.RUnlock()
return mp.transactionsPool.getAllTransactions()
if includeTransactionPool {
sendingInTransactionPool, receivingInTransactionPool, err = mp.transactionsPool.getTransactionsByAddresses()
if err != nil {
return nil, nil, nil, nil, err
}
}
if includeOrphanPool {
sendingInOrphanPool, receivingInOrphanPool, err = mp.orphansPool.getOrphanTransactionsByAddresses()
if err != nil {
return nil, nil, nil, nil, err
}
}
return sendingInTransactionPool, receivingInTransactionPool, sendingInOrphanPool, receivingInOrphanPool, nil
}
func (mp *mempool) TransactionCount() int {
func (mp *mempool) AllTransactions(includeTransactionPool bool, includeOrphanPool bool) (
transactionPoolTransactions []*externalapi.DomainTransaction,
orphanPoolTransactions []*externalapi.DomainTransaction) {
mp.mtx.RLock()
defer mp.mtx.RUnlock()
return mp.transactionsPool.transactionCount()
if includeTransactionPool {
transactionPoolTransactions = mp.transactionsPool.getAllTransactions()
}
if includeOrphanPool {
orphanPoolTransactions = mp.orphansPool.getAllOrphanTransactions()
}
return transactionPoolTransactions, orphanPoolTransactions
}
func (mp *mempool) TransactionCount(includeTransactionPool bool, includeOrphanPool bool) int {
mp.mtx.RLock()
defer mp.mtx.RUnlock()
transactionCount := 0
if includeOrphanPool {
transactionCount += mp.orphansPool.orphanTransactionCount()
}
if includeTransactionPool {
transactionCount += mp.transactionsPool.transactionCount()
}
return transactionCount
}
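From a caller's perspective, the two boolean flags select which pools a query touches. A caller-side sketch against the updated Mempool interface shown later in this diff (the helper itself is illustrative only):

func mempoolSnapshot(mempool miningmanagermodel.Mempool) (transactionPoolSize, orphanPoolSize int) {
	transactionPoolTransactions, orphanPoolTransactions := mempool.AllTransactions(true, true)
	return len(transactionPoolTransactions), len(orphanPoolTransactions)
}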
func (mp *mempool) HandleNewBlockTransactions(transactions []*externalapi.DomainTransaction) (

View File

@@ -51,7 +51,7 @@ func (mpus *mempoolUTXOSet) addTransaction(transaction *model.MempoolTransaction
func (mpus *mempoolUTXOSet) removeTransaction(transaction *model.MempoolTransaction) {
for _, input := range transaction.Transaction().Inputs {
// If the transaction creating the output spent by this input is in the mempool - restore its UTXO
if _, ok := mpus.mempool.transactionsPool.getTransaction(&input.PreviousOutpoint.TransactionID); ok {
if _, ok := mpus.mempool.transactionsPool.getTransaction(&input.PreviousOutpoint.TransactionID, false); ok {
mpus.poolUnspentOutputs[input.PreviousOutpoint] = input.UTXOEntry
}
delete(mpus.transactionByPreviousOutpoint, input.PreviousOutpoint)

View File

@@ -15,3 +15,6 @@ type OutpointToUTXOEntryMap map[externalapi.DomainOutpoint]externalapi.UTXOEntry
// OutpointToTransactionMap maps an outpoint to a MempoolTransaction
type OutpointToTransactionMap map[externalapi.DomainOutpoint]*MempoolTransaction
// ScriptPublicKeyStringToDomainTransaction maps a ScriptPublicKey string to a DomainTransaction
type ScriptPublicKeyStringToDomainTransaction map[string]*externalapi.DomainTransaction

View File

@@ -169,7 +169,7 @@ func (op *orphansPool) processOrphansAfterAcceptedTransaction(acceptedTransactio
}
return nil, err
}
acceptedOrphans = append(acceptedOrphans, orphan.Transaction())
acceptedOrphans = append(acceptedOrphans, orphan.Transaction().Clone()) //these pointers leave the mempool, hence the clone
}
}
}
@@ -328,3 +328,48 @@ func (op *orphansPool) randomNonHighPriorityOrphan() *model.OrphanTransaction {
return nil
}
func (op *orphansPool) getOrphanTransaction(transactionID *externalapi.DomainTransactionID) (*externalapi.DomainTransaction, bool) {
if orphanTransaction, ok := op.allOrphans[*transactionID]; ok {
return orphanTransaction.Transaction().Clone(), true //this pointer leaves the mempool, hence we clone.
}
return nil, false
}
func (op *orphansPool) getOrphanTransactionsByAddresses() (
sending model.ScriptPublicKeyStringToDomainTransaction,
receiving model.ScriptPublicKeyStringToDomainTransaction,
err error) {
sending = make(model.ScriptPublicKeyStringToDomainTransaction)
receiving = make(model.ScriptPublicKeyStringToDomainTransaction, op.orphanTransactionCount())
var transaction *externalapi.DomainTransaction
for _, mempoolTransaction := range op.allOrphans {
transaction = mempoolTransaction.Transaction().Clone() //these pointers leave the mempool, hence we clone.
for _, input := range transaction.Inputs {
if input.UTXOEntry == nil { //this is not a bug, but a valid state of orphan transactions with missing outpoints.
continue
}
sending[input.UTXOEntry.ScriptPublicKey().String()] = transaction
}
for _, output := range transaction.Outputs {
receiving[output.ScriptPublicKey.String()] = transaction
}
}
return sending, receiving, nil
}
func (op *orphansPool) getAllOrphanTransactions() []*externalapi.DomainTransaction {
allOrphanTransactions := make([]*externalapi.DomainTransaction, len(op.allOrphans))
i := 0
for _, mempoolTransaction := range op.allOrphans {
allOrphanTransactions[i] = mempoolTransaction.Transaction().Clone() //these pointers leave the mempool, hence we clone.
i++
}
return allOrphanTransactions
}
func (op *orphansPool) orphanTransactionCount() int {
return len(op.allOrphans)
}

View File

@@ -11,7 +11,6 @@ func (mp *mempool) revalidateHighPriorityTransactions() ([]*externalapi.DomainTr
defer onEnd()
validTransactions := []*externalapi.DomainTransaction{}
for _, transaction := range mp.transactionsPool.highPriorityTransactions {
isValid, err := mp.revalidateTransaction(transaction)
if err != nil {
@@ -21,7 +20,7 @@ func (mp *mempool) revalidateHighPriorityTransactions() ([]*externalapi.DomainTr
continue
}
validTransactions = append(validTransactions, transaction.Transaction())
validTransactions = append(validTransactions, transaction.Transaction().Clone())
}
return validTransactions, nil

View File

@@ -6,6 +6,7 @@ import (
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/miningmanager/mempool/model"
)
@@ -135,7 +136,7 @@ func (tp *transactionsPool) allReadyTransactions() []*externalapi.DomainTransact
for _, mempoolTransaction := range tp.allTransactions {
if len(mempoolTransaction.ParentTransactionsInPool()) == 0 {
result = append(result, mempoolTransaction.Transaction())
result = append(result, mempoolTransaction.Transaction().Clone()) //this pointer leaves the mempool, and gets its utxo set to nil, hence we clone.
}
}
@@ -204,17 +205,44 @@ func (tp *transactionsPool) limitTransactionCount() error {
return nil
}
func (tp *transactionsPool) getTransaction(transactionID *externalapi.DomainTransactionID) (*externalapi.DomainTransaction, bool) {
func (tp *transactionsPool) getTransaction(transactionID *externalapi.DomainTransactionID, clone bool) (*externalapi.DomainTransaction, bool) {
if mempoolTransaction, ok := tp.allTransactions[*transactionID]; ok {
if clone {
return mempoolTransaction.Transaction().Clone(), true //this pointer leaves the mempool, hence we clone.
}
return mempoolTransaction.Transaction(), true
}
return nil, false
}
func (tp *transactionsPool) getAllTransactions() []*externalapi.DomainTransaction {
allTransactions := make([]*externalapi.DomainTransaction, 0, len(tp.allTransactions))
func (tp *transactionsPool) getTransactionsByAddresses() (
sending model.ScriptPublicKeyStringToDomainTransaction,
receiving model.ScriptPublicKeyStringToDomainTransaction,
err error) {
sending = make(model.ScriptPublicKeyStringToDomainTransaction, tp.transactionCount())
receiving = make(model.ScriptPublicKeyStringToDomainTransaction, tp.transactionCount())
var transaction *externalapi.DomainTransaction
for _, mempoolTransaction := range tp.allTransactions {
allTransactions = append(allTransactions, mempoolTransaction.Transaction())
transaction = mempoolTransaction.Transaction().Clone() //this pointer leaves the mempool, hence we clone.
for _, input := range transaction.Inputs {
if input.UTXOEntry == nil {
return nil, nil, errors.Errorf("Mempool transaction %s is missing an UTXOEntry. This should be fixed, and not happen", consensushashing.TransactionID(transaction).String())
}
sending[input.UTXOEntry.ScriptPublicKey().String()] = transaction
}
for _, output := range transaction.Outputs {
receiving[output.ScriptPublicKey.String()] = transaction
}
}
return sending, receiving, nil
}
func (tp *transactionsPool) getAllTransactions() []*externalapi.DomainTransaction {
allTransactions := make([]*externalapi.DomainTransaction, len(tp.allTransactions))
i := 0
for _, mempoolTransaction := range tp.allTransactions {
allTransactions[i] = mempoolTransaction.Transaction().Clone() //this pointer leaves the mempool, hence we clone.
i++
}
return allTransactions
}
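A rule that recurs throughout both pools is visible above: any transaction pointer that leaves the mempool is cloned first, since callers may mutate it (for example, by setting input UTXO entries to nil during block building). The rule as a one-line helper (illustrative only):

func safeCopy(mempoolTransaction *model.MempoolTransaction) *externalapi.DomainTransaction {
	// Clone before the pointer leaves the mempool, so callers cannot mutate pooled state.
	return mempoolTransaction.Transaction().Clone()
}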

View File

@@ -54,7 +54,7 @@ func (mp *mempool) validateAndInsertTransaction(transaction *externalapi.DomainT
return nil, err
}
acceptedTransactions = append([]*externalapi.DomainTransaction{transaction}, acceptedOrphans...)
acceptedTransactions = append([]*externalapi.DomainTransaction{transaction.Clone()}, acceptedOrphans...) //these pointer leave the mempool, hence we clone.
err = mp.transactionsPool.limitTransactionCount()
if err != nil {

View File

@@ -1,11 +1,12 @@
package miningmanager
import (
"sync"
"time"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensusreference"
miningmanagermodel "github.com/kaspanet/kaspad/domain/miningmanager/model"
"sync"
"time"
)
// MiningManager creates block templates for mining as well as maintaining
@@ -14,9 +15,20 @@ type MiningManager interface {
GetBlockTemplate(coinbaseData *externalapi.DomainCoinbaseData) (block *externalapi.DomainBlock, isNearlySynced bool, err error)
ClearBlockTemplate()
GetBlockTemplateBuilder() miningmanagermodel.BlockTemplateBuilder
GetTransaction(transactionID *externalapi.DomainTransactionID) (*externalapi.DomainTransaction, bool)
AllTransactions() []*externalapi.DomainTransaction
TransactionCount() int
GetTransaction(transactionID *externalapi.DomainTransactionID, includeTransactionPool bool, includeOrphanPool bool) (
transactionPoolTransaction *externalapi.DomainTransaction,
isOrphan bool,
found bool)
GetTransactionsByAddresses(includeTransactionPool bool, includeOrphanPool bool) (
sendingInTransactionPool map[string]*externalapi.DomainTransaction,
receivingInTransactionPool map[string]*externalapi.DomainTransaction,
sendingInOrphanPool map[string]*externalapi.DomainTransaction,
receivingInOrphanPool map[string]*externalapi.DomainTransaction,
err error)
AllTransactions(includeTransactionPool bool, includeOrphanPool bool) (
transactionPoolTransactions []*externalapi.DomainTransaction,
orphanPoolTransactions []*externalapi.DomainTransaction)
TransactionCount(includeTransactionPool bool, includeOrphanPool bool) int
HandleNewBlockTransactions(txs []*externalapi.DomainTransaction) ([]*externalapi.DomainTransaction, error)
ValidateAndInsertTransaction(transaction *externalapi.DomainTransaction, isHighPriority bool, allowOrphan bool) (
acceptedTransactions []*externalapi.DomainTransaction, err error)
@@ -106,17 +118,35 @@ func (mm *miningManager) ValidateAndInsertTransaction(transaction *externalapi.D
}
func (mm *miningManager) GetTransaction(
transactionID *externalapi.DomainTransactionID) (*externalapi.DomainTransaction, bool) {
transactionID *externalapi.DomainTransactionID,
includeTransactionPool bool,
includeOrphanPool bool) (
transactionPoolTransaction *externalapi.DomainTransaction,
isOrphan bool,
found bool) {
return mm.mempool.GetTransaction(transactionID)
return mm.mempool.GetTransaction(transactionID, includeTransactionPool, includeOrphanPool)
}
func (mm *miningManager) AllTransactions() []*externalapi.DomainTransaction {
return mm.mempool.AllTransactions()
func (mm *miningManager) AllTransactions(includeTransactionPool bool, includeOrphanPool bool) (
transactionPoolTransactions []*externalapi.DomainTransaction,
orphanPoolTransactions []*externalapi.DomainTransaction) {
return mm.mempool.AllTransactions(includeTransactionPool, includeOrphanPool)
}
func (mm *miningManager) TransactionCount() int {
return mm.mempool.TransactionCount()
func (mm *miningManager) GetTransactionsByAddresses(includeTransactionPool bool, includeOrphanPool bool) (
sendingInTransactionPool map[string]*externalapi.DomainTransaction,
receivingInTransactionPool map[string]*externalapi.DomainTransaction,
sendingInOrphanPool map[string]*externalapi.DomainTransaction,
receivingInOrphanPool map[string]*externalapi.DomainTransaction,
err error) {
return mm.mempool.GetTransactionsByAddresses(includeTransactionPool, includeOrphanPool)
}
func (mm *miningManager) TransactionCount(includeTransactionPool bool, includeOrphanPool bool) int {
return mm.mempool.TransactionCount(includeTransactionPool, includeOrphanPool)
}
func (mm *miningManager) RevalidateHighPriorityTransactions() (

View File

@@ -52,7 +52,7 @@ func TestValidateAndInsertTransaction(t *testing.T) {
}
// The UTXOEntry was filled manually for those transactions, so they won't be considered orphans.
// Therefore, all of the transactions are expected to be contained in the mempool.
transactionsFromMempool := miningManager.AllTransactions()
transactionsFromMempool, _ := miningManager.AllTransactions(true, false)
if len(transactionsToInsert) != len(transactionsFromMempool) {
t.Fatalf("Wrong number of transactions in mempool: expected: %d, got: %d", len(transactionsToInsert), len(transactionsFromMempool))
}
@@ -72,7 +72,7 @@ func TestValidateAndInsertTransaction(t *testing.T) {
if err != nil {
t.Fatalf("ValidateAndInsertTransaction: %v", err)
}
transactionsFromMempool = miningManager.AllTransactions()
transactionsFromMempool, _ = miningManager.AllTransactions(true, false)
if !contains(transactionNotAnOrphan, transactionsFromMempool) {
t.Fatalf("Missing transaction %s in the mempool", consensushashing.TransactionID(transactionNotAnOrphan))
}
@@ -99,7 +99,7 @@ func TestImmatureSpend(t *testing.T) {
if !errors.As(err, txRuleError) || txRuleError.RejectCode != mempool.RejectImmatureSpend {
t.Fatalf("Unexpected error %+v", err)
}
transactionsFromMempool := miningManager.AllTransactions()
transactionsFromMempool, _ := miningManager.AllTransactions(true, false)
if contains(tx, transactionsFromMempool) {
t.Fatalf("Mempool contains a transaction with immature coinbase")
}
@@ -205,7 +205,7 @@ func TestHandleNewBlockTransactions(t *testing.T) {
if err != nil {
t.Fatalf("HandleNewBlockTransactions: %v", err)
}
mempoolTransactions := miningManager.AllTransactions()
mempoolTransactions, _ := miningManager.AllTransactions(true, false)
for _, removedTransaction := range blockWithFirstPartOfTheTransactions {
if contains(removedTransaction, mempoolTransactions) {
t.Fatalf("This transaction shouldnt be in mempool: %s", consensushashing.TransactionID(removedTransaction))
@@ -214,7 +214,7 @@ func TestHandleNewBlockTransactions(t *testing.T) {
// There are no chained/double-spend transactions, so all of the other
// transactions are expected to still be included in the mempool.
mempoolTransactions = miningManager.AllTransactions()
mempoolTransactions, _ = miningManager.AllTransactions(true, false)
for _, transaction := range blockWithRestOfTheTransactions[transactionhelper.CoinbaseTransactionIndex+1:] {
if !contains(transaction, mempoolTransactions) {
t.Fatalf("This transaction %s should be in mempool.", consensushashing.TransactionID(transaction))
@@ -225,8 +225,9 @@ func TestHandleNewBlockTransactions(t *testing.T) {
if err != nil {
t.Fatalf("HandleNewBlockTransactions: %v", err)
}
if len(miningManager.AllTransactions()) != 0 {
blockIDs := domainBlocksToBlockIds(miningManager.AllTransactions())
mempoolTransactions, _ = miningManager.AllTransactions(true, false)
if len(mempoolTransactions) != 0 {
blockIDs := domainBlocksToBlockIds(mempoolTransactions)
t.Fatalf("The mempool contains unexpected transactions: %s", blockIDs)
}
})
@@ -269,7 +270,8 @@ func TestDoubleSpendWithBlock(t *testing.T) {
if err != nil {
t.Fatalf("HandleNewBlockTransactions: %v", err)
}
if contains(transactionInTheMempool, miningManager.AllTransactions()) {
mempoolTransactions, _ := miningManager.AllTransactions(true, false)
if contains(transactionInTheMempool, mempoolTransactions) {
t.Fatalf("The transaction %s, shouldn't be in the mempool, since at least one "+
"output was already spent.", consensushashing.TransactionID(transactionInTheMempool))
}
@@ -303,7 +305,7 @@ func TestOrphanTransactions(t *testing.T) {
t.Fatalf("ValidateAndInsertTransaction: %v", err)
}
}
transactionsMempool := miningManager.AllTransactions()
transactionsMempool, _ := miningManager.AllTransactions(true, false)
for _, transaction := range transactionsMempool {
if contains(transaction, childTransactions) {
t.Fatalf("Error: an orphan transaction is exist in the mempool")
@@ -345,7 +347,7 @@ func TestOrphanTransactions(t *testing.T) {
if err != nil {
t.Fatalf("HandleNewBlockTransactions: %+v", err)
}
transactionsMempool = miningManager.AllTransactions()
transactionsMempool, _ = miningManager.AllTransactions(true, false)
if len(transactionsMempool) != len(childTransactions) {
t.Fatalf("Expected %d transactions in the mempool but got %d", len(childTransactions), len(transactionsMempool))
}
@@ -553,8 +555,8 @@ func TestRevalidateHighPriorityTransactions(t *testing.T) {
}
// Make sure spendingTransaction is still in mempool
allTransactions := miningManager.AllTransactions()
if len(allTransactions) != 1 || !allTransactions[0].Equal(spendingTransaction) {
mempoolTransactions, _ := miningManager.AllTransactions(true, false)
if len(mempoolTransactions) != 1 || !mempoolTransactions[0].Equal(spendingTransaction) {
t.Fatalf("Expected to have spendingTransaction as only validTransaction returned from "+
"RevalidateHighPriorityTransactions, but got %v instead", validTransactions)
}
@@ -568,9 +570,9 @@ func TestRevalidateHighPriorityTransactions(t *testing.T) {
t.Fatalf("Expected to have empty validTransactions, but got %v instead", validTransactions)
}
// And also AllTransactions should be empty as well
allTransactions = miningManager.AllTransactions()
if len(allTransactions) != 0 {
t.Fatalf("Expected to have empty allTransactions, but got %v instead", allTransactions)
mempoolTransactions, _ = miningManager.AllTransactions(true, false)
if len(mempoolTransactions) != 0 {
t.Fatalf("Expected to have empty allTransactions, but got %v instead", mempoolTransactions)
}
})
}
@@ -605,7 +607,7 @@ func TestModifyBlockTemplate(t *testing.T) {
t.Fatalf("ValidateAndInsertTransaction: %v", err)
}
}
transactionsMempool := miningManager.AllTransactions()
transactionsMempool, _ := miningManager.AllTransactions(true, false)
for _, transaction := range transactionsMempool {
if contains(transaction, childTransactions) {
t.Fatalf("Error: an orphan transaction is exist in the mempool")
@@ -654,7 +656,7 @@ func TestModifyBlockTemplate(t *testing.T) {
if err != nil {
t.Fatalf("HandleNewBlockTransactions: %+v", err)
}
transactionsMempool = miningManager.AllTransactions()
transactionsMempool, _ = miningManager.AllTransactions(true, false)
if len(transactionsMempool) != len(childTransactions) {
t.Fatalf("Expected %d transactions in the mempool but got %d", len(childTransactions), len(transactionsMempool))
}

View File

@@ -12,9 +12,31 @@ type Mempool interface {
ValidateAndInsertTransaction(transaction *externalapi.DomainTransaction, isHighPriority bool, allowOrphan bool) (
acceptedTransactions []*externalapi.DomainTransaction, err error)
RemoveTransactions(txs []*externalapi.DomainTransaction, removeRedeemers bool) error
GetTransaction(transactionID *externalapi.DomainTransactionID) (*externalapi.DomainTransaction, bool)
AllTransactions() []*externalapi.DomainTransaction
TransactionCount() int
GetTransaction(
transactionID *externalapi.DomainTransactionID,
includeTransactionPool bool,
includeOrphanPool bool,
) (
transactionPoolTransaction *externalapi.DomainTransaction,
isOrphan bool,
found bool)
GetTransactionsByAddresses(
includeTransactionPool bool,
includeOrphanPool bool) (
sendingInTransactionPool map[string]*externalapi.DomainTransaction,
receivingInTransactionPool map[string]*externalapi.DomainTransaction,
sendingInOrphanPool map[string]*externalapi.DomainTransaction,
receivingInOrphanPool map[string]*externalapi.DomainTransaction,
err error)
AllTransactions(
includeTransactionPool bool,
includeOrphanPool bool,
) (
transactionPoolTransactions []*externalapi.DomainTransaction,
orphanPoolTransactions []*externalapi.DomainTransaction)
TransactionCount(
includeTransactionPool bool,
includeOrphanPool bool) int
RevalidateHighPriorityTransactions() (validTransactions []*externalapi.DomainTransaction, err error)
IsTransactionOutputDust(output *externalapi.DomainTransactionOutput) bool
}

View File

@@ -1,7 +1,6 @@
package utxoindex
import (
"encoding/binary"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
@@ -20,24 +19,5 @@ type UTXOOutpoints map[externalapi.DomainOutpoint]interface{}
// a successful update
type UTXOChanges struct {
Added map[ScriptPublicKeyString]UTXOOutpointEntryPairs
Removed map[ScriptPublicKeyString]UTXOOutpoints
}
// ConvertScriptPublicKeyToString converts the given scriptPublicKey to a string
func ConvertScriptPublicKeyToString(scriptPublicKey *externalapi.ScriptPublicKey) ScriptPublicKeyString {
var versionBytes = make([]byte, 2) // uint16
binary.LittleEndian.PutUint16(versionBytes, scriptPublicKey.Version)
versionString := ScriptPublicKeyString(versionBytes)
scriptString := ScriptPublicKeyString(scriptPublicKey.Script)
return versionString + scriptString
}
// ConvertStringToScriptPublicKey converts the given string to a scriptPublicKey
func ConvertStringToScriptPublicKey(string ScriptPublicKeyString) *externalapi.ScriptPublicKey {
bytes := []byte(string)
version := binary.LittleEndian.Uint16(bytes[:2])
script := bytes[2:]
return &externalapi.ScriptPublicKey{Script: script, Version: version}
Removed map[ScriptPublicKeyString]UTXOOutpointEntryPairs
}
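With the conversion helpers removed, script public keys now round-trip through externalapi directly, as the store code later in this diff does. A sketch of the round-trip:

func scriptPublicKeyRoundTrip(scriptPublicKey *externalapi.ScriptPublicKey) *externalapi.ScriptPublicKey {
	key := ScriptPublicKeyString(scriptPublicKey.String())
	return externalapi.NewScriptPublicKeyFromString(string(key))
}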

View File

@@ -2,6 +2,8 @@ package utxoindex
import (
"encoding/binary"
"github.com/kaspanet/kaspad/domain/consensus/database/binaryserialization"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/infrastructure/logger"
@@ -10,11 +12,13 @@ import (
var utxoIndexBucket = database.MakeBucket([]byte("utxo-index"))
var virtualParentsKey = database.MakeBucket([]byte("")).Key([]byte("utxo-index-virtual-parents"))
var circulatingSupplyKey = database.MakeBucket([]byte("")).Key([]byte("utxo-index-circulating-supply"))
type utxoIndexStore struct {
database database.Database
toAdd map[ScriptPublicKeyString]UTXOOutpointEntryPairs
toRemove map[ScriptPublicKeyString]UTXOOutpoints
database database.Database
toAdd map[ScriptPublicKeyString]UTXOOutpointEntryPairs
toRemove map[ScriptPublicKeyString]UTXOOutpointEntryPairs
virtualParents []*externalapi.DomainHash
}
@@ -22,13 +26,13 @@ func newUTXOIndexStore(database database.Database) *utxoIndexStore {
return &utxoIndexStore{
database: database,
toAdd: make(map[ScriptPublicKeyString]UTXOOutpointEntryPairs),
toRemove: make(map[ScriptPublicKeyString]UTXOOutpoints),
toRemove: make(map[ScriptPublicKeyString]UTXOOutpointEntryPairs),
}
}
func (uis *utxoIndexStore) add(scriptPublicKey *externalapi.ScriptPublicKey, outpoint *externalapi.DomainOutpoint, utxoEntry externalapi.UTXOEntry) error {
key := ConvertScriptPublicKeyToString(scriptPublicKey)
key := ScriptPublicKeyString(scriptPublicKey.String())
log.Tracef("Adding outpoint %s:%d to scriptPublicKey %s",
outpoint.TransactionID, outpoint.Index, key)
@@ -61,8 +65,8 @@ func (uis *utxoIndexStore) add(scriptPublicKey *externalapi.ScriptPublicKey, out
return nil
}
func (uis *utxoIndexStore) remove(scriptPublicKey *externalapi.ScriptPublicKey, outpoint *externalapi.DomainOutpoint) error {
key := ConvertScriptPublicKeyToString(scriptPublicKey)
func (uis *utxoIndexStore) remove(scriptPublicKey *externalapi.ScriptPublicKey, outpoint *externalapi.DomainOutpoint, utxoEntry externalapi.UTXOEntry) error {
key := ScriptPublicKeyString(scriptPublicKey.String())
log.Tracef("Removing outpoint %s:%d from scriptPublicKey %s",
outpoint.TransactionID, outpoint.Index, key)
@@ -76,19 +80,19 @@ func (uis *utxoIndexStore) remove(scriptPublicKey *externalapi.ScriptPublicKey,
}
}
// Create a UTXOOutpoints entry in `toRemove` if it doesn't exist
// Create a UTXOOutpointEntryPair in `toRemove` if it doesn't exist
if _, ok := uis.toRemove[key]; !ok {
log.Tracef("Creating key %s in `toRemove`", key)
uis.toRemove[key] = make(UTXOOutpoints)
uis.toRemove[key] = make(UTXOOutpointEntryPairs)
}
// Return an error if the outpoint already exists in `toRemove`
toRemoveOutpointsOfKey := uis.toRemove[key]
if _, ok := toRemoveOutpointsOfKey[*outpoint]; ok {
toRemovePairsOfKey := uis.toRemove[key]
if _, ok := toRemovePairsOfKey[*outpoint]; ok {
return errors.Errorf("cannot remove outpoint %s because it's being removed already", outpoint)
}
- toRemoveOutpointsOfKey[*outpoint] = struct{}{}
+ toRemovePairsOfKey[*outpoint] = utxoEntry
log.Tracef("Removed outpoint %s:%d from scriptPublicKey %s",
outpoint.TransactionID, outpoint.Index, key)
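The remove method above also keeps its double-staging guard: if an outpoint is already present in toRemove for that script public key, staging it again returns an error rather than silently overwriting the stored entry. A compact sketch of that guard with simplified, illustrative types:

package main

import "fmt"

type entry struct{ amountSompi uint64 }

// stageRemoval mirrors the guard in remove(): an outpoint may only be
// staged for removal once per flush cycle.
func stageRemoval(toRemove map[string]entry, outpoint string, e entry) error {
	if _, ok := toRemove[outpoint]; ok {
		return fmt.Errorf("cannot remove outpoint %s because it's being removed already", outpoint)
	}
	toRemove[outpoint] = e
	return nil
}

func main() {
	toRemove := map[string]entry{}
	fmt.Println(stageRemoval(toRemove, "tx1:0", entry{100})) // <nil>
	fmt.Println(stageRemoval(toRemove, "tx1:0", entry{100})) // already-staged error
}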
@@ -101,7 +105,7 @@ func (uis *utxoIndexStore) updateVirtualParents(virtualParents []*externalapi.Do
func (uis *utxoIndexStore) discard() {
uis.toAdd = make(map[ScriptPublicKeyString]UTXOOutpointEntryPairs)
- uis.toRemove = make(map[ScriptPublicKeyString]UTXOOutpoints)
+ uis.toRemove = make(map[ScriptPublicKeyString]UTXOOutpointEntryPairs)
uis.virtualParents = nil
}
@@ -115,10 +119,12 @@ func (uis *utxoIndexStore) commit() error {
}
defer dbTransaction.RollbackUnlessClosed()
- for scriptPublicKeyString, toRemoveOutpointsOfKey := range uis.toRemove {
- scriptPublicKey := ConvertStringToScriptPublicKey(scriptPublicKeyString)
+ toRemoveSompiSupply := uint64(0)
+ for scriptPublicKeyString, toRemoveUTXOOutpointEntryPairs := range uis.toRemove {
+ scriptPublicKey := externalapi.NewScriptPublicKeyFromString(string(scriptPublicKeyString))
bucket := uis.bucketForScriptPublicKey(scriptPublicKey)
- for outpointToRemove := range toRemoveOutpointsOfKey {
+ for outpointToRemove, utxoEntryToRemove := range toRemoveUTXOOutpointEntryPairs {
key, err := uis.convertOutpointToKey(bucket, &outpointToRemove)
if err != nil {
return err
@@ -127,11 +133,14 @@ func (uis *utxoIndexStore) commit() error {
if err != nil {
return err
}
toRemoveSompiSupply = toRemoveSompiSupply + utxoEntryToRemove.Amount()
}
}
toAddSompiSupply := uint64(0)
for scriptPublicKeyString, toAddUTXOOutpointEntryPairs := range uis.toAdd {
- scriptPublicKey := ConvertStringToScriptPublicKey(scriptPublicKeyString)
+ scriptPublicKey := externalapi.NewScriptPublicKeyFromString(string(scriptPublicKeyString))
bucket := uis.bucketForScriptPublicKey(scriptPublicKey)
for outpointToAdd, utxoEntryToAdd := range toAddUTXOOutpointEntryPairs {
key, err := uis.convertOutpointToKey(bucket, &outpointToAdd)
@@ -146,6 +155,7 @@ func (uis *utxoIndexStore) commit() error {
if err != nil {
return err
}
toAddSompiSupply = toAddSompiSupply + utxoEntryToAdd.Amount()
}
}
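Inside commit, the two loops accumulate running totals for everything being flushed: each removed entry's Amount() is summed into toRemoveSompiSupply and each added entry's Amount() into toAddSompiSupply, and the pair is then handed to updateCirculatingSompiSupply. A minimal sketch of that accounting over flattened, hypothetical staging maps:

package main

import "fmt"

type entry struct{ amountSompi uint64 }

func main() {
	// Hypothetical staging maps, flattened to outpoint-ID keys for brevity.
	toAdd := map[string]entry{"tx1:0": {1_000}, "tx2:1": {2_500}}
	toRemove := map[string]entry{"tx0:3": {400}}

	// Mirror of the accounting in commit(): sum what goes in and what goes out.
	toAddSompiSupply, toRemoveSompiSupply := uint64(0), uint64(0)
	for _, e := range toAdd {
		toAddSompiSupply += e.amountSompi
	}
	for _, e := range toRemove {
		toRemoveSompiSupply += e.amountSompi
	}

	// The circulating supply moves by the net difference.
	fmt.Println(toAddSompiSupply - toRemoveSompiSupply) // 3100
}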
@@ -155,6 +165,11 @@ func (uis *utxoIndexStore) commit() error {
return err
}
err = uis.updateCirculatingSompiSupply(dbTransaction, toAddSompiSupply, toRemoveSompiSupply)
if err != nil {
return err
}
err = dbTransaction.Commit()
if err != nil {
return err
@@ -165,20 +180,29 @@ func (uis *utxoIndexStore) commit() error {
}
func (uis *utxoIndexStore) addAndCommitOutpointsWithoutTransaction(utxoPairs []*externalapi.OutpointAndUTXOEntryPair) error {
toAddSompiSupply := uint64(0)
for _, pair := range utxoPairs {
bucket := uis.bucketForScriptPublicKey(pair.UTXOEntry.ScriptPublicKey())
key, err := uis.convertOutpointToKey(bucket, pair.Outpoint)
if err != nil {
return err
}
serializedUTXOEntry, err := serializeUTXOEntry(pair.UTXOEntry)
if err != nil {
return err
}
err = uis.database.Put(key, serializedUTXOEntry)
if err != nil {
return err
}
toAddSompiSupply = toAddSompiSupply + pair.UTXOEntry.Amount()
}
err := uis.updateCirculatingSompiSupplyWithoutTransaction(toAddSompiSupply, uint64(0))
if err != nil {
return err
}
return nil
@@ -211,7 +235,7 @@ func (uis *utxoIndexStore) convertKeyToOutpoint(key *database.Key) (*externalapi
func (uis *utxoIndexStore) stagedData() (
toAdd map[ScriptPublicKeyString]UTXOOutpointEntryPairs,
- toRemove map[ScriptPublicKeyString]UTXOOutpoints,
+ toRemove map[ScriptPublicKeyString]UTXOOutpointEntryPairs,
virtualParents []*externalapi.DomainHash) {
toAddClone := make(map[ScriptPublicKeyString]UTXOOutpointEntryPairs, len(uis.toAdd))
@@ -223,13 +247,13 @@ func (uis *utxoIndexStore) stagedData() (
toAddClone[scriptPublicKeyString] = toAddUTXOOutpointEntryPairsClone
}
- toRemoveClone := make(map[ScriptPublicKeyString]UTXOOutpoints, len(uis.toRemove))
- for scriptPublicKeyString, toRemoveOutpoints := range uis.toRemove {
- toRemoveOutpointsClone := make(UTXOOutpoints, len(toRemoveOutpoints))
- for outpoint := range toRemoveOutpoints {
- toRemoveOutpointsClone[outpoint] = struct{}{}
+ toRemoveClone := make(map[ScriptPublicKeyString]UTXOOutpointEntryPairs, len(uis.toRemove))
+ for scriptPublicKeyString, toRemoveUTXOOutpointEntryPairs := range uis.toRemove {
+ toRemoveUTXOOutpointEntryPairsClone := make(UTXOOutpointEntryPairs, len(toRemoveUTXOOutpointEntryPairs))
+ for outpoint, utxoEntry := range toRemoveUTXOOutpointEntryPairs {
+ toRemoveUTXOOutpointEntryPairsClone[outpoint] = utxoEntry
}
- toRemoveClone[scriptPublicKeyString] = toRemoveOutpointsClone
+ toRemoveClone[scriptPublicKeyString] = toRemoveUTXOOutpointEntryPairsClone
}
return toAddClone, toRemoveClone, uis.virtualParents
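stagedData hands callers deep copies of the staging maps, now cloning the toRemove entries as well, so the returned snapshot neither mutates nor is mutated by the live staging area. The same nested-map cloning pattern in a self-contained sketch with illustrative types:

package main

import "fmt"

type entry struct{ amountSompi uint64 }

// cloneNested deep-copies a map of maps so that the caller's snapshot and
// the live staging area can diverge safely afterwards.
func cloneNested(src map[string]map[string]entry) map[string]map[string]entry {
	clone := make(map[string]map[string]entry, len(src))
	for scriptKey, inner := range src {
		innerClone := make(map[string]entry, len(inner))
		for outpoint, e := range inner {
			innerClone[outpoint] = e
		}
		clone[scriptKey] = innerClone
	}
	return clone
}

func main() {
	live := map[string]map[string]entry{"script-a": {"tx1:0": {amountSompi: 100}}}
	snapshot := cloneNested(live)
	delete(live["script-a"], "tx1:0")      // mutating the live map...
	fmt.Println(len(snapshot["script-a"])) // ...leaves the snapshot intact: 1
}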
@@ -294,6 +318,11 @@ func (uis *utxoIndexStore) deleteAll() error {
return err
}
err = uis.database.Delete(circulatingSupplyKey)
if err != nil {
return err
}
cursor, err := uis.database.Cursor(utxoIndexBucket)
if err != nil {
return err
@@ -313,3 +342,92 @@ func (uis *utxoIndexStore) deleteAll() error {
return nil
}
func (uis *utxoIndexStore) initializeCirculatingSompiSupply() error {
cursor, err := uis.database.Cursor(utxoIndexBucket)
if err != nil {
return err
}
defer cursor.Close()
circulatingSompiSupplyInDatabase := uint64(0)
for cursor.Next() {
serializedUTXOEntry, err := cursor.Value()
if err != nil {
return err
}
utxoEntry, err := deserializeUTXOEntry(serializedUTXOEntry)
if err != nil {
return err
}
circulatingSompiSupplyInDatabase = circulatingSompiSupplyInDatabase + utxoEntry.Amount()
}
err = uis.database.Put(
circulatingSupplyKey,
binaryserialization.SerializeUint64(circulatingSompiSupplyInDatabase),
)
if err != nil {
return err
}
return nil
}
func (uis *utxoIndexStore) updateCirculatingSompiSupply(dbTransaction database.Transaction, toAddSompiSupply uint64, toRemoveSompiSupply uint64) error {
if toAddSompiSupply != toRemoveSompiSupply {
circulatingSupplyBytes, err := dbTransaction.Get(circulatingSupplyKey)
if err != nil {
return err
}
circulatingSupply, err := binaryserialization.DeserializeUint64(circulatingSupplyBytes)
if err != nil {
return err
}
err = dbTransaction.Put(
circulatingSupplyKey,
binaryserialization.SerializeUint64(circulatingSupply+toAddSompiSupply-toRemoveSompiSupply),
)
if err != nil {
return err
}
}
return nil
}
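updateCirculatingSompiSupply applies the two totals as a single read-modify-write on the value stored under circulatingSupplyKey, and skips the database round trip entirely when the deltas cancel out. A sketch of that update rule against a hypothetical in-memory store, with encoding/binary standing in for the binaryserialization helpers:

package main

import (
	"encoding/binary"
	"fmt"
)

// In-memory stand-in for the database value kept under circulatingSupplyKey.
var store = map[string][]byte{}

func putUint64(key string, v uint64) {
	b := make([]byte, 8)
	binary.LittleEndian.PutUint64(b, v)
	store[key] = b
}

func getUint64(key string) uint64 {
	return binary.LittleEndian.Uint64(store[key])
}

// updateSupply mirrors updateCirculatingSompiSupply: a no-op when the deltas
// cancel, otherwise new supply = old supply + added - removed.
func updateSupply(toAddSompiSupply, toRemoveSompiSupply uint64) {
	if toAddSompiSupply == toRemoveSompiSupply {
		return
	}
	putUint64("circulating-supply",
		getUint64("circulating-supply")+toAddSompiSupply-toRemoveSompiSupply)
}

func main() {
	putUint64("circulating-supply", 10_000)
	updateSupply(2_500, 400)
	fmt.Println(getUint64("circulating-supply")) // 12100
}

The transaction-less variant that follows applies the same rule, only directly against uis.database instead of an open database transaction.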
func (uis *utxoIndexStore) updateCirculatingSompiSupplyWithoutTransaction(toAddSompiSupply uint64, toRemoveSompiSupply uint64) error {
if toAddSompiSupply != toRemoveSompiSupply {
circulatingSupplyBytes, err := uis.database.Get(circulatingSupplyKey)
if err != nil {
return err
}
circulatingSupply, err := binaryserialization.DeserializeUint64(circulatingSupplyBytes)
if err != nil {
return err
}
err = uis.database.Put(
circulatingSupplyKey,
binaryserialization.SerializeUint64(circulatingSupply+toAddSompiSupply-toRemoveSompiSupply),
)
if err != nil {
return err
}
}
return nil
}
func (uis *utxoIndexStore) getCirculatingSompiSupply() (uint64, error) {
if uis.isAnythingStaged() {
return 0, errors.Errorf("cannot get circulatingSupply while staging isn't empty")
}
circulatingSupply, err := uis.database.Get(circulatingSupplyKey)
if err != nil {
return 0, err
}
return binaryserialization.DeserializeUint64(circulatingSupply)
}
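getCirculatingSompiSupply refuses to answer while anything is staged, since a pending commit would make the stored counter stale relative to the index contents. A trimmed-down, hypothetical illustration of that guard:

package main

import (
	"errors"
	"fmt"
)

// Hypothetical, reduced view of the store for illustration only.
type supplyReader struct {
	staged bool   // true while toAdd/toRemove/virtualParents hold uncommitted data
	supply uint64 // value persisted under circulatingSupplyKey
}

// circulatingSompiSupply mirrors the guard in getCirculatingSompiSupply:
// the cached counter is only trustworthy when nothing is staged.
func (r *supplyReader) circulatingSompiSupply() (uint64, error) {
	if r.staged {
		return 0, errors.New("cannot get circulatingSupply while staging isn't empty")
	}
	return r.supply, nil
}

func main() {
	r := &supplyReader{staged: false, supply: 21_000_000}
	supply, err := r.circulatingSompiSupply()
	fmt.Println(supply, err) // 21000000 <nil>
}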
