Compare commits

...

63 Commits

Author SHA1 Message Date
Svarog
4bca7342d3 [NOD-883] Fix dockerfile in kaspaminer + set real version for go-libsecp256k1 (#673) 2020-03-26 17:50:09 +02:00
Elichai Turkel
f80908fb4e [NOD-876] Replace ecc with go-secp256k1 for public keys (#670)
* Replace ecc with go-secp256k1 in txscript

* Replace ecc with go-secp256k1 in util and cmd

* Replace ecc.Multiset with secp256k1.MultiSet
2020-03-26 17:03:39 +02:00
stasatdaglabs
e000e10738 [NOD-880] Remove CGO_ENABLED=0 from Dockerfile. (#671) 2020-03-26 14:02:57 +02:00
Ori Newman
d83862f36c [NOD-855] Save ECMH for block utxo and not diff utxo (#669)
* [NOD-855] Save ECMH for each block UTXO

* [NOD-855] Remove UpdateExtraNonce method

* [NOD-855] Remove multiset data from UTXO diffs

* [NOD-855] Fix to fetch multiset of selected parent

* [NOD-855] Don't remove coinbase inputs from multiset

* [NOD-855] Create multisetBucketName on startup

* [NOD-855] Remove multiset from UTXO diff tests

* [NOD-855] clear new entries from multisetstore on saveChangesFromBlock

* [NOD-855] Fix tests

* [NOD-855] Use UnacceptedBlueScore when adding current block transactions to multiset

* [NOD-855] Hash utxo before adding it to multiset

* [NOD-855] Pass isCoinbase to NewUTXOEntry

* [NOD-855] Do not use hash when adding entries to multiset

* [NOD-855] When calculating multiset, replace the unaccepted blue score of selected parent transaction with the block blue score

* [NOD-855] Manually add a chained transaction to a block in TestChainedTransactions

* [NOD-855] Change name and comments

* [NOD-855] Use FindAcceptanceData to find a specific block acceptance data

* [NOD-855] Remove redundant copy of txIn.PreviousOutpoint

* [NOD-855] Use fmt.Sprintf when creating internalRPCError
2020-03-26 13:06:12 +02:00
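The NOD-855 design relies on the multiset being commutative and incremental: a block's UTXO commitment is derived from its selected parent's multiset by removing the entries the block spends and adding the ones it creates, so nothing multiset-related needs to be stored per UTXO diff. A minimal sketch of that flow, with a toy accumulator standing in for secp256k1.MultiSet:

package main

import (
    "crypto/sha256"
    "encoding/binary"
    "fmt"
)

// toyMultiSet stands in for secp256k1.MultiSet. Real ECMH hashes each
// entry to an elliptic-curve point and adds the points; summing
// truncated SHA-256 digests keeps the commutative add/remove algebra
// and none of the security.
type toyMultiSet struct{ acc uint64 }

func digest(entry []byte) uint64 {
    h := sha256.Sum256(entry)
    return binary.LittleEndian.Uint64(h[:8])
}

func (m *toyMultiSet) Add(entry []byte)    { m.acc += digest(entry) }
func (m *toyMultiSet) Remove(entry []byte) { m.acc -= digest(entry) }

// multisetForBlock derives a block's multiset from its selected
// parent's: remove what the block spends, add what it creates. This is
// why one multiset per block suffices and the UTXO diffs need none.
func multisetForBlock(parent toyMultiSet, spent, created [][]byte) toyMultiSet {
    ms := parent // value copy; the parent's multiset is untouched
    for _, e := range spent {
        ms.Remove(e)
    }
    for _, e := range created {
        ms.Add(e)
    }
    return ms
}

func main() {
    var parent toyMultiSet
    parent.Add([]byte("utxo-a"))
    parent.Add([]byte("utxo-b"))

    block := multisetForBlock(parent, [][]byte{[]byte("utxo-a")}, [][]byte{[]byte("utxo-c")})
    fmt.Printf("block UTXO commitment: %016x\n", block.acc)
}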
Svarog
1020402b34 [NOD-869] Close panicHandlerDone instead of sending an empty struct + use time.After instead of time.Tick (#668) 2020-03-25 16:14:08 +02:00
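Two Go idioms meet in NOD-869. Closing a channel broadcasts completion to every receiver, while sending an empty struct wakes exactly one; and time.After returns a one-shot timer that is reclaimed after firing, while the ticker behind time.Tick is never stopped. A small sketch of both:

package main

import (
    "fmt"
    "time"
)

func main() {
    panicHandlerDone := make(chan struct{})

    go func() {
        // close broadcasts: every receiver is released and none can be
        // missed, whereas sending struct{}{} wakes exactly one receiver
        // and blocks if there is none.
        defer close(panicHandlerDone)
        // ... handler body ...
    }()

    select {
    case <-panicHandlerDone:
        fmt.Println("handler finished")
    case <-time.After(time.Second):
        // time.Tick would leave its ticker running forever; the
        // one-shot timer behind time.After is reclaimed after firing.
        fmt.Println("timed out")
    }
}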
Mike Zak
bc6ce6ed53 Update version to v0.2.0 2020-03-25 11:51:14 +02:00
Ori Newman
d3b1953deb [NOD-848] optimize utxo diffs serialize allocations (#666)
* [NOD-848] Optimize allocations when serializing UTXO diffs

* [NOD-848] Use same UTXO serialization everywhere, and use compression as well

* [NOD-848] Fix usage of wrong buffer

* [NOD-848] Fix tests

* [NOD-848] Fix wire tests

* [NOD-848] Fix tests

* [NOD-848] Remove VLQ

* [NOD-848] Fix comments

* [NOD-848] Add varint for big endian encoding

* [NOD-848] In TestVarIntWire, assume the expected decoded value is the same as the serialization input

* [NOD-848] Serialize outpoint index with big endian varint

* [NOD-848] Remove p2pk from compression support

* [NOD-848] Fix comments

* [NOD-848] Remove p2pk from decompression support

* [NOD-848] Make entry compression optional

* [NOD-848] Fix tests

* [NOD-848] Fix comments and var names

* [NOD-848] Remove UTXO compression

* [NOD-848] Fix tests

* [NOD-848] Remove big endian varint

* [NOD-848] Fix comments

* [NOD-848] Rename ReadVarIntLittleEndian->ReadVarInt and fix WriteVarInt comment

* [NOD-848] Add outpointIndexByteOrder variable

* [NOD-848] Remove redundant comment

* [NOD-848] Fix outpointMaxSerializeSize to the correct value

* [NOD-848] Move subBuffer to utils
2020-03-24 16:44:41 +02:00
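The varint work in NOD-848 settles on a Bitcoin-style variable-length integer: a one-byte discriminator (values below 0xfd stand for themselves; 0xfd, 0xfe and 0xff announce 2-, 4- and 8-byte payloads) followed by a little-endian payload. A sketch of that layout, assuming kaspad's wire format matches the classic one:

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
    "io"
)

// WriteVarInt emits the 1/3/5/9-byte discriminated encoding with
// little-endian payloads.
func WriteVarInt(w io.Writer, val uint64) error {
    switch {
    case val < 0xfd:
        _, err := w.Write([]byte{byte(val)})
        return err
    case val <= 0xffff:
        if _, err := w.Write([]byte{0xfd}); err != nil {
            return err
        }
        return binary.Write(w, binary.LittleEndian, uint16(val))
    case val <= 0xffffffff:
        if _, err := w.Write([]byte{0xfe}); err != nil {
            return err
        }
        return binary.Write(w, binary.LittleEndian, uint32(val))
    default:
        if _, err := w.Write([]byte{0xff}); err != nil {
            return err
        }
        return binary.Write(w, binary.LittleEndian, val)
    }
}

// ReadVarInt reverses WriteVarInt; round-tripping yields the input
// value back, which is the property TestVarIntWire asserts.
func ReadVarInt(r io.Reader) (uint64, error) {
    var disc [1]byte
    if _, err := io.ReadFull(r, disc[:]); err != nil {
        return 0, err
    }
    switch disc[0] {
    case 0xff:
        var v uint64
        err := binary.Read(r, binary.LittleEndian, &v)
        return v, err
    case 0xfe:
        var v uint32
        err := binary.Read(r, binary.LittleEndian, &v)
        return uint64(v), err
    case 0xfd:
        var v uint16
        err := binary.Read(r, binary.LittleEndian, &v)
        return uint64(v), err
    default:
        return uint64(disc[0]), nil
    }
}

func main() {
    var buf bytes.Buffer
    _ = WriteVarInt(&buf, 70015)
    v, _ := ReadVarInt(&buf)
    fmt.Println(v) // 70015
}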
Svarog
3c67215e76 [NOD-796] Upgrade to go 1.14 (#665) 2020-03-22 14:50:13 +02:00
Svarog
586624c836 [NOD-853] Add profiler server to kaspaminer (#664) 2020-03-19 17:19:31 +02:00
Svarog
49855e6333 [NOD-823] Use WithDiffInPlace for the implementation of WithDiff (#657)
* [NOD-823] Use WithDiffInPlace for the implementation of WithDiff

* [NOD-823] Unexport withDiffInPlace
2020-03-17 11:19:02 +02:00
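NOD-823 is the standard copy-then-mutate layering: the exported WithDiff clones the receiver and delegates to the unexported, mutating withDiffInPlace, leaving a single implementation of the diff algebra to maintain. A simplified sketch (the stand-in UTXODiff omits the blue scores and error checking the real type carries):

package main

import "fmt"

// UTXODiff is a simplified stand-in: the real kaspad type also tracks
// blue scores and returns errors for inconsistent combinations.
type UTXODiff struct {
    toAdd    map[string]struct{}
    toRemove map[string]struct{}
}

func newUTXODiff() *UTXODiff {
    return &UTXODiff{
        toAdd:    make(map[string]struct{}),
        toRemove: make(map[string]struct{}),
    }
}

func (d *UTXODiff) clone() *UTXODiff {
    c := newUTXODiff()
    for o := range d.toAdd {
        c.toAdd[o] = struct{}{}
    }
    for o := range d.toRemove {
        c.toRemove[o] = struct{}{}
    }
    return c
}

// withDiffInPlace mutates d to represent "d, then other".
func (d *UTXODiff) withDiffInPlace(other *UTXODiff) {
    for o := range other.toRemove {
        if _, ok := d.toAdd[o]; ok {
            delete(d.toAdd, o) // added then removed: cancels out
        } else {
            d.toRemove[o] = struct{}{}
        }
    }
    for o := range other.toAdd {
        if _, ok := d.toRemove[o]; ok {
            delete(d.toRemove, o) // removed then re-added: cancels out
        } else {
            d.toAdd[o] = struct{}{}
        }
    }
}

// WithDiff leaves the receiver untouched: clone, then reuse the
// in-place logic. This is the layering the commit introduces.
func (d *UTXODiff) WithDiff(other *UTXODiff) *UTXODiff {
    result := d.clone()
    result.withDiffInPlace(other)
    return result
}

func main() {
    a, b := newUTXODiff(), newUTXODiff()
    a.toAdd["outpoint1"] = struct{}{}
    b.toRemove["outpoint1"] = struct{}{}
    fmt.Println(len(a.WithDiff(b).toAdd), len(a.toAdd)) // 0 1: a is unchanged
}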
Ori Newman
624249c0f3 [NOD-842] Use flushToDB with the same transaction as everything else in saveChangesFromBlock and never ignore flushToDB errors (#662) 2020-03-16 11:05:17 +02:00
Ori Newman
1cf443a63b [NOD-841] Fix tests to not be dependent on block rate (#661)
* [NOD-841] Fix TestDifficulty

* [NOD-841] Fix TestProcessDelayedBlocks

* [NOD-841] Fix TestCheckBlockSanity

* [NOD-841] Fix TestProcessDelayedBlocks

* [NOD-841] Shorten long lines
2020-03-15 18:08:03 +02:00
Ori Newman
8909679f44 [NOD-818] Remove time adjustment (#658)
* [NOD-818] Remove time adjustment

* [NOD-818] Remove interface ensuring and copyright message

* [NOD-818] Update comment
2020-03-15 17:37:01 +02:00
Ori Newman
e58efbf0ea [NOD-839] Panic from non-rule error from ProcessBlock (#660) 2020-03-15 17:26:53 +02:00
Ori Newman
34fb066590 [NOD-518] Implement getmempoolentry (#656) 2020-03-12 16:00:18 +02:00
stasatdaglabs
299826f392 [NOD-827] Get rid of dbtools insecureimport.go and loadheaders.go (#655)
* [NOD-827] Get rid of dbtools insecureimport.go and loadheaders.go

* [NOD-827] Remove commands from realMain().
2020-03-10 16:31:13 +02:00
stasatdaglabs
3d8dd8724d [NOD-816] Remove TxIndex and AddrIndex (#653)
* [NOD-816] Remove TxIndex.

* [NOD-816] Remove AddrIndex.

* [NOD-816] Remove mentions of TxIndex and AddrIndex.

* [NOD-816] Remove mentions of getrawtransaction.

* [NOD-816] Remove mentions of searchrawtransaction.

* [NOD-816] Remove cmd/addsubnetwork.

* [NOD-816] Fix a comment.

* [NOD-816] Fix a comment.

* [NOD-816] Implement BlockDAG.TxConfirmations.

* [NOD-816] Return confirmations in getTxOut.

* [NOD-816] Rename TxConfirmations to UTXOConfirmations.

* [NOD-816] Rename txConfirmations to utxoConfirmations.

* [NOD-816] Fix capitalization in variable names.

* [NOD-816] Add acceptance index to addblock.

* [NOD-816] Get rid of txrawresult-confirmations.

* [NOD-816] Fix config flag.
2020-03-10 16:09:31 +02:00
Svarog
b8a00f7519 [NOD-778] Optimize RestoreUTXO (#652)
* [NOD-778] Add WithDiffInPlace

* [NOD-778] Fix bug in WithDiffInPlace

* [NOD-778] Add comment to WithDiffInPlace

* [NOD-778] Add double dag.restoreUTXO to benchmark, to remove time for hard-disk loading

* [NOD-778] Also test WithDiffInPlace in TestUTXODiffRules

* [NOD-778] Add tests for all cases possible in TestUTXODiffRules

* [NOD-778] Fix test-case 'first in toAdd in this, second in toRemove in this and toAdd in other'

* [NOD-778] Fixed in WithDiffInPlace

* [NOD-778] Update error messages when diffFrom(withDiffResult) fails in TestUTXODiffRules

* [NOD-778] diffFrom: disallow utxos both in d.toAdd, other.toAdd, and only one of d.toRemove and other.toRemove

* [NOD-778] Fix expected value in 'first in toRemove in this, second in toRemove in other'

* [NOD-778] diffFrom: Disallow situations where utxo both in d.toRemove and other.toRemove with different blue scores and no corresponding utxo in d.toAdd

* [NOD-778] WithDiff: Fix faulty logic that allows updates to blue scores

* [NOD-778] Fix WithDiffInPlace to pass all tests

* [NOD-778] Deleted temporary prints

* [NOD-778] Sorted TestUTXODiffRules tests according to spreadsheet

* [NOD-778] Delete deeputxo_test.go

* [NOD-778] Updated comments

* [NOD-778] Re-order

* [NOD-778] Re-order test-cases to be according to spreadsheet

* [NOD-778] Simplified case when both d.toRemove and other.toRemove have the same outpoint in diffFrom

* [NOD-778] Change a few error messages that say 'transaction' instead of 'outpoint'

* [NOD-778] Rename: utxoToAdd/Remove -> entryToAdd/Remove

* [NOD-788] Remove redundant else

* [NOD-778] Rename: existingUTXO -> existingEntry + remove redundant else

* [NOD-778] Correct test name
2020-03-10 15:32:19 +02:00
stasatdaglabs
4dfc8cf5b0 [NOD-816] Remove addsubnetwork. (#654) 2020-03-10 11:09:33 +02:00
Ori Newman
5a99e4d2f3 [NOD-806] Exit early after panic (#650)
* [NOD-806] After panic, gracefully stop logs, and then exit immediately

* [NOD-806] Convert non-kaspad applications to use the new spawn

* [NOD-806] Fix disabled log at rpcclient

* [NOD-806] Refactor HandlePanic

* [NOD-806] Cancel Logger interface

* [NOD-806] Remove redundant spawn checks from waitgroup_test.go

* [NOD-806] Use caller subsystem when logging panics

* [NOD-806] Fix go vet errors
2020-03-08 11:24:37 +02:00
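The spawn wrapper behind NOD-806 exists because a panic in a bare goroutine unwinds only that goroutine's stack. Routing every goroutine through a wrapper lets the process log the panic with its stack, give the logger a moment to flush, and exit immediately. A sketch of the idea; the handler body is an assumption, and the real helper in util/panics takes a logger:

package main

import (
    "log"
    "os"
    "runtime/debug"
    "time"
)

// spawn replaces bare `go` so that a panic in any goroutine is logged
// with its stack before the process exits, instead of unwinding only
// that goroutine.
func spawn(f func()) {
    go func() {
        defer func() {
            r := recover()
            if r == nil {
                return
            }
            log.Printf("panic: %v\n%s", r, debug.Stack())
            time.Sleep(100 * time.Millisecond) // let the log flush
            os.Exit(1)                         // then exit immediately
        }()
        f()
    }()
}

func main() {
    spawn(func() { panic("boom") })
    select {} // block forever; the panic handler terminates the process
}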
Svarog
606cd668ff [NOD-810] Fix error text in lookupParentNodes (#651) 2020-03-05 15:49:36 +02:00
Ori Newman
dd537f5143 [NOD-808] Use syndtr/goleveldb instead of btcsuite/goleveldb. (#649) 2020-03-05 12:26:48 +02:00
stasatdaglabs
a1c631be62 [NOD-798] Disconnect from a peer if a block received from it gets rejected (#648)
* [NOD-798] Disconnect from a peer if its block gets rejected.

* [NOD-798] Make a comment less ambiguous.
2020-03-03 09:47:22 +02:00
Ori Newman
707a728656 [NOD-552] Add NormalizeRPCServerAddress and use it where needed (#643)
* [NOD-552] Add NormalizeRPCServerAddress and use it where needed

* [NOD-552] Make NormalizeAddress return an error for an invalid address

* [NOD-552] Use longer lines for a comment
2020-03-01 16:37:26 +02:00
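NormalizeRPCServerAddress amounts to appending the network's default port when the user supplied only a host, and returning an error rather than guessing when the result still isn't a valid host:port. A hedged sketch using only the standard library (the real helper derives the default port from the active network):

package main

import (
    "fmt"
    "net"
)

// normalizeRPCServerAddress appends defaultPort when addr carries no
// port, and errors out when the result still isn't a valid host:port.
func normalizeRPCServerAddress(addr, defaultPort string) (string, error) {
    if _, _, err := net.SplitHostPort(addr); err == nil {
        return addr, nil // already host:port
    }
    out := net.JoinHostPort(addr, defaultPort) // brackets IPv6 literals
    if _, _, err := net.SplitHostPort(out); err != nil {
        return "", err // invalid address: report rather than guess
    }
    return out, nil
}

func main() {
    fmt.Println(normalizeRPCServerAddress("localhost", "16110")) // localhost:16110
    fmt.Println(normalizeRPCServerAddress("::1", "16110"))       // [::1]:16110
}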
stasatdaglabs
80b5631a48 [NOD-726] Only print "no sync peer" message when not current (#646)
* [NOD-726] Only print "no sync peer" message when not current.

* [NOD-726] Shorten duration in which "no sync peer" messages would not print.
2020-02-27 17:38:39 +02:00
Ori Newman
2373965551 [NOD-576] Rename NextHashes to ChildHashes in GetBlock/GetBlockHeaders rpc call (#645)
* [NOD-576] Rename NextHashes to ChildHashes in GetBlock/GetBlockHeaders rpc call

* [NOD-576] Fix typo
2020-02-27 17:34:38 +02:00
Ori Newman
65cbb6655b [NOD-661] Change BCDB subsystem tag (for logs) to KSDB (#644) 2020-02-27 17:30:08 +02:00
Ori Newman
cdd96d0670 [NOD-664] Remove version from everything inside kaspad/cmd - use kaspad version instead (#642)
* [NOD-664] Remove version from everything inside kaspad/cmd - use kaspad version instead

* [NOD-664] Fix broken import
2020-02-27 13:26:22 +02:00
Dan Aharoni
ad04bbde83 [NOD-782] Make sure errors.As gets parameter that implements error interface (#641)
* [NOD-782] Make sure errors.As gets parameter that implements error interface.

* [NOD-782] Pass pointer to errors.As
2020-02-27 12:27:38 +02:00
Ori Newman
5374d95416 [NOD-656] Log hashrate in kaspaminer (#632)
* [NOD-656] Log hashrate in kaspaminer

* [NOD-656] Measure hash rate in kilohashes

* [NOD-656] Show hash rate once in 10 seconds

* [NOD-656] Put hash rate logic in a separate function

* [NOD-656] Create logHashRateInterval constant
2020-02-24 11:59:02 +02:00
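The NOD-656 hashrate logging is a counter drained on a ticker: mining workers report how many nonces they tried, and once per logHashRateInterval the accumulated count is converted to kilohashes per second. A sketch under those assumptions:

package main

import (
    "log"
    "time"
)

const logHashRateInterval = 10 * time.Second

// logHashRate drains per-batch hash counts from hashesCh and prints a
// kilohash/s figure once per interval.
func logHashRate(hashesCh <-chan uint64) {
    ticker := time.NewTicker(logHashRateInterval)
    defer ticker.Stop()
    var hashes uint64
    last := time.Now()
    for {
        select {
        case n := <-hashesCh:
            hashes += n
        case now := <-ticker.C:
            kiloHashesPerSecond := float64(hashes) / 1000 / now.Sub(last).Seconds()
            log.Printf("Current hash rate is %.2f Kilohash/s", kiloHashesPerSecond)
            hashes, last = 0, now
        }
    }
}

func main() {
    hashesCh := make(chan uint64, 16)
    go logHashRate(hashesCh)
    for {
        hashesCh <- 1000 // pretend the worker just tried 1000 nonces
        time.Sleep(100 * time.Millisecond)
    }
}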
Ori Newman
de9aa39cc5 [NOD-721] Add defers (#638)
* [NOD-721] Defer unlocks

* [NOD-721] Add functions with locks to rpcmodel

* [NOD-721] Defer unlocks

* [NOD-721] Add filterDataWithLock function

* [NOD-721] Defer unlocks

* [NOD-721] Defer .Close()

* [NOD-721] Fix access to wsc.filterData without a lock

* [NOD-721] De-anonymize some anonymous functions

* [NOD-721] Remove redundant assignments

* [NOD-721] Remove redundant assignments

* [NOD-721] Remove redundant assignments

* [NOD-721] Get rid of submitOld, and break handleGetBlockTemplateLongPoll to smaller functions

* [NOD-721] Rename existsUnspentOutpoint->existsUnspentOutpointNoLock, existsUnspentOutpointWithLock->existsUnspentOutpoint

* [NOD-721] Rename filterDataWithLock->FilterData

* [NOD-721] Fixed comments
2020-02-24 09:19:44 +02:00
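The thread running through NOD-721: deferring the unlock at the point the lock is taken guarantees release on every return path, and splitting functions into a lock-taking wrapper and a NoLock inner keeps callers that already hold the mutex from deadlocking. A minimal sketch using the names this commit introduces:

package main

import (
    "fmt"
    "sync"
)

type mempool struct {
    mtx     sync.RWMutex
    entries map[string]struct{}
}

// existsUnspentOutpoint takes the lock and defers the unlock, so every
// return path (and any panic) releases it. The NoLock variant is for
// callers that already hold the mutex; this is the naming split the
// commit introduces.
func (mp *mempool) existsUnspentOutpoint(op string) bool {
    mp.mtx.RLock()
    defer mp.mtx.RUnlock()
    return mp.existsUnspentOutpointNoLock(op)
}

func (mp *mempool) existsUnspentOutpointNoLock(op string) bool {
    _, ok := mp.entries[op]
    return ok
}

func main() {
    mp := &mempool{entries: map[string]struct{}{"txid:0": {}}}
    fmt.Println(mp.existsUnspentOutpoint("txid:0")) // true
}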
Ori Newman
98987f4a8f [NOD-603] Update validateParents to use reachability (#640)
* [NOD-603] Update validateParents to use reachability

* [NOD-603] Break a long line

* [NOD-721] Remove redundant check if block parent is a tip
2020-02-24 08:59:12 +02:00
Ori Newman
9745f31b69 [NOD-693] Update link to license (#639) 2020-02-20 17:12:53 +02:00
Ori Newman
ee08531a52 [NOD-610] Rename newSet->newBlockSet and setFromSlice->blockSetFromSlice (#635) 2020-02-20 16:19:28 +02:00
stasatdaglabs
61baf7b260 [NOD-769] Add a log for when a reachability reindex occurs (#637)
*  [NOD-719] Added defers to unlocks  (#618)

* [NOD-719] Added defers to unlocks

* [NOD-719] Added another defer to another Unlock

* [NOD-719] Added yet another defer to yet another Unlock

* [NOD-747] Change FinalityInterval to be 24 hours, isCurrent to be true if the DAG's time is less than 12 hours than the present, and change MaxInvPerMsg to be 1 << 17 (#625)

* [NOD-769] Add a log for when a reachability reindex occurs.

Co-authored-by: Svarog <feanorr@gmail.com>
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2020-02-19 13:39:45 +02:00
Ori Newman
650e4f735e [NOD-757] Readd addrmanager tests (#628) 2020-02-18 18:12:19 +02:00
Dan Aharoni
550b12b041 [NOD-772] Fix a bug where we ignore the return value of forAllOutboundPeers. (#636) 2020-02-18 18:02:15 +02:00
Ori Newman
a4bb070722 [NOD-754] Fix staticcheck errors (#627)
* [NOD-754] Fix staticcheck errors

* [NOD-754] Remove some unused exported functions

* [NOD-754] Fix staticcheck errors

* [NOD-754] Don't panic if out/in close fails

* [NOD-754] Wrap outside errors with custom message
2020-02-18 16:56:38 +02:00
Ori Newman
30fe0c279b [NOD-738] Move rpcmodel helper functions to pointers package (#629)
* [NOD-738] Move rpcmodel helper functions to copytopointer package

* [NOD-738] Rename copytopointer->pointers
2020-02-18 14:06:34 +02:00
stasatdaglabs
e405dd5981 [NOD-694] Fix requesting blocks that will surely be orphaned during netsync. (#630) 2020-02-18 12:12:34 +02:00
stasatdaglabs
243b4b8021 [NOD-765] Fix database corruption after restart in reachabilitystore and utxodiffstore. (#634) 2020-02-18 12:04:50 +02:00
Ori Newman
dd4c93e1ef [NOD-759] Merge v0.1.1-dev into v0.1.2-dev (#633)
*  [NOD-719] Added defers to unlocks  (#618)

* [NOD-719] Added defers to unlocks

* [NOD-719] Added another defer to another Unlock

* [NOD-719] Added yet another defer to yet another Unlock

* [NOD-747] Change FinalityInterval to be 24 hours, isCurrent to be true if the DAG's time is less than 12 hours than the present, and change MaxInvPerMsg to be 1 << 17 (#625)
2020-02-18 11:02:25 +02:00
Ori Newman
a07335d74d [NOD-737] Remove btc prefix from util file names (#631) 2020-02-17 13:11:24 +02:00
Dan Aharoni
7567cd4cb9 [NOD-744] Wrap go routines with spawn (#626)
* [NOD-744] Wrap go routines with spawn

* [NOD-747] Wrap some more go routines with spawn

* [NOD-744] Some more missing go routines

* [NOD-744] Break lines so make code more readable

* [NOD-744] Declare a local scope variable so the func would use it.

* [NOD-744] Fix type and update comment.

* [NOD-744] Declare local var so go routine would use it

* [NOD-744] Rename variable, use normal assignment;

* [NOD-744] Rename variable.
2020-02-13 13:10:07 +02:00
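The "declare a local scope variable so the func would use it" bullets are the classic pre-Go-1.22 loop-variable capture fix: without a per-iteration copy, every goroutine closes over the same variable and typically observes only its final value. A sketch (plain go stands in for kaspad's spawn, which wraps the same closure):

package main

import (
    "fmt"
    "sync"
)

func main() {
    peers := []string{"alpha", "beta", "gamma"}
    var wg sync.WaitGroup
    for _, peer := range peers {
        peer := peer // per-iteration copy; before Go 1.22 every closure otherwise shares one loop variable
        wg.Add(1)
        go func() { // kaspad wraps this closure in spawn(); plain go shows the same capture point
            defer wg.Done()
            fmt.Println("connected to", peer)
        }()
    }
    wg.Wait()
}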
stasatdaglabs
51ff9e2562 [NOD-571] Cover ghostdag in tests where possible (#613)
* [NOD-571] Cover reachabilityInterval split methods.

* [NOD-571] Cover reindexInterval.

* [NOD-571] Cover reachability String() methods.

* [NOD-571] Cover blueAnticoneSize.

* [NOD-571] Remove unnecessary error from setTreeNode.

* [NOD-571] Add TestGHOSTDAGErrors.

* [NOD-571] Use PrepareBlockForTest in TestBlueAnticoneSizeErrors.

* [NOD-571] Use PrepareBlockForTest in TestGHOSTDAGErrors.

* [NOD-571] Add substring checks to TestSplitFractionErrors.

* [NOD-571] Add substring checks to TestSplitExactErrors and TestSplitWithExponentialBiasErrors.

* [NOD-571] Add comments to TestReindexIntervalErrors.

* [NOD-571] Add additional info in some error messages.

* [NOD-571] Fix error messages.
2020-02-09 11:27:10 +02:00
stasatdaglabs
5b8ab63890 [NOD-717] Fix nodes getting stuck in an infinite loop in addrManager.getAddress (#624)
* [NOD-717] Fix nodes getting stuck in an infinite loop in addrManager.getAddress.

* [NOD-717] Rename ResetFailedAttempts -> NotifyConnectionRequestComplete.
2020-02-06 18:17:10 +02:00
Dan Aharoni
3dd7dc4496 [NOD-727] Do not allow delayed blocks from RPC. (#623)
* [NOD-727] Do not allow delayed blocks from RPC.

* [NOD-727] Refactor sentFromRPC -> DisallowDelay

* [NOD-727] Clarify comment; Clarify error message.

* [NOD-727] Change error message.
2020-02-05 11:14:26 +02:00
Ori Newman
d90a08ecfa [NOD-722] Fix processBlockMsg case in blockHandler to send only one response to msg.reply, and rename blockHandler->messageHandler (#622) 2020-02-04 18:10:15 +02:00
Ori Newman
45dc1a3e7b [NOD-545] Remove headers first related logic (#621)
* [NOD-545] Remove headers first related logic

* [NOD-545] Fix tests

* [NOD-545] Change getTopHeadersMaxHeaders to be equal to getHeadersMaxHeaders
2020-02-04 14:54:42 +02:00
Ori Newman
4ffb5daa37 [NOD-622] Fix populateTemplateFromCandidates to sort txsForBlockTemplate.txMasses and txsForBlockTemplate.txFees (#617)
* [NOD-622] Fix populateTemplateFromCandidates to sort txsForBlockTemplate.txMasses and txsForBlockTemplate.txFees

* [NOD-622] Sort transactions in PrepareBlockForTest

* [NOD-622] Remove duplicate append of selected transactions
2020-02-03 13:42:40 +02:00
Ori Newman
b9138b720d [NOD-597] Make BlockIndex clear its dirty entries only after it successfully written them to disk (#620) 2020-02-03 13:39:25 +02:00
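The NOD-597 rule: write all dirty block-index entries first, and reset the dirty set only after every write succeeded, so a failed flush leaves the entries marked and the next flush retries them. A sketch with an injected write function standing in for the database transaction:

package main

import (
    "errors"
    "fmt"
)

type blockNode struct{ id int }

type blockIndex struct {
    dirty map[*blockNode]struct{}
}

// flushToDB writes every dirty node and clears the dirty set only
// after all writes succeed; on failure the entries stay marked and the
// next flush retries them.
func (bi *blockIndex) flushToDB(write func(*blockNode) error) error {
    if len(bi.dirty) == 0 {
        return nil
    }
    for node := range bi.dirty {
        if err := write(node); err != nil {
            return err // keep bi.dirty intact for a retry
        }
    }
    // Only now is it safe to forget what was dirty.
    bi.dirty = make(map[*blockNode]struct{})
    return nil
}

func main() {
    node := &blockNode{id: 1}
    bi := &blockIndex{dirty: map[*blockNode]struct{}{node: {}}}
    err := bi.flushToDB(func(*blockNode) error { return errors.New("disk full") })
    fmt.Println(err, len(bi.dirty)) // disk full 1: still dirty, will be retried
}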
Ori Newman
d8954f1339 [NOD-615] Make bluesAnticoneSizes a map with *blockNode as a key (#619) 2020-02-03 12:40:39 +02:00
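NOD-615 swaps the map key from a block hash to the node pointer itself, so code that already holds a *blockNode can read a blue's anticone size without copying a hash or consulting the block index. In miniature:

package main

import "fmt"

// KType mirrors dagconfig.KType, a small unsigned integer.
type KType uint8

type blockNode struct {
    // Keyed by node pointer rather than block hash (the NOD-615
    // change): readers that already hold a *blockNode skip a hash copy
    // and an index lookup.
    bluesAnticoneSizes map[*blockNode]KType
}

func main() {
    blue := &blockNode{}
    node := &blockNode{bluesAnticoneSizes: map[*blockNode]KType{blue: 3}}
    fmt.Println(node.bluesAnticoneSizes[blue]) // 3
}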
Ori Newman
eb953286ec [NOD-641] Upgrade to github.com/pkg/errors v0.9.1 and use errors.As where needed (#614)
* [NOD-641] Upgrade to github.com/pkg/errors v0.9.1 and use errors.As where needed

* [NOD-641] Fix find and replace error

* [NOD-641] Use errors.As for error type checking

* [NOD-641] Fix errors.As for pointer types

* [NOD-641] Use errors.As where needed

* [NOD-641] Rename rErr->ruleErr

* [NOD-641] Rename derr->dbErr

* [NOD-641] e->flagsErr where necessary

* [NOD-641] change jerr to more appropriate name

* [NOD-641] Rename cerr->bdRuleErr

* [NOD-641] Rename serr->scriptErr

* [NOD-641] Use errors.Is instead of testutil.AreErrorsEqual in TestNewHashFromStr

* [NOD-641] Rename bdRuleErr->dagRuleErr

* [NOD-641] Rename mErr->msgErr

* [NOD-641] Rename dErr->deserializeErr
2020-02-03 12:38:33 +02:00
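The motivation for errors.As throughout NOD-641: a plain type assertion such as err.(RuleError) stops matching the moment the error is wrapped, while errors.As walks the wrap chain (pkg/errors v0.9.1 implements Go 1.13 unwrapping). A small sketch mirroring the RuleError checks changed in this range:

package main

import (
    "fmt"

    "github.com/pkg/errors"
)

type RuleError struct{ ErrorCode int }

func (e RuleError) Error() string { return fmt.Sprintf("rule error %d", e.ErrorCode) }

func main() {
    // A plain assertion err.(RuleError) fails once the error is
    // wrapped; errors.As walks the wrap chain.
    err := errors.Wrap(RuleError{ErrorCode: 7}, "maybeAcceptBlock")

    var ruleErr RuleError
    if errors.As(err, &ruleErr) {
        fmt.Println("rule error code:", ruleErr.ErrorCode) // 7
    }
}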
Ori Newman
41c8178ad3 [NOD-648] Add TestProcessDelayedBlocks (#612)
* [NOD-648] Add TestProcessDelayedBlocks

* [NOD-648] Add one second to secondsUntilDelayedBlockIsValid to make sure the delayedBlock timestamp will be valid, and add comments

* [NOD-648] Remove redundant import

* [NOD-648] Use fakeTimeSource instead of time.Sleep

* [NOD-648] Rename dag.HaveBlock->dag.IsKnownBlock,  dag.BlockExists->dag.IsInDAG

* [NOD-648] Add comment

* [NOD-641] Rename HaveBlock->IsKnownBlock, BlockExists->IsInDAG
2020-02-03 11:30:03 +02:00
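Replacing time.Sleep with a fakeTimeSource makes TestProcessDelayedBlocks deterministic: the test advances a fake clock instead of waiting for the real one. A sketch of the technique, with an interface assumed for illustration that differs from kaspad's actual time source:

package main

import (
    "fmt"
    "time"
)

// TimeSource abstracts "now" so a test can move the clock instead of
// sleeping.
type TimeSource interface {
    Now() time.Time
}

type fakeTimeSource struct{ now time.Time }

func (f *fakeTimeSource) Now() time.Time          { return f.now }
func (f *fakeTimeSource) Advance(d time.Duration) { f.now = f.now.Add(d) }

// isBlockDelayed is a stand-in check: a block is delayed while its
// timestamp lies too far past the source's current time.
func isBlockDelayed(blockTime time.Time, ts TimeSource, maxFuture time.Duration) bool {
    return blockTime.After(ts.Now().Add(maxFuture))
}

func main() {
    ts := &fakeTimeSource{now: time.Unix(1000000, 0)}
    blockTime := ts.Now().Add(30 * time.Second)

    fmt.Println(isBlockDelayed(blockTime, ts, 10*time.Second)) // true: too far in the future
    ts.Advance(time.Minute)                                    // no time.Sleep needed
    fmt.Println(isBlockDelayed(blockTime, ts, 10*time.Second)) // false: now valid
}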
Ori Newman
aa74b51e6f [NOD-687] Remove -gcflags='-l' from all tests (#616) 2020-02-02 15:26:26 +02:00
Mike Zak
f7800eb5c4 Merge remote-tracking branch 'origin/v0.1.1-dev' into v0.1.2-dev 2020-02-02 15:17:25 +02:00
Ori Newman
44c55900f8 [NOD-715] Replace testDbRoot with os.TempDir() (#611) 2020-01-30 12:54:15 +02:00
Ori Newman
4c0ea78026 [NOD-586] Remove subTreeSize from reachabilityTreeNode (#610)
* [NOD-586] Remove subTreeSize from reachabilityTreeNode

* [NOD-586] Convert else { if { ... } } to else if { ... }
2020-01-30 10:39:53 +02:00
stasatdaglabs
03a93fe51e [NOD-647] Create a default config file even if the sample default config file is missing (#609)
* [NOD-647] Create a default config file even if the sample default config file is missing.

* [NOD-647] Unfancify WriteString().
2020-01-29 17:40:59 +02:00
Mike Zak
eca0514465 Merge remote-tracking branch 'origin/v0.1.1-dev' into v0.1.2-dev 2020-01-28 14:39:22 +02:00
Mike Zak
5daab45947 Merge remote-tracking branch 'origin/v0.1.1-dev' into v0.1.2-dev 2020-01-28 12:25:52 +02:00
Mike Zak
25bdaeed31 Merge remote-tracking branch 'origin/v0.1.1-dev' into v0.1.2-dev 2020-01-28 11:35:21 +02:00
Mike Zak
a3dc2f7da7 Update version to v0.1.2 2020-01-28 10:53:10 +02:00
284 changed files with 4771 additions and 9939 deletions

View File

@@ -4,7 +4,7 @@ Kaspad
Warning: This is pre-alpha software. There's no guarantee anything works.
====
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad)
Kaspad is the reference full node Kaspa implementation written in Go (golang).
@@ -75,5 +75,5 @@ The documentation is a work-in-progress. It is located in the [docs](https://git
## License
Kaspad is licensed under the [copyfree](http://copyfree.org) ISC License.
Kaspad is licensed under the copyfree [ISC License](https://choosealicense.com/licenses/isc/).

View File

@@ -717,11 +717,7 @@ func (a *AddrManager) Start() {
// Start the address ticker to save addresses periodically.
a.wg.Add(1)
spawn(a.addressHandler, a.handlePanic)
}
func (a *AddrManager) handlePanic() {
atomic.AddInt32(&a.shutdown, 1)
spawn(a.addressHandler)
}
// Stop gracefully shuts down the address manager by stopping the main handler.

View File

@@ -5,8 +5,14 @@
package addrmgr
import (
"fmt"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util/subnetworkid"
"net"
"reflect"
"testing"
"time"
"github.com/pkg/errors"
@@ -108,6 +114,551 @@ func TestStartStop(t *testing.T) {
}
}
func TestAddAddressByIP(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
fmtErr := errors.Errorf("")
addrErr := &net.AddrError{}
var tests = []struct {
addrIP string
err error
}{
{
someIP + ":16111",
nil,
},
{
someIP,
addrErr,
},
{
someIP[:12] + ":8333",
fmtErr,
},
{
someIP + ":abcd",
fmtErr,
},
}
amgr := New("testaddressbyip", nil, nil)
for i, test := range tests {
err := amgr.AddAddressByIP(test.addrIP, nil)
if test.err != nil && err == nil {
t.Errorf("TestAddAddressByIP test %d failed expected an error and got none", i)
continue
}
if test.err == nil && err != nil {
t.Errorf("TestAddAddressByIP test %d failed expected no error and got one", i)
continue
}
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
t.Errorf("TestAddAddressByIP test %d failed got %v, want %v", i,
reflect.TypeOf(err), reflect.TypeOf(test.err))
continue
}
}
}
func TestAddLocalAddress(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
var tests = []struct {
address wire.NetAddress
priority AddressPriority
valid bool
}{
{
wire.NetAddress{IP: net.ParseIP("192.168.0.100")},
InterfacePrio,
false,
},
{
wire.NetAddress{IP: net.ParseIP("204.124.1.1")},
InterfacePrio,
true,
},
{
wire.NetAddress{IP: net.ParseIP("204.124.1.1")},
BoundPrio,
true,
},
{
wire.NetAddress{IP: net.ParseIP("::1")},
InterfacePrio,
false,
},
{
wire.NetAddress{IP: net.ParseIP("fe80::1")},
InterfacePrio,
false,
},
{
wire.NetAddress{IP: net.ParseIP("2620:100::1")},
InterfacePrio,
true,
},
}
amgr := New("testaddlocaladdress", nil, nil)
for x, test := range tests {
result := amgr.AddLocalAddress(&test.address, test.priority)
if result == nil && !test.valid {
t.Errorf("TestAddLocalAddress test #%d failed: %s should have "+
"been accepted", x, test.address.IP)
continue
}
if result != nil && test.valid {
t.Errorf("TestAddLocalAddress test #%d failed: %s should not have "+
"been accepted", x, test.address.IP)
continue
}
}
}
func TestAttempt(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
n := New("testattempt", lookupFunc, nil)
// Add a new address and get it
err := n.AddAddressByIP(someIP+":8333", nil)
if err != nil {
t.Fatalf("Adding address failed: %v", err)
}
ka := n.GetAddress()
if !ka.LastAttempt().IsZero() {
t.Errorf("Address should not have attempts, but does")
}
na := ka.NetAddress()
n.Attempt(na)
if ka.LastAttempt().IsZero() {
t.Errorf("Address should have an attempt, but does not")
}
}
func TestConnected(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
n := New("testconnected", lookupFunc, nil)
// Add a new address and get it
err := n.AddAddressByIP(someIP+":8333", nil)
if err != nil {
t.Fatalf("Adding address failed: %v", err)
}
ka := n.GetAddress()
na := ka.NetAddress()
// make it an hour ago
na.Timestamp = time.Unix(time.Now().Add(time.Hour*-1).Unix(), 0)
n.Connected(na)
if !ka.NetAddress().Timestamp.After(na.Timestamp) {
t.Errorf("Address should have a new timestamp, but does not")
}
}
func TestNeedMoreAddresses(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
n := New("testneedmoreaddresses", lookupFunc, nil)
addrsToAdd := 1500
b := n.NeedMoreAddresses()
if !b {
t.Errorf("Expected that we need more addresses")
}
addrs := make([]*wire.NetAddress, addrsToAdd)
var err error
for i := 0; i < addrsToAdd; i++ {
s := fmt.Sprintf("%d.%d.173.147:8333", i/128+60, i%128+60)
addrs[i], err = n.DeserializeNetAddress(s)
if err != nil {
t.Errorf("Failed to turn %s into an address: %v", s, err)
}
}
srcAddr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)
n.AddAddresses(addrs, srcAddr, nil)
numAddrs := n.TotalNumAddresses()
if numAddrs > addrsToAdd {
t.Errorf("Number of addresses is too many %d vs %d", numAddrs, addrsToAdd)
}
b = n.NeedMoreAddresses()
if b {
t.Errorf("Expected that we don't need more addresses")
}
}
func TestGood(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
n := New("testgood", lookupFunc, nil)
addrsToAdd := 64 * 64
addrs := make([]*wire.NetAddress, addrsToAdd)
subnetworkCount := 32
subnetworkIDs := make([]*subnetworkid.SubnetworkID, subnetworkCount)
var err error
for i := 0; i < addrsToAdd; i++ {
s := fmt.Sprintf("%d.173.147.%d:8333", i/64+60, i%64+60)
addrs[i], err = n.DeserializeNetAddress(s)
if err != nil {
t.Errorf("Failed to turn %s into an address: %v", s, err)
}
}
for i := 0; i < subnetworkCount; i++ {
subnetworkIDs[i] = &subnetworkid.SubnetworkID{0xff - byte(i)}
}
srcAddr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)
n.AddAddresses(addrs, srcAddr, nil)
for i, addr := range addrs {
n.Good(addr, subnetworkIDs[i%subnetworkCount])
}
numAddrs := n.TotalNumAddresses()
if numAddrs >= addrsToAdd {
t.Errorf("Number of addresses is too many: %d vs %d", numAddrs, addrsToAdd)
}
numCache := len(n.AddressCache(true, nil))
if numCache == 0 || numCache >= numAddrs/4 {
t.Errorf("Number of addresses in cache: got %d, want positive and less than %d",
numCache, numAddrs/4)
}
for i := 0; i < subnetworkCount; i++ {
numCache = len(n.AddressCache(false, subnetworkIDs[i]))
if numCache == 0 || numCache >= numAddrs/subnetworkCount {
t.Errorf("Number of addresses in subnetwork cache: got %d, want positive and less than %d",
numCache, numAddrs/subnetworkCount)
}
}
}
func TestGoodChangeSubnetworkID(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
n := New("test_good_change_subnetwork_id", lookupFunc, nil)
addr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)
addrKey := NetAddressKey(addr)
srcAddr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)
oldSubnetwork := subnetworkid.SubnetworkIDNative
n.AddAddress(addr, srcAddr, oldSubnetwork)
n.Good(addr, oldSubnetwork)
// make sure address was saved to addrIndex under oldSubnetwork
ka := n.find(addr)
if ka == nil {
t.Fatalf("Address was not found after first time .Good called")
}
if !ka.SubnetworkID().IsEqual(oldSubnetwork) {
t.Fatalf("Address index did not point to oldSubnetwork")
}
// make sure address was added to correct bucket under oldSubnetwork
bucket := n.addrTried[*oldSubnetwork][n.getTriedBucket(addr)]
wasFound := false
for e := bucket.Front(); e != nil; e = e.Next() {
if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey {
wasFound = true
}
}
if !wasFound {
t.Fatalf("Address was not found in the correct bucket in oldSubnetwork")
}
// now call .Good again with a different subnetwork
newSubnetwork := subnetworkid.SubnetworkIDRegistry
n.Good(addr, newSubnetwork)
// make sure address was updated in addrIndex under newSubnetwork
ka = n.find(addr)
if ka == nil {
t.Fatalf("Address was not found after second time .Good called")
}
if !ka.SubnetworkID().IsEqual(newSubnetwork) {
t.Fatalf("Address index did not point to newSubnetwork")
}
// make sure address was removed from bucket under oldSubnetwork
bucket = n.addrTried[*oldSubnetwork][n.getTriedBucket(addr)]
wasFound = false
for e := bucket.Front(); e != nil; e = e.Next() {
if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey {
wasFound = true
}
}
if wasFound {
t.Fatalf("Address was not removed from bucket in oldSubnetwork")
}
// make sure address was added to correct bucket under newSubnetwork
bucket = n.addrTried[*newSubnetwork][n.getTriedBucket(addr)]
wasFound = false
for e := bucket.Front(); e != nil; e = e.Next() {
if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey {
wasFound = true
}
}
if !wasFound {
t.Fatalf("Address was not found in the correct bucket in newSubnetwork")
}
}
func TestGetAddress(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
localSubnetworkID := &subnetworkid.SubnetworkID{0xff}
n := New("testgetaddress", lookupFunc, localSubnetworkID)
// Get an address from an empty set (should error)
if rv := n.GetAddress(); rv != nil {
t.Errorf("GetAddress failed: got: %v want: %v\n", rv, nil)
}
// Add a new address and get it
err := n.AddAddressByIP(someIP+":8332", localSubnetworkID)
if err != nil {
t.Fatalf("Adding address failed: %v", err)
}
ka := n.GetAddress()
if ka == nil {
t.Fatalf("Did not get an address where there is one in the pool")
}
n.Attempt(ka.NetAddress())
// Checks that we don't get it if we find that it has other subnetwork ID than expected.
actualSubnetworkID := &subnetworkid.SubnetworkID{0xfe}
n.Good(ka.NetAddress(), actualSubnetworkID)
ka = n.GetAddress()
if ka != nil {
t.Errorf("Didn't expect to get an address because there shouldn't be any address from subnetwork ID %s or nil", localSubnetworkID)
}
// Checks that the total number of addresses was incremented, even though the new address is neither a full node nor a partial node of the same subnetwork as the local node.
numAddrs := n.TotalNumAddresses()
if numAddrs != 1 {
t.Errorf("Wrong number of addresses: got %d, want %d", numAddrs, 1)
}
// Now we repeat the same process, but now the address has the expected subnetwork ID.
// Add a new address and get it
err = n.AddAddressByIP(someIP+":8333", localSubnetworkID)
if err != nil {
t.Fatalf("Adding address failed: %v", err)
}
ka = n.GetAddress()
if ka == nil {
t.Fatalf("Did not get an address where there is one in the pool")
}
if ka.NetAddress().IP.String() != someIP {
t.Errorf("Wrong IP: got %v, want %v", ka.NetAddress().IP.String(), someIP)
}
if !ka.SubnetworkID().IsEqual(localSubnetworkID) {
t.Errorf("Wrong Subnetwork ID: got %v, want %v", *ka.SubnetworkID(), localSubnetworkID)
}
n.Attempt(ka.NetAddress())
// Mark this as a good address and get it
n.Good(ka.NetAddress(), localSubnetworkID)
ka = n.GetAddress()
if ka == nil {
t.Fatalf("Did not get an address where there is one in the pool")
}
if ka.NetAddress().IP.String() != someIP {
t.Errorf("Wrong IP: got %v, want %v", ka.NetAddress().IP.String(), someIP)
}
if *ka.SubnetworkID() != *localSubnetworkID {
t.Errorf("Wrong Subnetwork ID: got %v, want %v", ka.SubnetworkID(), localSubnetworkID)
}
numAddrs = n.TotalNumAddresses()
if numAddrs != 2 {
t.Errorf("Wrong number of addresses: got %d, want %d", numAddrs, 1)
}
}
func TestGetBestLocalAddress(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
localAddrs := []wire.NetAddress{
{IP: net.ParseIP("192.168.0.100")},
{IP: net.ParseIP("::1")},
{IP: net.ParseIP("fe80::1")},
{IP: net.ParseIP("2001:470::1")},
}
var tests = []struct {
remoteAddr wire.NetAddress
want0 wire.NetAddress
want1 wire.NetAddress
want2 wire.NetAddress
want3 wire.NetAddress
}{
{
// Remote connection from public IPv4
wire.NetAddress{IP: net.ParseIP("204.124.8.1")},
wire.NetAddress{IP: net.IPv4zero},
wire.NetAddress{IP: net.IPv4zero},
wire.NetAddress{IP: net.ParseIP("204.124.8.100")},
wire.NetAddress{IP: net.ParseIP("fd87:d87e:eb43:25::1")},
},
{
// Remote connection from private IPv4
wire.NetAddress{IP: net.ParseIP("172.16.0.254")},
wire.NetAddress{IP: net.IPv4zero},
wire.NetAddress{IP: net.IPv4zero},
wire.NetAddress{IP: net.IPv4zero},
wire.NetAddress{IP: net.IPv4zero},
},
{
// Remote connection from public IPv6
wire.NetAddress{IP: net.ParseIP("2602:100:abcd::102")},
wire.NetAddress{IP: net.IPv6zero},
wire.NetAddress{IP: net.ParseIP("2001:470::1")},
wire.NetAddress{IP: net.ParseIP("2001:470::1")},
wire.NetAddress{IP: net.ParseIP("2001:470::1")},
},
/* XXX
{
// Remote connection from Tor
wire.NetAddress{IP: net.ParseIP("fd87:d87e:eb43::100")},
wire.NetAddress{IP: net.IPv4zero},
wire.NetAddress{IP: net.ParseIP("204.124.8.100")},
wire.NetAddress{IP: net.ParseIP("fd87:d87e:eb43:25::1")},
},
*/
}
amgr := New("testgetbestlocaladdress", nil, nil)
// Test against default when there's no address
for x, test := range tests {
got := amgr.GetBestLocalAddress(&test.remoteAddr)
if !test.want0.IP.Equal(got.IP) {
t.Errorf("TestGetBestLocalAddress test1 #%d failed for remote address %s: want %s got %s",
x, test.remoteAddr.IP, test.want1.IP, got.IP)
continue
}
}
for _, localAddr := range localAddrs {
amgr.AddLocalAddress(&localAddr, InterfacePrio)
}
// Test against want1
for x, test := range tests {
got := amgr.GetBestLocalAddress(&test.remoteAddr)
if !test.want1.IP.Equal(got.IP) {
t.Errorf("TestGetBestLocalAddress test1 #%d failed for remote address %s: want %s got %s",
x, test.remoteAddr.IP, test.want1.IP, got.IP)
continue
}
}
// Add a public IP to the list of local addresses.
localAddr := wire.NetAddress{IP: net.ParseIP("204.124.8.100")}
amgr.AddLocalAddress(&localAddr, InterfacePrio)
// Test against want2
for x, test := range tests {
got := amgr.GetBestLocalAddress(&test.remoteAddr)
if !test.want2.IP.Equal(got.IP) {
t.Errorf("TestGetBestLocalAddress test2 #%d failed for remote address %s: want %s got %s",
x, test.remoteAddr.IP, test.want2.IP, got.IP)
continue
}
}
/*
// Add a Tor generated IP address
localAddr = wire.NetAddress{IP: net.ParseIP("fd87:d87e:eb43:25::1")}
amgr.AddLocalAddress(&localAddr, ManualPrio)
// Test against want3
for x, test := range tests {
got := amgr.GetBestLocalAddress(&test.remoteAddr)
if !test.want3.IP.Equal(got.IP) {
t.Errorf("TestGetBestLocalAddress test3 #%d failed for remote address %s: want %s got %s",
x, test.remoteAddr.IP, test.want3.IP, got.IP)
continue
}
}
*/
}
func TestNetAddressKey(t *testing.T) {
addNaTests()

View File

@@ -10,4 +10,4 @@ import (
)
var log, _ = logger.Get(logger.SubsystemTags.ADXR)
var spawn = panics.GoroutineWrapperFuncWithPanicHandler(log)
var spawn = panics.GoroutineWrapperFunc(log)

addrmgr/network_test.go Normal file
View File

@@ -0,0 +1,222 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package addrmgr_test
import (
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/dagconfig"
"net"
"testing"
"github.com/kaspanet/kaspad/addrmgr"
"github.com/kaspanet/kaspad/wire"
)
// TestIPTypes ensures the various functions which determine the type of an IP
// address based on RFCs work as intended.
func TestIPTypes(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
type ipTest struct {
in wire.NetAddress
rfc1918 bool
rfc2544 bool
rfc3849 bool
rfc3927 bool
rfc3964 bool
rfc4193 bool
rfc4380 bool
rfc4843 bool
rfc4862 bool
rfc5737 bool
rfc6052 bool
rfc6145 bool
rfc6598 bool
local bool
valid bool
routable bool
}
newIPTest := func(ip string, rfc1918, rfc2544, rfc3849, rfc3927, rfc3964,
rfc4193, rfc4380, rfc4843, rfc4862, rfc5737, rfc6052, rfc6145, rfc6598,
local, valid, routable bool) ipTest {
nip := net.ParseIP(ip)
na := *wire.NewNetAddressIPPort(nip, 16111, wire.SFNodeNetwork)
test := ipTest{na, rfc1918, rfc2544, rfc3849, rfc3927, rfc3964, rfc4193, rfc4380,
rfc4843, rfc4862, rfc5737, rfc6052, rfc6145, rfc6598, local, valid, routable}
return test
}
tests := []ipTest{
newIPTest("10.255.255.255", true, false, false, false, false, false,
false, false, false, false, false, false, false, false, true, false),
newIPTest("192.168.0.1", true, false, false, false, false, false,
false, false, false, false, false, false, false, false, true, false),
newIPTest("172.31.255.1", true, false, false, false, false, false,
false, false, false, false, false, false, false, false, true, false),
newIPTest("172.32.1.1", false, false, false, false, false, false, false, false,
false, false, false, false, false, false, true, true),
newIPTest("169.254.250.120", false, false, false, true, false, false,
false, false, false, false, false, false, false, false, true, false),
newIPTest("0.0.0.0", false, false, false, false, false, false, false,
false, false, false, false, false, false, true, false, false),
newIPTest("255.255.255.255", false, false, false, false, false, false,
false, false, false, false, false, false, false, false, false, false),
newIPTest("127.0.0.1", false, false, false, false, false, false,
false, false, false, false, false, false, false, true, true, false),
newIPTest("fd00:dead::1", false, false, false, false, false, true,
false, false, false, false, false, false, false, false, true, false),
newIPTest("2001::1", false, false, false, false, false, false,
true, false, false, false, false, false, false, false, true, true),
newIPTest("2001:10:abcd::1:1", false, false, false, false, false, false,
false, true, false, false, false, false, false, false, true, false),
newIPTest("fe80::1", false, false, false, false, false, false,
false, false, true, false, false, false, false, false, true, false),
newIPTest("fe80:1::1", false, false, false, false, false, false,
false, false, false, false, false, false, false, false, true, true),
newIPTest("64:ff9b::1", false, false, false, false, false, false,
false, false, false, false, true, false, false, false, true, true),
newIPTest("::ffff:abcd:ef12:1", false, false, false, false, false, false,
false, false, false, false, false, false, false, false, true, true),
newIPTest("::1", false, false, false, false, false, false, false, false,
false, false, false, false, false, true, true, false),
newIPTest("198.18.0.1", false, true, false, false, false, false, false,
false, false, false, false, false, false, false, true, false),
newIPTest("100.127.255.1", false, false, false, false, false, false, false,
false, false, false, false, false, true, false, true, false),
newIPTest("203.0.113.1", false, false, false, false, false, false, false,
false, false, false, false, false, false, false, true, false),
}
t.Logf("Running %d tests", len(tests))
for _, test := range tests {
if rv := addrmgr.IsRFC1918(&test.in); rv != test.rfc1918 {
t.Errorf("IsRFC1918 %s\n got: %v want: %v", test.in.IP, rv, test.rfc1918)
}
if rv := addrmgr.IsRFC3849(&test.in); rv != test.rfc3849 {
t.Errorf("IsRFC3849 %s\n got: %v want: %v", test.in.IP, rv, test.rfc3849)
}
if rv := addrmgr.IsRFC3927(&test.in); rv != test.rfc3927 {
t.Errorf("IsRFC3927 %s\n got: %v want: %v", test.in.IP, rv, test.rfc3927)
}
if rv := addrmgr.IsRFC3964(&test.in); rv != test.rfc3964 {
t.Errorf("IsRFC3964 %s\n got: %v want: %v", test.in.IP, rv, test.rfc3964)
}
if rv := addrmgr.IsRFC4193(&test.in); rv != test.rfc4193 {
t.Errorf("IsRFC4193 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4193)
}
if rv := addrmgr.IsRFC4380(&test.in); rv != test.rfc4380 {
t.Errorf("IsRFC4380 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4380)
}
if rv := addrmgr.IsRFC4843(&test.in); rv != test.rfc4843 {
t.Errorf("IsRFC4843 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4843)
}
if rv := addrmgr.IsRFC4862(&test.in); rv != test.rfc4862 {
t.Errorf("IsRFC4862 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4862)
}
if rv := addrmgr.IsRFC6052(&test.in); rv != test.rfc6052 {
t.Errorf("isRFC6052 %s\n got: %v want: %v", test.in.IP, rv, test.rfc6052)
}
if rv := addrmgr.IsRFC6145(&test.in); rv != test.rfc6145 {
t.Errorf("IsRFC1918 %s\n got: %v want: %v", test.in.IP, rv, test.rfc6145)
}
if rv := addrmgr.IsLocal(&test.in); rv != test.local {
t.Errorf("IsLocal %s\n got: %v want: %v", test.in.IP, rv, test.local)
}
if rv := addrmgr.IsValid(&test.in); rv != test.valid {
t.Errorf("IsValid %s\n got: %v want: %v", test.in.IP, rv, test.valid)
}
if rv := addrmgr.IsRoutable(&test.in); rv != test.routable {
t.Errorf("IsRoutable %s\n got: %v want: %v", test.in.IP, rv, test.routable)
}
}
}
// TestGroupKey tests the GroupKey function to ensure it properly groups various
// IP addresses.
func TestGroupKey(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
tests := []struct {
name string
ip string
expected string
}{
// Local addresses.
{name: "ipv4 localhost", ip: "127.0.0.1", expected: "local"},
{name: "ipv6 localhost", ip: "::1", expected: "local"},
{name: "ipv4 zero", ip: "0.0.0.0", expected: "local"},
{name: "ipv4 first octet zero", ip: "0.1.2.3", expected: "local"},
// Unroutable addresses.
{name: "ipv4 invalid bcast", ip: "255.255.255.255", expected: "unroutable"},
{name: "ipv4 rfc1918 10/8", ip: "10.1.2.3", expected: "unroutable"},
{name: "ipv4 rfc1918 172.16/12", ip: "172.16.1.2", expected: "unroutable"},
{name: "ipv4 rfc1918 192.168/16", ip: "192.168.1.2", expected: "unroutable"},
{name: "ipv6 rfc3849 2001:db8::/32", ip: "2001:db8::1234", expected: "unroutable"},
{name: "ipv4 rfc3927 169.254/16", ip: "169.254.1.2", expected: "unroutable"},
{name: "ipv6 rfc4193 fc00::/7", ip: "fc00::1234", expected: "unroutable"},
{name: "ipv6 rfc4843 2001:10::/28", ip: "2001:10::1234", expected: "unroutable"},
{name: "ipv6 rfc4862 fe80::/64", ip: "fe80::1234", expected: "unroutable"},
// IPv4 normal.
{name: "ipv4 normal class a", ip: "12.1.2.3", expected: "12.1.0.0"},
{name: "ipv4 normal class b", ip: "173.1.2.3", expected: "173.1.0.0"},
{name: "ipv4 normal class c", ip: "196.1.2.3", expected: "196.1.0.0"},
// IPv6/IPv4 translations.
{name: "ipv6 rfc3964 with ipv4 encap", ip: "2002:0c01:0203::", expected: "12.1.0.0"},
{name: "ipv6 rfc4380 toredo ipv4", ip: "2001:0:1234::f3fe:fdfc", expected: "12.1.0.0"},
{name: "ipv6 rfc6052 well-known prefix with ipv4", ip: "64:ff9b::0c01:0203", expected: "12.1.0.0"},
{name: "ipv6 rfc6145 translated ipv4", ip: "::ffff:0:0c01:0203", expected: "12.1.0.0"},
// Tor.
{name: "ipv6 tor onioncat", ip: "fd87:d87e:eb43:1234::5678", expected: "unroutable"},
{name: "ipv6 tor onioncat 2", ip: "fd87:d87e:eb43:1245::6789", expected: "unroutable"},
{name: "ipv6 tor onioncat 3", ip: "fd87:d87e:eb43:1345::6789", expected: "unroutable"},
// IPv6 normal.
{name: "ipv6 normal", ip: "2602:100::1", expected: "2602:100::"},
{name: "ipv6 normal 2", ip: "2602:0100::1234", expected: "2602:100::"},
{name: "ipv6 hurricane electric", ip: "2001:470:1f10:a1::2", expected: "2001:470:1000::"},
{name: "ipv6 hurricane electric 2", ip: "2001:0470:1f10:a1::2", expected: "2001:470:1000::"},
}
for i, test := range tests {
nip := net.ParseIP(test.ip)
na := *wire.NewNetAddressIPPort(nip, 8333, wire.SFNodeNetwork)
if key := addrmgr.GroupKey(&na); key != test.expected {
t.Errorf("TestGroupKey #%d (%s): unexpected group key "+
"- got '%s', want '%s'", i, test.name,
key, test.expected)
}
}
}

View File

@@ -1,7 +1,7 @@
blockchain
==========
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad/blockchain)
Package blockdag implements Kaspa block handling, organization of the blockDAG,

View File

@@ -8,11 +8,12 @@ import (
"fmt"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
)
func (dag *BlockDAG) addNodeToIndexWithInvalidAncestor(block *util.Block) error {
blockHeader := &block.MsgBlock().Header
newNode, _ := dag.newBlockNode(blockHeader, newSet())
newNode, _ := dag.newBlockNode(blockHeader, newBlockSet())
newNode.status = statusInvalidAncestor
dag.index.AddNode(newNode)
return dag.index.flushToDB()
@@ -30,7 +31,8 @@ func (dag *BlockDAG) addNodeToIndexWithInvalidAncestor(block *util.Block) error
func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) error {
parents, err := lookupParentNodes(block, dag)
if err != nil {
if rErr, ok := err.(RuleError); ok && rErr.ErrorCode == ErrInvalidAncestorBlock {
var ruleErr RuleError
if ok := errors.As(err, &ruleErr); ok && ruleErr.ErrorCode == ErrInvalidAncestorBlock {
err := dag.addNodeToIndexWithInvalidAncestor(block)
if err != nil {
return err
@@ -112,14 +114,14 @@ func lookupParentNodes(block *util.Block, blockDAG *BlockDAG) (blockSet, error)
header := block.MsgBlock().Header
parentHashes := header.ParentHashes
nodes := newSet()
nodes := newBlockSet()
for _, parentHash := range parentHashes {
node := blockDAG.index.LookupNode(parentHash)
if node == nil {
str := fmt.Sprintf("parent block %s is unknown", parentHashes)
str := fmt.Sprintf("parent block %s is unknown", parentHash)
return nil, ruleError(ErrParentBlockUnknown, str)
} else if blockDAG.index.NodeStatus(node).KnownInvalid() {
str := fmt.Sprintf("parent block %s is known to be invalid", parentHashes)
str := fmt.Sprintf("parent block %s is known to be invalid", parentHash)
return nil, ruleError(ErrInvalidAncestorBlock, str)
}

View File

@@ -1,6 +1,7 @@
package blockdag
import (
"errors"
"path/filepath"
"testing"
@@ -33,8 +34,8 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
"Expected: %s, got: <nil>", ErrParentBlockUnknown)
}
ruleErr, ok := err.(RuleError)
if !ok {
var ruleErr RuleError
if ok := errors.As(err, &ruleErr); !ok {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
"Expected RuleError but got %s", err)
} else if ruleErr.ErrorCode != ErrParentBlockUnknown {
@@ -71,8 +72,7 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are invalid: "+
"Expected: %s, got: <nil>", ErrInvalidAncestorBlock)
}
ruleErr, ok = err.(RuleError)
if !ok {
if ok := errors.As(err, &ruleErr); !ok {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are invalid: "+
"Expected RuleError but got %s", err)
} else if ruleErr.ErrorCode != ErrInvalidAncestorBlock {
@@ -91,8 +91,7 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block due to bad context: "+
"Expected: %s, got: <nil>", ErrUnexpectedDifficulty)
}
ruleErr, ok = err.(RuleError)
if !ok {
if ok := errors.As(err, &ruleErr); !ok {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block due to bad context: "+
"Expected RuleError but got %s", err)
} else if ruleErr.ErrorCode != ErrUnexpectedDifficulty {

View File

@@ -19,12 +19,12 @@ func TestBlockHeap(t *testing.T) {
defer teardownFunc()
block0Header := dagconfig.MainnetParams.GenesisBlock.Header
block0, _ := dag.newBlockNode(&block0Header, newSet())
block0, _ := dag.newBlockNode(&block0Header, newBlockSet())
block100000Header := Block100000.Header
block100000, _ := dag.newBlockNode(&block100000Header, setFromSlice(block0))
block100000, _ := dag.newBlockNode(&block100000Header, blockSetFromSlice(block0))
block0smallHash, _ := dag.newBlockNode(&block0Header, newSet())
block0smallHash, _ := dag.newBlockNode(&block0Header, newBlockSet())
block0smallHash.hash = &daghash.Hash{}
tests := []struct {

View File

@@ -43,8 +43,8 @@ func newBlockIndex(db database.DB, dagParams *dagconfig.Params) *blockIndex {
// This function is safe for concurrent access.
func (bi *blockIndex) HaveBlock(hash *daghash.Hash) bool {
bi.RLock()
defer bi.RUnlock()
_, hasBlock := bi.index[*hash]
bi.RUnlock()
return hasBlock
}
@@ -54,8 +54,8 @@ func (bi *blockIndex) HaveBlock(hash *daghash.Hash) bool {
// This function is safe for concurrent access.
func (bi *blockIndex) LookupNode(hash *daghash.Hash) *blockNode {
bi.RLock()
defer bi.RUnlock()
node := bi.index[*hash]
bi.RUnlock()
return node
}
@@ -65,9 +65,9 @@ func (bi *blockIndex) LookupNode(hash *daghash.Hash) *blockNode {
// This function is safe for concurrent access.
func (bi *blockIndex) AddNode(node *blockNode) {
bi.Lock()
defer bi.Unlock()
bi.addNode(node)
bi.dirty[node] = struct{}{}
bi.Unlock()
}
// addNode adds the provided node to the block index, but does not mark it as
@@ -83,8 +83,8 @@ func (bi *blockIndex) addNode(node *blockNode) {
// This function is safe for concurrent access.
func (bi *blockIndex) NodeStatus(node *blockNode) blockStatus {
bi.RLock()
defer bi.RUnlock()
status := node.status
bi.RUnlock()
return status
}
@@ -95,9 +95,9 @@ func (bi *blockIndex) NodeStatus(node *blockNode) blockStatus {
// This function is safe for concurrent access.
func (bi *blockIndex) SetStatusFlags(node *blockNode, flags blockStatus) {
bi.Lock()
defer bi.Unlock()
node.status |= flags
bi.dirty[node] = struct{}{}
bi.Unlock()
}
// UnsetStatusFlags flips the provided status flags on the block node to off,
@@ -106,9 +106,9 @@ func (bi *blockIndex) SetStatusFlags(node *blockNode, flags blockStatus) {
// This function is safe for concurrent access.
func (bi *blockIndex) UnsetStatusFlags(node *blockNode, flags blockStatus) {
bi.Lock()
defer bi.Unlock()
node.status &^= flags
bi.dirty[node] = struct{}{}
bi.Unlock()
}
// flushToDB writes all dirty block nodes to the database. If all writes
@@ -134,8 +134,9 @@ func (bi *blockIndex) flushToDBWithTx(dbTx database.Tx) error {
return err
}
}
// If write was successful, clear the dirty set.
bi.dirty = make(map[*blockNode]struct{})
return nil
}
func (bi *blockIndex) clearDirtyEntries() {
bi.dirty = make(map[*blockNode]struct{})
}

View File

@@ -18,7 +18,7 @@ func TestAncestorErrors(t *testing.T) {
}
defer teardownFunc()
node := newTestNode(dag, newSet(), int32(0x10000000), 0, time.Unix(0, 0))
node := newTestNode(dag, newBlockSet(), int32(0x10000000), 0, time.Unix(0, 0))
node.blueScore = 2
ancestor := node.SelectedAncestor(3)
if ancestor != nil {

View File

@@ -31,11 +31,6 @@ const (
// statusInvalidAncestor indicates that one of the block's ancestors has
// has failed validation, thus the block is also invalid.
statusInvalidAncestor
// statusNone indicates that the block has no validation state flags set.
//
// NOTE: This must be defined last in order to avoid influencing iota.
statusNone blockStatus = 0
)
// KnownValid returns whether the block is known to be valid. This will return
@@ -80,7 +75,7 @@ type blockNode struct {
// bluesAnticoneSizes is a map holding the set of blues affected by this block and their
// modified blue anticone size.
bluesAnticoneSizes map[daghash.Hash]dagconfig.KType
bluesAnticoneSizes map[*blockNode]dagconfig.KType
// hash is the double sha 256 of the block.
hash *daghash.Hash
@@ -115,8 +110,8 @@ func (dag *BlockDAG) newBlockNode(blockHeader *wire.BlockHeader, parents blockSe
parents: parents,
children: make(blockSet),
blueScore: math.MaxUint64, // Initialized to the max value to avoid collisions with the genesis block
timestamp: dag.AdjustedTime().Unix(),
bluesAnticoneSizes: make(map[daghash.Hash]dagconfig.KType),
timestamp: dag.Now().Unix(),
bluesAnticoneSizes: make(map[*blockNode]dagconfig.KType),
}
// blockHeader is nil only for the virtual block
@@ -184,7 +179,7 @@ func (node *blockNode) Header() *wire.BlockHeader {
//
// This function is safe for concurrent access.
func (node *blockNode) SelectedAncestor(blueScore uint64) *blockNode {
if blueScore < 0 || blueScore > node.blueScore {
if blueScore > node.blueScore {
return nil
}

View File

@@ -25,16 +25,17 @@ func TestBlueAnticoneSizesSize(t *testing.T) {
}
blockHeader := dagconfig.SimnetParams.GenesisBlock.Header
node, _ := dag.newBlockNode(&blockHeader, newSet())
hash := daghash.Hash{1}
node, _ := dag.newBlockNode(&blockHeader, newBlockSet())
fakeBlue := &blockNode{hash: &daghash.Hash{1}}
dag.index.AddNode(fakeBlue)
// Setting maxKType to maximum value of KType.
// As we verify above that KType is unsigned we can be sure that maxKType is indeed the maximum value of KType.
maxKType := ^dagconfig.KType(0)
node.bluesAnticoneSizes[hash] = maxKType
node.bluesAnticoneSizes[fakeBlue] = maxKType
serializedNode, _ := serializeBlockNode(node)
deserializedNode, _ := dag.deserializeBlockNode(serializedNode)
if deserializedNode.bluesAnticoneSizes[hash] != maxKType {
if deserializedNode.bluesAnticoneSizes[fakeBlue] != maxKType {
t.Fatalf("TestBlueAnticoneSizesSize: BlueAnticoneSize should not change when deserializing. Expected: %v but got %v",
maxKType, deserializedNode.bluesAnticoneSizes[hash])
maxKType, deserializedNode.bluesAnticoneSizes[fakeBlue])
}
}

View File

@@ -9,14 +9,14 @@ import (
// blockSet implements a basic unsorted set of blocks
type blockSet map[*blockNode]struct{}
// newSet creates a new, empty BlockSet
func newSet() blockSet {
// newBlockSet creates a new, empty BlockSet
func newBlockSet() blockSet {
return map[*blockNode]struct{}{}
}
// setFromSlice converts a slice of blockNodes into an unordered set represented as map
func setFromSlice(nodes ...*blockNode) blockSet {
set := newSet()
// blockSetFromSlice converts a slice of blockNodes into an unordered set represented as map
func blockSetFromSlice(nodes ...*blockNode) blockSet {
set := newBlockSet()
for _, node := range nodes {
set.add(node)
}
@@ -36,7 +36,7 @@ func (bs blockSet) remove(node *blockNode) {
// clone clones the block set
func (bs blockSet) clone() blockSet {
clone := newSet()
clone := newBlockSet()
for node := range bs {
clone.add(node)
}
@@ -45,7 +45,7 @@ func (bs blockSet) clone() blockSet {
// subtract returns the difference between the BlockSet and another BlockSet
func (bs blockSet) subtract(other blockSet) blockSet {
diff := newSet()
diff := newBlockSet()
for node := range bs {
if !other.contains(node) {
diff.add(node)
@@ -102,17 +102,6 @@ func (bs blockSet) String() string {
return strings.Join(nodeStrs, ",")
}
// anyChildInSet returns true iff any child of node is contained within this set
func (bs blockSet) anyChildInSet(node *blockNode) bool {
for child := range node.children {
if bs.contains(child) {
return true
}
}
return false
}
func (bs blockSet) bluest() *blockNode {
var bluestNode *blockNode
var maxScore uint64

View File

@@ -8,7 +8,7 @@ import (
)
func TestHashes(t *testing.T) {
bs := setFromSlice(
bs := blockSetFromSlice(
&blockNode{
hash: &daghash.Hash{3},
},
@@ -49,33 +49,33 @@ func TestBlockSetSubtract(t *testing.T) {
}{
{
name: "both sets empty",
setA: setFromSlice(),
setB: setFromSlice(),
expectedResult: setFromSlice(),
setA: blockSetFromSlice(),
setB: blockSetFromSlice(),
expectedResult: blockSetFromSlice(),
},
{
name: "subtract an empty set",
setA: setFromSlice(node1),
setB: setFromSlice(),
expectedResult: setFromSlice(node1),
setA: blockSetFromSlice(node1),
setB: blockSetFromSlice(),
expectedResult: blockSetFromSlice(node1),
},
{
name: "subtract from empty set",
setA: setFromSlice(),
setB: setFromSlice(node1),
expectedResult: setFromSlice(),
setA: blockSetFromSlice(),
setB: blockSetFromSlice(node1),
expectedResult: blockSetFromSlice(),
},
{
name: "subtract unrelated set",
setA: setFromSlice(node1),
setB: setFromSlice(node2),
expectedResult: setFromSlice(node1),
setA: blockSetFromSlice(node1),
setB: blockSetFromSlice(node2),
expectedResult: blockSetFromSlice(node1),
},
{
name: "typical case",
setA: setFromSlice(node1, node2),
setB: setFromSlice(node2, node3),
expectedResult: setFromSlice(node1),
setA: blockSetFromSlice(node1, node2),
setB: blockSetFromSlice(node2, node3),
expectedResult: blockSetFromSlice(node1),
},
}
@@ -101,33 +101,33 @@ func TestBlockSetAddSet(t *testing.T) {
}{
{
name: "both sets empty",
setA: setFromSlice(),
setB: setFromSlice(),
expectedResult: setFromSlice(),
setA: blockSetFromSlice(),
setB: blockSetFromSlice(),
expectedResult: blockSetFromSlice(),
},
{
name: "add an empty set",
setA: setFromSlice(node1),
setB: setFromSlice(),
expectedResult: setFromSlice(node1),
setA: blockSetFromSlice(node1),
setB: blockSetFromSlice(),
expectedResult: blockSetFromSlice(node1),
},
{
name: "add to empty set",
setA: setFromSlice(),
setB: setFromSlice(node1),
expectedResult: setFromSlice(node1),
setA: blockSetFromSlice(),
setB: blockSetFromSlice(node1),
expectedResult: blockSetFromSlice(node1),
},
{
name: "add already added member",
setA: setFromSlice(node1, node2),
setB: setFromSlice(node1),
expectedResult: setFromSlice(node1, node2),
setA: blockSetFromSlice(node1, node2),
setB: blockSetFromSlice(node1),
expectedResult: blockSetFromSlice(node1, node2),
},
{
name: "typical case",
setA: setFromSlice(node1, node2),
setB: setFromSlice(node2, node3),
expectedResult: setFromSlice(node1, node2, node3),
setA: blockSetFromSlice(node1, node2),
setB: blockSetFromSlice(node2, node3),
expectedResult: blockSetFromSlice(node1, node2, node3),
},
}
@@ -153,33 +153,33 @@ func TestBlockSetAddSlice(t *testing.T) {
}{
{
name: "add empty slice to empty set",
set: setFromSlice(),
set: blockSetFromSlice(),
slice: []*blockNode{},
expectedResult: setFromSlice(),
expectedResult: blockSetFromSlice(),
},
{
name: "add an empty slice",
set: setFromSlice(node1),
set: blockSetFromSlice(node1),
slice: []*blockNode{},
expectedResult: setFromSlice(node1),
expectedResult: blockSetFromSlice(node1),
},
{
name: "add to empty set",
set: setFromSlice(),
set: blockSetFromSlice(),
slice: []*blockNode{node1},
expectedResult: setFromSlice(node1),
expectedResult: blockSetFromSlice(node1),
},
{
name: "add already added member",
set: setFromSlice(node1, node2),
set: blockSetFromSlice(node1, node2),
slice: []*blockNode{node1},
expectedResult: setFromSlice(node1, node2),
expectedResult: blockSetFromSlice(node1, node2),
},
{
name: "typical case",
set: setFromSlice(node1, node2),
set: blockSetFromSlice(node1, node2),
slice: []*blockNode{node2, node3},
expectedResult: setFromSlice(node1, node2, node3),
expectedResult: blockSetFromSlice(node1, node2, node3),
},
}
@@ -205,33 +205,33 @@ func TestBlockSetUnion(t *testing.T) {
}{
{
name: "both sets empty",
setA: setFromSlice(),
setB: setFromSlice(),
expectedResult: setFromSlice(),
setA: blockSetFromSlice(),
setB: blockSetFromSlice(),
expectedResult: blockSetFromSlice(),
},
{
name: "union against an empty set",
setA: setFromSlice(node1),
setB: setFromSlice(),
expectedResult: setFromSlice(node1),
setA: blockSetFromSlice(node1),
setB: blockSetFromSlice(),
expectedResult: blockSetFromSlice(node1),
},
{
name: "union from an empty set",
setA: setFromSlice(),
setB: setFromSlice(node1),
expectedResult: setFromSlice(node1),
setA: blockSetFromSlice(),
setB: blockSetFromSlice(node1),
expectedResult: blockSetFromSlice(node1),
},
{
name: "union with subset",
setA: setFromSlice(node1, node2),
setB: setFromSlice(node1),
expectedResult: setFromSlice(node1, node2),
setA: blockSetFromSlice(node1, node2),
setB: blockSetFromSlice(node1),
expectedResult: blockSetFromSlice(node1, node2),
},
{
name: "typical case",
setA: setFromSlice(node1, node2),
setB: setFromSlice(node2, node3),
expectedResult: setFromSlice(node1, node2, node3),
setA: blockSetFromSlice(node1, node2),
setB: blockSetFromSlice(node2, node3),
expectedResult: blockSetFromSlice(node1, node2, node3),
},
}

View File

@@ -23,17 +23,6 @@ import (
"github.com/kaspanet/kaspad/wire"
)
func loadBlocksWithLog(t *testing.T, filename string) ([]*util.Block, error) {
blocks, err := LoadBlocks(filename)
if err == nil {
t.Logf("Loaded %d blocks from file %s", len(blocks), filename)
for i, b := range blocks {
t.Logf("Block #%d: %s", i, b.Hash())
}
}
return blocks, err
}
// loadUTXOSet returns a utxo view loaded from a file.
func loadUTXOSet(filename string) (UTXOSet, error) {
// The utxostore file format is:
@@ -84,15 +73,8 @@ func loadUTXOSet(filename string) (UTXOSet, error) {
return nil, err
}
// Serialized utxo entry.
serialized := make([]byte, numBytes)
_, err = io.ReadAtLeast(r, serialized, int(numBytes))
if err != nil {
return nil, err
}
// Deserialize it and add it to the view.
entry, err := deserializeUTXOEntry(serialized)
// Deserialize the UTXO entry and add it to the UTXO set.
entry, err := deserializeUTXOEntry(r)
if err != nil {
return nil, err
}
@@ -117,7 +99,7 @@ func newTestDAG(params *dagconfig.Params) *BlockDAG {
targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second)
dag := &BlockDAG{
dagParams: params,
timeSource: NewMedianTime(),
timeSource: NewTimeSource(),
targetTimePerBlock: targetTimePerBlock,
difficultyAdjustmentWindowSize: params.DifficultyAdjustmentWindowSize,
TimestampDeviationTolerance: params.TimestampDeviationTolerance,
@@ -129,10 +111,10 @@ func newTestDAG(params *dagconfig.Params) *BlockDAG {
// Create a genesis block node and a block index populated with it
// on the above fake DAG.
dag.genesis, _ = dag.newBlockNode(&params.GenesisBlock.Header, newSet())
dag.genesis, _ = dag.newBlockNode(&params.GenesisBlock.Header, newBlockSet())
index.AddNode(dag.genesis)
dag.virtual = newVirtualBlock(dag, setFromSlice(dag.genesis))
dag.virtual = newVirtualBlock(dag, blockSetFromSlice(dag.genesis))
return dag
}
@@ -222,3 +204,15 @@ func nodeByMsgBlock(t *testing.T, dag *BlockDAG, block *wire.MsgBlock) *blockNod
}
return node
}
// fakeTimeSource is a TimeSource that always reports a fixed time,
// which makes tests deterministic.
type fakeTimeSource struct {
time time.Time
}
// Now returns the configured fake time, truncated to whole seconds.
func (fts *fakeTimeSource) Now() time.Time {
return time.Unix(fts.time.Unix(), 0)
}
// newFakeTimeSource returns a TimeSource frozen at fakeTime.
func newFakeTimeSource(fakeTime time.Time) TimeSource {
return &fakeTimeSource{time: fakeTime}
}

View File

@@ -1,584 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/kaspad/txscript"
)
// -----------------------------------------------------------------------------
// A variable length quantity (VLQ) is an encoding that uses an arbitrary number
// of binary octets to represent an arbitrarily large integer. The scheme
// employs a most significant byte (MSB) base-128 encoding where the high bit in
// each byte indicates whether or not the byte is the final one. In addition,
// to ensure there are no redundant encodings, an offset is subtracted every
// time a group of 7 bits is shifted out. Therefore each integer can be
// represented in exactly one way, and each representation stands for exactly
// one integer.
//
// Another nice property of this encoding is that it provides a compact
// representation of values that are typically used to indicate sizes. For
// example, the values 0 - 127 are represented with a single byte, 128 - 16511
// with two bytes, and 16512 - 2113663 with three bytes.
//
// While the encoding allows arbitrarily large integers, it is artificially
// limited in this code to an unsigned 64-bit integer for efficiency purposes.
//
// Example encodings:
// 0 -> [0x00]
// 127 -> [0x7f] * Max 1-byte value
// 128 -> [0x80 0x00]
// 129 -> [0x80 0x01]
// 255 -> [0x80 0x7f]
// 256 -> [0x81 0x00]
// 16511 -> [0xff 0x7f] * Max 2-byte value
// 16512 -> [0x80 0x80 0x00]
// 32895 -> [0x80 0xff 0x7f]
// 2113663 -> [0xff 0xff 0x7f] * Max 3-byte value
// 270549119 -> [0xff 0xff 0xff 0x7f] * Max 4-byte value
// 2^64-1 -> [0x80 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0x7f]
//
// References:
// https://en.wikipedia.org/wiki/Variable-length_quantity
// http://www.codecodex.com/wiki/Variable-Length_Integers
// -----------------------------------------------------------------------------
// serializeSizeVLQ returns the number of bytes it would take to serialize the
// passed number as a variable-length quantity according to the format described
// above.
func serializeSizeVLQ(n uint64) int {
size := 1
for ; n > 0x7f; n = (n >> 7) - 1 {
size++
}
return size
}
// putVLQ serializes the provided number to a variable-length quantity according
// to the format described above and returns the number of bytes of the encoded
// value. The result is placed directly into the passed byte slice which must
// be at least large enough to handle the number of bytes returned by the
// serializeSizeVLQ function or it will panic.
func putVLQ(target []byte, n uint64) int {
offset := 0
for ; ; offset++ {
// The high bit is set when another byte follows.
highBitMask := byte(0x80)
if offset == 0 {
highBitMask = 0x00
}
target[offset] = byte(n&0x7f) | highBitMask
if n <= 0x7f {
break
}
n = (n >> 7) - 1
}
// Reverse the bytes so it is MSB-encoded.
for i, j := 0, offset; i < j; i, j = i+1, j-1 {
target[i], target[j] = target[j], target[i]
}
return offset + 1
}
// deserializeVLQ deserializes the provided variable-length quantity according
// to the format described above. It also returns the number of bytes
// deserialized.
func deserializeVLQ(serialized []byte) (uint64, int) {
var n uint64
var size int
for _, val := range serialized {
size++
n = (n << 7) | uint64(val&0x7f)
if val&0x80 != 0x80 {
break
}
n++
}
return n, size
}
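To make the offset-based encoding concrete, here is a hypothetical round-trip sketch exercising the three helpers above (assume fmt is imported; the expected bytes match the example table, e.g. 128 encodes as [0x80 0x00] rather than the redundant [0x81 0x00] that a plain base-128 scheme would also permit):

func exampleVLQRoundTrip() {
    for _, val := range []uint64{0, 127, 128, 16511, 16512} {
        buf := make([]byte, serializeSizeVLQ(val))
        putVLQ(buf, val)
        decoded, bytesRead := deserializeVLQ(buf)
        // Prints, in order: 0 -> 00, 127 -> 7f, 128 -> 8000,
        // 16511 -> ff7f, 16512 -> 808000; decoded always equals val.
        fmt.Printf("%d -> %x -> %d (%d bytes)\n", val, buf, decoded, bytesRead)
    }
}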
// -----------------------------------------------------------------------------
// In order to reduce the size of stored scripts, a domain specific compression
// algorithm is used which recognizes standard scripts and stores them using
// less bytes than the original script.
//
// The general serialized format is:
//
// <script size or type><script data>
//
// Field Type Size
// script size or type VLQ variable
// script data []byte variable
//
// The specific serialized format for each recognized standard script is:
//
// - Pay-to-pubkey-hash: (21 bytes) - <0><20-byte pubkey hash>
// - Pay-to-script-hash: (21 bytes) - <1><20-byte script hash>
// - Pay-to-pubkey**: (33 bytes) - <2, 3, 4, or 5><32-byte pubkey X value>
// 2, 3 = compressed pubkey with bit 0 specifying the y coordinate to use
// 4, 5 = uncompressed pubkey with bit 0 specifying the y coordinate to use
// ** Only valid public keys starting with 0x02, 0x03, and 0x04 are supported.
//
// Any scripts which are not recognized as one of the aforementioned standard
// scripts are encoded using the general serialized format and encode the script
// size as the sum of the actual size of the script and the number of special
// cases.
// -----------------------------------------------------------------------------
// The following constants specify the special constants used to identify a
// special script type in the domain-specific compressed script encoding.
//
// NOTE: This section specifically does not use iota since these values are
// serialized and must be stable for long-term storage.
const (
// cstPayToPubKeyHash identifies a compressed pay-to-pubkey-hash script.
cstPayToPubKeyHash = 0
// cstPayToScriptHash identifies a compressed pay-to-script-hash script.
cstPayToScriptHash = 1
// cstPayToPubKeyComp2 identifies a compressed pay-to-pubkey script to
// a compressed pubkey. Bit 0 specifies which y-coordinate to use
// to reconstruct the full uncompressed pubkey.
cstPayToPubKeyComp2 = 2
// cstPayToPubKeyComp3 identifies a compressed pay-to-pubkey script to
// a compressed pubkey. Bit 0 specifies which y-coordinate to use
// to reconstruct the full uncompressed pubkey.
cstPayToPubKeyComp3 = 3
// cstPayToPubKeyUncomp4 identifies a compressed pay-to-pubkey script to
// an uncompressed pubkey. Bit 0 specifies which y-coordinate to use
// to reconstruct the full uncompressed pubkey.
cstPayToPubKeyUncomp4 = 4
// cstPayToPubKeyUncomp5 identifies a compressed pay-to-pubkey script to
// an uncompressed pubkey. Bit 0 specifies which y-coordinate to use
// to reconstruct the full uncompressed pubkey.
cstPayToPubKeyUncomp5 = 5
// numSpecialScripts is the number of special scripts recognized by the
// domain-specific script compression algorithm.
numSpecialScripts = 6
)
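As a worked example of the scheme, a standard 25-byte pay-to-pubkey-hash script compresses to 21 bytes: the five opcode bytes are dropped and only the type byte plus the 20-byte hash remain. A hypothetical sketch using the helpers defined below in this file (hexToBytes is the helper from the accompanying test file, bytes and fmt are assumed imported, and the hex values are the ones the compression tests assert):

func exampleScriptCompression() {
    // 25-byte standard pay-to-pubkey-hash script.
    uncompressed := hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac")
    compressed := make([]byte, compressedScriptSize(uncompressed)) // 21 bytes
    putCompressedScript(compressed, uncompressed)
    // compressed is now <0x00 type byte> || <20-byte pubkey hash>:
    // 001018853670f9f3b0582c5b9ee8ce93764ac32b93
    roundTrip := decompressScript(compressed)
    fmt.Println(bytes.Equal(roundTrip, uncompressed)) // true
}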
// isPubKeyHash returns whether or not the passed public key script is a
// standard pay-to-pubkey-hash script along with the pubkey hash it is paying to
// if it is.
func isPubKeyHash(script []byte) (bool, []byte) {
if len(script) == 25 && script[0] == txscript.OpDup &&
script[1] == txscript.OpHash160 &&
script[2] == txscript.OpData20 &&
script[23] == txscript.OpEqualVerify &&
script[24] == txscript.OpCheckSig {
return true, script[3:23]
}
return false, nil
}
// isScriptHash returns whether or not the passed public key script is a
// standard pay-to-script-hash script along with the script hash it is paying to
// if it is.
func isScriptHash(script []byte) (bool, []byte) {
if len(script) == 23 && script[0] == txscript.OpHash160 &&
script[1] == txscript.OpData20 &&
script[22] == txscript.OpEqual {
return true, script[2:22]
}
return false, nil
}
// isPubKey returns whether or not the passed public key script is a standard
// pay-to-pubkey script that pays to a valid compressed or uncompressed public
// key along with the serialized pubkey it is paying to if it is.
//
// NOTE: This function ensures the public key is actually valid since the
// compression algorithm requires valid pubkeys. It does not support hybrid
// pubkeys. This means that even if the script has the correct form for a
// pay-to-pubkey script, this function will only return true when it is paying
// to a valid compressed or uncompressed pubkey.
func isPubKey(script []byte) (bool, []byte) {
// Pay-to-compressed-pubkey script.
if len(script) == 35 && script[0] == txscript.OpData33 &&
script[34] == txscript.OpCheckSig && (script[1] == 0x02 ||
script[1] == 0x03) {
// Ensure the public key is valid.
serializedPubKey := script[1:34]
_, err := ecc.ParsePubKey(serializedPubKey, ecc.S256())
if err == nil {
return true, serializedPubKey
}
}
// Pay-to-uncompressed-pubkey script.
if len(script) == 67 && script[0] == txscript.OpData65 &&
script[66] == txscript.OpCheckSig && script[1] == 0x04 {
// Ensure the public key is valid.
serializedPubKey := script[1:66]
_, err := ecc.ParsePubKey(serializedPubKey, ecc.S256())
if err == nil {
return true, serializedPubKey
}
}
return false, nil
}
// compressedScriptSize returns the number of bytes the passed script would take
// when encoded with the domain specific compression algorithm described above.
func compressedScriptSize(scriptPubKey []byte) int {
// Pay-to-pubkey-hash script.
if valid, _ := isPubKeyHash(scriptPubKey); valid {
return 21
}
// Pay-to-script-hash script.
if valid, _ := isScriptHash(scriptPubKey); valid {
return 21
}
// Pay-to-pubkey (compressed or uncompressed) script.
if valid, _ := isPubKey(scriptPubKey); valid {
return 33
}
// When none of the above special cases apply, encode the script as is
// preceded by the sum of its size and the number of special cases
// encoded as a variable length quantity.
return serializeSizeVLQ(uint64(len(scriptPubKey)+numSpecialScripts)) +
len(scriptPubKey)
}
// decodeCompressedScriptSize treats the passed serialized bytes as a compressed
// script, possibly followed by other data, and returns the number of bytes it
// occupies taking into account the special encoding of the script size by the
// domain specific compression algorithm described above.
func decodeCompressedScriptSize(serialized []byte) int {
scriptSize, bytesRead := deserializeVLQ(serialized)
if bytesRead == 0 {
return 0
}
switch scriptSize {
case cstPayToPubKeyHash:
return 21
case cstPayToScriptHash:
return 21
case cstPayToPubKeyComp2, cstPayToPubKeyComp3, cstPayToPubKeyUncomp4,
cstPayToPubKeyUncomp5:
return 33
}
scriptSize -= numSpecialScripts
scriptSize += uint64(bytesRead)
return int(scriptSize)
}
// putCompressedScript compresses the passed script according to the domain
// specific compression algorithm described above directly into the passed
// target byte slice. The target byte slice must be at least large enough to
// handle the number of bytes returned by the compressedScriptSize function or
// it will panic.
func putCompressedScript(target, scriptPubKey []byte) int {
// Pay-to-pubkey-hash script.
if valid, hash := isPubKeyHash(scriptPubKey); valid {
target[0] = cstPayToPubKeyHash
copy(target[1:21], hash)
return 21
}
// Pay-to-script-hash script.
if valid, hash := isScriptHash(scriptPubKey); valid {
target[0] = cstPayToScriptHash
copy(target[1:21], hash)
return 21
}
// Pay-to-pubkey (compressed or uncompressed) script.
if valid, serializedPubKey := isPubKey(scriptPubKey); valid {
pubKeyFormat := serializedPubKey[0]
switch pubKeyFormat {
case 0x02, 0x03:
target[0] = pubKeyFormat
copy(target[1:33], serializedPubKey[1:33])
return 33
case 0x04:
// Encode the oddness of the serialized pubkey into the
// compressed script type.
target[0] = pubKeyFormat | (serializedPubKey[64] & 0x01)
copy(target[1:33], serializedPubKey[1:33])
return 33
}
}
// When none of the above special cases apply, encode the unmodified
// script preceded by the sum of its size and the number of special
// cases encoded as a variable length quantity.
encodedSize := uint64(len(scriptPubKey) + numSpecialScripts)
vlqSizeLen := putVLQ(target, encodedSize)
copy(target[vlqSizeLen:], scriptPubKey)
return vlqSizeLen + len(scriptPubKey)
}
// decompressScript returns the original script obtained by decompressing the
// passed compressed script according to the domain specific compression
// algorithm described above.
//
// NOTE: The script parameter must already have been proven to be long enough
// to contain the number of bytes returned by decodeCompressedScriptSize or it
// will panic. This is acceptable since it is only an internal function.
func decompressScript(compressedScriptPubKey []byte) []byte {
// In practice this function will not be called with a zero-length or
// nil script since the nil script encoding includes the length, however
// the code below assumes the length exists, so just return nil now if
// the function ever ends up being called with a nil script in the
// future.
if len(compressedScriptPubKey) == 0 {
return nil
}
// Decode the script size and examine it for the special cases.
encodedScriptSize, bytesRead := deserializeVLQ(compressedScriptPubKey)
switch encodedScriptSize {
// Pay-to-pubkey-hash script. The resulting script is:
// <OP_DUP><OP_HASH160><20 byte hash><OP_EQUALVERIFY><OP_CHECKSIG>
case cstPayToPubKeyHash:
scriptPubKey := make([]byte, 25)
scriptPubKey[0] = txscript.OpDup
scriptPubKey[1] = txscript.OpHash160
scriptPubKey[2] = txscript.OpData20
copy(scriptPubKey[3:], compressedScriptPubKey[bytesRead:bytesRead+20])
scriptPubKey[23] = txscript.OpEqualVerify
scriptPubKey[24] = txscript.OpCheckSig
return scriptPubKey
// Pay-to-script-hash script. The resulting script is:
// <OP_HASH160><20 byte script hash><OP_EQUAL>
case cstPayToScriptHash:
scriptPubKey := make([]byte, 23)
scriptPubKey[0] = txscript.OpHash160
scriptPubKey[1] = txscript.OpData20
copy(scriptPubKey[2:], compressedScriptPubKey[bytesRead:bytesRead+20])
scriptPubKey[22] = txscript.OpEqual
return scriptPubKey
// Pay-to-compressed-pubkey script. The resulting script is:
// <OP_DATA_33><33 byte compressed pubkey><OP_CHECKSIG>
case cstPayToPubKeyComp2, cstPayToPubKeyComp3:
scriptPubKey := make([]byte, 35)
scriptPubKey[0] = txscript.OpData33
scriptPubKey[1] = byte(encodedScriptSize)
copy(scriptPubKey[2:], compressedScriptPubKey[bytesRead:bytesRead+32])
scriptPubKey[34] = txscript.OpCheckSig
return scriptPubKey
// Pay-to-uncompressed-pubkey script. The resulting script is:
// <OP_DATA_65><65 byte uncompressed pubkey><OP_CHECKSIG>
case cstPayToPubKeyUncomp4, cstPayToPubKeyUncomp5:
// Change the leading byte to the appropriate compressed pubkey
// identifier (0x02 or 0x03) so it can be decoded as a
// compressed pubkey. This really should never fail since the
// encoding ensures it is valid before compressing to this type.
compressedKey := make([]byte, 33)
compressedKey[0] = byte(encodedScriptSize - 2)
copy(compressedKey[1:], compressedScriptPubKey[1:])
key, err := ecc.ParsePubKey(compressedKey, ecc.S256())
if err != nil {
return nil
}
scriptPubKey := make([]byte, 67)
scriptPubKey[0] = txscript.OpData65
copy(scriptPubKey[1:], key.SerializeUncompressed())
scriptPubKey[66] = txscript.OpCheckSig
return scriptPubKey
}
// When none of the special cases apply, the script was encoded using
// the general format, so reduce the script size by the number of
// special cases and return the unmodified script.
scriptSize := int(encodedScriptSize - numSpecialScripts)
scriptPubKey := make([]byte, scriptSize)
copy(scriptPubKey, compressedScriptPubKey[bytesRead:bytesRead+scriptSize])
return scriptPubKey
}
// -----------------------------------------------------------------------------
// In order to reduce the size of stored amounts, a domain specific compression
// algorithm is used which relies on there typically being a lot of zeroes at
// the end of the amounts.
//
// While this is simply exchanging one uint64 for another, the resulting value
// for typical amounts has a much smaller magnitude which results in fewer bytes
// when encoded as variable length quantity. For example, consider the amount
// of 0.1 KAS which is 10000000 sompi. Encoding 10000000 as a VLQ would take
// 4 bytes while encoding the compressed value of 8 as a VLQ only takes 1 byte.
//
// Essentially the compression is achieved by splitting the value into an
// exponent in the range [0-9] and a digit in the range [1-9], when possible,
// and encoding them in a way that can be decoded. More specifically, the
// encoding is as follows:
// - 0 is 0
// - Find the exponent, e, as the largest power of 10 that evenly divides the
// value up to a maximum of 9
// - When e < 9, the final digit can't be 0 so store it as d and remove it by
// dividing the value by 10 (call the result n). The encoded value is thus:
// 1 + 10*(9*n + d-1) + e
// - When e==9, the only thing known is the amount is not 0. The encoded value
// is thus:
// 1 + 10*(n-1) + e == 10 + 10*(n-1)
//
// Example encodings:
// (The numbers in parentheses are the number of bytes when serialized as a VLQ)
// 0 (1) -> 0 (1) * 0.00000000 KAS
// 1000 (2) -> 4 (1) * 0.00001000 KAS
// 10000 (2) -> 5 (1) * 0.00010000 KAS
// 12345678 (4) -> 111111101 (4) * 0.12345678 KAS
// 50000000 (4) -> 48 (1) * 0.50000000 KAS
// 100000000 (4) -> 9 (1) * 1.00000000 KAS
// 500000000 (5) -> 49 (1) * 5.00000000 KAS
// 1000000000 (5) -> 10 (1) * 10.00000000 KAS
// -----------------------------------------------------------------------------
// compressTxOutAmount compresses the passed amount according to the domain
// specific compression algorithm described above.
func compressTxOutAmount(amount uint64) uint64 {
// No need to do any work if it's zero.
if amount == 0 {
return 0
}
// Find the largest power of 10 (max of 9) that evenly divides the
// value.
exponent := uint64(0)
for amount%10 == 0 && exponent < 9 {
amount /= 10
exponent++
}
// The compressed result for exponents less than 9 is:
// 1 + 10*(9*n + d-1) + e
if exponent < 9 {
lastDigit := amount % 10
amount /= 10
return 1 + 10*(9*amount+lastDigit-1) + exponent
}
// The compressed result for an exponent of 9 is:
// 1 + 10*(n-1) + e == 10 + 10*(n-1)
return 10 + 10*(amount-1)
}
// decompressTxOutAmount returns the original amount the passed compressed
// amount represents according to the domain specific compression algorithm
// described above.
func decompressTxOutAmount(amount uint64) uint64 {
// No need to do any work if it's zero.
if amount == 0 {
return 0
}
// The decompressed amount is either of the following two equations:
// x = 1 + 10*(9*n + d - 1) + e
// x = 1 + 10*(n - 1) + 9
amount--
// The decompressed amount is now one of the following two equations:
// x = 10*(9*n + d - 1) + e
// x = 10*(n - 1) + 9
exponent := amount % 10
amount /= 10
// The decompressed amount is now one of the following two equations:
// x = 9*n + d - 1 | where e < 9
// x = n - 1 | where e = 9
n := uint64(0)
if exponent < 9 {
lastDigit := amount%9 + 1
amount /= 9
n = amount*10 + lastDigit
} else {
n = amount + 1
}
// Apply the exponent.
for ; exponent > 0; exponent-- {
n *= 10
}
return n
}
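A hypothetical round trip through the two functions above, following the arithmetic in the comment (0.5 KAS = 50000000 sompi gives e=7, d=5, n=0, hence 1 + 10*(9*0 + 5-1) + 7 = 48; fmt is assumed imported):

func exampleAmountCompression() {
    fmt.Println(compressTxOutAmount(50000000)) // 48: one VLQ byte instead of four
    fmt.Println(decompressTxOutAmount(48))     // 50000000
    // 21000000 KAS takes the e==9 branch: 10 + 10*(2100000-1) = 21000000.
    fmt.Println(compressTxOutAmount(2100000000000000)) // 21000000
}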
// -----------------------------------------------------------------------------
// Compressed transaction outputs consist of an amount and a public key script
// both compressed using the domain specific compression algorithms previously
// described.
//
// The serialized format is:
//
// <compressed amount><compressed script>
//
// Field Type Size
// compressed amount VLQ variable
// compressed script []byte variable
// -----------------------------------------------------------------------------
// compressedTxOutSize returns the number of bytes the passed transaction output
// fields would take when encoded with the format described above.
func compressedTxOutSize(amount uint64, scriptPubKey []byte) int {
return serializeSizeVLQ(compressTxOutAmount(amount)) +
compressedScriptSize(scriptPubKey)
}
// putCompressedTxOut compresses the passed amount and script according to their
// domain specific compression algorithms and encodes them directly into the
// passed target byte slice with the format described above. The target byte
// slice must be at least large enough to handle the number of bytes returned by
// the compressedTxOutSize function or it will panic.
func putCompressedTxOut(target []byte, amount uint64, scriptPubKey []byte) int {
offset := putVLQ(target, compressTxOutAmount(amount))
offset += putCompressedScript(target[offset:], scriptPubKey)
return offset
}
// decodeCompressedTxOut decodes the passed compressed txout, possibly followed
// by other data, into its uncompressed amount and script and returns them along
// with the number of bytes they occupied prior to decompression.
func decodeCompressedTxOut(serialized []byte) (uint64, []byte, int, error) {
// Deserialize the compressed amount and ensure there are bytes
// remaining for the compressed script.
compressedAmount, bytesRead := deserializeVLQ(serialized)
if bytesRead >= len(serialized) {
return 0, nil, bytesRead, errDeserialize("unexpected end of " +
"data after compressed amount")
}
// Decode the compressed script size and ensure there are enough bytes
// left in the slice for it.
scriptSize := decodeCompressedScriptSize(serialized[bytesRead:])
if len(serialized[bytesRead:]) < scriptSize {
return 0, nil, bytesRead, errDeserialize("unexpected end of " +
"data after script size")
}
// Decompress and return the amount and script.
amount := decompressTxOutAmount(compressedAmount)
script := decompressScript(serialized[bytesRead : bytesRead+scriptSize])
return amount, script, bytesRead + scriptSize, nil
}

View File

@@ -1,436 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"bytes"
"encoding/hex"
"testing"
)
// hexToBytes converts the passed hex string into bytes and will panic if there
// is an error. This is only provided for the hard-coded constants so errors in
// the source code can be detected. It will only (and must only) be called with
// hard-coded values.
func hexToBytes(s string) []byte {
b, err := hex.DecodeString(s)
if err != nil {
panic("invalid hex in source file: " + s)
}
return b
}
// TestVLQ ensures the variable length quantity serialization, deserialization,
// and size calculation works as expected.
func TestVLQ(t *testing.T) {
t.Parallel()
tests := []struct {
val uint64
serialized []byte
}{
{0, hexToBytes("00")},
{1, hexToBytes("01")},
{127, hexToBytes("7f")},
{128, hexToBytes("8000")},
{129, hexToBytes("8001")},
{255, hexToBytes("807f")},
{256, hexToBytes("8100")},
{16383, hexToBytes("fe7f")},
{16384, hexToBytes("ff00")},
{16511, hexToBytes("ff7f")}, // Max 2-byte value
{16512, hexToBytes("808000")},
{16513, hexToBytes("808001")},
{16639, hexToBytes("80807f")},
{32895, hexToBytes("80ff7f")},
{2113663, hexToBytes("ffff7f")}, // Max 3-byte value
{2113664, hexToBytes("80808000")},
{270549119, hexToBytes("ffffff7f")}, // Max 4-byte value
{270549120, hexToBytes("8080808000")},
{2147483647, hexToBytes("86fefefe7f")},
{2147483648, hexToBytes("86fefeff00")},
{4294967295, hexToBytes("8efefefe7f")}, // Max uint32, 5 bytes
// Max uint64, 10 bytes
{18446744073709551615, hexToBytes("80fefefefefefefefe7f")},
}
for _, test := range tests {
// Ensure the function to calculate the serialized size without
// actually serializing the value is calculated properly.
gotSize := serializeSizeVLQ(test.val)
if gotSize != len(test.serialized) {
t.Errorf("serializeSizeVLQ: did not get expected size "+
"for %d - got %d, want %d", test.val, gotSize,
len(test.serialized))
continue
}
// Ensure the value serializes to the expected bytes.
gotBytes := make([]byte, gotSize)
gotBytesWritten := putVLQ(gotBytes, test.val)
if !bytes.Equal(gotBytes, test.serialized) {
t.Errorf("putVLQUnchecked: did not get expected bytes "+
"for %d - got %x, want %x", test.val, gotBytes,
test.serialized)
continue
}
if gotBytesWritten != len(test.serialized) {
t.Errorf("putVLQUnchecked: did not get expected number "+
"of bytes written for %d - got %d, want %d",
test.val, gotBytesWritten, len(test.serialized))
continue
}
// Ensure the serialized bytes deserialize to the expected
// value.
gotVal, gotBytesRead := deserializeVLQ(test.serialized)
if gotVal != test.val {
t.Errorf("deserializeVLQ: did not get expected value "+
"for %x - got %d, want %d", test.serialized,
gotVal, test.val)
continue
}
if gotBytesRead != len(test.serialized) {
t.Errorf("deserializeVLQ: did not get expected number "+
"of bytes read for %d - got %d, want %d",
test.serialized, gotBytesRead,
len(test.serialized))
continue
}
}
}
// TestScriptCompression ensures the domain-specific script compression and
// decompression works as expected.
func TestScriptCompression(t *testing.T) {
t.Parallel()
tests := []struct {
name string
uncompressed []byte
compressed []byte
}{
{
name: "nil",
uncompressed: nil,
compressed: hexToBytes("06"),
},
{
name: "pay-to-pubkey-hash 1",
uncompressed: hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac"),
compressed: hexToBytes("001018853670f9f3b0582c5b9ee8ce93764ac32b93"),
},
{
name: "pay-to-pubkey-hash 2",
uncompressed: hexToBytes("76a914e34cce70c86373273efcc54ce7d2a491bb4a0e8488ac"),
compressed: hexToBytes("00e34cce70c86373273efcc54ce7d2a491bb4a0e84"),
},
{
name: "pay-to-script-hash 1",
uncompressed: hexToBytes("a914da1745e9b549bd0bfa1a569971c77eba30cd5a4b87"),
compressed: hexToBytes("01da1745e9b549bd0bfa1a569971c77eba30cd5a4b"),
},
{
name: "pay-to-script-hash 2",
uncompressed: hexToBytes("a914f815b036d9bbbce5e9f2a00abd1bf3dc91e9551087"),
compressed: hexToBytes("01f815b036d9bbbce5e9f2a00abd1bf3dc91e95510"),
},
{
name: "pay-to-pubkey compressed 0x02",
uncompressed: hexToBytes("2102192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4ac"),
compressed: hexToBytes("02192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
},
{
name: "pay-to-pubkey compressed 0x03",
uncompressed: hexToBytes("2103b0bd634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e65ac"),
compressed: hexToBytes("03b0bd634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e65"),
},
{
name: "pay-to-pubkey uncompressed 0x04 even",
uncompressed: hexToBytes("4104192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b40d45264838c0bd96852662ce6a847b197376830160c6d2eb5e6a4c44d33f453eac"),
compressed: hexToBytes("04192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
},
{
name: "pay-to-pubkey uncompressed 0x04 odd",
uncompressed: hexToBytes("410411db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5cb2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3ac"),
compressed: hexToBytes("0511db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c"),
},
{
name: "pay-to-pubkey invalid pubkey",
uncompressed: hexToBytes("3302aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
compressed: hexToBytes("293302aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
},
{
name: "requires 2 size bytes - data push 200 bytes",
uncompressed: append(hexToBytes("4cc8"), bytes.Repeat([]byte{0x00}, 200)...),
// [0x80, 0x50] = 208 as a variable length quantity
// [0x4c, 0xc8] = OP_PUSHDATA1 200
compressed: append(hexToBytes("80504cc8"), bytes.Repeat([]byte{0x00}, 200)...),
},
}
for _, test := range tests {
// Ensure the function to calculate the serialized size without
// actually serializing the value is calculated properly.
gotSize := compressedScriptSize(test.uncompressed)
if gotSize != len(test.compressed) {
t.Errorf("compressedScriptSize (%s): did not get "+
"expected size - got %d, want %d", test.name,
gotSize, len(test.compressed))
continue
}
// Ensure the script compresses to the expected bytes.
gotCompressed := make([]byte, gotSize)
gotBytesWritten := putCompressedScript(gotCompressed,
test.uncompressed)
if !bytes.Equal(gotCompressed, test.compressed) {
t.Errorf("putCompressedScript (%s): did not get "+
"expected bytes - got %x, want %x", test.name,
gotCompressed, test.compressed)
continue
}
if gotBytesWritten != len(test.compressed) {
t.Errorf("putCompressedScript (%s): did not get "+
"expected number of bytes written - got %d, "+
"want %d", test.name, gotBytesWritten,
len(test.compressed))
continue
}
// Ensure the compressed script size is properly decoded from
// the compressed script.
gotDecodedSize := decodeCompressedScriptSize(test.compressed)
if gotDecodedSize != len(test.compressed) {
t.Errorf("decodeCompressedScriptSize (%s): did not get "+
"expected size - got %d, want %d", test.name,
gotDecodedSize, len(test.compressed))
continue
}
// Ensure the script decompresses to the expected bytes.
gotDecompressed := decompressScript(test.compressed)
if !bytes.Equal(gotDecompressed, test.uncompressed) {
t.Errorf("decompressScript (%s): did not get expected "+
"bytes - got %x, want %x", test.name,
gotDecompressed, test.uncompressed)
continue
}
}
}
// TestScriptCompressionErrors ensures calling various functions related to
// script compression with incorrect data returns the expected results.
func TestScriptCompressionErrors(t *testing.T) {
t.Parallel()
// A nil script must result in a decoded size of 0.
if gotSize := decodeCompressedScriptSize(nil); gotSize != 0 {
t.Fatalf("decodeCompressedScriptSize with nil script did not "+
"return 0 - got %d", gotSize)
}
// A nil script must result in a nil decompressed script.
if gotScript := decompressScript(nil); gotScript != nil {
t.Fatalf("decompressScript with nil script did not return nil "+
"decompressed script - got %x", gotScript)
}
// A compressed script for a pay-to-pubkey (uncompressed) that results
// in an invalid pubkey must result in a nil decompressed script.
compressedScript := hexToBytes("04012d74d0cb94344c9569c2e77901573d8d" +
"7903c3ebec3a957724895dca52c6b4")
if gotScript := decompressScript(compressedScript); gotScript != nil {
t.Fatalf("decompressScript with compressed pay-to-"+
"uncompressed-pubkey that is invalid did not return "+
"nil decompressed script - got %x", gotScript)
}
}
// TestAmountCompression ensures the domain-specific transaction output amount
// compression and decompression works as expected.
func TestAmountCompression(t *testing.T) {
t.Parallel()
tests := []struct {
name string
uncompressed uint64
compressed uint64
}{
{
name: "0 KAS",
uncompressed: 0,
compressed: 0,
},
{
name: "546 Sompi (current network dust value)",
uncompressed: 546,
compressed: 4911,
},
{
name: "0.00001 KAS (typical transaction fee)",
uncompressed: 1000,
compressed: 4,
},
{
name: "0.0001 KAS (typical transaction fee)",
uncompressed: 10000,
compressed: 5,
},
{
name: "0.12345678 KAS",
uncompressed: 12345678,
compressed: 111111101,
},
{
name: "0.5 KAS",
uncompressed: 50000000,
compressed: 48,
},
{
name: "1 KAS",
uncompressed: 100000000,
compressed: 9,
},
{
name: "5 KAS",
uncompressed: 500000000,
compressed: 49,
},
{
name: "21000000 KAS (max minted coins)",
uncompressed: 2100000000000000,
compressed: 21000000,
},
}
for _, test := range tests {
// Ensure the amount compresses to the expected value.
gotCompressed := compressTxOutAmount(test.uncompressed)
if gotCompressed != test.compressed {
t.Errorf("compressTxOutAmount (%s): did not get "+
"expected value - got %d, want %d", test.name,
gotCompressed, test.compressed)
continue
}
// Ensure the value decompresses to the expected value.
gotDecompressed := decompressTxOutAmount(test.compressed)
if gotDecompressed != test.uncompressed {
t.Errorf("decompressTxOutAmount (%s): did not get "+
"expected value - got %d, want %d", test.name,
gotDecompressed, test.uncompressed)
continue
}
}
}
// TestCompressedTxOut ensures the transaction output serialization and
// deserialization works as expected.
func TestCompressedTxOut(t *testing.T) {
t.Parallel()
tests := []struct {
name string
amount uint64
scriptPubKey []byte
compressed []byte
}{
{
name: "pay-to-pubkey-hash dust",
amount: 546,
scriptPubKey: hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac"),
compressed: hexToBytes("a52f001018853670f9f3b0582c5b9ee8ce93764ac32b93"),
},
{
name: "pay-to-pubkey uncompressed 1 KAS",
amount: 100000000,
scriptPubKey: hexToBytes("4104192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b40d45264838c0bd96852662ce6a847b197376830160c6d2eb5e6a4c44d33f453eac"),
compressed: hexToBytes("0904192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
},
}
for _, test := range tests {
// Ensure the function to calculate the serialized size without
// actually serializing the txout is calculated properly.
gotSize := compressedTxOutSize(test.amount, test.scriptPubKey)
if gotSize != len(test.compressed) {
t.Errorf("compressedTxOutSize (%s): did not get "+
"expected size - got %d, want %d", test.name,
gotSize, len(test.compressed))
continue
}
// Ensure the txout compresses to the expected value.
gotCompressed := make([]byte, gotSize)
gotBytesWritten := putCompressedTxOut(gotCompressed,
test.amount, test.scriptPubKey)
if !bytes.Equal(gotCompressed, test.compressed) {
t.Errorf("compressTxOut (%s): did not get expected "+
"bytes - got %x, want %x", test.name,
gotCompressed, test.compressed)
continue
}
if gotBytesWritten != len(test.compressed) {
t.Errorf("compressTxOut (%s): did not get expected "+
"number of bytes written - got %d, want %d",
test.name, gotBytesWritten,
len(test.compressed))
continue
}
// Ensure the serialized bytes are decoded back to the expected
// uncompressed values.
gotAmount, gotScript, gotBytesRead, err := decodeCompressedTxOut(
test.compressed)
if err != nil {
t.Errorf("decodeCompressedTxOut (%s): unexpected "+
"error: %v", test.name, err)
continue
}
if gotAmount != test.amount {
t.Errorf("decodeCompressedTxOut (%s): did not get "+
"expected amount - got %d, want %d",
test.name, gotAmount, test.amount)
continue
}
if !bytes.Equal(gotScript, test.scriptPubKey) {
t.Errorf("decodeCompressedTxOut (%s): did not get "+
"expected script - got %x, want %x",
test.name, gotScript, test.scriptPubKey)
continue
}
if gotBytesRead != len(test.compressed) {
t.Errorf("decodeCompressedTxOut (%s): did not get "+
"expected number of bytes read - got %d, want %d",
test.name, gotBytesRead, len(test.compressed))
continue
}
}
}
// TestTxOutCompressionErrors ensures calling various functions related to
// txout compression with incorrect data returns the expected results.
func TestTxOutCompressionErrors(t *testing.T) {
t.Parallel()
// A compressed txout with missing compressed script must error.
compressedTxOut := hexToBytes("00")
_, _, _, err := decodeCompressedTxOut(compressedTxOut)
if !isDeserializeErr(err) {
t.Fatalf("decodeCompressedTxOut with missing compressed script "+
"did not return expected error type - got %T, want "+
"errDeserialize", err)
}
// A compressed txout with short compressed script must error.
compressedTxOut = hexToBytes("0010")
_, _, _, err = decodeCompressedTxOut(compressedTxOut)
if !isDeserializeErr(err) {
t.Fatalf("decodeCompressedTxOut with short compressed script "+
"did not return expected error type - got %T, want "+
"errDeserialize", err)
}
}

View File

@@ -15,6 +15,7 @@ import (
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/txscript"
@@ -27,6 +28,8 @@ const (
// maxOrphanBlocks is the maximum number of orphan blocks that can be
// queued.
maxOrphanBlocks = 100
// isDAGCurrentMaxDiff is the maximum tip age for the DAG to still be
// considered current. See isCurrent for its use.
isDAGCurrentMaxDiff = 12 * time.Hour
)
// orphanBlock represents a block that we don't yet have the parent for. It
@@ -59,7 +62,7 @@ type BlockDAG struct {
// separate mutex.
db database.DB
dagParams *dagconfig.Params
timeSource MedianTimeSource
timeSource TimeSource
sigCache *txscript.SigCache
indexManager IndexManager
genesis *blockNode
@@ -151,25 +154,26 @@ type BlockDAG struct {
SubnetworkStore *SubnetworkStore
utxoDiffStore *utxoDiffStore
reachabilityStore *reachabilityStore
multisetStore *multisetStore
}
// HaveBlock returns whether or not the DAG instance has the block represented
// IsKnownBlock returns whether or not the DAG instance has the block represented
// by the passed hash. This includes checking the various places a block can
// be in, like part of the DAG or the orphan pool.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) HaveBlock(hash *daghash.Hash) bool {
return dag.BlockExists(hash) || dag.IsKnownOrphan(hash) || dag.isKnownDelayedBlock(hash)
func (dag *BlockDAG) IsKnownBlock(hash *daghash.Hash) bool {
return dag.IsInDAG(hash) || dag.IsKnownOrphan(hash) || dag.isKnownDelayedBlock(hash)
}
// HaveBlocks returns whether or not the DAG instance has all blocks represented
// AreKnownBlocks returns whether or not the DAG instance has all blocks represented
// by the passed hashes. This includes checking the various places a block can
// be in, like part of the DAG or the orphan pool.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) HaveBlocks(hashes []*daghash.Hash) bool {
func (dag *BlockDAG) AreKnownBlocks(hashes []*daghash.Hash) bool {
for _, hash := range hashes {
haveBlock := dag.HaveBlock(hash)
haveBlock := dag.IsKnownBlock(hash)
if !haveBlock {
return false
}
@@ -192,8 +196,8 @@ func (dag *BlockDAG) IsKnownOrphan(hash *daghash.Hash) bool {
// Protect concurrent access. Using a read lock only so multiple
// readers can query without blocking each other.
dag.orphanLock.RLock()
defer dag.orphanLock.RUnlock()
_, exists := dag.orphans[*hash]
dag.orphanLock.RUnlock()
return exists
}
@@ -230,11 +234,9 @@ func (dag *BlockDAG) GetOrphanMissingAncestorHashes(orphanHash *daghash.Hash) ([
visited[*current] = true
orphan, orphanExists := dag.orphans[*current]
if orphanExists {
for _, parentHash := range orphan.block.MsgBlock().Header.ParentHashes {
queue = append(queue, parentHash)
}
queue = append(queue, orphan.block.MsgBlock().Header.ParentHashes...)
} else {
if !dag.BlockExists(current) && current != orphanHash {
if !dag.IsInDAG(current) && current != orphanHash {
missingAncestorsHashes = append(missingAncestorsHashes, current)
}
}
@@ -484,27 +486,16 @@ func (dag *BlockDAG) addBlock(node *blockNode,
// Connect the block to the DAG.
chainUpdates, err := dag.connectBlock(node, block, selectedParentAnticone, fastAdd)
if err != nil {
if _, ok := err.(RuleError); ok {
if errors.As(err, &RuleError{}) {
dag.index.SetStatusFlags(node, statusValidateFailed)
} else {
return nil, err
err := dag.index.flushToDB()
if err != nil {
return nil, err
}
}
} else {
dag.blockCount++
}
// Intentionally ignore errors writing updated node status to DB. If
// it fails to write, it's not the end of the world. If the block is
// invalid, the worst that can happen is we revalidate the block
// after a restart.
if writeErr := dag.index.flushToDB(); writeErr != nil {
log.Warnf("Error flushing block index changes to disk: %s",
writeErr)
}
// If dag.connectBlock returned a rule error, return it here after updating DB
if err != nil {
return nil, err
}
dag.blockCount++
return chainUpdates, nil
}
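The move from a direct type assertion to errors.As above matters once errors get wrapped: a RuleError wrapped by errors.Wrapf no longer satisfies err.(RuleError), but errors.As walks the wrap chain. A minimal illustration with a stand-in error type (assuming the pkg/errors package used throughout this diff, whose As mirrors the standard library's; fmt is assumed imported):

type ruleErrSketch struct{ msg string }

func (e ruleErrSketch) Error() string { return e.msg }

func exampleErrorsAs() {
    err := errors.Wrap(ruleErrSketch{msg: "bad block"}, "connecting block")
    if _, ok := err.(ruleErrSketch); ok {
        fmt.Println("never printed: the assertion fails on a wrapped error")
    }
    var ruleErr ruleErrSketch
    if errors.As(err, &ruleErr) {
        fmt.Println("errors.As finds it:", ruleErr.msg) // printed
    }
}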
@@ -571,13 +562,13 @@ func (dag *BlockDAG) connectBlock(node *blockNode,
return nil, err
}
newBlockUTXO, txsAcceptanceData, newBlockFeeData, err := node.verifyAndBuildUTXO(dag, block.Transactions(), fastAdd)
newBlockUTXO, txsAcceptanceData, newBlockFeeData, newBlockMultiSet, err := node.verifyAndBuildUTXO(dag, block.Transactions(), fastAdd)
if err != nil {
newErrString := fmt.Sprintf("error verifying UTXO for %s: %s", node, err)
if err, ok := err.(RuleError); ok {
return nil, ruleError(err.ErrorCode, newErrString)
var ruleErr RuleError
if ok := errors.As(err, &ruleErr); ok {
return nil, ruleError(ruleErr.ErrorCode, fmt.Sprintf("error verifying UTXO for %s: %s", node, err))
}
return nil, errors.New(newErrString)
return nil, errors.Wrapf(err, "error verifying UTXO for %s", node)
}
err = node.validateCoinbaseTransaction(dag, block, txsAcceptanceData)
@@ -586,7 +577,7 @@ func (dag *BlockDAG) connectBlock(node *blockNode,
}
// Apply all changes to the DAG.
virtualUTXODiff, virtualTxsAcceptanceData, chainUpdates, err := dag.applyDAGChanges(node, newBlockUTXO, selectedParentAnticone)
virtualUTXODiff, virtualTxsAcceptanceData, chainUpdates, err := dag.applyDAGChanges(node, newBlockUTXO, newBlockMultiSet, selectedParentAnticone)
if err != nil {
// Since all validation logic has already ran, if applyDAGChanges errors out,
// this means we have a problem in the internal structure of the DAG - a problem which is
@@ -603,19 +594,138 @@ func (dag *BlockDAG) connectBlock(node *blockNode,
return chainUpdates, nil
}
// calcMultiset returns the multiset of the given block's UTXO, as built from the given transactions.
func (node *blockNode) calcMultiset(dag *BlockDAG, transactions []*util.Tx, acceptanceData MultiBlockTxsAcceptanceData, selectedParentUTXO, pastUTXO UTXOSet) (*secp256k1.MultiSet, error) {
ms, err := node.pastUTXOMultiSet(dag, acceptanceData, selectedParentUTXO)
if err != nil {
return nil, err
}
for _, tx := range transactions {
ms, err = addTxToMultiset(ms, tx.MsgTx(), pastUTXO, UnacceptedBlueScore)
if err != nil {
return nil, err
}
}
return ms, nil
}
// acceptedSelectedParentMultiset takes the multiset of the selected
// parent, replaces all the selected parent outputs' blue score with
// the block blue score and returns the result.
func (node *blockNode) acceptedSelectedParentMultiset(dag *BlockDAG,
acceptanceData MultiBlockTxsAcceptanceData) (*secp256k1.MultiSet, error) {
if node.isGenesis() {
return secp256k1.NewMultiset(), nil
}
ms, err := dag.multisetStore.multisetByBlockNode(node.selectedParent)
if err != nil {
return nil, err
}
selectedParentAcceptanceData, exists := acceptanceData.FindAcceptanceData(node.selectedParent.hash)
if !exists {
return nil, errors.Errorf("couldn't find selected parent acceptance data for block %s", node)
}
for _, txAcceptanceData := range selectedParentAcceptanceData.TxAcceptanceData {
tx := txAcceptanceData.Tx
msgTx := tx.MsgTx()
isCoinbase := tx.IsCoinBase()
for i, txOut := range msgTx.TxOut {
outpoint := *wire.NewOutpoint(tx.ID(), uint32(i))
unacceptedEntry := NewUTXOEntry(txOut, isCoinbase, UnacceptedBlueScore)
acceptedEntry := NewUTXOEntry(txOut, isCoinbase, node.blueScore)
var err error
ms, err = removeUTXOFromMultiset(ms, unacceptedEntry, &outpoint)
if err != nil {
return nil, err
}
ms, err = addUTXOToMultiset(ms, acceptedEntry, &outpoint)
if err != nil {
return nil, err
}
}
}
return ms, nil
}
func (node *blockNode) pastUTXOMultiSet(dag *BlockDAG, acceptanceData MultiBlockTxsAcceptanceData, selectedParentUTXO UTXOSet) (*secp256k1.MultiSet, error) {
ms, err := node.acceptedSelectedParentMultiset(dag, acceptanceData)
if err != nil {
return nil, err
}
for _, blockAcceptanceData := range acceptanceData {
if blockAcceptanceData.BlockHash.IsEqual(node.selectedParent.hash) {
continue
}
for _, txAcceptanceData := range blockAcceptanceData.TxAcceptanceData {
if !txAcceptanceData.IsAccepted {
continue
}
tx := txAcceptanceData.Tx.MsgTx()
var err error
ms, err = addTxToMultiset(ms, tx, selectedParentUTXO, node.blueScore)
if err != nil {
return nil, err
}
}
}
return ms, nil
}
func addTxToMultiset(ms *secp256k1.MultiSet, tx *wire.MsgTx, pastUTXO UTXOSet, blockBlueScore uint64) (*secp256k1.MultiSet, error) {
isCoinbase := tx.IsCoinBase()
if !isCoinbase {
for _, txIn := range tx.TxIn {
entry, ok := pastUTXO.Get(txIn.PreviousOutpoint)
if !ok {
return nil, errors.Errorf("Couldn't find entry for outpoint %s", txIn.PreviousOutpoint)
}
var err error
ms, err = removeUTXOFromMultiset(ms, entry, &txIn.PreviousOutpoint)
if err != nil {
return nil, err
}
}
}
for i, txOut := range tx.TxOut {
outpoint := *wire.NewOutpoint(tx.TxID(), uint32(i))
entry := NewUTXOEntry(txOut, isCoinbase, blockBlueScore)
var err error
ms, err = addUTXOToMultiset(ms, entry, &outpoint)
if err != nil {
return nil, err
}
}
return ms, nil
}
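These helpers rely on the defining property of the ECMH multiset behind secp256k1.MultiSet: adding and removing serialized UTXO entries commutes, so the final commitment is independent of the order in which blocks and transactions are folded in, and an entry can later be removed without recomputing everything. A rough sketch of that property (the Add method name is an assumption here; only NewMultiset and Finalize appear verbatim in this diff):

func exampleMultisetCommutes(entryA, entryB []byte) bool {
    ms1 := secp256k1.NewMultiset()
    ms1.Add(entryA) // assumed API: fold a serialized entry into the set
    ms1.Add(entryB)
    ms2 := secp256k1.NewMultiset()
    ms2.Add(entryB) // same entries, opposite order
    ms2.Add(entryA)
    // The finalized hashes must be equal regardless of insertion order.
    return *ms1.Finalize() == *ms2.Finalize()
}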
func (dag *BlockDAG) saveChangesFromBlock(block *util.Block, virtualUTXODiff *UTXODiff,
txsAcceptanceData MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData MultiBlockTxsAcceptanceData,
feeData compactFeeData) error {
// Write any block status changes to DB before updating the DAG state.
err := dag.index.flushToDB()
if err != nil {
return err
}
// Atomically insert info into the database.
err = dag.db.Update(func(dbTx database.Tx) error {
err := dag.utxoDiffStore.flushToDB(dbTx)
err := dag.db.Update(func(dbTx database.Tx) error {
err := dag.index.flushToDBWithTx(dbTx)
if err != nil {
return err
}
err = dag.utxoDiffStore.flushToDB(dbTx)
if err != nil {
return err
}
@@ -625,6 +735,11 @@ func (dag *BlockDAG) saveChangesFromBlock(block *util.Block, virtualUTXODiff *UT
return err
}
err = dag.multisetStore.flushToDB(dbTx)
if err != nil {
return err
}
// Update best block state.
state := &dagState{
TipHashes: dag.TipHashes(),
@@ -671,8 +786,10 @@ func (dag *BlockDAG) saveChangesFromBlock(block *util.Block, virtualUTXODiff *UT
if err != nil {
return err
}
dag.index.clearDirtyEntries()
dag.utxoDiffStore.clearDirtyEntries()
dag.reachabilityStore.clearDirtyEntries()
dag.multisetStore.clearNewEntries()
return nil
}
@@ -833,16 +950,6 @@ func (dag *BlockDAG) NextBlockCoinbaseTransactionNoLock(scriptPubKey []byte, ext
return dag.virtual.blockNode.expectedCoinbaseTransaction(dag, txsAcceptanceData, scriptPubKey, extraData)
}
// NextAcceptedIDMerkleRoot prepares the acceptedIDMerkleRoot for the next mined block
//
// This function CAN'T be called with the DAG lock held.
func (dag *BlockDAG) NextAcceptedIDMerkleRoot() (*daghash.Hash, error) {
dag.dagLock.RLock()
defer dag.dagLock.RUnlock()
return dag.NextAcceptedIDMerkleRootNoLock()
}
// NextAcceptedIDMerkleRootNoLock prepares the acceptedIDMerkleRoot for the next mined block
//
// This function MUST be called with the DAG read-lock held
@@ -859,7 +966,7 @@ func (dag *BlockDAG) NextAcceptedIDMerkleRootNoLock() (*daghash.Hash, error) {
//
// This function MUST be called with the DAG read-lock held
func (dag *BlockDAG) TxsAcceptedByVirtual() (MultiBlockTxsAcceptanceData, error) {
_, txsAcceptanceData, err := dag.pastUTXO(&dag.virtual.blockNode)
_, _, txsAcceptanceData, err := dag.pastUTXO(&dag.virtual.blockNode)
return txsAcceptanceData, err
}
@@ -871,22 +978,10 @@ func (dag *BlockDAG) TxsAcceptedByBlockHash(blockHash *daghash.Hash) (MultiBlock
if node == nil {
return nil, errors.Errorf("Couldn't find block %s", blockHash)
}
_, txsAcceptanceData, err := dag.pastUTXO(node)
_, _, txsAcceptanceData, err := dag.pastUTXO(node)
return txsAcceptanceData, err
}
// BlockPastUTXO retrieves the past UTXO of this block
//
// This function MUST be called with the DAG read-lock held
func (dag *BlockDAG) BlockPastUTXO(blockHash *daghash.Hash) (UTXOSet, error) {
node := dag.index.LookupNode(blockHash)
if node == nil {
return nil, errors.Errorf("Couldn't find block %s", blockHash)
}
pastUTXO, _, err := dag.pastUTXO(node)
return pastUTXO, err
}
// applyDAGChanges does the following:
// 1. Connects each of the new block's parents to the block.
// 2. Adds the new block to the DAG's tips.
@@ -894,12 +989,13 @@ func (dag *BlockDAG) BlockPastUTXO(blockHash *daghash.Hash) (UTXOSet, error) {
// 4. Updates each of the tips' utxoDiff.
// 5. Applies the new virtual's blue score to all the unaccepted UTXOs
// 6. Adds the block to the reachability structures
// 7. Updates the finality point of the DAG (if required).
// 7. Adds the multiset of the block to the multiset store.
// 8. Updates the finality point of the DAG (if required).
//
// It returns the diff in the virtual block's UTXO set.
//
// This function MUST be called with the DAG state lock held (for writes).
func (dag *BlockDAG) applyDAGChanges(node *blockNode, newBlockUTXO UTXOSet, selectedParentAnticone []*blockNode) (
func (dag *BlockDAG) applyDAGChanges(node *blockNode, newBlockUTXO UTXOSet, newBlockMultiset *secp256k1.MultiSet, selectedParentAnticone []*blockNode) (
virtualUTXODiff *UTXODiff, virtualTxsAcceptanceData MultiBlockTxsAcceptanceData,
chainUpdates *chainUpdates, err error) {
@@ -909,6 +1005,8 @@ func (dag *BlockDAG) applyDAGChanges(node *blockNode, newBlockUTXO UTXOSet, sele
return nil, nil, nil, errors.Wrap(err, "failed updating reachability")
}
dag.multisetStore.setMultiset(node, newBlockMultiset)
if err = node.updateParents(dag, newBlockUTXO); err != nil {
return nil, nil, nil, errors.Wrapf(err, "failed updating parents of %s", node)
}
@@ -917,7 +1015,7 @@ func (dag *BlockDAG) applyDAGChanges(node *blockNode, newBlockUTXO UTXOSet, sele
chainUpdates = dag.virtual.AddTip(node)
// Build a UTXO set for the new virtual block
newVirtualUTXO, virtualTxsAcceptanceData, err := dag.pastUTXO(&dag.virtual.blockNode)
newVirtualUTXO, _, virtualTxsAcceptanceData, err := dag.pastUTXO(&dag.virtual.blockNode)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "could not restore past UTXO for virtual")
}
@@ -968,42 +1066,49 @@ func (node *blockNode) diffFromTxs(pastUTXO UTXOSet, transactions []*util.Tx) (*
}
// verifyAndBuildUTXO verifies all transactions in the given block and builds its UTXO
// to save extra traversals it returns the transactions acceptance data and the compactFeeData for the new block
// to save extra traversals it returns the transactions acceptance data, the compactFeeData
// for the new block and its multiset.
func (node *blockNode) verifyAndBuildUTXO(dag *BlockDAG, transactions []*util.Tx, fastAdd bool) (
newBlockUTXO UTXOSet, txsAcceptanceData MultiBlockTxsAcceptanceData, newBlockFeeData compactFeeData, err error) {
newBlockUTXO UTXOSet, txsAcceptanceData MultiBlockTxsAcceptanceData, newBlockFeeData compactFeeData, multiset *secp256k1.MultiSet, err error) {
pastUTXO, txsAcceptanceData, err := dag.pastUTXO(node)
pastUTXO, selectedParentUTXO, txsAcceptanceData, err := dag.pastUTXO(node)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, err
}
err = node.validateAcceptedIDMerkleRoot(dag, txsAcceptanceData)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, err
}
feeData, err := dag.checkConnectToPastUTXO(node, pastUTXO, transactions, fastAdd)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, err
}
diffFromTxs, err := node.diffFromTxs(pastUTXO, transactions)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, err
}
utxo, err := pastUTXO.WithDiff(diffFromTxs)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, err
}
calculatedMultisetHash := utxo.Multiset().Hash()
multiset, err = node.calcMultiset(dag, transactions, txsAcceptanceData, selectedParentUTXO, pastUTXO)
if err != nil {
return nil, nil, nil, nil, err
}
calculatedMultisetHash := daghash.Hash(*multiset.Finalize())
if !calculatedMultisetHash.IsEqual(node.utxoCommitment) {
str := fmt.Sprintf("block %s UTXO commitment is invalid - block "+
"header indicates %s, but calculated value is %s", node.hash,
node.utxoCommitment, calculatedMultisetHash)
return nil, nil, nil, ruleError(ErrBadUTXOCommitment, str)
return nil, nil, nil, nil, ruleError(ErrBadUTXOCommitment, str)
}
return utxo, txsAcceptanceData, feeData, nil
return utxo, txsAcceptanceData, feeData, multiset, nil
}
// TxAcceptanceData stores a transaction together with an indication
@@ -1157,28 +1262,33 @@ func (node *blockNode) updateParentsDiffs(dag *BlockDAG, newBlockUTXO UTXOSet) e
// To save traversals over the blue blocks, it also returns the transaction acceptance data for
// all blue blocks
func (dag *BlockDAG) pastUTXO(node *blockNode) (
pastUTXO UTXOSet, bluesTxsAcceptanceData MultiBlockTxsAcceptanceData, err error) {
pastUTXO, selectedParentUTXO UTXOSet, bluesTxsAcceptanceData MultiBlockTxsAcceptanceData, err error) {
if node.isGenesis() {
return genesisPastUTXO(dag.virtual), MultiBlockTxsAcceptanceData{}, nil
return genesisPastUTXO(dag.virtual), NewFullUTXOSet(), MultiBlockTxsAcceptanceData{}, nil
}
selectedParentUTXO, err := dag.restoreUTXO(node.selectedParent)
selectedParentUTXO, err = dag.restoreUTXO(node.selectedParent)
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
blueBlocks, err := node.fetchBlueBlocks(dag.db)
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
selectedParent := blueBlocks[0]
acceptedSelectedParentUTXO, selectedParentAcceptanceData, err := node.acceptSelectedParentTransactions(selectedParent, selectedParentUTXO)
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
return node.applyBlueBlocks(acceptedSelectedParentUTXO, selectedParentAcceptanceData, blueBlocks)
pastUTXO, bluesTxsAcceptanceData, err = node.applyBlueBlocks(acceptedSelectedParentUTXO, selectedParentAcceptanceData, blueBlocks)
if err != nil {
return nil, nil, nil, err
}
return pastUTXO, selectedParentUTXO, bluesTxsAcceptanceData, nil
}
func (node *blockNode) acceptSelectedParentTransactions(selectedParent *util.Block, selectedParentUTXO UTXOSet) (acceptedSelectedParentUTXO UTXOSet, txAcceptanceData []TxAcceptanceData, err error) {
@@ -1209,6 +1319,8 @@ func (node *blockNode) acceptSelectedParentTransactions(selectedParent *util.Blo
func (dag *BlockDAG) restoreUTXO(node *blockNode) (UTXOSet, error) {
stack := []*blockNode{}
// Iterate over the chain of diff-children from node to the virtual and add them
// all into a stack
for current := node; current != nil; {
stack = append(stack, current)
var err error
@@ -1218,20 +1330,28 @@ func (dag *BlockDAG) restoreUTXO(node *blockNode) (UTXOSet, error) {
}
}
utxo := UTXOSet(dag.virtual.utxoSet)
// Start with the top item in the stack, going over it top-to-bottom,
// applying the UTXO-diff one-by-one.
topNode, stack := stack[len(stack)-1], stack[:len(stack)-1] // pop the top item in the stack
topNodeDiff, err := dag.utxoDiffStore.diffByNode(topNode)
if err != nil {
return nil, err
}
accumulatedDiff := topNodeDiff.clone()
for i := len(stack) - 1; i >= 0; i-- {
diff, err := dag.utxoDiffStore.diffByNode(stack[i])
if err != nil {
return nil, err
}
utxo, err = utxo.WithDiff(diff)
// Use withDiffInPlace, otherwise copying the diffs again and again creates a polynomial overhead
err = accumulatedDiff.withDiffInPlace(diff)
if err != nil {
return nil, err
}
}
return utxo, nil
return NewDiffUTXOSet(dag.virtual.utxoSet, accumulatedDiff), nil
}
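The comment above is the crux of this hunk: WithDiff allocates and copies a fresh diff at every step of the chain, so restoring a deep node costs roughly quadratic total work, while withDiffInPlace folds each diff into a single accumulator. A hedged sketch of the folding idea, with a hypothetical diff type in place of the real UTXODiff:

package main

import "fmt"

// diff is a hypothetical stand-in for UTXODiff, keyed by an outpoint string.
type diff struct {
	toAdd    map[string]bool
	toRemove map[string]bool
}

// withDiffInPlace folds other into d without allocating a new diff, which is
// what keeps the restore loop linear in the length of the diff-child chain.
func (d *diff) withDiffInPlace(other *diff) {
	for op := range other.toRemove {
		if d.toAdd[op] {
			delete(d.toAdd, op) // an add followed by a remove cancels out
		} else {
			d.toRemove[op] = true
		}
	}
	for op := range other.toAdd {
		if d.toRemove[op] {
			delete(d.toRemove, op) // a remove followed by an add cancels out
		} else {
			d.toAdd[op] = true
		}
	}
}

func main() {
	acc := &diff{toAdd: map[string]bool{"a": true}, toRemove: map[string]bool{}}
	next := &diff{toAdd: map[string]bool{"b": true}, toRemove: map[string]bool{"a": true}}
	acc.withDiffInPlace(next)
	fmt.Println(acc.toAdd, acc.toRemove) // map[b:true] map[]
}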
// updateTipsUTXO builds and applies new diff UTXOs for all the DAG's tips
@@ -1274,15 +1394,15 @@ func (dag *BlockDAG) isCurrent() bool {
} else {
dagTimestamp = selectedTip.timestamp
}
minus24Hours := dag.AdjustedTime().Add(-24 * time.Hour).Unix()
return dagTimestamp >= minus24Hours
dagTime := time.Unix(dagTimestamp, 0)
return dag.Now().Sub(dagTime) <= isDAGCurrentMaxDiff
}
// AdjustedTime returns the adjusted time according to
// dag.timeSource. See MedianTimeSource.AdjustedTime for
// Now returns the adjusted time according to
// dag.timeSource. See TimeSource.Now for
// more details.
func (dag *BlockDAG) AdjustedTime() time.Time {
return dag.timeSource.AdjustedTime()
func (dag *BlockDAG) Now() time.Time {
return dag.timeSource.Now()
}
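The isCurrent rewrite above swaps raw unix-timestamp arithmetic for a duration comparison against dag.Now(). A minimal sketch of the new check, assuming a placeholder value for isDAGCurrentMaxDiff (the real constant is defined elsewhere in the package):

package main

import (
	"fmt"
	"time"
)

// isDAGCurrentMaxDiff is an assumed placeholder; the real constant lives
// elsewhere in the blockdag package.
const isDAGCurrentMaxDiff = 24 * time.Hour

func isCurrent(now time.Time, tipTimestamp int64) bool {
	tipTime := time.Unix(tipTimestamp, 0)
	return now.Sub(tipTime) <= isDAGCurrentMaxDiff
}

func main() {
	now := time.Now()
	fmt.Println(isCurrent(now, now.Add(-time.Hour).Unix()))    // true
	fmt.Println(isCurrent(now, now.Add(-48*time.Hour).Unix())) // false
}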
// IsCurrent returns whether or not the DAG believes it is current. Several
@@ -1386,9 +1506,21 @@ func (dag *BlockDAG) BlockConfirmationsByHashNoLock(hash *daghash.Hash) (uint64,
return dag.blockConfirmations(node)
}
// UTXOCommitment returns a commitment to the dag's current UTXOSet
func (dag *BlockDAG) UTXOCommitment() string {
return dag.UTXOSet().UTXOMultiset.Hash().String()
// UTXOConfirmations returns the confirmations for the given outpoint, if it exists
// in the DAG's UTXO set.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) UTXOConfirmations(outpoint *wire.Outpoint) (uint64, bool) {
dag.dagLock.RLock()
defer dag.dagLock.RUnlock()
utxoEntry, ok := dag.GetUTXOEntry(*outpoint)
if !ok {
return 0, false
}
confirmations := dag.SelectedTipBlueScore() - utxoEntry.BlockBlueScore() + 1
return confirmations, true
}
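The confirmation count above is plain blue-score arithmetic: an output whose containing block sits at the selected tip has exactly one confirmation. A small worked sketch of the same formula:

package main

import "fmt"

// utxoConfirmations mirrors the arithmetic above: selected tip blue score,
// minus the entry's block blue score, plus one.
func utxoConfirmations(selectedTipBlueScore, entryBlueScore uint64) uint64 {
	return selectedTipBlueScore - entryBlueScore + 1
}

func main() {
	fmt.Println(utxoConfirmations(100, 100)) // 1: accepted at the selected tip
	fmt.Println(utxoConfirmations(100, 95))  // 6
}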
// blockConfirmations returns the current confirmations number of the given node
@@ -1501,7 +1633,7 @@ func (dag *BlockDAG) SelectedParentChain(blockHash *daghash.Hash) ([]*daghash.Ha
if blockHash == nil {
blockHash = dag.genesis.hash
}
if !dag.BlockExists(blockHash) {
if !dag.IsInDAG(blockHash) {
return nil, nil, errors.Errorf("blockHash %s does not exist in the DAG", blockHash)
}
@@ -1670,7 +1802,7 @@ func (dag *BlockDAG) antiPastBetween(lowHash, highHash *daghash.Hash, maxEntries
// Collect every node in highNode's past (including itself) but
// NOT in the lowNode's past (excluding itself) into an up-heap
// (a heap sorted by blueScore from lowest to highest).
visited := newSet()
visited := newBlockSet()
candidateNodes := newUpHeap()
queue := newDownHeap()
queue.Push(highNode)
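The up-heap and down-heap used here are just priority queues ordered by blueScore in opposite directions. A hedged sketch of the up-heap (lowest blueScore first) on container/heap, with a stand-in node type rather than the real *blockNode:

package main

import (
	"container/heap"
	"fmt"
)

// node is a stand-in for *blockNode with only the ordering field.
type node struct{ blueScore uint64 }

// upHeap pops nodes lowest-blueScore-first, like candidateNodes above;
// flipping Less yields the down-heap used for the traversal queue.
type upHeap []*node

func (h upHeap) Len() int            { return len(h) }
func (h upHeap) Less(i, j int) bool  { return h[i].blueScore < h[j].blueScore }
func (h upHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *upHeap) Push(x interface{}) { *h = append(*h, x.(*node)) }
func (h *upHeap) Pop() interface{} {
	old := *h
	n := old[len(old)-1]
	*h = old[:len(old)-1]
	return n
}

func main() {
	h := &upHeap{}
	for _, s := range []uint64{7, 2, 9} {
		heap.Push(h, &node{blueScore: s})
	}
	for h.Len() > 0 {
		fmt.Print(heap.Pop(h).(*node).blueScore, " ") // 2 7 9
	}
}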
@@ -1713,11 +1845,11 @@ func (dag *BlockDAG) antiPastBetween(lowHash, highHash *daghash.Hash, maxEntries
// This function is safe for concurrent access.
func (dag *BlockDAG) AntiPastHashesBetween(lowHash, highHash *daghash.Hash, maxHashes uint64) ([]*daghash.Hash, error) {
dag.dagLock.RLock()
defer dag.dagLock.RUnlock()
hashes, err := dag.antiPastHashesBetween(lowHash, highHash, maxHashes)
if err != nil {
return nil, err
}
dag.dagLock.RUnlock()
return hashes, nil
}
@@ -1739,7 +1871,7 @@ func (dag *BlockDAG) antiPastHeadersBetween(lowHash, highHash *daghash.Hash, max
}
// GetTopHeaders returns the top wire.MaxBlockHeadersPerMsg block headers ordered by blue score.
func (dag *BlockDAG) GetTopHeaders(highHash *daghash.Hash) ([]*wire.BlockHeader, error) {
func (dag *BlockDAG) GetTopHeaders(highHash *daghash.Hash, maxHeaders uint64) ([]*wire.BlockHeader, error) {
highNode := &dag.virtual.blockNode
if highHash != nil {
highNode = dag.index.LookupNode(highHash)
@@ -1751,10 +1883,9 @@ func (dag *BlockDAG) GetTopHeaders(highHash *daghash.Hash) ([]*wire.BlockHeader,
queue := newDownHeap()
queue.pushSet(highNode.parents)
visited := newSet()
for i := uint32(0); queue.Len() > 0 && len(headers) < wire.MaxBlockHeadersPerMsg; i++ {
var current *blockNode
current = queue.pop()
visited := newBlockSet()
for i := uint32(0); queue.Len() > 0 && uint64(len(headers)) < maxHeaders; i++ {
current := queue.pop()
if !visited.contains(current) {
visited.add(current)
headers = append(headers, current.Header())
@@ -1789,13 +1920,13 @@ func (dag *BlockDAG) RUnlock() {
// wire.MaxBlockHeadersPerMsg block headers.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) AntiPastHeadersBetween(lowHash, highHash *daghash.Hash) ([]*wire.BlockHeader, error) {
func (dag *BlockDAG) AntiPastHeadersBetween(lowHash, highHash *daghash.Hash, maxHeaders uint64) ([]*wire.BlockHeader, error) {
dag.dagLock.RLock()
headers, err := dag.antiPastHeadersBetween(lowHash, highHash, wire.MaxBlockHeadersPerMsg)
defer dag.dagLock.RUnlock()
headers, err := dag.antiPastHeadersBetween(lowHash, highHash, maxHeaders)
if err != nil {
return nil, err
}
dag.dagLock.RUnlock()
return headers, nil
}
@@ -1805,7 +1936,7 @@ func (dag *BlockDAG) SubnetworkID() *subnetworkid.SubnetworkID {
}
func (dag *BlockDAG) addDelayedBlock(block *util.Block, delay time.Duration) error {
processTime := dag.AdjustedTime().Add(delay)
processTime := dag.Now().Add(delay)
log.Debugf("Adding block to delayed blocks queue (block hash: %s, process time: %s)", block.Hash().String(), processTime)
delayedBlock := &delayedBlock{
block: block,
@@ -1823,7 +1954,7 @@ func (dag *BlockDAG) processDelayedBlocks() error {
// Check if the delayed block with the earliest process time should be processed
for dag.delayedBlocksQueue.Len() > 0 {
earliestDelayedBlockProcessTime := dag.peekDelayedBlock().processTime
if earliestDelayedBlockProcessTime.After(dag.AdjustedTime()) {
if earliestDelayedBlockProcessTime.After(dag.Now()) {
break
}
delayedBlock := dag.popDelayedBlock()
@@ -1832,7 +1963,7 @@ func (dag *BlockDAG) processDelayedBlocks() error {
log.Errorf("Error while processing delayed block (block %s)", delayedBlock.block.Hash().String())
// Rule errors should not be propagated as they refer only to the delayed block,
// while this function runs in the context of another block
if _, ok := err.(RuleError); !ok {
if !errors.As(err, &RuleError{}) {
return err
}
}
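The switch from err.(RuleError) type assertions to errors.As, repeated throughout these hunks, matters because errors wrapped with github.com/pkg/errors no longer match a bare type assertion; errors.As walks the wrap chain. A minimal sketch with a hypothetical ruleError type standing in for the package's RuleError:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// ruleError is a stand-in for the package's RuleError type.
type ruleError struct{ code int }

func (e ruleError) Error() string { return fmt.Sprintf("rule error %d", e.code) }

func main() {
	err := errors.Wrap(ruleError{code: 7}, "while processing block")

	_, ok := err.(ruleError)
	fmt.Println(ok) // false: the wrapper hides the concrete type

	var rErr ruleError
	fmt.Println(errors.As(err, &rErr), rErr.code) // true 7: As unwraps
}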
@@ -1889,13 +2020,9 @@ type Config struct {
// This field is required.
DAGParams *dagconfig.Params
// TimeSource defines the median time source to use for things such as
// TimeSource defines the time source to use for things such as
// block processing and determining whether or not the DAG is current.
//
// The caller is expected to keep a reference to the time source as well
// and add time samples from other peers on the network so the local
// time is adjusted to be in agreement with other peers.
TimeSource MedianTimeSource
TimeSource TimeSource
// SigCache defines a signature cache to use when validating
// signatures. This is typically most useful when individual
@@ -1962,6 +2089,7 @@ func New(config *Config) (*BlockDAG, error) {
dag.virtual = newVirtualBlock(dag, nil)
dag.utxoDiffStore = newUTXODiffStore(dag)
dag.reachabilityStore = newReachabilityStore(dag)
dag.multisetStore = newMultisetStore(dag)
// Initialize the DAG state from the passed database. When the db
// does not yet contain any DAG state, both it and the DAG state


@@ -6,6 +6,7 @@ package blockdag
import (
"fmt"
"github.com/pkg/errors"
"os"
"path/filepath"
"testing"
@@ -72,8 +73,8 @@ func TestBlockCount(t *testing.T) {
}
}
// TestHaveBlock tests the HaveBlock API to ensure proper functionality.
func TestHaveBlock(t *testing.T) {
// TestIsKnownBlock tests the IsKnownBlock API to ensure proper functionality.
func TestIsKnownBlock(t *testing.T) {
// Load up blocks such that there is a fork in the DAG.
// (genesis block) -> 1 -> 2 -> 3 -> 4
// \-> 3b
@@ -164,12 +165,13 @@ func TestHaveBlock(t *testing.T) {
if err == nil {
t.Fatalf("ProcessBlock for block 3D has no error when expected to have an error\n")
}
rErr, ok := err.(RuleError)
var ruleErr RuleError
ok := errors.As(err, &ruleErr)
if !ok {
t.Fatalf("ProcessBlock for block 3D expected a RuleError, but got %v\n", err)
}
if !ok || rErr.ErrorCode != ErrDuplicateTxInputs {
t.Fatalf("ProcessBlock for block 3D expected error code %s but got %s\n", ErrDuplicateTxInputs, rErr.ErrorCode)
if !ok || ruleErr.ErrorCode != ErrDuplicateTxInputs {
t.Fatalf("ProcessBlock for block 3D expected error code %s but got %s\n", ErrDuplicateTxInputs, ruleErr.ErrorCode)
}
if isDelayed {
t.Fatalf("ProcessBlock: block 3D " +
@@ -202,7 +204,7 @@ func TestHaveBlock(t *testing.T) {
{hash: dagconfig.SimnetParams.GenesisHash.String(), want: true},
// Block 3b should be present (as a second child of Block 2).
{hash: "264176fb6072e2362db18f92d3f4b739cff071a206736df7c407c0bf9a1d7fef", want: true},
{hash: "216301e3fc03cf89973b9192b4ecdd732bf3b677cf1ca4f6c340a56f1533fb4f", want: true},
// Block 100000 should be present (as an orphan).
{hash: "65b20b048a074793ebfd1196e49341c8d194dabfc6b44a4fd0c607406e122baf", want: true},
@@ -217,9 +219,9 @@ func TestHaveBlock(t *testing.T) {
t.Fatalf("NewHashFromStr: %v", err)
}
result := dag.HaveBlock(hash)
result := dag.IsKnownBlock(hash)
if result != test.want {
t.Fatalf("HaveBlock #%d got %v want %v", i, result,
t.Fatalf("IsKnownBlock #%d got %v want %v", i, result,
test.want)
}
}
@@ -241,9 +243,9 @@ func TestCalcSequenceLock(t *testing.T) {
numBlocksToGenerate := 5
for i := 0; i < numBlocksToGenerate; i++ {
blockTime = blockTime.Add(time.Second)
node = newTestNode(dag, setFromSlice(node), blockVersion, 0, blockTime)
node = newTestNode(dag, blockSetFromSlice(node), blockVersion, 0, blockTime)
dag.index.AddNode(node)
dag.virtual.SetTips(setFromSlice(node))
dag.virtual.SetTips(blockSetFromSlice(node))
}
// Create a utxo view with a fake utxo for the inputs used in the
@@ -509,7 +511,7 @@ func TestCalcPastMedianTime(t *testing.T) {
blockTime := dag.genesis.Header().Timestamp
for i := uint32(1); i < numBlocks; i++ {
blockTime = blockTime.Add(time.Second)
nodes[i] = newTestNode(dag, setFromSlice(nodes[i-1]), blockVersion, 0, blockTime)
nodes[i] = newTestNode(dag, blockSetFromSlice(nodes[i-1]), blockVersion, 0, blockTime)
dag.index.AddNode(nodes[i])
}
@@ -544,15 +546,9 @@ func TestCalcPastMedianTime(t *testing.T) {
}
func TestNew(t *testing.T) {
// Create the root directory for test databases.
if !FileExists(testDbRoot) {
if err := os.MkdirAll(testDbRoot, 0700); err != nil {
t.Fatalf("unable to create test db "+
"root: %s", err)
}
}
tempDir := os.TempDir()
dbPath := filepath.Join(testDbRoot, "TestNew")
dbPath := filepath.Join(tempDir, "TestNew")
_ = os.RemoveAll(dbPath)
db, err := database.Create(testDbType, dbPath, blockDataNet)
if err != nil {
@@ -561,12 +557,11 @@ func TestNew(t *testing.T) {
defer func() {
db.Close()
os.RemoveAll(dbPath)
os.RemoveAll(testDbRoot)
}()
config := &Config{
DAGParams: &dagconfig.SimnetParams,
DB: db,
TimeSource: NewMedianTime(),
TimeSource: NewTimeSource(),
SigCache: txscript.NewSigCache(1000),
}
_, err = New(config)
@@ -590,16 +585,10 @@ func TestNew(t *testing.T) {
// occur when the node shuts down improperly while a block is being
// validated.
func TestAcceptingInInit(t *testing.T) {
// Create the root directory for test databases.
if !FileExists(testDbRoot) {
if err := os.MkdirAll(testDbRoot, 0700); err != nil {
t.Fatalf("unable to create test db "+
"root: %s", err)
}
}
tempDir := os.TempDir()
// Create a test database
dbPath := filepath.Join(testDbRoot, "TestAcceptingInInit")
dbPath := filepath.Join(tempDir, "TestAcceptingInInit")
_ = os.RemoveAll(dbPath)
db, err := database.Create(testDbType, dbPath, blockDataNet)
if err != nil {
@@ -608,14 +597,13 @@ func TestAcceptingInInit(t *testing.T) {
defer func() {
db.Close()
os.RemoveAll(dbPath)
os.RemoveAll(testDbRoot)
}()
// Create a DAG to add the test block into
config := &Config{
DAGParams: &dagconfig.SimnetParams,
DB: db,
TimeSource: NewMedianTime(),
TimeSource: NewTimeSource(),
SigCache: txscript.NewSigCache(1000),
}
dag, err := New(config)
@@ -633,7 +621,7 @@ func TestAcceptingInInit(t *testing.T) {
// Create a test blockNode with an unvalidated status
genesisNode := dag.index.LookupNode(genesisBlock.Hash())
testNode, _ := dag.newBlockNode(&testBlock.MsgBlock().Header, setFromSlice(genesisNode))
testNode, _ := dag.newBlockNode(&testBlock.MsgBlock().Header, blockSetFromSlice(genesisNode))
testNode.status = statusDataStored
// Manually add the test block to the database
@@ -922,7 +910,7 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
addNode := func(parent *blockNode) *blockNode {
blockTime = blockTime.Add(time.Second)
node := newTestNode(dag, setFromSlice(parent), blockVersion, 0, blockTime)
node := newTestNode(dag, blockSetFromSlice(parent), blockVersion, 0, blockTime)
node.updateParentsChildren()
dag.index.AddNode(node)
@@ -938,7 +926,7 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
nodes := make([]*blockNode, 0, finalityInterval)
currentNode := dag.genesis
nodes = append(nodes, currentNode)
for i := 0; i <= finalityInterval*2; i++ {
for i := uint64(0); i <= finalityInterval*2; i++ {
currentNode = addNode(currentNode)
nodes = append(nodes, currentNode)
}
@@ -1008,7 +996,7 @@ func TestDAGIndexFailedStatus(t *testing.T) {
invalidBlock := util.NewBlock(invalidMsgBlock)
isOrphan, isDelayed, err := dag.ProcessBlock(invalidBlock, BFNoPoWCheck)
if _, ok := err.(RuleError); !ok {
if !errors.As(err, &RuleError{}) {
t.Fatalf("ProcessBlock: expected a rule error but got %s instead", err)
}
if isDelayed {
@@ -1037,7 +1025,8 @@ func TestDAGIndexFailedStatus(t *testing.T) {
invalidBlockChild := util.NewBlock(invalidMsgBlockChild)
isOrphan, isDelayed, err = dag.ProcessBlock(invalidBlockChild, BFNoPoWCheck)
if rErr, ok := err.(RuleError); !ok || rErr.ErrorCode != ErrInvalidAncestorBlock {
var ruleErr RuleError
if ok := errors.As(err, &ruleErr); !ok || ruleErr.ErrorCode != ErrInvalidAncestorBlock {
t.Fatalf("ProcessBlock: expected a rule error but got %s instead", err)
}
if isDelayed {
@@ -1065,7 +1054,7 @@ func TestDAGIndexFailedStatus(t *testing.T) {
invalidBlockGrandChild := util.NewBlock(invalidMsgBlockGrandChild)
isOrphan, isDelayed, err = dag.ProcessBlock(invalidBlockGrandChild, BFNoPoWCheck)
if rErr, ok := err.(RuleError); !ok || rErr.ErrorCode != ErrInvalidAncestorBlock {
if ok := errors.As(err, &ruleErr); !ok || ruleErr.ErrorCode != ErrInvalidAncestorBlock {
t.Fatalf("ProcessBlock: expected a rule error but got %s instead", err)
}
if isDelayed {
@@ -1084,3 +1073,18 @@ func TestDAGIndexFailedStatus(t *testing.T) {
t.Fatalf("invalidBlockGrandChildNode status to have %b flags raised (got %b)", statusInvalidAncestor, invalidBlockGrandChildNode.status)
}
}
func TestIsDAGCurrentMaxDiff(t *testing.T) {
netParams := []*dagconfig.Params{
&dagconfig.MainnetParams,
&dagconfig.TestnetParams,
&dagconfig.DevnetParams,
&dagconfig.RegressionNetParams,
&dagconfig.SimnetParams,
}
for _, params := range netParams {
if params.TargetTimePerBlock*time.Duration(params.FinalityInterval) < isDAGCurrentMaxDiff {
t.Errorf("in %s, a DAG can be considered current even if it's below the finality point", params.Name)
}
}
}


@@ -10,6 +10,7 @@ import (
"encoding/json"
"fmt"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util/buffers"
"github.com/pkg/errors"
"io"
"sync"
@@ -58,6 +59,10 @@ var (
// reachability tree nodes and future covering sets of blocks.
reachabilityDataBucketName = []byte("reachability")
// multisetBucketName is the name of the database bucket used to house the
// ECMH multisets of blocks.
multisetBucketName = []byte("multiset")
// subnetworksBucketName is the name of the database bucket used to store the
// subnetwork registry.
subnetworksBucketName = []byte("subnetworks")
@@ -83,24 +88,8 @@ func (e errNotInDAG) Error() string {
// isNotInDAGErr returns whether or not the passed error is an
// errNotInDAG error.
func isNotInDAGErr(err error) bool {
_, ok := err.(errNotInDAG)
return ok
}
// errDeserialize signifies that a problem was encountered when deserializing
// data.
type errDeserialize string
// Error implements the error interface.
func (e errDeserialize) Error() string {
return string(e)
}
// isDeserializeErr returns whether or not the passed error is an errDeserialize
// error.
func isDeserializeErr(err error) bool {
_, ok := err.(errDeserialize)
return ok
var notInDAGErr errNotInDAG
return errors.As(err, &notInDAGErr)
}
// dbPutVersion uses an existing database transaction to update the provided
@@ -112,115 +101,46 @@ func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error {
return dbTx.Metadata().Put(key, serialized[:])
}
// -----------------------------------------------------------------------------
// The unspent transaction output (UTXO) set consists of an entry for each
// unspent output using a format that is optimized to reduce space using domain
// specific compression algorithms.
//
// Each entry is keyed by an outpoint as specified below. It is important to
// note that the key encoding uses a VLQ, which employs an MSB encoding so
// iteration of UTXOs when doing byte-wise comparisons will produce them in
// order.
//
// The serialized key format is:
// <hash><output index>
//
// Field Type Size
// hash daghash.Hash daghash.HashSize
// output index VLQ variable
//
// The serialized value format is:
//
// <header code><compressed txout>
//
// Field Type Size
// header code VLQ variable
// compressed txout
// compressed amount VLQ variable
// compressed script []byte variable
//
// The serialized header code format is:
// bit 0 - containing transaction is a coinbase
// bits 1-x - height of the block that contains the unspent txout
//
// Example 1:
// b7c3332bc138e2c9429818f5fed500bcc1746544218772389054dc8047d7cd3f:0
//
// 03320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52
// <><------------------------------------------------------------------>
// | |
// header code compressed txout
//
// - header code: 0x03 (coinbase, height 1)
// - compressed txout:
// - 0x32: VLQ-encoded compressed amount for 5000000000 (50 KAS)
// - 0x04: special script type pay-to-pubkey
// - 0x96...52: x-coordinate of the pubkey
//
// Example 2:
// 4a16969aa4764dd7507fc1de7f0baa4850a246de90c45e59a3207f9a26b5036f:2
//
// 8cf316800900b8025be1b3efc63b0ad48e7f9f10e87544528d58
// <----><------------------------------------------>
// | |
// header code compressed txout
//
// - header code: 0x8cf316 (not coinbase, height 113931)
// - compressed txout:
// - 0x8009: VLQ-encoded compressed amount for 15000000 (0.15 KAS)
// - 0x00: special script type pay-to-pubkey-hash
// - 0xb8...58: pubkey hash
//
// Example 3:
// 1b02d1c8cfef60a189017b9a420c682cf4a0028175f2f563209e4ff61c8c3620:22
//
// a8a2588ba5b9e763011dd46a006572d820e448e12d2bbb38640bc718e6
// <----><-------------------------------------------------->
// | |
// header code compressed txout
//
// - header code: 0xa8a258 (not coinbase, height 338156)
// - compressed txout:
// - 0x8ba5b9e763: VLQ-encoded compressed amount for 366875659 (3.66875659 KAS)
// - 0x01: special script type pay-to-script-hash
// - 0x1d...e6: script hash
// -----------------------------------------------------------------------------
// maxUint32VLQSerializeSize is the maximum number of bytes a max uint32 takes
// to serialize as a VLQ.
var maxUint32VLQSerializeSize = serializeSizeVLQ(1<<32 - 1)
// outpointKeyPool defines a concurrent safe free list of byte slices used to
// outpointKeyPool defines a concurrent safe free list of byte buffers used to
// provide temporary buffers for outpoint database keys.
var outpointKeyPool = sync.Pool{
New: func() interface{} {
b := make([]byte, daghash.HashSize+maxUint32VLQSerializeSize)
return &b // Pointer to slice to avoid boxing alloc.
return &bytes.Buffer{} // Pointer to a buffer to avoid boxing alloc.
},
}
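Reusing whole bytes.Buffers through the pool means every borrower must Reset before writing, as dbPutUTXODiff does below; a recycled buffer may still hold a previous key's bytes. The general pattern, sketched minimally:

package main

import (
	"bytes"
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	New: func() interface{} { return &bytes.Buffer{} },
}

func main() {
	w := bufPool.Get().(*bytes.Buffer)
	w.Reset() // a recycled buffer may still hold old bytes; always Reset first
	w.WriteString("serialized-outpoint-key")
	fmt.Println(w.Len())
	bufPool.Put(w) // return for reuse once the key bytes are no longer needed
}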
// outpointKey returns a key suitable for use as a database key in the UTXO set
// while making use of a free list. A new buffer is allocated if there are not
// already any available on the free list. The returned byte slice should be
// returned to the free list by using the recycleOutpointKey function when the
// caller is done with it _unless_ the slice will need to live for longer than
// the caller can calculate such as when used to write to the database.
func outpointKey(outpoint wire.Outpoint) *[]byte {
// A VLQ employs an MSB encoding, so they are useful not only to reduce
// the amount of storage space, but also so iteration of UTXOs when
// doing byte-wise comparisons will produce them in order.
key := outpointKeyPool.Get().(*[]byte)
idx := uint64(outpoint.Index)
*key = (*key)[:daghash.HashSize+serializeSizeVLQ(idx)]
copy(*key, outpoint.TxID[:])
putVLQ((*key)[daghash.HashSize:], idx)
return key
// outpointIndexByteOrder is the byte order for serializing the outpoint index.
// It uses big endian to ensure that when an outpoint is used as a database key,
// the keys are iterated in ascending order of the outpoint index.
var outpointIndexByteOrder = binary.BigEndian
func serializeOutpoint(w io.Writer, outpoint *wire.Outpoint) error {
_, err := w.Write(outpoint.TxID[:])
if err != nil {
return err
}
return binaryserializer.PutUint32(w, outpointIndexByteOrder, outpoint.Index)
}
// recycleOutpointKey puts the provided byte slice, which should have been
// obtained via the outpointKey function, back on the free list.
func recycleOutpointKey(key *[]byte) {
outpointKeyPool.Put(key)
var outpointSerializeSize = daghash.TxIDSize + 4
// deserializeOutpoint decodes an outpoint from the passed reader into a new
// wire.Outpoint using a format that is suitable for long-term storage.
func deserializeOutpoint(r io.Reader) (*wire.Outpoint, error) {
outpoint := &wire.Outpoint{}
_, err := r.Read(outpoint.TxID[:])
if err != nil {
return nil, err
}
outpoint.Index, err = binaryserializer.Uint32(r, outpointIndexByteOrder)
if err != nil {
return nil, err
}
return outpoint, nil
}
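The reason for the big-endian choice documented above is that database cursors compare keys bytewise, and big-endian integer encodings sort bytewise in the same order as their numeric values, while little-endian ones do not. A quick sketch demonstrating both:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	a := make([]byte, 4)
	b := make([]byte, 4)

	// Big-endian: bytewise order matches numeric order.
	binary.BigEndian.PutUint32(a, 2)
	binary.BigEndian.PutUint32(b, 300)
	fmt.Println(bytes.Compare(a, b) < 0) // true: 2 sorts before 300

	// Little-endian breaks it: 256 = 00 01 00 00 sorts before 1 = 01 00 00 00.
	binary.LittleEndian.PutUint32(a, 256)
	binary.LittleEndian.PutUint32(b, 1)
	fmt.Println(bytes.Compare(a, b) < 0) // true: 256 would sort before 1
}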
// dbPutUTXODiff uses an existing database transaction to update the UTXO set
@@ -230,20 +150,42 @@ func recycleOutpointKey(key *[]byte) {
func dbPutUTXODiff(dbTx database.Tx, diff *UTXODiff) error {
utxoBucket := dbTx.Metadata().Bucket(utxoSetBucketName)
for outpoint := range diff.toRemove {
key := outpointKey(outpoint)
err := utxoBucket.Delete(*key)
recycleOutpointKey(key)
w := outpointKeyPool.Get().(*bytes.Buffer)
w.Reset()
err := serializeOutpoint(w, &outpoint)
if err != nil {
return err
}
key := w.Bytes()
err = utxoBucket.Delete(key)
if err != nil {
return err
}
outpointKeyPool.Put(w)
}
// We are preallocating for P2PKH entries because they are the most common ones.
// If we have entries with a compressed script bigger than P2PKH's, the buffer will grow.
bytesToPreallocate := (p2pkhUTXOEntrySerializeSize + outpointSerializeSize) * len(diff.toAdd)
buff := bytes.NewBuffer(make([]byte, 0, bytesToPreallocate))
for outpoint, entry := range diff.toAdd {
// Serialize and store the UTXO entry.
serialized := serializeUTXOEntry(entry)
sBuff := buffers.NewSubBuffer(buff)
err := serializeUTXOEntry(sBuff, entry)
if err != nil {
return err
}
serializedEntry := sBuff.Bytes()
key := outpointKey(outpoint)
err := utxoBucket.Put(*key, serialized)
sBuff = buffers.NewSubBuffer(buff)
err = serializeOutpoint(sBuff, &outpoint)
if err != nil {
return err
}
key := sBuff.Bytes()
err = utxoBucket.Put(key, serializedEntry)
// NOTE: The key is intentionally not recycled here since the
// database interface contract prohibits modifications. It will
// be garbage collected normally when the database is done with
@@ -327,6 +269,11 @@ func (dag *BlockDAG) createDAGState() error {
return err
}
_, err = meta.CreateBucket(multisetBucketName)
if err != nil {
return err
}
err = dbPutVersion(dbTx, utxoSetVersionKeyName,
latestUTXOSetBucketVersion)
if err != nil {
@@ -382,6 +329,11 @@ func (dag *BlockDAG) removeDAGState() error {
return err
}
err = meta.DeleteBucket(multisetBucketName)
if err != nil {
return err
}
err = dbTx.Metadata().Delete(utxoSetVersionKeyName)
if err != nil {
return err
@@ -532,32 +484,14 @@ func (dag *BlockDAG) initDAGState() error {
fullUTXOCollection := make(utxoCollection, utxoEntryCount)
for ok := cursor.First(); ok; ok = cursor.Next() {
// Deserialize the outpoint
outpoint, err := deserializeOutpoint(cursor.Key())
outpoint, err := deserializeOutpoint(bytes.NewReader(cursor.Key()))
if err != nil {
// Ensure any deserialization errors are returned as database
// corruption errors.
if isDeserializeErr(err) {
return database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("corrupt outpoint: %s", err),
}
}
return err
}
// Deserialize the utxo entry
entry, err := deserializeUTXOEntry(cursor.Value())
entry, err := deserializeUTXOEntry(bytes.NewReader(cursor.Value()))
if err != nil {
// Ensure any deserialization errors are returned as database
// corruption errors.
if isDeserializeErr(err) {
return database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("corrupt utxo entry: %s", err),
}
}
return err
}
@@ -565,11 +499,19 @@ func (dag *BlockDAG) initDAGState() error {
}
// Initialize the reachability store
log.Infof("Loading reachability data...")
err = dag.reachabilityStore.init(dbTx)
if err != nil {
return err
}
// Initialize the multiset store
log.Infof("Loading multiset data...")
err = dag.multisetStore.init(dbTx)
if err != nil {
return err
}
// Apply the loaded utxoCollection to the virtual block.
dag.virtual.utxoSet, err = newFullUTXOSetFromUTXOCollection(fullUTXOCollection)
if err != nil {
@@ -577,7 +519,7 @@ func (dag *BlockDAG) initDAGState() error {
}
// Apply the stored tips to the virtual block.
tips := newSet()
tips := newBlockSet()
for _, tipHash := range state.TipHashes {
tip := dag.index.LookupNode(tipHash)
if tip == nil {
@@ -608,6 +550,9 @@ func (dag *BlockDAG) initDAGState() error {
// Attempt to accept the block.
block, err := dbFetchBlockByNode(dbTx, node)
if err != nil {
return err
}
isOrphan, isDelayed, err := dag.ProcessBlock(block, BFWasStored)
if err != nil {
log.Warnf("Block %s, which was not previously processed, "+
@@ -654,8 +599,8 @@ func (dag *BlockDAG) deserializeBlockNode(blockRow []byte) (*blockNode, error) {
utxoCommitment: header.UTXOCommitment,
}
node.children = newSet()
node.parents = newSet()
node.children = newBlockSet()
node.parents = newBlockSet()
for _, hash := range header.ParentHashes {
parent := dag.index.LookupNode(hash)
@@ -706,17 +651,21 @@ func (dag *BlockDAG) deserializeBlockNode(blockRow []byte) (*blockNode, error) {
return nil, err
}
node.bluesAnticoneSizes = make(map[daghash.Hash]dagconfig.KType)
node.bluesAnticoneSizes = make(map[*blockNode]dagconfig.KType)
for i := uint64(0); i < bluesAnticoneSizesLen; i++ {
hash := &daghash.Hash{}
if _, err := io.ReadFull(buffer, hash[:]); err != nil {
return nil, err
}
bluesAnticoneSize, err := binaryserializer.Uint8(buffer)
node.bluesAnticoneSizes[*hash] = dagconfig.KType(bluesAnticoneSize)
if err != nil {
return nil, err
}
blue := dag.index.LookupNode(hash)
if blue == nil {
return nil, errors.Errorf("couldn't find block with hash %s", hash)
}
node.bluesAnticoneSizes[blue] = dagconfig.KType(bluesAnticoneSize)
}
return node, nil
@@ -784,8 +733,8 @@ func serializeBlockNode(node *blockNode) ([]byte, error) {
if err != nil {
return nil, err
}
for blockHash, blueAnticoneSize := range node.bluesAnticoneSizes {
_, err = w.Write(blockHash[:])
for blue, blueAnticoneSize := range node.bluesAnticoneSizes {
_, err = w.Write(blue.hash[:])
if err != nil {
return nil, err
}
@@ -873,7 +822,7 @@ func (dag *BlockDAG) BlockHashesFrom(lowHash *daghash.Hash, limit int) ([]*dagha
// genesis hash in the result
blockHashes = append(blockHashes, dag.genesis.hash)
}
if !dag.BlockExists(lowHash) {
if !dag.IsInDAG(lowHash) {
return nil, errors.Errorf("block %s not found", lowHash)
}
blueScore, err := dag.BlueScoreByBlockHash(lowHash)


@@ -6,6 +6,7 @@ package blockdag
import (
"bytes"
"encoding/hex"
"github.com/pkg/errors"
"reflect"
"testing"
@@ -36,9 +37,21 @@ func TestErrNotInDAG(t *testing.T) {
}
}
// TestUtxoSerialization ensures serializing and deserializing unspent
// hexToBytes converts the passed hex string into bytes and will panic if there
// is an error. This is only provided for the hard-coded constants so errors in
// the source code can be detected. It will only (and must only) be called with
// hard-coded values.
func hexToBytes(s string) []byte {
b, err := hex.DecodeString(s)
if err != nil {
panic("invalid hex in source file: " + s)
}
return b
}
// TestUTXOSerialization ensures serializing and deserializing unspent
// transaction output entries works as expected.
func TestUtxoSerialization(t *testing.T) {
func TestUTXOSerialization(t *testing.T) {
t.Parallel()
tests := []struct {
@@ -54,7 +67,7 @@ func TestUtxoSerialization(t *testing.T) {
blockBlueScore: 1,
packedFlags: tfCoinbase,
},
serialized: hexToBytes("03320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52"),
serialized: hexToBytes("030000000000000000f2052a0100000043410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858eeac"),
},
{
name: "blue score 100001, not coinbase",
@@ -64,13 +77,21 @@ func TestUtxoSerialization(t *testing.T) {
blockBlueScore: 100001,
packedFlags: 0,
},
serialized: hexToBytes("8b99420700ee8bd501094a7d5ca318da2506de35e1cb025ddc"),
serialized: hexToBytes("420d03000000000040420f00000000001976a914ee8bd501094a7d5ca318da2506de35e1cb025ddc88ac"),
},
}
for i, test := range tests {
// Ensure the utxo entry serializes to the expected value.
gotBytes := serializeUTXOEntry(test.entry)
w := &bytes.Buffer{}
err := serializeUTXOEntry(w, test.entry)
if err != nil {
t.Errorf("serializeUTXOEntry #%d (%s) unexpected "+
"error: %v", i, test.name, err)
continue
}
gotBytes := w.Bytes()
if !bytes.Equal(gotBytes, test.serialized) {
t.Errorf("serializeUTXOEntry #%d (%s): mismatched "+
"bytes - got %x, want %x", i, test.name,
@@ -78,8 +99,8 @@ func TestUtxoSerialization(t *testing.T) {
continue
}
// Deserialize to a utxo entry.
utxoEntry, err := deserializeUTXOEntry(test.serialized)
// Deserialize to a utxo entry.
utxoEntry, err := deserializeUTXOEntry(bytes.NewReader(test.serialized))
if err != nil {
t.Errorf("deserializeUTXOEntry #%d (%s) unexpected "+
"error: %v", i, test.name, err)
@@ -124,28 +145,24 @@ func TestUtxoEntryDeserializeErrors(t *testing.T) {
tests := []struct {
name string
serialized []byte
errType error
}{
{
name: "no data after header code",
serialized: hexToBytes("02"),
errType: errDeserialize(""),
},
{
name: "incomplete compressed txout",
serialized: hexToBytes("0232"),
errType: errDeserialize(""),
},
}
for _, test := range tests {
// Ensure the expected error type is returned and the returned
// entry is nil.
entry, err := deserializeUTXOEntry(test.serialized)
if reflect.TypeOf(err) != reflect.TypeOf(test.errType) {
t.Errorf("deserializeUTXOEntry (%s): expected error "+
"type does not match - got %T, want %T",
test.name, err, test.errType)
entry, err := deserializeUTXOEntry(bytes.NewReader(test.serialized))
if err == nil {
t.Errorf("deserializeUTXOEntry (%s): didn't return an error",
test.name)
continue
}
if entry != nil {
@@ -248,12 +265,13 @@ func TestDAGStateDeserializeErrors(t *testing.T) {
test.name, err, test.errType)
continue
}
if derr, ok := err.(database.Error); ok {
var dbErr database.Error
if ok := errors.As(err, &dbErr); ok {
tderr := test.errType.(database.Error)
if derr.ErrorCode != tderr.ErrorCode {
if dbErr.ErrorCode != tderr.ErrorCode {
t.Errorf("deserializeDAGState (%s): "+
"wrong error code got: %v, want: %v",
test.name, derr.ErrorCode,
test.name, dbErr.ErrorCode,
tderr.ErrorCode)
continue
}


@@ -94,7 +94,8 @@ func TestDifficulty(t *testing.T) {
addNode := func(parents blockSet, blockTime time.Time) *blockNode {
bluestParent := parents.bluest()
if blockTime == zeroTime {
blockTime = time.Unix(bluestParent.timestamp+1, 0)
blockTime = time.Unix(bluestParent.timestamp, 0)
blockTime = blockTime.Add(params.TargetTimePerBlock)
}
block, err := PrepareBlockForTest(dag, parents.hashes(), nil)
if err != nil {
@@ -117,30 +118,32 @@ func TestDifficulty(t *testing.T) {
}
tip := dag.genesis
for i := uint64(0); i < dag.difficultyAdjustmentWindowSize; i++ {
tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
if tip.bits != dag.genesis.bits {
t.Fatalf("As long as the bluest parent's blue score is less then the difficulty adjustment window size, the difficulty should be the same as genesis'")
t.Fatalf("As long as the bluest parent's blue score is less then the difficulty adjustment " +
"window size, the difficulty should be the same as genesis'")
}
}
for i := uint64(0); i < dag.difficultyAdjustmentWindowSize+100; i++ {
tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
if tip.bits != dag.genesis.bits {
t.Fatalf("As long as the block rate remains the same, the difficulty shouldn't change")
}
}
nodeInThePast := addNode(setFromSlice(tip), tip.PastMedianTime(dag))
nodeInThePast := addNode(blockSetFromSlice(tip), tip.PastMedianTime(dag))
if nodeInThePast.bits != tip.bits {
t.Fatalf("The difficulty should only change when nodeInThePast is in the past of a block bluest parent")
}
tip = nodeInThePast
tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
if tip.bits != nodeInThePast.bits {
t.Fatalf("The difficulty should only change when nodeInThePast is in the past of a block bluest parent")
}
tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
if compareBits(tip.bits, nodeInThePast.bits) >= 0 {
t.Fatalf("tip.bits should be smaller than nodeInThePast.bits because nodeInThePast increased the block rate, so the difficulty should increase as well")
t.Fatalf("tip.bits should be smaller than nodeInThePast.bits because nodeInThePast increased the " +
"block rate, so the difficulty should increase as well")
}
expectedBits := uint32(0x207f83df)
if tip.bits != expectedBits {
@@ -149,7 +152,7 @@ func TestDifficulty(t *testing.T) {
// Increase block rate to increase difficulty
for i := uint64(0); i < dag.difficultyAdjustmentWindowSize; i++ {
tip = addNode(setFromSlice(tip), tip.PastMedianTime(dag))
tip = addNode(blockSetFromSlice(tip), tip.PastMedianTime(dag))
if compareBits(tip.bits, tip.parents.bluest().bits) > 0 {
t.Fatalf("Because we're increasing the block rate, the difficulty can't decrease")
}
@@ -159,7 +162,7 @@ func TestDifficulty(t *testing.T) {
lastBits := tip.bits
sameBitsCount := uint64(0)
for sameBitsCount < dag.difficultyAdjustmentWindowSize+1 {
tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
if tip.bits == lastBits {
sameBitsCount++
} else {
@@ -167,37 +170,41 @@ func TestDifficulty(t *testing.T) {
sameBitsCount = 0
}
}
slowNode := addNode(setFromSlice(tip), time.Unix(tip.timestamp+2, 0))
slowBlockTime := time.Unix(tip.timestamp, 0)
slowBlockTime = slowBlockTime.Add(params.TargetTimePerBlock + time.Second)
slowNode := addNode(blockSetFromSlice(tip), slowBlockTime)
if slowNode.bits != tip.bits {
t.Fatalf("The difficulty should only change when slowNode is in the past of a block bluest parent")
}
tip = slowNode
tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
if tip.bits != slowNode.bits {
t.Fatalf("The difficulty should only change when slowNode is in the past of a block bluest parent")
}
tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
if compareBits(tip.bits, slowNode.bits) <= 0 {
t.Fatalf("tip.bits should be smaller than slowNode.bits because slowNode decreased the block rate, so the difficulty should decrease as well")
t.Fatalf("tip.bits should be smaller than slowNode.bits because slowNode decreased the block" +
" rate, so the difficulty should decrease as well")
}
splitNode := addNode(setFromSlice(tip), zeroTime)
splitNode := addNode(blockSetFromSlice(tip), zeroTime)
tip = splitNode
for i := 0; i < 100; i++ {
tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
}
blueTip := tip
redChainTip := splitNode
for i := 0; i < 10; i++ {
redChainTip = addNode(setFromSlice(redChainTip), redChainTip.PastMedianTime(dag))
redChainTip = addNode(blockSetFromSlice(redChainTip), redChainTip.PastMedianTime(dag))
}
tipWithRedPast := addNode(setFromSlice(redChainTip, blueTip), zeroTime)
tipWithoutRedPast := addNode(setFromSlice(blueTip), zeroTime)
tipWithRedPast := addNode(blockSetFromSlice(redChainTip, blueTip), zeroTime)
tipWithoutRedPast := addNode(blockSetFromSlice(blueTip), zeroTime)
if tipWithoutRedPast.bits != tipWithRedPast.bits {
t.Fatalf("tipWithoutRedPast.bits should be the same as tipWithRedPast.bits because red blocks shouldn't affect the difficulty")
t.Fatalf("tipWithoutRedPast.bits should be the same as tipWithRedPast.bits because red blocks" +
" shouldn't affect the difficulty")
}
}


@@ -217,6 +217,10 @@ const (
// ErrInvalidParentsRelation indicates that one of the parents of a block
// is also an ancestor of another parent
ErrInvalidParentsRelation
// ErrDelayedBlockIsNotAllowed indicates that a block with a delayed timestamp was
// submitted with BFDisallowDelay flag raised.
ErrDelayedBlockIsNotAllowed
)
// Map of ErrorCode values back to their constant names for pretty printing.
@@ -264,6 +268,7 @@ var errorCodeStrings = map[ErrorCode]string{
ErrInvalidPayload: "ErrInvalidPayload",
ErrInvalidPayloadHash: "ErrInvalidPayloadHash",
ErrInvalidParentsRelation: "ErrInvalidParentsRelation",
ErrDelayedBlockIsNotAllowed: "ErrDelayedBlockIsNotAllowed",
}
// String returns the ErrorCode as a human-readable name.


@@ -58,6 +58,7 @@ func TestErrorCodeStringer(t *testing.T) {
{ErrInvalidPayload, "ErrInvalidPayload"},
{ErrInvalidPayloadHash, "ErrInvalidPayloadHash"},
{ErrInvalidParentsRelation, "ErrInvalidParentsRelation"},
{ErrDelayedBlockIsNotAllowed, "ErrDelayedBlockIsNotAllowed"},
{0xffff, "Unknown ErrorCode (65535)"},
}


@@ -73,7 +73,7 @@ func TestFinality(t *testing.T) {
currentNode := genesis
// First we build a chain of params.FinalityInterval blocks for future use
for i := 0; i < params.FinalityInterval; i++ {
for i := uint64(0); i < params.FinalityInterval; i++ {
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@@ -85,7 +85,7 @@ func TestFinality(t *testing.T) {
// Now we build a new chain of 2 * params.FinalityInterval blocks, pointed to genesis, and
// we expect the block with height 1 * params.FinalityInterval to be the last finality point
currentNode = genesis
for i := 0; i < params.FinalityInterval; i++ {
for i := uint64(0); i < params.FinalityInterval; i++ {
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@@ -94,7 +94,7 @@ func TestFinality(t *testing.T) {
expectedFinalityPoint := currentNode
for i := 0; i < params.FinalityInterval; i++ {
for i := uint64(0); i < params.FinalityInterval; i++ {
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@@ -137,10 +137,10 @@ func TestFinality(t *testing.T) {
if err == nil {
t.Errorf("TestFinality: buildNodeToDag expected an error but got <nil>")
}
rErr, ok := err.(blockdag.RuleError)
if ok {
if rErr.ErrorCode != blockdag.ErrFinality {
t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, rErr.ErrorCode)
var ruleErr blockdag.RuleError
if errors.As(err, &ruleErr) {
if ruleErr.ErrorCode != blockdag.ErrFinality {
t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, ruleErr.ErrorCode)
}
} else {
t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", err)
@@ -152,13 +152,12 @@ func TestFinality(t *testing.T) {
if err == nil {
t.Errorf("TestFinality: buildNodeToDag expected an error but got <nil>")
}
rErr, ok = err.(blockdag.RuleError)
if ok {
if rErr.ErrorCode != blockdag.ErrFinality {
t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, rErr.ErrorCode)
if errors.As(err, &ruleErr) {
if ruleErr.ErrorCode != blockdag.ErrFinality {
t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, ruleErr.ErrorCode)
}
} else {
t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", rErr)
t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", ruleErr)
}
}
@@ -167,9 +166,17 @@ func TestFinality(t *testing.T) {
// a getblocks message it should always be able to send
// all the necessary invs.
func TestFinalityInterval(t *testing.T) {
params := dagconfig.SimnetParams
if params.FinalityInterval > wire.MaxInvPerMsg {
t.Errorf("dagconfig.SimnetParams.FinalityInterval should be lower or equal to wire.MaxInvPerMsg")
netParams := []*dagconfig.Params{
&dagconfig.MainnetParams,
&dagconfig.TestnetParams,
&dagconfig.DevnetParams,
&dagconfig.RegressionNetParams,
&dagconfig.SimnetParams,
}
for _, params := range netParams {
if params.FinalityInterval > wire.MaxInvPerMsg {
t.Errorf("FinalityInterval in %s should be lower or equal to wire.MaxInvPerMsg", params.Name)
}
}
}
@@ -260,21 +267,32 @@ func TestChainedTransactions(t *testing.T) {
}
chainedTx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{chainedTxIn}, []*wire.TxOut{chainedTxOut})
block2, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{tx, chainedTx}, true)
block2, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{tx}, false)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
// Manually add a chained transaction to block2
block2.Transactions = append(block2.Transactions, chainedTx)
block2UtilTxs := make([]*util.Tx, len(block2.Transactions))
for i, tx := range block2.Transactions {
block2UtilTxs[i] = util.NewTx(tx)
}
block2.Header.HashMerkleRoot = blockdag.BuildHashMerkleTreeStore(block2UtilTxs).Root()
// Checks that dag.ProcessBlock fails because we don't allow a transaction to spend another transaction from the same block
isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(block2), blockdag.BFNoPoWCheck)
if err == nil {
t.Errorf("ProcessBlock expected an error")
} else if rErr, ok := err.(blockdag.RuleError); ok {
if rErr.ErrorCode != blockdag.ErrMissingTxOut {
t.Errorf("ProcessBlock expected an %v error code but got %v", blockdag.ErrMissingTxOut, rErr.ErrorCode)
}
} else {
t.Errorf("ProcessBlock expected a blockdag.RuleError but got %v", err)
var ruleErr blockdag.RuleError
if ok := errors.As(err, &ruleErr); ok {
if ruleErr.ErrorCode != blockdag.ErrMissingTxOut {
t.Errorf("ProcessBlock expected an %v error code but got %v", blockdag.ErrMissingTxOut, ruleErr.ErrorCode)
}
} else {
t.Errorf("ProcessBlock expected a blockdag.RuleError but got %v", err)
}
}
if isDelayed {
t.Fatalf("ProcessBlock: block2 " +
@@ -468,11 +486,11 @@ func TestGasLimit(t *testing.T) {
if err == nil {
t.Fatalf("ProcessBlock expected to have an error in block that exceeds gas limit")
}
rErr, ok := err.(blockdag.RuleError)
if !ok {
var ruleErr blockdag.RuleError
if !errors.As(err, &ruleErr) {
t.Fatalf("ProcessBlock expected a RuleError, but got %v", err)
} else if rErr.ErrorCode != blockdag.ErrInvalidGas {
t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, rErr.ErrorCode)
} else if ruleErr.ErrorCode != blockdag.ErrInvalidGas {
t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, ruleErr.ErrorCode)
}
if isDelayed {
t.Fatalf("ProcessBlock: overLimitBlock " +
@@ -503,15 +521,18 @@ func TestGasLimit(t *testing.T) {
if err == nil {
t.Fatalf("ProcessBlock expected to have an error")
}
rErr, ok = err.(blockdag.RuleError)
if !ok {
if !errors.As(err, &ruleErr) {
t.Fatalf("ProcessBlock expected a RuleError, but got %v", err)
} else if rErr.ErrorCode != blockdag.ErrInvalidGas {
t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, rErr.ErrorCode)
} else if ruleErr.ErrorCode != blockdag.ErrInvalidGas {
t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, ruleErr.ErrorCode)
}
if isOrphan {
t.Fatalf("ProcessBlock: overLimitBlock got unexpectedly orphan")
}
if isDelayed {
t.Fatalf("ProcessBlock: overflowGasBlock " +
"is too far in the future")
}
nonExistentSubnetwork := &subnetworkid.SubnetworkID{123}
nonExistentSubnetworkTxIn := &wire.TxIn{
@@ -538,6 +559,13 @@ func TestGasLimit(t *testing.T) {
if err.Error() != expectedErrStr {
t.Fatalf("ProcessBlock expected error \"%v\" but got \"%v\"", expectedErrStr, err)
}
if isDelayed {
t.Fatalf("ProcessBlock: nonExistentSubnetworkBlock " +
"is too far in the future")
}
if isOrphan {
t.Fatalf("ProcessBlock: nonExistentSubnetworkBlock got unexpectedly orphan")
}
// Here we check that we can process a block with a transaction that doesn't exceed the gas limit
validBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1}, true)


@@ -27,7 +27,7 @@ import (
// For further details see the article https://eprint.iacr.org/2018/104.pdf
func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blockNode, err error) {
newNode.selectedParent = newNode.parents.bluest()
newNode.bluesAnticoneSizes[*newNode.selectedParent.hash] = 0
newNode.bluesAnticoneSizes[newNode.selectedParent] = 0
newNode.blues = []*blockNode{newNode.selectedParent}
selectedParentAnticone, err = dag.selectedParentAnticone(newNode)
if err != nil {
@@ -102,9 +102,9 @@ func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blo
if possiblyBlue {
// No k-cluster violation found, we can now set the candidate block as blue
newNode.blues = append(newNode.blues, blueCandidate)
newNode.bluesAnticoneSizes[*blueCandidate.hash] = candidateAnticoneSize
newNode.bluesAnticoneSizes[blueCandidate] = candidateAnticoneSize
for blue, blueAnticoneSize := range candidateBluesAnticoneSizes {
newNode.bluesAnticoneSizes[*blue.hash] = blueAnticoneSize + 1
newNode.bluesAnticoneSizes[blue] = blueAnticoneSize + 1
}
// The maximum length of node.blues can be K+1 because
@@ -126,9 +126,9 @@ func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blo
// we check whether it is in the past of the selected parent.
// If not, we add the node to the resulting anticone-set and queue it for processing.
func (dag *BlockDAG) selectedParentAnticone(node *blockNode) ([]*blockNode, error) {
anticoneSet := newSet()
anticoneSet := newBlockSet()
var anticoneSlice []*blockNode
selectedParentPast := newSet()
selectedParentPast := newBlockSet()
var queue []*blockNode
// Queueing all parents (other than the selected parent itself) for processing.
for parent := range node.parents {
@@ -168,7 +168,7 @@ func (dag *BlockDAG) selectedParentAnticone(node *blockNode) ([]*blockNode, erro
// Expects 'block' to be in the blue set of 'context'
func (dag *BlockDAG) blueAnticoneSize(block, context *blockNode) (dagconfig.KType, error) {
for current := context; current != nil; current = current.selectedParent {
if blueAnticoneSize, ok := current.bluesAnticoneSizes[*block.hash]; ok {
if blueAnticoneSize, ok := current.bluesAnticoneSizes[block]; ok {
return blueAnticoneSize, nil
}
}


@@ -3,9 +3,12 @@ package blockdag
import (
"fmt"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"reflect"
"sort"
"strings"
"testing"
)
@@ -30,7 +33,7 @@ func TestGHOSTDAG(t *testing.T) {
}{
{
k: 3,
expectedReds: []string{"F", "G", "H", "I", "O", "P"},
expectedReds: []string{"F", "G", "H", "I", "N", "Q"},
dagData: []*testBlockData{
{
parents: []string{"A"},
@@ -163,7 +166,7 @@ func TestGHOSTDAG(t *testing.T) {
id: "T",
expectedScore: 13,
expectedSelectedParent: "S",
expectedBlues: []string{"S", "N", "Q"},
expectedBlues: []string{"S", "O", "P"},
},
},
},
@@ -276,3 +279,92 @@ func checkReds(expectedReds []string, reds map[string]bool) bool {
}
return true
}
func TestBlueAnticoneSizeErrors(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizeErrors", Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
t.Fatalf("TestBlueAnticoneSizeErrors: Failed to setup DAG instance: %s", err)
}
defer teardownFunc()
// Prepare a block chain with size K beginning with the genesis block
currentBlockA := dag.dagParams.GenesisBlock
for i := dagconfig.KType(0); i < dag.dagParams.K; i++ {
newBlock := prepareAndProcessBlock(t, dag, currentBlockA)
currentBlockA = newBlock
}
// Prepare another block chain with size K beginning with the genesis block
currentBlockB := dag.dagParams.GenesisBlock
for i := dagconfig.KType(0); i < dag.dagParams.K; i++ {
newBlock := prepareAndProcessBlock(t, dag, currentBlockB)
currentBlockB = newBlock
}
// Get references to the tips of the two chains
blockNodeA := dag.index.LookupNode(currentBlockA.BlockHash())
blockNodeB := dag.index.LookupNode(currentBlockB.BlockHash())
// Try getting the blueAnticoneSize between them. Since the two
// blocks are not in the anticones of each other, this should fail.
_, err = dag.blueAnticoneSize(blockNodeA, blockNodeB)
if err == nil {
t.Fatalf("TestBlueAnticoneSizeErrors: blueAnticoneSize unexpectedly succeeded")
}
expectedErrSubstring := "is not in blue set of"
if !strings.Contains(err.Error(), expectedErrSubstring) {
t.Fatalf("TestBlueAnticoneSizeErrors: blueAnticoneSize returned wrong error. "+
"Want: %s, got: %s", expectedErrSubstring, err)
}
}
func TestGHOSTDAGErrors(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestGHOSTDAGErrors", Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
t.Fatalf("TestGHOSTDAGErrors: Failed to setup DAG instance: %s", err)
}
defer teardownFunc()
// Add two child blocks to the genesis
block1 := prepareAndProcessBlock(t, dag, dag.dagParams.GenesisBlock)
block2 := prepareAndProcessBlock(t, dag, dag.dagParams.GenesisBlock)
// Add a child block to the previous two blocks
block3 := prepareAndProcessBlock(t, dag, block1, block2)
// Clear the reachability store
dag.reachabilityStore.loaded = map[daghash.Hash]*reachabilityData{}
err = dag.db.Update(func(dbTx database.Tx) error {
bucket := dbTx.Metadata().Bucket(reachabilityDataBucketName)
cursor := bucket.Cursor()
for ok := cursor.First(); ok; ok = cursor.Next() {
err := bucket.Delete(cursor.Key())
if err != nil {
return err
}
}
return nil
})
if err != nil {
t.Fatalf("TestGHOSTDAGErrors: db.Update failed: %s", err)
}
// Try to rerun GHOSTDAG on the last block. GHOSTDAG uses
// reachability data, so we expect it to fail.
blockNode3 := dag.index.LookupNode(block3.BlockHash())
_, err = dag.ghostdag(blockNode3)
if err == nil {
t.Fatalf("TestGHOSTDAGErrors: ghostdag unexpectedly succeeded")
}
expectedErrSubstring := "Couldn't find reachability data"
if !strings.Contains(err.Error(), expectedErrSubstring) {
t.Fatalf("TestGHOSTDAGErrors: ghostdag returned wrong error. "+
"Want: %s, got: %s", expectedErrSubstring, err)
}
}


@@ -1,7 +1,7 @@
indexers
========
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
[![GoDoc](https://godoc.org/github.com/kaspanet/kaspad/blockdag/indexers?status.png)](http://godoc.org/github.com/kaspanet/kaspad/blockdag/indexers)
Package indexers implements optional block chain indexes.


@@ -298,16 +298,16 @@ func copyDirectory(scrDir, dest string) error {
// This function is copied and modified from this stackoverflow answer: https://stackoverflow.com/a/56314145/2413761
func copyFile(srcFile, dstFile string) error {
out, err := os.Create(dstFile)
defer out.Close()
if err != nil {
return err
}
defer out.Close()
in, err := os.Open(srcFile)
defer in.Close()
if err != nil {
return err
}
defer in.Close()
_, err = io.Copy(out, in)
if err != nil {


@@ -1,902 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package indexers
import (
"fmt"
"github.com/pkg/errors"
"sync"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
const (
// addrIndexName is the human-readable name for the index.
addrIndexName = "address index"
// level0MaxEntries is the maximum number of transactions that are
// stored in level 0 of an address index entry. Subsequent levels store
// 2^n * level0MaxEntries entries, or in other words, double the maximum of
// the previous level.
level0MaxEntries = 8
// addrKeySize is the number of bytes an address key consumes in the
// index. It consists of 1 byte address type + 20 bytes hash160.
addrKeySize = 1 + 20
// levelKeySize is the number of bytes a level key in the address index
// consumes. It consists of the address key + 1 byte for the level.
levelKeySize = addrKeySize + 1
// levelOffset is the offset in the level key which identifies the level.
levelOffset = levelKeySize - 1
// addrKeyTypePubKeyHash is the address type in an address key which
// represents both a pay-to-pubkey-hash and a pay-to-pubkey address.
// This is done because both are identical for the purposes of the
// address index.
addrKeyTypePubKeyHash = 0
// addrKeyTypeScriptHash is the address type in an address key which
// represents a pay-to-script-hash address. This is necessary because
// the hash of a pubkey address might be the same as that of a script
// hash.
addrKeyTypeScriptHash = 1
// Size of a transaction entry. It consists of 8 bytes block id + 4
// bytes offset + 4 bytes length.
txEntrySize = 8 + 4 + 4
)
var (
// addrIndexKey is the key of the address index and the db bucket used
// to house it.
addrIndexKey = []byte("txbyaddridx")
// errUnsupportedAddressType is an error that is used to signal an
// unsupported address type has been used.
errUnsupportedAddressType = errors.New("address type is not supported " +
"by the address index")
)
// -----------------------------------------------------------------------------
// The address index maps addresses referenced in the blockDAG to a list of
// all the transactions involving that address. Transactions are stored
// according to their order of appearance in the blockDAG. That is to say
// first by block height and then by offset inside the block. It is also
// important to note that this implementation requires the transaction index,
// since it is needed in order to catch up old blocks whose spent outputs
// will already have been pruned from the utxo set.
//
// The approach used to store the index is similar to a log-structured merge
// tree (LSM tree) and is thus similar to how leveldb works internally.
//
// Every address consists of one or more entries identified by a level starting
// from 0 where each level holds a maximum number of entries such that each
// subsequent level holds double the maximum of the previous one. In equation
// form, the number of entries each level holds is 2^n * firstLevelMaxSize.
//
// New transactions are appended to level 0 until it becomes full at which point
// the entire level 0 entry is appended to the level 1 entry and level 0 is
// cleared. This process continues until level 1 becomes full at which point it
// will be appended to level 2 and cleared and so on.
//
// The result of this is the lower levels contain newer transactions and the
// transactions within each level are ordered from oldest to newest.
//
// The intent of this approach is to provide a balance between space efficiency
// and indexing cost. Storing one entry per transaction would have the lowest
// indexing cost, but would waste a lot of space because the same address hash
// would be duplicated for every transaction key. On the other hand, storing a
// single entry with all transactions would be the most space efficient, but
// would cause indexing cost to grow quadratically with the number of
// transactions involving the same address. The approach used here provides
// logarithmic insertion and retrieval.
//
// The serialized key format is:
//
// <addr type><addr hash><level>
//
// Field Type Size
// addr type uint8 1 byte
// addr hash hash160 20 bytes
// level uint8 1 byte
// -----
// Total: 22 bytes
//
// The serialized value format is:
//
// [<block id><start offset><tx length>,...]
//
// Field Type Size
// block id uint64 8 bytes
// start offset uint32 4 bytes
// tx length uint32 4 bytes
// -----
// Total: 16 bytes per indexed tx
// -----------------------------------------------------------------------------
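// Illustrative sketch, not part of the original file: the capacity rule
// described above expressed in code. With level0MaxEntries = 8 each level
// holds 8, 16, 32, ... entries (128, 256, 512, ... bytes at txEntrySize = 16).
func illustrativeLevelCapacity(level uint8) int {
return level0MaxEntries << level // 2^level * level0MaxEntries
}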
// fetchBlockHashFunc defines a callback function to use in order to convert a
// serialized block ID to an associated block hash.
type fetchBlockHashFunc func(serializedID []byte) (*daghash.Hash, error)
// serializeAddrIndexEntry serializes the provided block id and transaction
// location according to the format described in detail above.
func serializeAddrIndexEntry(blockID uint64, txLoc wire.TxLoc) []byte {
// Serialize the entry.
serialized := make([]byte, 16)
byteOrder.PutUint64(serialized, blockID)
byteOrder.PutUint32(serialized[8:], uint32(txLoc.TxStart))
byteOrder.PutUint32(serialized[12:], uint32(txLoc.TxLen))
return serialized
}
// deserializeAddrIndexEntry decodes the passed serialized byte slice into the
// provided region struct according to the format described in detail above and
// uses the passed block hash fetching function in order to convert the block ID
// to the associated block hash.
func deserializeAddrIndexEntry(serialized []byte, region *database.BlockRegion, fetchBlockHash fetchBlockHashFunc) error {
// Ensure there are enough bytes to decode.
if len(serialized) < txEntrySize {
return errDeserialize("unexpected end of data")
}
hash, err := fetchBlockHash(serialized[0:8])
if err != nil {
return err
}
region.Hash = hash
region.Offset = byteOrder.Uint32(serialized[8:12])
region.Len = byteOrder.Uint32(serialized[12:16])
return nil
}
// keyForLevel returns the key for a specific address and level in the address
// index entry.
func keyForLevel(addrKey [addrKeySize]byte, level uint8) [levelKeySize]byte {
var key [levelKeySize]byte
copy(key[:], addrKey[:])
key[levelOffset] = level
return key
}
// dbPutAddrIndexEntry updates the address index to include the provided entry
// according to the level-based scheme described in detail above.
func dbPutAddrIndexEntry(bucket internalBucket, addrKey [addrKeySize]byte, blockID uint64, txLoc wire.TxLoc) error {
// Start with level 0 and its initial max number of entries.
curLevel := uint8(0)
maxLevelBytes := level0MaxEntries * txEntrySize
// Simply append the new entry to level 0 and return now when it will
// fit. This is the most common path.
newData := serializeAddrIndexEntry(blockID, txLoc)
level0Key := keyForLevel(addrKey, 0)
level0Data := bucket.Get(level0Key[:])
if len(level0Data)+len(newData) <= maxLevelBytes {
mergedData := newData
if len(level0Data) > 0 {
mergedData = make([]byte, len(level0Data)+len(newData))
copy(mergedData, level0Data)
copy(mergedData[len(level0Data):], newData)
}
return bucket.Put(level0Key[:], mergedData)
}
// At this point, level 0 is full, so merge each level into higher
// levels as many times as needed to free up level 0.
prevLevelData := level0Data
for {
// Each new level holds twice as much as the previous one.
curLevel++
maxLevelBytes *= 2
// Move to the next level as long as the current level is full.
curLevelKey := keyForLevel(addrKey, curLevel)
curLevelData := bucket.Get(curLevelKey[:])
if len(curLevelData) == maxLevelBytes {
prevLevelData = curLevelData
continue
}
// The current level has room for the data in the previous one,
// so merge the data from previous level into it.
mergedData := prevLevelData
if len(curLevelData) > 0 {
mergedData = make([]byte, len(curLevelData)+
len(prevLevelData))
copy(mergedData, curLevelData)
copy(mergedData[len(curLevelData):], prevLevelData)
}
err := bucket.Put(curLevelKey[:], mergedData)
if err != nil {
return err
}
// Move all of the levels before the previous one up a level.
for mergeLevel := curLevel - 1; mergeLevel > 0; mergeLevel-- {
mergeLevelKey := keyForLevel(addrKey, mergeLevel)
prevLevelKey := keyForLevel(addrKey, mergeLevel-1)
prevData := bucket.Get(prevLevelKey[:])
err := bucket.Put(mergeLevelKey[:], prevData)
if err != nil {
return err
}
}
break
}
// Finally, insert the new entry into level 0 now that it is empty.
return bucket.Put(level0Key[:], newData)
}
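// Hypothetical usage sketch, not part of the original file: filling level 0
// past its maximum triggers the merge cascade described above. Using the
// addrIndexBucket mock from the test file further below, after
// level0MaxEntries+1 inserts level 1 holds the original eight entries and
// level 0 holds only the newest one:
//
//	bucket := &addrIndexBucket{levels: make(map[[levelKeySize]byte][]byte)}
//	var key [addrKeySize]byte
//	for i := 0; i < level0MaxEntries+1; i++ {
//		_ = dbPutAddrIndexEntry(bucket, key, uint64(i), wire.TxLoc{TxStart: i * 2})
//	}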
// dbFetchAddrIndexEntries returns block regions for transactions referenced by
// the given address key, along with the number of entries actually skipped,
// which can be lower than requested when there are fewer total entries than
// the requested number to skip.
func dbFetchAddrIndexEntries(bucket internalBucket, addrKey [addrKeySize]byte, numToSkip, numRequested uint32, reverse bool, fetchBlockHash fetchBlockHashFunc) ([]database.BlockRegion, uint32, error) {
// When the reverse flag is not set, all levels need to be fetched
// because numToSkip and numRequested are counted from the oldest
// transactions (highest level) and thus the total count is needed.
// However, when the reverse flag is set, only enough records to satisfy
// the requested amount are needed.
var level uint8
var serialized []byte
for !reverse || len(serialized) < int(numToSkip+numRequested)*txEntrySize {
curLevelKey := keyForLevel(addrKey, level)
levelData := bucket.Get(curLevelKey[:])
if levelData == nil {
// Stop when there are no more levels.
break
}
// Higher levels contain older transactions, so prepend them.
prepended := make([]byte, len(serialized)+len(levelData))
copy(prepended, levelData)
copy(prepended[len(levelData):], serialized)
serialized = prepended
level++
}
// When the requested number of entries to skip is larger than the
// number available, skip them all and return now with the actual number
// skipped.
numEntries := uint32(len(serialized) / txEntrySize)
if numToSkip >= numEntries {
return nil, numEntries, nil
}
// Nothing more to do when there are no requested entries.
if numRequested == 0 {
return nil, numToSkip, nil
}
// Limit the number to load based on the number of available entries,
// the number to skip, and the number requested.
numToLoad := numEntries - numToSkip
if numToLoad > numRequested {
numToLoad = numRequested
}
// Start the offset after all skipped entries and load the calculated
// number.
results := make([]database.BlockRegion, numToLoad)
for i := uint32(0); i < numToLoad; i++ {
// Calculate the read offset according to the reverse flag.
var offset uint32
if reverse {
offset = (numEntries - numToSkip - i - 1) * txEntrySize
} else {
offset = (numToSkip + i) * txEntrySize
}
// Deserialize and populate the result.
err := deserializeAddrIndexEntry(serialized[offset:],
&results[i], fetchBlockHash)
if err != nil {
// Ensure any deserialization errors are returned as
// database corruption errors.
if isDeserializeErr(err) {
err = database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("failed to "+
"deserialize address index "+
"for key %x: %s", addrKey, err),
}
}
return nil, 0, err
}
}
return results, numToSkip, nil
}
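// Worked example (illustrative only): with 20 stored entries, numToSkip = 5,
// and numRequested = 10, the forward direction loads entries 5 through 14
// counted from the oldest, while reverse = true loads entries 14 down to 5,
// i.e. it skips the five newest entries instead of the five oldest.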
// minEntriesToReachLevel returns the minimum number of entries that are
// required to reach the given address index level.
func minEntriesToReachLevel(level uint8) int {
maxEntriesForLevel := level0MaxEntries
minRequired := 1
for l := uint8(1); l <= level; l++ {
minRequired += maxEntriesForLevel
maxEntriesForLevel *= 2
}
return minRequired
}
// maxEntriesForLevel returns the maximum number of entries allowed for the
// given address index level.
func maxEntriesForLevel(level uint8) int {
numEntries := level0MaxEntries
for l := level; l > 0; l-- {
numEntries *= 2
}
return numEntries
}
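// Worked example (illustrative only): with level0MaxEntries = 8,
// minEntriesToReachLevel(2) returns 1 + 8 + 16 = 25, since level 0 must hold
// at least one entry and every level above it must be at least half full:
// 8 of 16 for level 1 and 16 of 32 for level 2.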
// dbRemoveAddrIndexEntries removes the specified number of entries from
// the address index for the provided key. An assertion error will be returned
// if the count exceeds the total number of entries in the index.
func dbRemoveAddrIndexEntries(bucket internalBucket, addrKey [addrKeySize]byte, count int) error {
// Nothing to do if no entries are being deleted.
if count <= 0 {
return nil
}
// Make use of a local map to track pending updates and define a closure
// to apply it to the database. This is done in order to reduce the
// number of database reads and because there is more than one exit
// path that needs to apply the updates.
pendingUpdates := make(map[uint8][]byte)
applyPending := func() error {
for level, data := range pendingUpdates {
curLevelKey := keyForLevel(addrKey, level)
if len(data) == 0 {
err := bucket.Delete(curLevelKey[:])
if err != nil {
return err
}
continue
}
err := bucket.Put(curLevelKey[:], data)
if err != nil {
return err
}
}
return nil
}
// Loop forwards through the levels while removing entries until the
// specified number has been removed. This will potentially result in
// entirely empty lower levels which will be backfilled below.
var highestLoadedLevel uint8
numRemaining := count
for level := uint8(0); numRemaining > 0; level++ {
// Load the data for the level from the database.
curLevelKey := keyForLevel(addrKey, level)
curLevelData := bucket.Get(curLevelKey[:])
if len(curLevelData) == 0 && numRemaining > 0 {
return AssertError(fmt.Sprintf("dbRemoveAddrIndexEntries "+
"not enough entries for address key %x to "+
"delete %d entries", addrKey, count))
}
pendingUpdates[level] = curLevelData
highestLoadedLevel = level
// Delete the entire level as needed.
numEntries := len(curLevelData) / txEntrySize
if numRemaining >= numEntries {
pendingUpdates[level] = nil
numRemaining -= numEntries
continue
}
// Remove remaining entries to delete from the level.
offsetEnd := len(curLevelData) - (numRemaining * txEntrySize)
pendingUpdates[level] = curLevelData[:offsetEnd]
break
}
// When not all of the elements in level 0 were removed, there is nothing
// left to do other than updating the database.
if len(pendingUpdates[0]) != 0 {
return applyPending()
}
// At this point there are one or more empty levels before the current
// level which need to be backfilled and the current level might have
// had some entries deleted from it as well. Since all levels after
// level 0 are required to either be empty, half full, or completely
// full, the current level must be adjusted accordingly by backfilling
// each previous level in a way which satisfies the requirements. Any
// entries that are left are assigned to level 0 after the loop as they
// are guaranteed to fit by the logic in the loop. In other words, this
// effectively squashes all remaining entries in the current level into
// the lowest possible levels while following the level rules.
//
// Note that the level after the current level might also have entries
// and gaps are not allowed, so this also keeps track of the lowest
// empty level so the code below knows how far to backfill in case it is
// required.
lowestEmptyLevel := uint8(255)
curLevelData := pendingUpdates[highestLoadedLevel]
curLevelMaxEntries := maxEntriesForLevel(highestLoadedLevel)
for level := highestLoadedLevel; level > 0; level-- {
// When there are not enough entries left in the current level
// for the number that would be required to reach it, clear the
// current level, which effectively moves them all up to the
// previous level on the next iteration. Otherwise, there are
// sufficient entries, so update the current level to
// contain as many entries as possible while still leaving
// enough remaining entries required to reach the level.
numEntries := len(curLevelData) / txEntrySize
prevLevelMaxEntries := curLevelMaxEntries / 2
minPrevRequired := minEntriesToReachLevel(level - 1)
if numEntries < prevLevelMaxEntries+minPrevRequired {
lowestEmptyLevel = level
pendingUpdates[level] = nil
} else {
// This level can only be completely full or half full,
// so choose the appropriate offset to ensure enough
// entries remain to reach the level.
var offset int
if numEntries-curLevelMaxEntries >= minPrevRequired {
offset = curLevelMaxEntries * txEntrySize
} else {
offset = prevLevelMaxEntries * txEntrySize
}
pendingUpdates[level] = curLevelData[:offset]
curLevelData = curLevelData[offset:]
}
curLevelMaxEntries = prevLevelMaxEntries
}
pendingUpdates[0] = curLevelData
if len(curLevelData) == 0 {
lowestEmptyLevel = 0
}
// When the highest loaded level is empty, it's possible the level after
// it still has data and thus that data needs to be backfilled as well.
for len(pendingUpdates[highestLoadedLevel]) == 0 {
// When the next level is empty too, there is no data left to
// continue backfilling, so there is nothing left to do.
// Otherwise, populate the pending updates map with the newly
// loaded data and update the highest loaded level accordingly.
level := highestLoadedLevel + 1
curLevelKey := keyForLevel(addrKey, level)
levelData := bucket.Get(curLevelKey[:])
if len(levelData) == 0 {
break
}
pendingUpdates[level] = levelData
highestLoadedLevel = level
// At this point the highest level is not empty, but it might
// be half full. When that is the case, move it up a level to
// simplify the code below which backfills all lower levels that
// are still empty. This also means the current level will be
// empty, so the loop will perform another iteration to
// potentially backfill this level with data from the next one.
curLevelMaxEntries := maxEntriesForLevel(level)
if len(levelData)/txEntrySize != curLevelMaxEntries {
pendingUpdates[level] = nil
pendingUpdates[level-1] = levelData
level--
curLevelMaxEntries /= 2
}
// Backfill all lower levels that are still empty by iteratively
// halving the data until the lowest empty level is filled.
for level > lowestEmptyLevel {
offset := (curLevelMaxEntries / 2) * txEntrySize
pendingUpdates[level] = levelData[:offset]
levelData = levelData[offset:]
pendingUpdates[level-1] = levelData
level--
curLevelMaxEntries /= 2
}
// The lowest possible empty level is now the highest loaded
// level.
lowestEmptyLevel = highestLoadedLevel
}
// Apply the pending updates.
return applyPending()
}
// addrToKey converts known address types to an addrindex key. An error is
// returned for unsupported types.
func addrToKey(addr util.Address) ([addrKeySize]byte, error) {
switch addr := addr.(type) {
case *util.AddressPubKeyHash:
var result [addrKeySize]byte
result[0] = addrKeyTypePubKeyHash
copy(result[1:], addr.Hash160()[:])
return result, nil
case *util.AddressScriptHash:
var result [addrKeySize]byte
result[0] = addrKeyTypeScriptHash
copy(result[1:], addr.Hash160()[:])
return result, nil
}
return [addrKeySize]byte{}, errUnsupportedAddressType
}
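// Illustrative only: for a pay-to-pubkey-hash address the resulting key is
// the byte 0x00 (addrKeyTypePubKeyHash) followed by the 20-byte hash160 of
// the address, while a pay-to-script-hash address yields 0x01
// (addrKeyTypeScriptHash) followed by its hash160.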
// AddrIndex implements a transaction by address index. That is to say, it
// supports querying all transactions that reference a given address because
// they are either crediting or debiting the address. The returned transactions
// are ordered according to their order of appearance in the blockDAG. In
// other words, first by block height and then by offset inside the block.
//
// In addition, support is provided for a memory-only index of unconfirmed
// transactions such as those which are kept in the memory pool before inclusion
// in a block.
type AddrIndex struct {
// The following fields are set when the instance is created and can't
// be changed afterwards, so there is no need to protect them with a
// separate mutex.
db database.DB
dagParams *dagconfig.Params
// The following fields are used to quickly link transactions and
// addresses that have not been included into a block yet when an
// address index is being maintained. They are protected by the
// unconfirmedLock field.
//
// The txnsByAddr field is used to keep an index of all transactions
// which either create an output to a given address or spend from a
// previous output to it keyed by the address.
//
// The addrsByTx field is essentially the reverse and is used to
// keep an index of all addresses which a given transaction involves.
// This allows fairly efficient updates when transactions are removed
// once they are included into a block.
unconfirmedLock sync.RWMutex
txnsByAddr map[[addrKeySize]byte]map[daghash.TxID]*util.Tx
addrsByTx map[daghash.TxID]map[[addrKeySize]byte]struct{}
}
// Ensure the AddrIndex type implements the Indexer interface.
var _ Indexer = (*AddrIndex)(nil)
// Ensure the AddrIndex type implements the NeedsInputser interface.
var _ NeedsInputser = (*AddrIndex)(nil)
// NeedsInputs signals that the index requires the referenced inputs in order
// to properly create the index.
//
// This implements the NeedsInputser interface.
func (idx *AddrIndex) NeedsInputs() bool {
return true
}
// Init is only provided to satisfy the Indexer interface as there is nothing to
// initialize for this index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Init(db database.DB, _ *blockdag.BlockDAG) error {
idx.db = db
return nil
}
// Key returns the database key to use for the index as a byte slice.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Key() []byte {
return addrIndexKey
}
// Name returns the human-readable name of the index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Name() string {
return addrIndexName
}
// Create is invoked when the indexer manager determines the index needs
// to be created for the first time. It creates the bucket for the address
// index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Create(dbTx database.Tx) error {
_, err := dbTx.Metadata().CreateBucket(addrIndexKey)
return err
}
// writeIndexData represents the address index data to be written for one block.
// It consists of the address mapped to an ordered list of the transactions
// that involve the address in the block. It is ordered so the transactions can be
// stored in the order they appear in the block.
type writeIndexData map[[addrKeySize]byte][]int
// indexScriptPubKey extracts the standard address, if any, from the passed
// public key script and maps it to the associated transaction using the
// passed map.
func (idx *AddrIndex) indexScriptPubKey(data writeIndexData, scriptPubKey []byte, txIdx int) {
// Nothing to index if the script is non-standard or otherwise doesn't
// contain any addresses.
_, addr, err := txscript.ExtractScriptPubKeyAddress(scriptPubKey,
idx.dagParams)
if err != nil || addr == nil {
return
}
addrKey, err := addrToKey(addr)
if err != nil {
// Ignore unsupported address types.
return
}
// Avoid inserting the transaction more than once. Since the
// transactions are indexed serially any duplicates will be
// indexed in a row, so checking the most recent entry for the
// address is enough to detect duplicates.
indexedTxns := data[addrKey]
numTxns := len(indexedTxns)
if numTxns > 0 && indexedTxns[numTxns-1] == txIdx {
return
}
indexedTxns = append(indexedTxns, txIdx)
data[addrKey] = indexedTxns
}
// indexBlock extracts all of the standard addresses from all of the transactions
// in the passed block and maps each of them to the associated transaction using
// the passed map.
func (idx *AddrIndex) indexBlock(data writeIndexData, block *util.Block, dag *blockdag.BlockDAG) {
for txIdx, tx := range block.Transactions() {
// Coinbases do not reference any inputs. Since the block is
// required to have already gone through full validation, it has
// already been proven that the first transaction in the block is
// a coinbase.
if txIdx > util.CoinbaseTransactionIndex {
for _, txIn := range tx.MsgTx().TxIn {
// The UTXO should always have the input since
// the index contract requires it, however, be
// safe and simply ignore any missing entries.
entry, ok := dag.GetUTXOEntry(txIn.PreviousOutpoint)
if !ok {
continue
}
idx.indexScriptPubKey(data, entry.ScriptPubKey(), txIdx)
}
}
for _, txOut := range tx.MsgTx().TxOut {
idx.indexScriptPubKey(data, txOut.ScriptPubKey, txIdx)
}
}
}
// ConnectBlock is invoked by the index manager when a new block has been
// connected to the DAG. This indexer adds a mapping for each address
// the transactions in the block involve.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
_ blockdag.MultiBlockTxsAcceptanceData, _ blockdag.MultiBlockTxsAcceptanceData) error {
// The offset and length of the transactions within the serialized
// block.
txLocs, err := block.TxLoc()
if err != nil {
return err
}
// Build all of the address to transaction mappings in a local map.
addrsToTxns := make(writeIndexData)
idx.indexBlock(addrsToTxns, block, dag)
// Add all of the index entries for each address.
addrIdxBucket := dbTx.Metadata().Bucket(addrIndexKey)
for addrKey, txIdxs := range addrsToTxns {
for _, txIdx := range txIdxs {
err := dbPutAddrIndexEntry(addrIdxBucket, addrKey,
blockID, txLocs[txIdx])
if err != nil {
return err
}
}
}
return nil
}
// TxRegionsForAddress returns a slice of block regions which identify each
// transaction that involves the passed address according to the specified
// number to skip, number requested, and whether or not the results should be
// reversed. It also returns the number actually skipped since it could be less
// in the case where there are not enough entries.
//
// NOTE: These results only include transactions confirmed in blocks. See the
// UnconfirmedTxnsForAddress method for obtaining unconfirmed transactions
// that involve a given address.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) TxRegionsForAddress(dbTx database.Tx, addr util.Address, numToSkip, numRequested uint32, reverse bool) ([]database.BlockRegion, uint32, error) {
addrKey, err := addrToKey(addr)
if err != nil {
return nil, 0, err
}
var regions []database.BlockRegion
var skipped uint32
err = idx.db.View(func(dbTx database.Tx) error {
// Create closure to lookup the block hash given the ID using
// the database transaction.
fetchBlockHash := func(id []byte) (*daghash.Hash, error) {
// Deserialize and populate the result.
return blockdag.DBFetchBlockHashBySerializedID(dbTx, id)
}
var err error
addrIdxBucket := dbTx.Metadata().Bucket(addrIndexKey)
regions, skipped, err = dbFetchAddrIndexEntries(addrIdxBucket,
addrKey, numToSkip, numRequested, reverse,
fetchBlockHash)
return err
})
return regions, skipped, err
}
// indexUnconfirmedAddresses modifies the unconfirmed (memory-only) address
// index to include mappings for the addresses encoded by the passed public key
// script to the transaction.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) indexUnconfirmedAddresses(scriptPubKey []byte, tx *util.Tx) {
// The error is ignored here since the only reason it can fail is if the
// script fails to parse and it was already validated before being
// admitted to the mempool.
_, addr, _ := txscript.ExtractScriptPubKeyAddress(scriptPubKey,
idx.dagParams)
// Ignore unsupported address types.
addrKey, err := addrToKey(addr)
if err != nil {
return
}
// Add a mapping from the address to the transaction.
idx.unconfirmedLock.Lock()
addrIndexEntry := idx.txnsByAddr[addrKey]
if addrIndexEntry == nil {
addrIndexEntry = make(map[daghash.TxID]*util.Tx)
idx.txnsByAddr[addrKey] = addrIndexEntry
}
addrIndexEntry[*tx.ID()] = tx
// Add a mapping from the transaction to the address.
addrsByTxEntry := idx.addrsByTx[*tx.ID()]
if addrsByTxEntry == nil {
addrsByTxEntry = make(map[[addrKeySize]byte]struct{})
idx.addrsByTx[*tx.ID()] = addrsByTxEntry
}
addrsByTxEntry[addrKey] = struct{}{}
idx.unconfirmedLock.Unlock()
}
// AddUnconfirmedTx adds all addresses related to the transaction to the
// unconfirmed (memory-only) address index.
//
// NOTE: This transaction MUST have already been validated by the memory pool
// before calling this function with it and have all of the inputs available in
// the provided utxo view. Failure to do so could result in some or all
// addresses not being indexed.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) AddUnconfirmedTx(tx *util.Tx, utxoSet blockdag.UTXOSet) {
// Index addresses of all referenced previous transaction outputs.
//
// The existence checks are elided since this is only called after the
// transaction has already been validated and thus all inputs are
// already known to exist.
for _, txIn := range tx.MsgTx().TxIn {
entry, ok := utxoSet.Get(txIn.PreviousOutpoint)
if !ok {
// Ignore missing entries. This should never happen
// in practice since the function comments specifically
// call out all inputs must be available.
continue
}
idx.indexUnconfirmedAddresses(entry.ScriptPubKey(), tx)
}
// Index addresses of all created outputs.
for _, txOut := range tx.MsgTx().TxOut {
idx.indexUnconfirmedAddresses(txOut.ScriptPubKey, tx)
}
}
// RemoveUnconfirmedTx removes the passed transaction from the unconfirmed
// (memory-only) address index.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) RemoveUnconfirmedTx(txID *daghash.TxID) {
idx.unconfirmedLock.Lock()
defer idx.unconfirmedLock.Unlock()
// Remove all address references to the transaction from the address
// index and remove the entry for the address altogether if it no longer
// references any transactions.
for addrKey := range idx.addrsByTx[*txID] {
delete(idx.txnsByAddr[addrKey], *txID)
if len(idx.txnsByAddr[addrKey]) == 0 {
delete(idx.txnsByAddr, addrKey)
}
}
// Remove the entry from the transaction to address lookup map as well.
delete(idx.addrsByTx, *txID)
}
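// Hypothetical usage sketch, not from the original file: a mempool would pair
// these calls as
//
//	idx.AddUnconfirmedTx(tx, mempoolUTXOSet) // after the tx passes validation
//	idx.RemoveUnconfirmedTx(tx.ID())         // once the tx is mined or evicted
//
// where mempoolUTXOSet is an assumed blockdag.UTXOSet holding the referenced
// inputs.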
// UnconfirmedTxnsForAddress returns all transactions currently in the
// unconfirmed (memory-only) address index that involve the passed address.
// Unsupported address types are ignored and will result in no results.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) UnconfirmedTxnsForAddress(addr util.Address) []*util.Tx {
// Ignore unsupported address types.
addrKey, err := addrToKey(addr)
if err != nil {
return nil
}
// Protect concurrent access.
idx.unconfirmedLock.RLock()
defer idx.unconfirmedLock.RUnlock()
// Return a new slice with the results if there are any. This ensures
// safe concurrency.
if txns, exists := idx.txnsByAddr[addrKey]; exists {
addressTxns := make([]*util.Tx, 0, len(txns))
for _, tx := range txns {
addressTxns = append(addressTxns, tx)
}
return addressTxns
}
return nil
}
// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
return errors.Errorf("addrindex was turned off for %d blocks and can't be recovered."+
" To resume working drop the addrindex with --dropaddrindex", lastKnownBlockID-currentBlockID)
}
// NewAddrIndex returns a new instance of an indexer that is used to create a
// mapping of all addresses in the blockDAG to the respective transactions
// that involve them.
//
// It implements the Indexer interface which plugs into the IndexManager that in
// turn is used by the blockDAG package. This allows the index to be
// seamlessly maintained along with the DAG.
func NewAddrIndex(dagParams *dagconfig.Params) *AddrIndex {
return &AddrIndex{
dagParams: dagParams,
txnsByAddr: make(map[[addrKeySize]byte]map[daghash.TxID]*util.Tx),
addrsByTx: make(map[daghash.TxID]map[[addrKeySize]byte]struct{}),
}
}
// DropAddrIndex drops the address index from the provided database if it
// exists.
func DropAddrIndex(db database.DB, interrupt <-chan struct{}) error {
return dropIndex(db, addrIndexKey, addrIndexName, interrupt)
}


@@ -1,277 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package indexers
import (
"bytes"
"fmt"
"github.com/pkg/errors"
"testing"
"github.com/kaspanet/kaspad/wire"
)
// addrIndexBucket provides a mock address index database bucket by implementing
// the internalBucket interface.
type addrIndexBucket struct {
levels map[[levelKeySize]byte][]byte
}
// Clone returns a deep copy of the mock address index bucket.
func (b *addrIndexBucket) Clone() *addrIndexBucket {
levels := make(map[[levelKeySize]byte][]byte)
for k, v := range b.levels {
vCopy := make([]byte, len(v))
copy(vCopy, v)
levels[k] = vCopy
}
return &addrIndexBucket{levels: levels}
}
// Get returns the value associated with the key from the mock address index
// bucket.
//
// This is part of the internalBucket interface.
func (b *addrIndexBucket) Get(key []byte) []byte {
var levelKey [levelKeySize]byte
copy(levelKey[:], key)
return b.levels[levelKey]
}
// Put stores the provided key/value pair to the mock address index bucket.
//
// This is part of the internalBucket interface.
func (b *addrIndexBucket) Put(key []byte, value []byte) error {
var levelKey [levelKeySize]byte
copy(levelKey[:], key)
b.levels[levelKey] = value
return nil
}
// Delete removes the provided key from the mock address index bucket.
//
// This is part of the internalBucket interface.
func (b *addrIndexBucket) Delete(key []byte) error {
var levelKey [levelKeySize]byte
copy(levelKey[:], key)
delete(b.levels, levelKey)
return nil
}
// printLevels returns a string with a visual representation of the provided
// address key taking into account the max size of each level. It is useful
// when creating and debugging test cases.
func (b *addrIndexBucket) printLevels(addrKey [addrKeySize]byte) string {
highestLevel := uint8(0)
for k := range b.levels {
if !bytes.Equal(k[:levelOffset], addrKey[:]) {
continue
}
level := uint8(k[levelOffset])
if level > highestLevel {
highestLevel = level
}
}
var levelBuf bytes.Buffer
_, _ = levelBuf.WriteString("\n")
maxEntries := level0MaxEntries
for level := uint8(0); level <= highestLevel; level++ {
data := b.levels[keyForLevel(addrKey, level)]
numEntries := len(data) / txEntrySize
for i := 0; i < numEntries; i++ {
start := i * txEntrySize
num := byteOrder.Uint32(data[start:])
_, _ = levelBuf.WriteString(fmt.Sprintf("%02d ", num))
}
for i := numEntries; i < maxEntries; i++ {
_, _ = levelBuf.WriteString("_ ")
}
_, _ = levelBuf.WriteString("\n")
maxEntries *= 2
}
return levelBuf.String()
}
// sanityCheck ensures that all data stored in the bucket for the given address
// adheres to the level-based rules described by the address index
// documentation.
func (b *addrIndexBucket) sanityCheck(addrKey [addrKeySize]byte, expectedTotal int) error {
// Find the highest level for the key.
highestLevel := uint8(0)
for k := range b.levels {
if !bytes.Equal(k[:levelOffset], addrKey[:]) {
continue
}
level := uint8(k[levelOffset])
if level > highestLevel {
highestLevel = level
}
}
// Ensure the expected total number of entries are present and that
// all levels adhere to the rules described in the address index
// documentation.
var totalEntries int
maxEntries := level0MaxEntries
for level := uint8(0); level <= highestLevel; level++ {
// Level 0 can't have more entries than the max allowed, and
// it can't be empty when the levels after it have data. All
// other levels must either be half full or full.
data := b.levels[keyForLevel(addrKey, level)]
numEntries := len(data) / txEntrySize
totalEntries += numEntries
if level == 0 {
if (highestLevel != 0 && numEntries == 0) ||
numEntries > maxEntries {
return errors.Errorf("level %d has %d entries",
level, numEntries)
}
} else if numEntries != maxEntries && numEntries != maxEntries/2 {
return errors.Errorf("level %d has %d entries", level,
numEntries)
}
maxEntries *= 2
}
if totalEntries != expectedTotal {
return errors.Errorf("expected %d entries - got %d", expectedTotal,
totalEntries)
}
// Ensure all of the numbers are in order starting from the highest
// level moving to the lowest level.
expectedNum := uint32(0)
for level := highestLevel + 1; level > 0; level-- {
data := b.levels[keyForLevel(addrKey, level-1)]
numEntries := len(data) / txEntrySize
for i := 0; i < numEntries; i++ {
start := i * txEntrySize
num := byteOrder.Uint32(data[start:])
if num != expectedNum {
return errors.Errorf("level %d offset %d does "+
"not contain the expected number of "+
"%d - got %d", level, i, num,
expectedNum)
}
expectedNum++
}
}
return nil
}
// TestAddrIndexLevels ensures that adding and deleting entries to the address
// index creates multiple levels as described by the address index
// documentation.
func TestAddrIndexLevels(t *testing.T) {
t.Parallel()
tests := []struct {
name string
key [addrKeySize]byte
numInsert int
printLevels bool // Set to help debug a specific test.
}{
{
name: "level 0 not full",
numInsert: level0MaxEntries - 1,
},
{
name: "level 1 half",
numInsert: level0MaxEntries + 1,
},
{
name: "level 1 full",
numInsert: level0MaxEntries*2 + 1,
},
{
name: "level 2 half, level 1 half",
numInsert: level0MaxEntries*3 + 1,
},
{
name: "level 2 half, level 1 full",
numInsert: level0MaxEntries*4 + 1,
},
{
name: "level 2 full, level 1 half",
numInsert: level0MaxEntries*5 + 1,
},
{
name: "level 2 full, level 1 full",
numInsert: level0MaxEntries*6 + 1,
},
{
name: "level 3 half, level 2 half, level 1 half",
numInsert: level0MaxEntries*7 + 1,
},
{
name: "level 3 full, level 2 half, level 1 full",
numInsert: level0MaxEntries*12 + 1,
},
}
nextTest:
for testNum, test := range tests {
// Insert entries in order.
populatedBucket := &addrIndexBucket{
levels: make(map[[levelKeySize]byte][]byte),
}
for i := 0; i < test.numInsert; i++ {
txLoc := wire.TxLoc{TxStart: i * 2}
err := dbPutAddrIndexEntry(populatedBucket, test.key,
uint64(i), txLoc)
if err != nil {
t.Errorf("dbPutAddrIndexEntry #%d (%s) - "+
"unexpected error: %v", testNum,
test.name, err)
continue nextTest
}
}
if test.printLevels {
t.Log(populatedBucket.printLevels(test.key))
}
// Delete entries from the populated bucket until all entries
// have been deleted. The bucket is reset to the fully
// populated bucket on each iteration so every combination is
// tested. Notice the upper limit purposely exceeds the number
// of entries to ensure attempting to delete more entries than
// there are works correctly.
for numDelete := 0; numDelete <= test.numInsert+1; numDelete++ {
// Clone populated bucket to run each delete against.
bucket := populatedBucket.Clone()
// Remove the number of entries for this iteration.
err := dbRemoveAddrIndexEntries(bucket, test.key,
numDelete)
if err != nil {
if numDelete <= test.numInsert {
t.Errorf("dbRemoveAddrIndexEntries (%s) "+
" delete %d - unexpected error: "+
"%v", test.name, numDelete, err)
continue nextTest
}
}
if test.printLevels {
t.Log(bucket.printLevels(test.key))
}
// Sanity check the levels to ensure they adhere to all
// rules.
numExpected := test.numInsert
if numDelete <= test.numInsert {
numExpected -= numDelete
}
err = bucket.sanityCheck(test.key, numExpected)
if err != nil {
t.Errorf("sanity check fail (%s) delete %d: %v",
test.name, numDelete, err)
continue nextTest
}
}
}
}


@@ -85,8 +85,8 @@ func (e errDeserialize) Error() string {
// isDeserializeErr returns whether or not the passed error is an errDeserialize
// error.
func isDeserializeErr(err error) bool {
_, ok := err.(errDeserialize)
return ok
var deserializeErr errDeserialize
return errors.As(err, &deserializeErr)
}
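// Illustrative note (assumption, not from the diff): unlike the old type
// assertion, errors.As also matches wrapped errors, e.g.
//
//	err := errors.Wrap(errDeserialize("bad data"), "context")
//	isDeserializeErr(err) // true with errors.As, false with a bare assertion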
// internalBucket is an abstraction over a database bucket. It is used to make


@@ -6,8 +6,6 @@ package indexers
import (
"github.com/kaspanet/kaspad/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log, _ = logger.Get(logger.SubsystemTags.INDX)
var spawn = panics.GoroutineWrapperFunc(log)


@@ -363,6 +363,9 @@ func dropIndex(db database.DB, idxKey []byte, idxName string, interrupt <-chan s
}
return bucket.DeleteBucket(bucketName[len(bucketName)-1])
})
if err != nil {
return err
}
}
// Remove the index tip, index bucket, and in-progress drop flag now


@@ -1,438 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package indexers
import (
"fmt"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
)
const (
// txIndexName is the human-readable name for the index.
txIndexName = "transaction index"
includingBlocksIndexKeyEntrySize = 8 // 4 bytes for offset + 4 bytes for transaction length
)
var (
includingBlocksIndexKey = []byte("includingblocksidx")
acceptingBlocksIndexKey = []byte("acceptingblocksidx")
)
// txsAcceptedByVirtual is the in-memory index of txIDs that were accepted
// by the current virtual
var txsAcceptedByVirtual map[daghash.TxID]bool
// -----------------------------------------------------------------------------
// The transaction index consists of an entry for every transaction in the DAG.
//
// There are two buckets used in total. The first bucket maps the hash of
// each transaction to its location in each block it's included in. The second
// bucket contains, for every block from whose viewpoint the transaction has
// been accepted (i.e. the transaction is found in its blue set without double
// spends), the blue block (or the block itself) that included the transaction.
//
// NOTE: Although it is technically possible for multiple transactions to have
// the same hash as long as the previous transaction with the same hash is fully
// spent, this code only stores the most recent one because doing otherwise
// would add a non-trivial amount of space and overhead for something that will
// realistically never happen per the probability and even if it did, the old
// one must be fully spent and so the most likely transaction a caller would
// want for a given hash is the most recent one anyways.
//
// The including blocks index contains a sub bucket for each transaction hash (32 bytes each), whose serialized format is:
//
// <block id> = <start offset><tx length>
//
// Field Type Size
// block id uint64 8 bytes
// start offset uint32 4 bytes
// tx length uint32 4 bytes
// -----
// Total: 16 bytes
//
// The accepting blocks index contains a sub bucket for each transaction hash (32 bytes each), whose serialized format is:
//
// <accepting block id> = <including block id>
//
// Field Type Size
// accepting block id uint64 8 bytes
// including block id uint64 8 bytes
// -----
// Total: 16 bytes
//
// -----------------------------------------------------------------------------
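// Illustrative only: for a transaction that starts at byte offset 100 and is
// 250 bytes long in the block whose id is 7, the including-blocks sub bucket
// for its ID maps SerializeBlockID(7) to the 8-byte value <100><250>, two
// uint32s written in the package's byteOrder.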
func putIncludingBlocksEntry(target []byte, txLoc wire.TxLoc) {
byteOrder.PutUint32(target, uint32(txLoc.TxStart))
byteOrder.PutUint32(target[4:], uint32(txLoc.TxLen))
}
func dbPutIncludingBlocksEntry(dbTx database.Tx, txID *daghash.TxID, blockID uint64, serializedData []byte) error {
bucket, err := dbTx.Metadata().Bucket(includingBlocksIndexKey).CreateBucketIfNotExists(txID[:])
if err != nil {
return err
}
return bucket.Put(blockdag.SerializeBlockID(blockID), serializedData)
}
func dbPutAcceptingBlocksEntry(dbTx database.Tx, txID *daghash.TxID, blockID uint64, serializedData []byte) error {
bucket, err := dbTx.Metadata().Bucket(acceptingBlocksIndexKey).CreateBucketIfNotExists(txID[:])
if err != nil {
return err
}
return bucket.Put(blockdag.SerializeBlockID(blockID), serializedData)
}
// dbFetchFirstTxRegion uses an existing database transaction to fetch the block
// region for the provided transaction hash from the transaction index. When
// there is no entry for the provided hash, nil will be returned for both
// the region and the error.
//
// Note: Because the transaction can be found in multiple blocks, this function
// arbitrarily returns the first block region that is stored in the txindex.
func dbFetchFirstTxRegion(dbTx database.Tx, txID *daghash.TxID) (*database.BlockRegion, error) {
// Load the record from the database and return now if it doesn't exist.
txBucket := dbTx.Metadata().Bucket(includingBlocksIndexKey).Bucket(txID[:])
if txBucket == nil {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("No block region "+
"was found for %s", txID),
}
}
cursor := txBucket.Cursor()
if ok := cursor.First(); !ok {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("No block region "+
"was found for %s", txID),
}
}
serializedBlockID := cursor.Key()
serializedData := cursor.Value()
if len(serializedData) == 0 {
return nil, nil
}
// Ensure the serialized data has enough bytes to properly deserialize.
if len(serializedData) < includingBlocksIndexKeyEntrySize {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("corrupt transaction index "+
"entry for %s", txID),
}
}
// Load the block hash associated with the block ID.
hash, err := blockdag.DBFetchBlockHashBySerializedID(dbTx, serializedBlockID)
if err != nil {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("corrupt transaction index "+
"entry for %s: %s", txID, err),
}
}
// Deserialize the final entry.
region := database.BlockRegion{Hash: &daghash.Hash{}}
copy(region.Hash[:], hash[:])
region.Offset = byteOrder.Uint32(serializedData[:4])
region.Len = byteOrder.Uint32(serializedData[4:])
return &region, nil
}
// dbAddTxIndexEntries uses an existing database transaction to add a
// transaction index entry for every transaction in the passed block.
func dbAddTxIndexEntries(dbTx database.Tx, block *util.Block, blockID uint64, multiBlockTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
// The offset and length of the transactions within the serialized
// block.
txLocs, err := block.TxLoc()
if err != nil {
return err
}
// As an optimization, allocate a single slice big enough to hold all
// of the serialized transaction index entries for the block and
// serialize them directly into the slice. Then, pass the appropriate
// subslice to the database to be written. This approach significantly
// cuts down on the number of required allocations.
includingBlocksOffset := 0
serializedIncludingBlocksValues := make([]byte, len(block.Transactions())*includingBlocksIndexKeyEntrySize)
for i, tx := range block.Transactions() {
putIncludingBlocksEntry(serializedIncludingBlocksValues[includingBlocksOffset:], txLocs[i])
endOffset := includingBlocksOffset + includingBlocksIndexKeyEntrySize
err := dbPutIncludingBlocksEntry(dbTx, tx.ID(), blockID,
serializedIncludingBlocksValues[includingBlocksOffset:endOffset:endOffset])
if err != nil {
return err
}
includingBlocksOffset += includingBlocksIndexKeyEntrySize
}
for _, blockTxsAcceptanceData := range multiBlockTxsAcceptanceData {
var includingBlockID uint64
if blockTxsAcceptanceData.BlockHash.IsEqual(block.Hash()) {
includingBlockID = blockID
} else {
includingBlockID, err = blockdag.DBFetchBlockIDByHash(dbTx, &blockTxsAcceptanceData.BlockHash)
if err != nil {
return err
}
}
serializedIncludingBlockID := blockdag.SerializeBlockID(includingBlockID)
for _, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
if !txAcceptanceData.IsAccepted {
continue
}
err = dbPutAcceptingBlocksEntry(dbTx, txAcceptanceData.Tx.ID(), blockID, serializedIncludingBlockID)
if err != nil {
return err
}
}
}
return nil
}
func updateTxsAcceptedByVirtual(virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
// Initialize a new txsAcceptedByVirtual
entries := 0
for _, blockTxsAcceptanceData := range virtualTxsAcceptanceData {
entries += len(blockTxsAcceptanceData.TxAcceptanceData)
}
txsAcceptedByVirtual = make(map[daghash.TxID]bool, entries)
// Copy virtualTxsAcceptanceData to txsAcceptedByVirtual
for _, blockTxsAcceptanceData := range virtualTxsAcceptanceData {
for _, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
txsAcceptedByVirtual[*txAcceptanceData.Tx.ID()] = true
}
}
return nil
}
// TxIndex implements a transaction by hash index. That is to say, it supports
// querying all transactions by their hash.
type TxIndex struct {
db database.DB
}
// Ensure the TxIndex type implements the Indexer interface.
var _ Indexer = (*TxIndex)(nil)
// Init initializes the hash-based transaction index. In particular, it
// builds the in-memory index of transactions accepted by the current virtual
// for later use when resolving accepting blocks.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Init(db database.DB, dag *blockdag.BlockDAG) error {
idx.db = db
// Initialize the txsAcceptedByVirtual index
virtualTxsAcceptanceData, err := dag.TxsAcceptedByVirtual()
if err != nil {
return err
}
err = updateTxsAcceptedByVirtual(virtualTxsAcceptanceData)
if err != nil {
return err
}
return nil
}
// Key returns the database key to use for the index as a byte slice.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Key() []byte {
return includingBlocksIndexKey
}
// Name returns the human-readable name of the index.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Name() string {
return txIndexName
}
// Create is invoked when the indexer manager determines the index needs
// to be created for the first time. It creates the buckets for the hash-based
// transaction index and the internal block ID indexes.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Create(dbTx database.Tx) error {
meta := dbTx.Metadata()
if _, err := meta.CreateBucket(includingBlocksIndexKey); err != nil {
return err
}
_, err := meta.CreateBucket(acceptingBlocksIndexKey)
return err
}
// ConnectBlock is invoked by the index manager when a new block has been
// connected to the DAG. This indexer adds a hash-to-transaction mapping
// for every transaction in the passed block.
//
// This is part of the Indexer interface.
func (idx *TxIndex) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
acceptedTxsData blockdag.MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
if err := dbAddTxIndexEntries(dbTx, block, blockID, acceptedTxsData); err != nil {
return err
}
err := updateTxsAcceptedByVirtual(virtualTxsAcceptanceData)
if err != nil {
return err
}
return nil
}
// TxFirstBlockRegion returns the first block region for the provided transaction hash
// from the transaction index. The block region can in turn be used to load the
// raw transaction bytes. When there is no entry for the provided hash, nil
// will be returned for both the entry and the error.
//
// This function is safe for concurrent access.
func (idx *TxIndex) TxFirstBlockRegion(txID *daghash.TxID) (*database.BlockRegion, error) {
var region *database.BlockRegion
err := idx.db.View(func(dbTx database.Tx) error {
var err error
region, err = dbFetchFirstTxRegion(dbTx, txID)
return err
})
return region, err
}
// TxBlocks returns the hashes of the blocks where the transaction exists
func (idx *TxIndex) TxBlocks(txHash *daghash.Hash) ([]*daghash.Hash, error) {
blockHashes := make([]*daghash.Hash, 0)
err := idx.db.View(func(dbTx database.Tx) error {
var err error
blockHashes, err = dbFetchTxBlocks(dbTx, txHash)
if err != nil {
return err
}
return nil
})
return blockHashes, err
}
func dbFetchTxBlocks(dbTx database.Tx, txHash *daghash.Hash) ([]*daghash.Hash, error) {
blockHashes := make([]*daghash.Hash, 0)
bucket := dbTx.Metadata().Bucket(includingBlocksIndexKey).Bucket(txHash[:])
if bucket == nil {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("No including blocks "+
"were found for %s", txHash),
}
}
err := bucket.ForEach(func(serializedBlockID, _ []byte) error {
blockHash, err := blockdag.DBFetchBlockHashBySerializedID(dbTx, serializedBlockID)
if err != nil {
return err
}
blockHashes = append(blockHashes, blockHash)
return nil
})
if err != nil {
return nil, err
}
return blockHashes, nil
}
// BlockThatAcceptedTx returns the hash of the block where the transaction got accepted (from the virtual block point of view)
func (idx *TxIndex) BlockThatAcceptedTx(dag *blockdag.BlockDAG, txID *daghash.TxID) (*daghash.Hash, error) {
var acceptingBlock *daghash.Hash
err := idx.db.View(func(dbTx database.Tx) error {
var err error
acceptingBlock, err = dbFetchTxAcceptingBlock(dbTx, txID, dag)
return err
})
return acceptingBlock, err
}
func dbFetchTxAcceptingBlock(dbTx database.Tx, txID *daghash.TxID, dag *blockdag.BlockDAG) (*daghash.Hash, error) {
// If the transaction was accepted by the current virtual,
// return the zeroHash immediately
if _, ok := txsAcceptedByVirtual[*txID]; ok {
return &daghash.ZeroHash, nil
}
bucket := dbTx.Metadata().Bucket(acceptingBlocksIndexKey).Bucket(txID[:])
if bucket == nil {
return nil, nil
}
cursor := bucket.Cursor()
if !cursor.First() {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("Accepting blocks bucket is "+
"empty for %s", txID),
}
}
for ; cursor.Key() != nil; cursor.Next() {
blockHash, err := blockdag.DBFetchBlockHashBySerializedID(dbTx, cursor.Key())
if err != nil {
return nil, err
}
isBlockInSelectedParentChain, err := dag.IsInSelectedParentChain(blockHash)
if err != nil {
return nil, err
}
if isBlockInSelectedParentChain {
return blockHash, nil
}
}
return nil, nil
}
// NewTxIndex returns a new instance of an indexer that is used to create a
// mapping of the hashes of all transactions in the blockDAG to the respective
// block, location within the block, and size of the transaction.
//
// It implements the Indexer interface which plugs into the IndexManager that in
// turn is used by the blockdag package. This allows the index to be
// seamlessly maintained along with the DAG.
func NewTxIndex() *TxIndex {
return &TxIndex{}
}
// DropTxIndex drops the transaction index from the provided database if it
// exists. Since the address index relies on it, the address index will also be
// dropped when it exists.
func DropTxIndex(db database.DB, interrupt <-chan struct{}) error {
err := dropIndex(db, addrIndexKey, addrIndexName, interrupt)
if err != nil {
return err
}
err = dropIndex(db, includingBlocksIndexKey, txIndexName, interrupt)
if err != nil {
return err
}
return dropIndex(db, acceptingBlocksIndexKey, txIndexName, interrupt)
}
// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
return errors.Errorf("txindex was turned off for %d blocks and can't be recovered."+
" To resume working drop the txindex with --droptxindex", lastKnownBlockID-currentBlockID)
}


@@ -1,144 +0,0 @@
package indexers
import (
"bytes"
"reflect"
"testing"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/mining"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
func createTransaction(t *testing.T, value uint64, originTx *wire.MsgTx, outputIndex uint32) *wire.MsgTx {
signatureScript, err := txscript.PayToScriptHashSignatureScript(blockdag.OpTrueScript, nil)
if err != nil {
t.Fatalf("Error creating signature script: %s", err)
}
txIn := &wire.TxIn{
PreviousOutpoint: wire.Outpoint{
TxID: *originTx.TxID(),
Index: outputIndex,
},
Sequence: wire.MaxTxInSequenceNum,
SignatureScript: signatureScript,
}
txOut := wire.NewTxOut(value, blockdag.OpTrueScript)
tx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut})
return tx
}
func TestTxIndexConnectBlock(t *testing.T) {
blocks := make(map[daghash.Hash]*util.Block)
txIndex := NewTxIndex()
indexManager := NewManager([]Indexer{txIndex})
params := dagconfig.SimnetParams
params.BlockCoinbaseMaturity = 0
params.K = 1
config := blockdag.Config{
IndexManager: indexManager,
DAGParams: &params,
}
dag, teardown, err := blockdag.DAGSetup("TestTxIndexConnectBlock", config)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: Failed to setup DAG instance: %v", err)
}
if teardown != nil {
defer teardown()
}
prepareAndProcessBlock := func(parentHashes []*daghash.Hash, transactions []*wire.MsgTx, blockName string) *wire.MsgBlock {
block, err := mining.PrepareBlockForTest(dag, &params, parentHashes, transactions, false)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: block %v got unexpected error from PrepareBlockForTest: %v", blockName, err)
}
utilBlock := util.NewBlock(block)
blocks[*block.BlockHash()] = utilBlock
isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, blockdag.BFNoPoWCheck)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: dag.ProcessBlock got unexpected error for block %v: %v", blockName, err)
}
if isDelayed {
t.Fatalf("TestTxIndexConnectBlock: block %s "+
"is too far in the future", blockName)
}
if isOrphan {
t.Fatalf("TestTxIndexConnectBlock: block %v was unexpectedly orphan", blockName)
}
return block
}
block1 := prepareAndProcessBlock([]*daghash.Hash{params.GenesisHash}, nil, "1")
block2Tx := createTransaction(t, block1.Transactions[0].TxOut[0].Value, block1.Transactions[0], 0)
block2 := prepareAndProcessBlock([]*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{block2Tx}, "2")
block3Tx := createTransaction(t, block2.Transactions[0].TxOut[0].Value, block2.Transactions[0], 0)
block3 := prepareAndProcessBlock([]*daghash.Hash{block2.BlockHash()}, []*wire.MsgTx{block3Tx}, "3")
block2TxID := block2Tx.TxID()
block2TxNewAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, block2TxID)
if err != nil {
t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
}
block3Hash := block3.BlockHash()
if !block2TxNewAcceptedBlock.IsEqual(block3Hash) {
t.Errorf("TestTxIndexConnectBlock: block2Tx should've "+
"been accepted in block %v but instead got accepted in block %v", block3Hash, block2TxNewAcceptedBlock)
}
block3TxID := block3Tx.TxID()
block3TxNewAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, block3TxID)
if err != nil {
t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
}
if !block3TxNewAcceptedBlock.IsEqual(&daghash.ZeroHash) {
t.Errorf("TestTxIndexConnectBlock: block3Tx should've "+
"been accepted by the virtual block but instead got accepted in block %v", block3TxNewAcceptedBlock)
}
block3A := prepareAndProcessBlock([]*daghash.Hash{block2.BlockHash()}, []*wire.MsgTx{block3Tx}, "3A")
block4 := prepareAndProcessBlock([]*daghash.Hash{block3.BlockHash()}, nil, "4")
prepareAndProcessBlock([]*daghash.Hash{block3A.BlockHash(), block4.BlockHash()}, nil, "5")
block2TxAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, block2TxID)
if err != nil {
t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
}
if !block2TxAcceptedBlock.IsEqual(block3Hash) {
t.Errorf("TestTxIndexConnectBlock: block2Tx should've "+
"been accepted in block %v but instead got accepted in block %v", block3Hash, block2TxAcceptedBlock)
}
region, err := txIndex.TxFirstBlockRegion(block3TxID)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: no block region was found for block3Tx")
}
regionBlock, ok := blocks[*region.Hash]
if !ok {
t.Fatalf("TestTxIndexConnectBlock: couldn't find block with hash %v", region.Hash)
}
regionBlockBytes, err := regionBlock.Bytes()
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: Couldn't serialize block to bytes")
}
block3TxInBlock := regionBlockBytes[region.Offset : region.Offset+region.Len]
block3TxBuf := bytes.NewBuffer(make([]byte, 0, block3Tx.SerializeSize()))
err = block3Tx.KaspaEncode(block3TxBuf, 0)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: KaspaEncode: %v", err)
}
blockTxBytes := block3TxBuf.Bytes()
if !reflect.DeepEqual(blockTxBytes, block3TxInBlock) {
t.Errorf("TestTxIndexConnectBlock: the block region that was in the bucket doesn't match block3Tx")
}
}
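The region check at the end is the essential use of TxFirstBlockRegion: the index hands back a (block hash, offset, length) triple so callers can slice a raw transaction straight out of the serialized block. A minimal sketch of that pattern follows; fetchBlockBytes is a hypothetical stand-in for the caller's block storage, and the exact TxID parameter type is an assumption:

// Illustrative only: slice a raw transaction out of its first accepting
// block using the index's block region.
func rawTxFromRegion(txIndex *TxIndex, txID *daghash.TxID,
	fetchBlockBytes func(*daghash.Hash) ([]byte, error)) ([]byte, error) {

	region, err := txIndex.TxFirstBlockRegion(txID)
	if err != nil {
		return nil, err
	}
	blockBytes, err := fetchBlockBytes(region.Hash)
	if err != nil {
		return nil, err
	}
	return blockBytes[region.Offset : region.Offset+region.Len], nil
}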


@@ -1,206 +0,0 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"math"
"sort"
"sync"
"time"
)
const (
// maxAllowedOffsetSecs is the maximum number of seconds in either
// direction that the local clock will be adjusted. When the median time
// of the network is outside of this range, no offset will be applied.
maxAllowedOffsetSecs = 70 * 60 // 1 hour 10 minutes
// similarTimeSecs is the number of seconds in either direction from the
// local clock that is used to determine that it is likely wrong and
// hence to show a warning.
similarTimeSecs = 5 * 60 // 5 minutes
)
var (
// maxMedianTimeEntries is the maximum number of entries allowed in the
// median time data. This is a variable as opposed to a constant so the
// test code can modify it.
maxMedianTimeEntries = 200
)
// MedianTimeSource provides a mechanism to add several time samples which are
// used to determine a median time which is then used as an offset to the local
// clock.
type MedianTimeSource interface {
// AdjustedTime returns the current time adjusted by the median time
// offset as calculated from the time samples added by AddTimeSample.
AdjustedTime() time.Time
// AddTimeSample adds a time sample that is used when determining the
// median time of the added samples.
AddTimeSample(id string, timeVal time.Time)
// Offset returns the number of seconds to adjust the local clock based
// upon the median of the time samples added by AddTimeData.
Offset() time.Duration
}
// int64Sorter implements sort.Interface to allow a slice of 64-bit integers to
// be sorted.
type int64Sorter []int64
// Len returns the number of 64-bit integers in the slice. It is part of the
// sort.Interface implementation.
func (s int64Sorter) Len() int {
return len(s)
}
// Swap swaps the 64-bit integers at the passed indices. It is part of the
// sort.Interface implementation.
func (s int64Sorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
// Less returns whether the 64-bit integer with index i should sort before the
// 64-bit integer with index j. It is part of the sort.Interface
// implementation.
func (s int64Sorter) Less(i, j int) bool {
return s[i] < s[j]
}
// medianTime provides an implementation of the MedianTimeSource interface.
type medianTime struct {
mtx sync.Mutex
knownIDs map[string]struct{}
offsets []int64
offsetSecs int64
invalidTimeChecked bool
}
// Ensure the medianTime type implements the MedianTimeSource interface.
var _ MedianTimeSource = (*medianTime)(nil)
// AdjustedTime returns the current time adjusted by the median time offset as
// calculated from the time samples added by AddTimeSample.
//
// This function is safe for concurrent access and is part of the
// MedianTimeSource interface implementation.
func (m *medianTime) AdjustedTime() time.Time {
m.mtx.Lock()
defer m.mtx.Unlock()
// Limit the adjusted time to 1 second precision.
now := time.Unix(time.Now().Unix(), 0)
return now.Add(time.Duration(m.offsetSecs) * time.Second)
}
// AddTimeSample adds a time sample that is used when determining the median
// time of the added samples.
//
// This function is safe for concurrent access and is part of the
// MedianTimeSource interface implementation.
func (m *medianTime) AddTimeSample(sourceID string, timeVal time.Time) {
m.mtx.Lock()
defer m.mtx.Unlock()
// Don't add time data from the same source.
if _, exists := m.knownIDs[sourceID]; exists {
return
}
m.knownIDs[sourceID] = struct{}{}
// Truncate the provided offset to seconds and append it to the slice
// of offsets while respecting the maximum number of allowed entries by
// replacing the oldest entry with the new entry once the maximum number
// of entries is reached.
now := time.Unix(time.Now().Unix(), 0)
offsetSecs := int64(timeVal.Sub(now).Seconds())
numOffsets := len(m.offsets)
if numOffsets == maxMedianTimeEntries && maxMedianTimeEntries > 0 {
m.offsets = m.offsets[1:]
numOffsets--
}
m.offsets = append(m.offsets, offsetSecs)
numOffsets++
// Sort the offsets so the median can be obtained as needed later.
sortedOffsets := make([]int64, numOffsets)
copy(sortedOffsets, m.offsets)
sort.Sort(int64Sorter(sortedOffsets))
offsetDuration := time.Duration(offsetSecs) * time.Second
log.Debugf("Added time sample of %s (total: %d)", offsetDuration,
numOffsets)
// The median offset is only updated when there are enough offsets and
// the number of offsets is odd so the middle value is the true median.
// Thus, there is nothing to do when those conditions are not met.
if numOffsets < 5 || numOffsets&0x01 != 1 {
return
}
// At this point the number of offsets in the list is odd, so the
// middle value of the sorted offsets is the median.
median := sortedOffsets[numOffsets/2]
// Set the new offset when the median offset is within the allowed
// offset range.
if math.Abs(float64(median)) < maxAllowedOffsetSecs {
m.offsetSecs = median
} else {
// The median offset of all added time data is larger than the
// maximum allowed offset, so don't use an offset. This
// effectively limits how far the local clock can be skewed.
m.offsetSecs = 0
if !m.invalidTimeChecked {
m.invalidTimeChecked = true
// Find if any time samples have a time that is close
// to the local time.
var remoteHasCloseTime bool
for _, offset := range sortedOffsets {
if math.Abs(float64(offset)) < similarTimeSecs {
remoteHasCloseTime = true
break
}
}
// Warn if none of the time samples are close.
if !remoteHasCloseTime {
log.Warnf("Please check your date and time " +
"are correct! kaspad will not work " +
"properly with an invalid time")
}
}
}
medianDuration := time.Duration(m.offsetSecs) * time.Second
log.Debugf("New time offset: %d", medianDuration)
}
// Offset returns the number of seconds to adjust the local clock based upon the
// median of the time samples added by AddTimeData.
//
// This function is safe for concurrent access and is part of the
// MedianTimeSource interface implementation.
func (m *medianTime) Offset() time.Duration {
m.mtx.Lock()
defer m.mtx.Unlock()
return time.Duration(m.offsetSecs) * time.Second
}
// NewMedianTime returns a new instance of a concurrency-safe implementation of
// the MedianTimeSource interface. The returned implementation contains the
// rules necessary for proper time handling in the DAG consensus rules and
// expects the time samples to be added from the timestamp field of the version
// message received from remote peers that successfully connect and negotiate.
func NewMedianTime() MedianTimeSource {
return &medianTime{
knownIDs: make(map[string]struct{}),
offsets: make([]int64, 0, maxMedianTimeEntries),
}
}
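Since this whole file is deleted in the diff (superseded by the simpler TimeSource added below), here is a brief usage sketch of the removed MedianTimeSource for reference; peer IDs are made up, and fmt is assumed to be imported:

func medianTimeExample() {
	mts := NewMedianTime()
	for i, offsetSecs := range []int64{-13, 57, -4, -23, -12} {
		sample := time.Now().Add(time.Duration(offsetSecs) * time.Second)
		mts.AddTimeSample(fmt.Sprintf("peer-%d", i), sample)
	}
	// With five samples (an odd count of at least 5), the median
	// offset (-12s here) becomes the applied offset.
	fmt.Println(mts.Offset())
	fmt.Println(mts.AdjustedTime())
}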


@@ -1,102 +0,0 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"strconv"
"testing"
"time"
)
// TestMedianTime tests the medianTime implementation.
func TestMedianTime(t *testing.T) {
tests := []struct {
in []int64
wantOffset int64
useDupID bool
}{
// Not enough samples must result in an offset of 0.
{in: []int64{1}, wantOffset: 0},
{in: []int64{1, 2}, wantOffset: 0},
{in: []int64{1, 2, 3}, wantOffset: 0},
{in: []int64{1, 2, 3, 4}, wantOffset: 0},
// Various number of entries. The expected offset is only
// updated on odd number of elements.
{in: []int64{-13, 57, -4, -23, -12}, wantOffset: -12},
{in: []int64{55, -13, 61, -52, 39, 55}, wantOffset: 39},
{in: []int64{-62, -58, -30, -62, 51, -30, 15}, wantOffset: -30},
{in: []int64{29, -47, 39, 54, 42, 41, 8, -33}, wantOffset: 39},
{in: []int64{37, 54, 9, -21, -56, -36, 5, -11, -39}, wantOffset: -11},
{in: []int64{57, -28, 25, -39, 9, 63, -16, 19, -60, 25}, wantOffset: 9},
{in: []int64{-5, -4, -3, -2, -1}, wantOffset: -3, useDupID: true},
// The offset stops being updated once the max number of entries
// has been reached.
{in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52}, wantOffset: 17},
{in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52, 45}, wantOffset: 17},
{in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52, 45, 4}, wantOffset: 17},
// Offsets that are too far away from the local time should
// be ignored.
{in: []int64{-4201, 4202, -4203, 4204, -4205}, wantOffset: 0},
// Exercise the condition where the median offset is greater
// than the max allowed adjustment, but there is at least one
// sample that is close enough to the current time to avoid
// triggering a warning about an invalid local clock.
{in: []int64{4201, 4202, 4203, 4204, -299}, wantOffset: 0},
}
// Modify the max number of allowed median time entries for these tests.
maxMedianTimeEntries = 10
defer func() { maxMedianTimeEntries = 200 }()
for i, test := range tests {
filter := NewMedianTime()
for j, offset := range test.in {
id := strconv.Itoa(j)
now := time.Unix(time.Now().Unix(), 0)
tOffset := now.Add(time.Duration(offset) * time.Second)
filter.AddTimeSample(id, tOffset)
// Ensure the duplicate IDs are ignored.
if test.useDupID {
// Modify the offsets to ensure the final median
// would be different if the duplicate is added.
tOffset = tOffset.Add(time.Duration(offset) *
time.Second)
filter.AddTimeSample(id, tOffset)
}
}
// Since it is possible that the time.Now call in AddTimeSample
// and the time.Now calls here in the tests will be off by one
// second, allow a fudge factor to compensate.
gotOffset := filter.Offset()
wantOffset := time.Duration(test.wantOffset) * time.Second
wantOffset2 := time.Duration(test.wantOffset-1) * time.Second
if gotOffset != wantOffset && gotOffset != wantOffset2 {
t.Errorf("Offset #%d: unexpected offset -- got %v, "+
"want %v or %v", i, gotOffset, wantOffset,
wantOffset2)
continue
}
// Since it is possible that the time.Now call in AdjustedTime
// and the time.Now call here in the tests will be off by one
// second, allow a fudge factor to compensate.
adjustedTime := filter.AdjustedTime()
now := time.Unix(time.Now().Unix(), 0)
wantTime := now.Add(filter.Offset())
wantTime2 := now.Add(filter.Offset() - time.Second)
if !adjustedTime.Equal(wantTime) && !adjustedTime.Equal(wantTime2) {
t.Errorf("AdjustedTime #%d: unexpected result -- got %v, "+
"want %v or %v", i, adjustedTime, wantTime,
wantTime2)
continue
}
}
}


@@ -3,17 +3,19 @@ package blockdag
import (
"bytes"
"encoding/binary"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"sort"
"time"
)
// BlockForMining returns a block with the given transactions
// that points to the current DAG tips and is valid in all
// aspects except proof of work.
//
// This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*wire.MsgBlock, error) {
blockTimestamp := dag.NextBlockTime()
requiredDifficulty := dag.NextRequiredDifficulty(blockTimestamp)
@@ -25,17 +27,6 @@ func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*wire.MsgBlock, er
return nil, err
}
// Sort transactions by subnetwork ID before building Merkle tree
sort.Slice(transactions, func(i, j int) bool {
if transactions[i].MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase) {
return true
}
if transactions[j].MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase) {
return false
}
return subnetworkid.Less(&transactions[i].MsgTx().SubnetworkID, &transactions[j].MsgTx().SubnetworkID)
})
// Create a new block ready to be solved.
hashMerkleTree := BuildHashMerkleTreeStore(transactions)
acceptedIDMerkleRoot, err := dag.NextAcceptedIDMerkleRootNoLock()
@@ -47,18 +38,17 @@ func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*wire.MsgBlock, er
msgBlock.AddTransaction(tx.MsgTx())
}
utxoWithTransactions, err := dag.UTXOSet().WithTransactions(msgBlock.Transactions, UnacceptedBlueScore, false)
multiset, err := dag.NextBlockMultiset(transactions)
if err != nil {
return nil, err
}
utxoCommitment := utxoWithTransactions.Multiset().Hash()
msgBlock.Header = wire.BlockHeader{
Version: nextBlockVersion,
ParentHashes: dag.TipHashes(),
HashMerkleRoot: hashMerkleTree.Root(),
AcceptedIDMerkleRoot: acceptedIDMerkleRoot,
UTXOCommitment: utxoCommitment,
UTXOCommitment: (*daghash.Hash)(multiset.Finalize()),
Timestamp: blockTimestamp,
Bits: requiredDifficulty,
}
@@ -66,6 +56,19 @@ func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*wire.MsgBlock, er
return &msgBlock, nil
}
// NextBlockMultiset returns the multiset of an assumed next block
// built on top of the current tips, with the given transactions.
//
// This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) NextBlockMultiset(transactions []*util.Tx) (*secp256k1.MultiSet, error) {
pastUTXO, selectedParentUTXO, txsAcceptanceData, err := dag.pastUTXO(&dag.virtual.blockNode)
if err != nil {
return nil, err
}
return dag.virtual.blockNode.calcMultiset(dag, transactions, txsAcceptanceData, selectedParentUTXO, pastUTXO)
}
// CoinbasePayloadExtraData returns coinbase payload extra data parameter
// which is built from extra nonce and coinbase flags.
func CoinbasePayloadExtraData(extraNonce uint64, coinbaseFlags string) ([]byte, error) {
@@ -114,7 +117,7 @@ func (dag *BlockDAG) NextBlockTime() time.Time {
// timestamp is truncated to a second boundary before comparison since a
// block timestamp does not support a precision greater than one
// second.
newTimestamp := dag.AdjustedTime()
newTimestamp := dag.Now()
minTimestamp := dag.NextBlockMinimumTime()
if newTimestamp.Before(minTimestamp) {
newTimestamp = minTimestamp

blockdag/multisetio.go Normal file

@@ -0,0 +1,29 @@
package blockdag
import (
"encoding/binary"
"github.com/kaspanet/go-secp256k1"
"io"
)
const multisetPointSize = 32
// serializeMultiset serializes an ECMH multiset.
func serializeMultiset(w io.Writer, ms *secp256k1.MultiSet) error {
serialized := ms.Serialize()
err := binary.Write(w, byteOrder, serialized)
if err != nil {
return err
}
return nil
}
// deserializeMultiset deserializes an ECMH multiset.
func deserializeMultiset(r io.Reader) (*secp256k1.MultiSet, error) {
serialized := &secp256k1.SerializedMultiSet{}
err := binary.Read(r, byteOrder, serialized[:])
if err != nil {
return nil, err
}
return secp256k1.DeserializeMultiSet(serialized)
}
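A hedged round-trip sanity sketch for the two helpers above. It assumes secp256k1.NewMultiset is the empty-multiset constructor in kaspanet/go-secp256k1, that Finalize returns a pointer to a comparable hash array, and that bytes and github.com/pkg/errors are imported:

func multisetRoundTripExample() error {
	ms := secp256k1.NewMultiset() // assumed empty-multiset constructor
	ms.Add([]byte("serialized UTXO bytes"))

	buf := &bytes.Buffer{}
	if err := serializeMultiset(buf, ms); err != nil {
		return err
	}
	deserialized, err := deserializeMultiset(buf)
	if err != nil {
		return err
	}
	if *deserialized.Finalize() != *ms.Finalize() {
		return errors.New("multiset did not survive a serialize/deserialize round-trip")
	}
	return nil
}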

blockdag/multisetstore.go Normal file

@@ -0,0 +1,112 @@
package blockdag
import (
"bytes"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/locks"
"github.com/pkg/errors"
)
type multisetStore struct {
dag *BlockDAG
new map[daghash.Hash]struct{}
loaded map[daghash.Hash]secp256k1.MultiSet
mtx *locks.PriorityMutex
}
func newMultisetStore(dag *BlockDAG) *multisetStore {
return &multisetStore{
dag: dag,
new: make(map[daghash.Hash]struct{}),
loaded: make(map[daghash.Hash]secp256k1.MultiSet),
}
}
func (store *multisetStore) setMultiset(node *blockNode, ms *secp256k1.MultiSet) {
store.loaded[*node.hash] = *ms
store.addToNewBlocks(node.hash)
}
func (store *multisetStore) addToNewBlocks(blockHash *daghash.Hash) {
store.new[*blockHash] = struct{}{}
}
func multisetNotFoundError(blockHash *daghash.Hash) error {
return errors.Errorf("Couldn't find multiset data for block %s", blockHash)
}
func (store *multisetStore) multisetByBlockNode(node *blockNode) (*secp256k1.MultiSet, error) {
ms, exists := store.multisetByBlockHash(node.hash)
if !exists {
return nil, multisetNotFoundError(node.hash)
}
return ms, nil
}
func (store *multisetStore) multisetByBlockHash(hash *daghash.Hash) (*secp256k1.MultiSet, bool) {
ms, ok := store.loaded[*hash]
return &ms, ok
}
// flushToDB writes all new multiset data to the database.
func (store *multisetStore) flushToDB(dbTx database.Tx) error {
if len(store.new) == 0 {
return nil
}
w := &bytes.Buffer{}
for hash := range store.new {
hash := hash // Copy hash to a new variable to avoid passing the same pointer
w.Reset()
ms, exists := store.loaded[hash]
if !exists {
return multisetNotFoundError(&hash)
}
err := serializeMultiset(w, &ms)
if err != nil {
return err
}
err = store.dbStoreMultiset(dbTx, &hash, w.Bytes())
if err != nil {
return err
}
}
return nil
}
func (store *multisetStore) clearNewEntries() {
store.new = make(map[daghash.Hash]struct{})
}
func (store *multisetStore) init(dbTx database.Tx) error {
bucket := dbTx.Metadata().Bucket(multisetBucketName)
cursor := bucket.Cursor()
for ok := cursor.First(); ok; ok = cursor.Next() {
hash, err := daghash.NewHash(cursor.Key())
if err != nil {
return err
}
ms, err := deserializeMultiset(bytes.NewReader(cursor.Value()))
if err != nil {
return err
}
store.loaded[*hash] = *ms
}
return nil
}
// dbStoreMultiset stores the multiset data to the database.
func (store *multisetStore) dbStoreMultiset(dbTx database.Tx, blockHash *daghash.Hash, serializedMS []byte) error {
bucket := dbTx.Metadata().Bucket(multisetBucketName)
if bucket.Get(blockHash[:]) != nil {
return errors.Errorf("Can't override an existing multiset database entry for block %s", blockHash)
}
return bucket.Put(blockHash[:], serializedMS)
}
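For orientation, the intended write path for this store, sketched under the assumption that the database package exposes the usual Update-style transaction wrapper on database.DB; the surrounding wiring is illustrative, not part of the patch:

// Persist all new multisets in one DB transaction, then clear the
// new-entries set only after the transaction has committed.
func flushMultisetStore(db database.DB, store *multisetStore) error {
	err := db.Update(func(dbTx database.Tx) error {
		return store.flushToDB(dbTx)
	})
	if err != nil {
		return err
	}
	store.clearNewEntries()
	return nil
}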


@@ -57,8 +57,8 @@ type Notification struct {
// NotificationType for details on the types and contents of notifications.
func (dag *BlockDAG) Subscribe(callback NotificationCallback) {
dag.notificationsLock.Lock()
defer dag.notificationsLock.Unlock()
dag.notifications = append(dag.notifications, callback)
dag.notificationsLock.Unlock()
}
// sendNotification sends a notification with the passed type and data if the
@@ -68,10 +68,10 @@ func (dag *BlockDAG) sendNotification(typ NotificationType, data interface{}) {
// Generate and send the notification.
n := Notification{Type: typ, Data: data}
dag.notificationsLock.RLock()
defer dag.notificationsLock.RUnlock()
for _, callback := range dag.notifications {
callback(&n)
}
dag.notificationsLock.RUnlock()
}
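A minimal subscriber sketch for the two functions above. Callbacks run synchronously while the notifications lock is held, so they should return quickly; the NTBlockAdded constant and the BlockAddedNotificationData fields used here are assumptions based on this package's naming:

func subscribeExample(dag *BlockDAG) {
	dag.Subscribe(func(notification *Notification) {
		// Assumed type/data names; hand real work off to another
		// goroutine to keep the callback fast.
		if notification.Type == NTBlockAdded {
			data := notification.Data.(*BlockAddedNotificationData)
			log.Infof("block %s was added to the DAG", data.Block.Hash())
		}
	})
}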
// BlockAddedNotificationData defines data to be sent along with a BlockAdded


@@ -7,6 +7,7 @@ package blockdag
import (
"fmt"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/pkg/errors"
"time"
"github.com/kaspanet/kaspad/util"
@@ -44,15 +45,19 @@ const (
// in the block index but was never fully processed
BFWasStored
// BFDisallowDelay is set to indicate that a delayed block should be rejected.
// This is used for the case where a block is submitted through RPC.
BFDisallowDelay
// BFNone is a convenience value to specifically indicate no flags.
BFNone BehaviorFlags = 0
)
// BlockExists determines whether a block with the given hash exists in
// IsInDAG determines whether a block with the given hash exists in
// the DAG.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) BlockExists(hash *daghash.Hash) bool {
func (dag *BlockDAG) IsInDAG(hash *daghash.Hash) bool {
return dag.index.HaveBlock(hash)
}
@@ -95,7 +100,8 @@ func (dag *BlockDAG) processOrphans(hash *daghash.Hash, flags BehaviorFlags) err
// still missing.
_, err := lookupParentNodes(orphan.block, dag)
if err != nil {
if ruleErr, ok := err.(RuleError); ok && ruleErr.ErrorCode == ErrParentBlockUnknown {
var ruleErr RuleError
if ok := errors.As(err, &ruleErr); ok && ruleErr.ErrorCode == ErrParentBlockUnknown {
continue
}
return err
@@ -111,7 +117,7 @@ func (dag *BlockDAG) processOrphans(hash *daghash.Hash, flags BehaviorFlags) err
if err != nil {
// Since we don't want to reject the original block because of
// a bad unorphaned child, only return an error if it's not a RuleError.
if _, ok := err.(RuleError); !ok {
if !errors.As(err, &RuleError{}) {
return err
}
log.Warnf("Verification failed for orphan block %s: %s", orphanHash, err)
@@ -144,12 +150,13 @@ func (dag *BlockDAG) ProcessBlock(block *util.Block, flags BehaviorFlags) (isOrp
func (dag *BlockDAG) processBlockNoLock(block *util.Block, flags BehaviorFlags) (isOrphan bool, isDelayed bool, err error) {
isAfterDelay := flags&BFAfterDelay == BFAfterDelay
wasBlockStored := flags&BFWasStored == BFWasStored
disallowDelay := flags&BFDisallowDelay == BFDisallowDelay
blockHash := block.Hash()
log.Tracef("Processing block %s", blockHash)
// The block must not already exist in the DAG.
if dag.BlockExists(blockHash) && !wasBlockStored {
if dag.IsInDAG(blockHash) && !wasBlockStored {
str := fmt.Sprintf("already have block %s", blockHash)
return false, false, ruleError(ErrDuplicateBlock, str)
}
@@ -172,6 +179,11 @@ func (dag *BlockDAG) processBlockNoLock(block *util.Block, flags BehaviorFlags)
return false, false, err
}
if delay != 0 && disallowDelay {
str := fmt.Sprintf("Cannot process blocks beyond the allowed time offset while the BFDisallowDelay flag is raised %s", blockHash)
return false, true, ruleError(ErrDelayedBlockIsNotAllowed, str)
}
if delay != 0 {
err = dag.addDelayedBlock(block, delay)
if err != nil {
@@ -183,7 +195,7 @@ func (dag *BlockDAG) processBlockNoLock(block *util.Block, flags BehaviorFlags)
var missingParents []*daghash.Hash
for _, parentHash := range block.MsgBlock().Header.ParentHashes {
if !dag.BlockExists(parentHash) {
if !dag.IsInDAG(parentHash) {
missingParents = append(missingParents, parentHash)
}
}
@@ -252,7 +264,7 @@ func (dag *BlockDAG) maxDelayOfParents(parentHashes []*daghash.Hash) (delay time
for _, parentHash := range parentHashes {
if delayedParent, exists := dag.delayedBlocks[*parentHash]; exists {
isDelayed = true
parentDelay := delayedParent.processTime.Sub(dag.AdjustedTime())
parentDelay := delayedParent.processTime.Sub(dag.Now())
if parentDelay > delay {
delay = parentDelay
}


@@ -1,8 +1,10 @@
package blockdag
import (
"github.com/kaspanet/kaspad/util"
"path/filepath"
"testing"
"time"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util/daghash"
@@ -69,3 +71,156 @@ func TestProcessOrphans(t *testing.T) {
t.Fatalf("TestProcessOrphans: child block erroneously not marked as invalid")
}
}
func TestProcessDelayedBlocks(t *testing.T) {
// We use dag1 so we can build the test blocks with the proper
// block header (UTXO commitment, acceptedIDMerkleRoot, etc.), and
// then we use dag2 for the actual test.
dag1, teardownFunc, err := DAGSetup("TestProcessDelayedBlocks1", Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
t.Fatalf("Failed to setup DAG instance: %v", err)
}
defer teardownFunc()
initialTime := dag1.dagParams.GenesisBlock.Header.Timestamp
// Here we use a fake time source that returns a timestamp
// one hour into the future to make delayedBlock artificially
// valid.
dag1.timeSource = newFakeTimeSource(initialTime.Add(time.Hour))
delayedBlock, err := PrepareBlockForTest(dag1, []*daghash.Hash{dag1.dagParams.GenesisBlock.BlockHash()}, nil)
if err != nil {
t.Fatalf("error in PrepareBlockForTest: %s", err)
}
blockDelay := time.Duration(dag1.dagParams.TimestampDeviationTolerance*uint64(dag1.targetTimePerBlock)+5) * time.Second
delayedBlock.Header.Timestamp = initialTime.Add(blockDelay)
isOrphan, isDelayed, err := dag1.ProcessBlock(util.NewBlock(delayedBlock), BFNoPoWCheck)
if err != nil {
t.Fatalf("ProcessBlock returned unexpected error: %s\n", err)
}
if isOrphan {
t.Fatalf("ProcessBlock incorrectly returned delayedBlock " +
"is an orphan\n")
}
if isDelayed {
t.Fatalf("ProcessBlock incorrectly returned delayedBlock " +
"is delayed\n")
}
delayedBlockChild, err := PrepareBlockForTest(dag1, []*daghash.Hash{delayedBlock.BlockHash()}, nil)
if err != nil {
t.Fatalf("error in PrepareBlockForTest: %s", err)
}
// Here the actual test begins. We add a delayed block and
// its child and check that they are not added to the DAG,
// and check that they're added only if we add a new block
// after the delayed block timestamp is valid.
dag2, teardownFunc2, err := DAGSetup("TestProcessDelayedBlocks2", Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
t.Fatalf("Failed to setup DAG instance: %v", err)
}
defer teardownFunc2()
dag2.timeSource = newFakeTimeSource(initialTime)
isOrphan, isDelayed, err = dag2.ProcessBlock(util.NewBlock(delayedBlock), BFNoPoWCheck)
if err != nil {
t.Fatalf("ProcessBlock returned unexpected error: %s\n", err)
}
if isOrphan {
t.Fatalf("ProcessBlock incorrectly returned delayedBlock " +
"is an orphan\n")
}
if !isDelayed {
t.Fatalf("ProcessBlock incorrectly returned delayedBlock " +
"is not delayed\n")
}
if dag2.IsInDAG(delayedBlock.BlockHash()) {
t.Errorf("dag.IsInDAG should return false for a delayed block")
}
if !dag2.IsKnownBlock(delayedBlock.BlockHash()) {
t.Errorf("dag.IsKnownBlock should return true for a a delayed block")
}
isOrphan, isDelayed, err = dag2.ProcessBlock(util.NewBlock(delayedBlockChild), BFNoPoWCheck)
if err != nil {
t.Fatalf("ProcessBlock returned unexpected error: %s\n", err)
}
if isOrphan {
t.Fatalf("ProcessBlock incorrectly returned delayedBlockChild " +
"is an orphan\n")
}
if !isDelayed {
t.Fatalf("ProcessBlock incorrectly returned delayedBlockChild " +
"is not delayed\n")
}
if dag2.IsInDAG(delayedBlockChild.BlockHash()) {
t.Errorf("dag.IsInDAG should return false for a child of a delayed block")
}
if !dag2.IsKnownBlock(delayedBlockChild.BlockHash()) {
t.Errorf("dag.IsKnownBlock should return true for a child of a delayed block")
}
blockBeforeDelay, err := PrepareBlockForTest(dag2, []*daghash.Hash{dag2.dagParams.GenesisBlock.BlockHash()}, nil)
if err != nil {
t.Fatalf("error in PrepareBlockForTest: %s", err)
}
isOrphan, isDelayed, err = dag2.ProcessBlock(util.NewBlock(blockBeforeDelay), BFNoPoWCheck)
if err != nil {
t.Fatalf("ProcessBlock returned unexpected error: %s\n", err)
}
if isOrphan {
t.Fatalf("ProcessBlock incorrectly returned blockBeforeDelay " +
"is an orphan\n")
}
if isDelayed {
t.Fatalf("ProcessBlock incorrectly returned blockBeforeDelay " +
"is delayed\n")
}
if dag2.IsInDAG(delayedBlock.BlockHash()) {
t.Errorf("delayedBlock shouldn't be added to the DAG because its time hasn't reached yet")
}
if dag2.IsInDAG(delayedBlockChild.BlockHash()) {
t.Errorf("delayedBlockChild shouldn't be added to the DAG because its parent is not in the DAG")
}
// We advance the clock to the point where delayedBlock timestamp is valid.
deviationTolerance := int64(dag2.TimestampDeviationTolerance) * dag2.targetTimePerBlock
secondsUntilDelayedBlockIsValid := delayedBlock.Header.Timestamp.Unix() - deviationTolerance - dag2.Now().Unix() + 1
dag2.timeSource = newFakeTimeSource(initialTime.Add(time.Duration(secondsUntilDelayedBlockIsValid) * time.Second))
blockAfterDelay, err := PrepareBlockForTest(dag2,
[]*daghash.Hash{dag2.dagParams.GenesisBlock.BlockHash()},
nil)
if err != nil {
t.Fatalf("error in PrepareBlockForTest: %s", err)
}
isOrphan, isDelayed, err = dag2.ProcessBlock(util.NewBlock(blockAfterDelay), BFNoPoWCheck)
if err != nil {
t.Fatalf("ProcessBlock returned unexpected error: %s\n", err)
}
if isOrphan {
t.Fatalf("ProcessBlock incorrectly returned blockBeforeDelay " +
"is an orphan\n")
}
if isDelayed {
t.Fatalf("ProcessBlock incorrectly returned blockBeforeDelay " +
"is not delayed\n")
}
if !dag2.IsInDAG(delayedBlock.BlockHash()) {
t.Fatalf("delayedBlock should be added to the DAG because its time has been reached")
}
if !dag2.IsInDAG(delayedBlockChild.BlockHash()) {
t.Errorf("delayedBlockChild shouldn't be added to the DAG because its parent has been added to the DAG")
}
}
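The clock arithmetic this test depends on, restated as a hedged predicate; parameter names follow the test, while the real check lives in the delayed-block logic:

// isBeyondDeviationTolerance restates the rule the test exercises: a block
// whose timestamp is more than
// TimestampDeviationTolerance * targetTimePerBlock seconds ahead of the
// node's current time is held back (delayed) rather than processed.
func isBeyondDeviationTolerance(blockTimestamp, now time.Time,
	deviationTolerance uint64, targetTimePerBlock int64) bool {

	maxOffset := time.Duration(deviationTolerance*uint64(targetTimePerBlock)) * time.Second
	return blockTimestamp.After(now.Add(maxOffset))
}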


@@ -5,6 +5,7 @@ import (
"github.com/pkg/errors"
"math"
"strings"
"time"
)
// reachabilityInterval represents an interval to be used within the
@@ -190,11 +191,6 @@ type reachabilityTreeNode struct {
// remainingInterval is the not-yet allocated interval (within
// this node's interval) awaiting new children
remainingInterval *reachabilityInterval
// subtreeSize is a helper field used only during reindexing
// (expected to be 0 any other time).
// See countSubtrees for further details.
subtreeSize uint64
}
func newReachabilityTreeNode(blockNode *blockNode) *reachabilityTreeNode {
@@ -218,7 +214,16 @@ func (rtn *reachabilityTreeNode) addChild(child *reachabilityTreeNode) ([]*reach
// No allocation space left -- reindex
if rtn.remainingInterval.size() == 0 {
return rtn.reindexIntervals()
reindexStartTime := time.Now()
modifiedNodes, err := rtn.reindexIntervals()
if err != nil {
return nil, err
}
reindexTimeElapsed := time.Since(reindexStartTime)
log.Debugf("Reachability reindex triggered for "+
"block %s. Modified %d tree nodes and took %dms.",
rtn.blockNode.hash, len(modifiedNodes), reindexTimeElapsed.Milliseconds())
return modifiedNodes, nil
}
// Allocate from the remaining space
@@ -253,10 +258,12 @@ func (rtn *reachabilityTreeNode) reindexIntervals() ([]*reachabilityTreeNode, er
// Initial interval and subtree sizes
intervalSize := current.interval.size()
subtreeSize := current.countSubtrees()
subTreeSizeMap := make(map[*reachabilityTreeNode]uint64)
current.countSubtrees(subTreeSizeMap)
currentSubtreeSize := subTreeSizeMap[current]
// Find the first ancestor that has sufficient interval space
for intervalSize < subtreeSize {
for intervalSize < currentSubtreeSize {
if current.parent == nil {
// If we ended up here it means that there are more
// than 2^64 blocks, which shouldn't ever happen.
@@ -267,14 +274,16 @@ func (rtn *reachabilityTreeNode) reindexIntervals() ([]*reachabilityTreeNode, er
}
current = current.parent
intervalSize = current.interval.size()
subtreeSize = current.countSubtrees()
current.countSubtrees(subTreeSizeMap)
currentSubtreeSize = subTreeSizeMap[current]
}
// Propagate the interval to the subtree
return current.propagateInterval()
return current.propagateInterval(subTreeSizeMap)
}
// countSubtrees counts the size of each subtree under this node.
// countSubtrees counts the size of each subtree under this node,
// and populates the provided subTreeSizeMap with the results.
// It is equivalent to the following recursive implementation:
//
// func (rtn *reachabilityTreeNode) countSubtrees() uint64 {
@@ -293,36 +302,20 @@ func (rtn *reachabilityTreeNode) reindexIntervals() ([]*reachabilityTreeNode, er
// intermediate updates from leaves via parent chains until all
// size information is gathered at the root of the operation
// (i.e. at rtn).
//
// Note the role of the subtreeSize field in the algorithm.
// For each node rtn this field is initialized to 0. The field
// has two possible states:
// * rtn.subtreeSize > |rtn.children|:
// indicates that rtn's subtree size is already known and
// calculated.
// * rtn.subtreeSize <= |rtn.children|:
// indicates that we are still in the counting stage of
// tracking which of rtn's children has already calculated
// its subtree size.
// This way, once rtn.subtree_size = |rtn.children| we know we
// can pull subtree sizes from children and continue pushing
// the readiness signal further up.
func (rtn *reachabilityTreeNode) countSubtrees() uint64 {
func (rtn *reachabilityTreeNode) countSubtrees(subTreeSizeMap map[*reachabilityTreeNode]uint64) {
queue := []*reachabilityTreeNode{rtn}
calculatedChildrenCount := make(map[*reachabilityTreeNode]uint64)
for len(queue) > 0 {
var current *reachabilityTreeNode
current, queue = queue[0], queue[1:]
if len(current.children) == 0 {
// We reached a leaf
current.subtreeSize = 1
}
if current.subtreeSize <= uint64(len(current.children)) {
subTreeSizeMap[current] = 1
} else if calculatedChildrenCount[current] <= uint64(len(current.children)) {
// We haven't yet calculated the subtree size of
// the current node. Add all its children to the
// queue
for _, child := range current.children {
queue = append(queue, child)
}
queue = append(queue, current.children...)
continue
}
@@ -330,39 +323,39 @@ func (rtn *reachabilityTreeNode) countSubtrees() uint64 {
// Push information up
for current != rtn {
current = current.parent
current.subtreeSize++
if current.subtreeSize != uint64(len(current.children)) {
calculatedChildrenCount[current]++
if calculatedChildrenCount[current] != uint64(len(current.children)) {
// Not all subtrees of the current node are ready
break
}
// All subtrees of current have reported readiness.
// Count actual subtree size and continue pushing up.
// All children of `current` have calculated their subtree size.
// Sum them all together and add 1 to get the subtree size of
// `current`.
childSubtreeSizeSum := uint64(0)
for _, child := range current.children {
childSubtreeSizeSum += child.subtreeSize
childSubtreeSizeSum += subTreeSizeMap[child]
}
current.subtreeSize = childSubtreeSizeSum + 1
subTreeSizeMap[current] = childSubtreeSizeSum + 1
}
}
return rtn.subtreeSize
}
// propagateInterval propagates the new interval using a BFS traversal.
// Subtree intervals are recursively allocated according to subtree sizes and
// the allocation rule in splitWithExponentialBias. This method returns
// a list of reachabilityTreeNodes modified by it.
func (rtn *reachabilityTreeNode) propagateInterval() ([]*reachabilityTreeNode, error) {
func (rtn *reachabilityTreeNode) propagateInterval(subTreeSizeMap map[*reachabilityTreeNode]uint64) ([]*reachabilityTreeNode, error) {
// We set the interval to reset its remainingInterval, so we could reallocate it while reindexing.
rtn.setInterval(rtn.interval)
modifiedNodes := []*reachabilityTreeNode{rtn}
queue := []*reachabilityTreeNode{rtn}
var modifiedNodes []*reachabilityTreeNode
for len(queue) > 0 {
var current *reachabilityTreeNode
current, queue = queue[0], queue[1:]
if len(current.children) > 0 {
sizes := make([]uint64, len(current.children))
for i, child := range current.children {
sizes[i] = child.subtreeSize
sizes[i] = subTreeSizeMap[child]
}
intervals, err := current.remainingInterval.splitWithExponentialBias(sizes)
if err != nil {
@@ -379,9 +372,6 @@ func (rtn *reachabilityTreeNode) propagateInterval() ([]*reachabilityTreeNode, e
}
modifiedNodes = append(modifiedNodes, current)
// Cleanup temp info for future reindexing
current.subtreeSize = 0
}
return modifiedNodes, nil
}
@@ -401,7 +391,7 @@ func (rtn *reachabilityTreeNode) String() string {
var current *reachabilityTreeNode
current, queue = queue[0], queue[1:]
if len(current.children) == 0 {
break
continue
}
line := ""
@@ -527,7 +517,8 @@ func (dag *BlockDAG) updateReachability(node *blockNode, selectedParentAnticone
// If this is the genesis node, simply initialize it and return
if node.isGenesis() {
return dag.reachabilityStore.setTreeNode(newTreeNode)
dag.reachabilityStore.setTreeNode(newTreeNode)
return nil
}
// Insert the node into the selected parent's reachability tree
@@ -540,10 +531,7 @@ func (dag *BlockDAG) updateReachability(node *blockNode, selectedParentAnticone
return err
}
for _, modifiedTreeNode := range modifiedTreeNodes {
err = dag.reachabilityStore.setTreeNode(modifiedTreeNode)
if err != nil {
return err
}
dag.reachabilityStore.setTreeNode(modifiedTreeNode)
}
// Add the block to the futureCoveringSets of all the blocks

View File

@@ -2,6 +2,7 @@ package blockdag
import (
"reflect"
"strings"
"testing"
)
@@ -322,6 +323,15 @@ func TestSplitWithExponentialBias(t *testing.T) {
newReachabilityInterval(41, 10_000),
},
},
{
interval: newReachabilityInterval(1, 100_000),
sizes: []uint64{31_000, 31_000, 30_001},
expectedIntervals: []*reachabilityInterval{
newReachabilityInterval(1, 35_000),
newReachabilityInterval(35_001, 69_999),
newReachabilityInterval(70_000, 100_000),
},
},
}
for i, test := range tests {
@@ -473,3 +483,166 @@ func TestInsertBlock(t *testing.T) {
}
}
}
func TestSplitFractionErrors(t *testing.T) {
interval := newReachabilityInterval(100, 200)
// Negative fraction
_, _, err := interval.splitFraction(-0.5)
if err == nil {
t.Fatalf("TestSplitFractionErrors: splitFraction unexpectedly " +
"didn't return an error for a negative fraction")
}
expectedErrSubstring := "fraction must be between 0 and 1"
if !strings.Contains(err.Error(), expectedErrSubstring) {
t.Fatalf("TestSplitFractionErrors: splitFraction returned wrong error "+
"for a negative fraction. "+
"Want: %s, got: %s", expectedErrSubstring, err)
}
// Fraction > 1
_, _, err = interval.splitFraction(1.5)
if err == nil {
t.Fatalf("TestSplitFractionErrors: splitFraction unexpectedly " +
"didn't return an error for a fraction greater than 1")
}
expectedErrSubstring = "fraction must be between 0 and 1"
if !strings.Contains(err.Error(), expectedErrSubstring) {
t.Fatalf("TestSplitFractionErrors: splitFraction returned wrong error "+
"for a fraction greater than 1. "+
"Want: %s, got: %s", expectedErrSubstring, err)
}
// Splitting an empty interval
emptyInterval := newReachabilityInterval(1, 0)
_, _, err = emptyInterval.splitFraction(0.5)
if err == nil {
t.Fatalf("TestSplitFractionErrors: splitFraction unexpectedly " +
"didn't return an error for an empty interval")
}
expectedErrSubstring = "cannot split an empty interval"
if !strings.Contains(err.Error(), expectedErrSubstring) {
t.Fatalf("TestSplitFractionErrors: splitFraction returned wrong error "+
"for an empty interval. "+
"Want: %s, got: %s", expectedErrSubstring, err)
}
}
func TestSplitExactErrors(t *testing.T) {
interval := newReachabilityInterval(100, 199)
// Sum of sizes greater than the size of the interval
sizes := []uint64{50, 51}
_, err := interval.splitExact(sizes)
if err == nil {
t.Fatalf("TestSplitExactErrors: splitExact unexpectedly " +
"didn't return an error for (sum of sizes) > (size of interval)")
}
expectedErrSubstring := "sum of sizes must be equal to the interval's size"
if !strings.Contains(err.Error(), expectedErrSubstring) {
t.Fatalf("TestSplitExactErrors: splitExact returned wrong error "+
"for (sum of sizes) > (size of interval). "+
"Want: %s, got: %s", expectedErrSubstring, err)
}
// Sum of sizes smaller than the size of the interval
sizes = []uint64{50, 49}
_, err = interval.splitExact(sizes)
if err == nil {
t.Fatalf("TestSplitExactErrors: splitExact unexpectedly " +
"didn't return an error for (sum of sizes) < (size of interval)")
}
expectedErrSubstring = "sum of sizes must be equal to the interval's size"
if !strings.Contains(err.Error(), expectedErrSubstring) {
t.Fatalf("TestSplitExactErrors: splitExact returned wrong error "+
"for (sum of sizes) < (size of interval). "+
"Want: %s, got: %s", expectedErrSubstring, err)
}
}
func TestSplitWithExponentialBiasErrors(t *testing.T) {
interval := newReachabilityInterval(100, 199)
// Sum of sizes greater than the size of the interval
sizes := []uint64{50, 51}
_, err := interval.splitWithExponentialBias(sizes)
if err == nil {
t.Fatalf("TestSplitWithExponentialBiasErrors: splitWithExponentialBias " +
"unexpectedly didn't return an error")
}
expectedErrSubstring := "sum of sizes must be less than or equal to the interval's size"
if !strings.Contains(err.Error(), expectedErrSubstring) {
t.Fatalf("TestSplitWithExponentialBiasErrors: splitWithExponentialBias "+
"returned wrong error. Want: %s, got: %s", expectedErrSubstring, err)
}
}
func TestReindexIntervalErrors(t *testing.T) {
// Create a treeNode and give it size = 100
treeNode := newReachabilityTreeNode(&blockNode{})
treeNode.setInterval(newReachabilityInterval(0, 99))
// Add a chain of 100 child treeNodes to treeNode
var err error
currentTreeNode := treeNode
for i := 0; i < 100; i++ {
childTreeNode := newReachabilityTreeNode(&blockNode{})
_, err = currentTreeNode.addChild(childTreeNode)
if err != nil {
break
}
currentTreeNode = childTreeNode
}
// At the 100th addChild we expect a reindex. This reindex should
// fail because our initial treeNode only has size = 100, and the
// reindex requires size > 100.
// This simulates the case when (somehow) there's more than 2^64
// blocks in the DAG, since the genesis block has size = 2^64.
if err == nil {
t.Fatalf("TestReindexIntervalErrors: reindexIntervals " +
"unexpectedly didn't return an error")
}
if !strings.Contains(err.Error(), "missing tree parent during reindexing") {
t.Fatalf("TestReindexIntervalErrors: reindexIntervals "+
"returned an expected error: %s", err)
}
}
func TestFutureCoveringBlockSetString(t *testing.T) {
treeNodeA := newReachabilityTreeNode(&blockNode{})
treeNodeA.setInterval(newReachabilityInterval(123, 456))
treeNodeB := newReachabilityTreeNode(&blockNode{})
treeNodeB.setInterval(newReachabilityInterval(457, 789))
futureCoveringSet := futureCoveringBlockSet{
&futureCoveringBlock{treeNode: treeNodeA},
&futureCoveringBlock{treeNode: treeNodeB},
}
str := futureCoveringSet.String()
expectedStr := "[123,456][457,789]"
if str != expectedStr {
t.Fatalf("TestFutureCoveringBlockSetString: unexpected "+
"string. Want: %s, got: %s", expectedStr, str)
}
}
func TestReachabilityTreeNodeString(t *testing.T) {
treeNodeA := newReachabilityTreeNode(&blockNode{})
treeNodeA.setInterval(newReachabilityInterval(100, 199))
treeNodeB1 := newReachabilityTreeNode(&blockNode{})
treeNodeB1.setInterval(newReachabilityInterval(100, 150))
treeNodeB2 := newReachabilityTreeNode(&blockNode{})
treeNodeB2.setInterval(newReachabilityInterval(150, 199))
treeNodeC := newReachabilityTreeNode(&blockNode{})
treeNodeC.setInterval(newReachabilityInterval(100, 149))
treeNodeA.children = []*reachabilityTreeNode{treeNodeB1, treeNodeB2}
treeNodeB2.children = []*reachabilityTreeNode{treeNodeC}
str := treeNodeA.String()
expectedStr := "[100,149]\n[100,150][150,199]\n[100,199]"
if str != expectedStr {
t.Fatalf("TestReachabilityTreeNodeString: unexpected "+
"string. Want: %s, got: %s", expectedStr, str)
}
}
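For context, the invariant behind these interval tests: a tree node is an ancestor of another exactly when its interval contains the other's. A standalone sketch of that containment rule, where the struct is an illustrative stand-in for this package's reachabilityInterval:

// interval is an illustrative stand-in for reachabilityInterval.
type interval struct {
	start, end uint64
}

// contains reports whether ri fully contains other, which in the
// reachability tree means "is an ancestor of". For example, [100,199]
// contains both [100,150] and [150,199], matching the parent/child
// layout built in TestReachabilityTreeNodeString above.
func (ri interval) contains(other interval) bool {
	return ri.start <= other.start && other.end <= ri.end
}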


@@ -28,7 +28,7 @@ func newReachabilityStore(dag *BlockDAG) *reachabilityStore {
}
}
func (store *reachabilityStore) setTreeNode(treeNode *reachabilityTreeNode) error {
func (store *reachabilityStore) setTreeNode(treeNode *reachabilityTreeNode) {
// load the reachability data from DB to store.loaded
node := treeNode.blockNode
_, exists := store.reachabilityDataByHash(node.hash)
@@ -38,7 +38,6 @@ func (store *reachabilityStore) setTreeNode(treeNode *reachabilityTreeNode) erro
store.loaded[*node.hash].treeNode = treeNode
store.setBlockAsDirty(node.hash)
return nil
}
func (store *reachabilityStore) setFutureCoveringSet(node *blockNode, futureCoveringSet futureCoveringBlockSet) error {
@@ -89,6 +88,7 @@ func (store *reachabilityStore) flushToDB(dbTx database.Tx) error {
}
for hash := range store.dirty {
hash := hash // Copy hash to a new variable to avoid passing the same pointer
reachabilityData := store.loaded[hash]
err := store.dbStoreReachabilityData(dbTx, &hash, reachabilityData)
if err != nil {


@@ -10,6 +10,7 @@ import (
"io"
"os"
"path/filepath"
"sort"
"strings"
"sync"
@@ -26,9 +27,6 @@ const (
// testDbType is the database backend type to use for the tests.
testDbType = "ffldb"
// testDbRoot is the root directory used to create all test databases.
testDbRoot = "testdbs"
// blockDataNet is the expected network in the test block data.
blockDataNet = wire.Mainnet
)
@@ -79,16 +77,9 @@ func DAGSetup(dbName string, config Config) (*BlockDAG, func(), error) {
}
if config.DB == nil {
// Create the root directory for test databases.
if !FileExists(testDbRoot) {
if err := os.MkdirAll(testDbRoot, 0700); err != nil {
err := errors.Errorf("unable to create test db "+
"root: %s", err)
return nil, nil, err
}
}
tmpDir := os.TempDir()
dbPath := filepath.Join(testDbRoot, dbName)
dbPath := filepath.Join(tmpDir, dbName)
_ = os.RemoveAll(dbPath)
var err error
config.DB, err = database.Create(testDbType, dbPath, blockDataNet)
@@ -103,7 +94,6 @@ func DAGSetup(dbName string, config Config) (*BlockDAG, func(), error) {
spawn = realSpawn
config.DB.Close()
os.RemoveAll(dbPath)
os.RemoveAll(testDbRoot)
}
} else {
teardown = func() {
@@ -113,7 +103,7 @@ func DAGSetup(dbName string, config Config) (*BlockDAG, func(), error) {
}
}
config.TimeSource = NewMedianTime()
config.TimeSource = NewTimeSource()
config.SigCache = txscript.NewSigCache(1000)
// Create the DAG instance.
@@ -173,7 +163,7 @@ func SetVirtualForTest(dag *BlockDAG, virtual VirtualForTest) VirtualForTest {
// GetVirtualFromParentsForTest generates a virtual block with the given parents.
func GetVirtualFromParentsForTest(dag *BlockDAG, parentHashes []*daghash.Hash) (VirtualForTest, error) {
parents := newSet()
parents := newBlockSet()
for _, hash := range parentHashes {
parent := dag.index.LookupNode(hash)
if parent == nil {
@@ -183,7 +173,7 @@ func GetVirtualFromParentsForTest(dag *BlockDAG, parentHashes []*daghash.Hash) (
}
virtual := newVirtualBlock(dag, parents)
pastUTXO, _, err := dag.pastUTXO(&virtual.blockNode)
pastUTXO, _, _, err := dag.pastUTXO(&virtual.blockNode)
if err != nil {
return nil, err
}
@@ -288,6 +278,17 @@ func PrepareBlockForTest(dag *BlockDAG, parentHashes []*daghash.Hash, transactio
blockTransactions[i+1] = util.NewTx(tx)
}
// Sort transactions by subnetwork ID
sort.Slice(blockTransactions, func(i, j int) bool {
if blockTransactions[i].MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase) {
return true
}
if blockTransactions[j].MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase) {
return false
}
return subnetworkid.Less(&blockTransactions[i].MsgTx().SubnetworkID, &blockTransactions[j].MsgTx().SubnetworkID)
})
block, err := dag.BlockForMining(blockTransactions)
if err != nil {
return nil, err

Binary files not shown.

@@ -230,9 +230,6 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit
if condition {
count++
}
// Get the previous block node.
current = current.selectedParent
}
// The state is locked in if the number of blocks in the
@@ -267,8 +264,8 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit
// This function is safe for concurrent access.
func (dag *BlockDAG) ThresholdState(deploymentID uint32) (ThresholdState, error) {
dag.dagLock.Lock()
defer dag.dagLock.Unlock()
state, err := dag.deploymentState(dag.selectedTip(), deploymentID)
dag.dagLock.Unlock()
return state, err
}
@@ -279,8 +276,8 @@ func (dag *BlockDAG) ThresholdState(deploymentID uint32) (ThresholdState, error)
// This function is safe for concurrent access.
func (dag *BlockDAG) IsDeploymentActive(deploymentID uint32) (bool, error) {
dag.dagLock.Lock()
defer dag.dagLock.Unlock()
state, err := dag.deploymentState(dag.selectedTip(), deploymentID)
dag.dagLock.Unlock()
if err != nil {
return false, err
}

blockdag/timesource.go Normal file

@@ -0,0 +1,25 @@
package blockdag
import (
"time"
)
// TimeSource is the interface to access time.
type TimeSource interface {
// Now returns the current time.
Now() time.Time
}
// timeSource provides an implementation of the TimeSource interface
// that simply returns the current local time.
type timeSource struct{}
// Now returns the current local time, with one second precision.
func (m *timeSource) Now() time.Time {
return time.Unix(time.Now().Unix(), 0)
}
// NewTimeSource returns a new instance of a TimeSource
func NewTimeSource() TimeSource {
return &timeSource{}
}
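The delayed-block tests above swap in a newFakeTimeSource helper; a minimal version satisfying this interface could look like the following (the actual test helper may differ):

// fakeTimeSource is an illustrative TimeSource that reports a fixed
// instant, letting tests freeze or advance the node's clock at will.
type fakeTimeSource struct {
	now time.Time
}

func newFakeTimeSource(now time.Time) TimeSource {
	return &fakeTimeSource{now: now}
}

// Now returns the fixed time, truncated to one second precision to
// match the real timeSource.
func (f *fakeTimeSource) Now() time.Time {
	return time.Unix(f.now.Unix(), 0)
}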


@@ -2,31 +2,26 @@ package blockdag
import (
"bytes"
"github.com/golang/groupcache/lru"
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/wire"
)
const ecmhCacheSize = 4_000_000
var (
utxoToECMHCache = lru.New(ecmhCacheSize)
)
func utxoMultiset(entry *UTXOEntry, outpoint *wire.Outpoint) (*ecc.Multiset, error) {
func addUTXOToMultiset(ms *secp256k1.MultiSet, entry *UTXOEntry, outpoint *wire.Outpoint) (*secp256k1.MultiSet, error) {
w := &bytes.Buffer{}
err := serializeUTXO(w, entry, outpoint)
if err != nil {
return nil, err
}
serializedUTXO := w.Bytes()
utxoHash := daghash.DoubleHashH(serializedUTXO)
if cachedMSPoint, ok := utxoToECMHCache.Get(utxoHash); ok {
return cachedMSPoint.(*ecc.Multiset), nil
}
msPoint := ecc.NewMultiset(ecc.S256()).Add(serializedUTXO)
utxoToECMHCache.Add(utxoHash, msPoint)
return msPoint, nil
ms.Add(w.Bytes())
return ms, nil
}
func removeUTXOFromMultiset(ms *secp256k1.MultiSet, entry *UTXOEntry, outpoint *wire.Outpoint) (*secp256k1.MultiSet, error) {
w := &bytes.Buffer{}
err := serializeUTXO(w, entry, outpoint)
if err != nil {
return nil, err
}
ms.Remove(w.Bytes())
return ms, nil
}
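These two helpers depend on the defining ECMH property: Remove is the exact inverse of Add, so disconnecting a block restores the multiset to its pre-connection state. A small sketch of that symmetry, again assuming secp256k1.NewMultiset as the empty-multiset constructor:

// Illustrative check of add/remove symmetry (constructor name assumed).
func multisetSymmetryExample() bool {
	serializedUTXO := []byte("some serialized UTXO")

	ms := secp256k1.NewMultiset()
	before := *ms.Finalize()
	ms.Add(serializedUTXO)    // changes the multiset hash
	ms.Remove(serializedUTXO) // restores the previous state
	after := *ms.Finalize()
	return before == after // expected: true
}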


@@ -8,8 +8,6 @@ import (
"github.com/pkg/errors"
)
var multisetPointSize = 32
type blockUTXODiffData struct {
diff *UTXODiff
diffChild *blockNode
@@ -166,6 +164,7 @@ func (diffStore *utxoDiffStore) flushToDB(dbTx database.Tx) error {
// while writing each entry.
buffer := &bytes.Buffer{}
for hash := range diffStore.dirty {
hash := hash // Copy hash to a new variable to avoid passing the same pointer
buffer.Reset()
diffData := diffStore.loaded[hash]
err := dbStoreDiffData(dbTx, buffer, &hash, diffData)


@@ -2,15 +2,11 @@ package blockdag
import (
"bytes"
"encoding/binary"
"fmt"
"github.com/pkg/errors"
"io"
"math/big"
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/kaspad/util/binaryserializer"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
"io"
)
// serializeBlockUTXODiffData serializes diff data in the following format:
@@ -54,40 +50,26 @@ func utxoEntryHeaderCode(entry *UTXOEntry) uint64 {
return headerCode
}
func (diffStore *utxoDiffStore) deserializeBlockUTXODiffData(serializedDiffDataBytes []byte) (*blockUTXODiffData, error) {
func (diffStore *utxoDiffStore) deserializeBlockUTXODiffData(serializedDiffData []byte) (*blockUTXODiffData, error) {
diffData := &blockUTXODiffData{}
serializedDiffData := bytes.NewBuffer(serializedDiffDataBytes)
r := bytes.NewBuffer(serializedDiffData)
var hasDiffChild bool
err := wire.ReadElement(serializedDiffData, &hasDiffChild)
err := wire.ReadElement(r, &hasDiffChild)
if err != nil {
return nil, err
}
if hasDiffChild {
hash := &daghash.Hash{}
err := wire.ReadElement(serializedDiffData, hash)
err := wire.ReadElement(r, hash)
if err != nil {
return nil, err
}
diffData.diffChild = diffStore.dag.index.LookupNode(hash)
}
diffData.diff = &UTXODiff{
useMultiset: true,
}
diffData.diff.toAdd, err = deserializeDiffEntries(serializedDiffData)
if err != nil {
return nil, err
}
diffData.diff.toRemove, err = deserializeDiffEntries(serializedDiffData)
if err != nil {
return nil, err
}
diffData.diff.diffMultiset, err = deserializeMultiset(serializedDiffData)
diffData.diff, err = deserializeUTXODiff(r)
if err != nil {
return nil, err
}
@@ -95,38 +77,31 @@ func (diffStore *utxoDiffStore) deserializeBlockUTXODiffData(serializedDiffDataB
return diffData, nil
}
func deserializeDiffEntries(r io.Reader) (utxoCollection, error) {
func deserializeUTXODiff(r io.Reader) (*UTXODiff, error) {
diff := &UTXODiff{}
var err error
diff.toAdd, err = deserializeUTXOCollection(r)
if err != nil {
return nil, err
}
diff.toRemove, err = deserializeUTXOCollection(r)
if err != nil {
return nil, err
}
return diff, nil
}
func deserializeUTXOCollection(r io.Reader) (utxoCollection, error) {
count, err := wire.ReadVarInt(r)
if err != nil {
return nil, err
}
collection := utxoCollection{}
for i := uint64(0); i < count; i++ {
outpointSize, err := wire.ReadVarInt(r)
if err != nil {
return nil, err
}
serializedOutpoint := make([]byte, outpointSize)
err = binary.Read(r, byteOrder, serializedOutpoint)
if err != nil {
return nil, err
}
outpoint, err := deserializeOutpoint(serializedOutpoint)
if err != nil {
return nil, err
}
utxoEntrySize, err := wire.ReadVarInt(r)
if err != nil {
return nil, err
}
serializedEntry := make([]byte, utxoEntrySize)
err = binary.Read(r, byteOrder, serializedEntry)
if err != nil {
return nil, err
}
utxoEntry, err := deserializeUTXOEntry(serializedEntry)
utxoEntry, outpoint, err := deserializeUTXO(r)
if err != nil {
return nil, err
}
@@ -135,31 +110,22 @@ func deserializeDiffEntries(r io.Reader) (utxoCollection, error) {
return collection, nil
}
// deserializeMultiset deserializes an ECMH multiset.
// See serializeMultiset for more details.
func deserializeMultiset(r io.Reader) (*ecc.Multiset, error) {
xBytes := make([]byte, multisetPointSize)
yBytes := make([]byte, multisetPointSize)
err := binary.Read(r, byteOrder, xBytes)
func deserializeUTXO(r io.Reader) (*UTXOEntry, *wire.Outpoint, error) {
outpoint, err := deserializeOutpoint(r)
if err != nil {
return nil, err
return nil, nil, err
}
err = binary.Read(r, byteOrder, yBytes)
utxoEntry, err := deserializeUTXOEntry(r)
if err != nil {
return nil, err
return nil, nil, err
}
var x, y big.Int
x.SetBytes(xBytes)
y.SetBytes(yBytes)
return ecc.NewMultisetFromPoint(ecc.S256(), &x, &y), nil
return utxoEntry, outpoint, nil
}
// serializeUTXODiff serializes UTXODiff by serializing
// UTXODiff.toAdd, UTXODiff.toRemove and UTXODiff.Multiset one after the other.
func serializeUTXODiff(w io.Writer, diff *UTXODiff) error {
if !diff.useMultiset {
return errors.New("Cannot serialize a UTXO diff without a multiset")
}
err := serializeUTXOCollection(w, diff.toAdd)
if err != nil {
return err
@@ -169,10 +135,7 @@ func serializeUTXODiff(w io.Writer, diff *UTXODiff) error {
if err != nil {
return err
}
err = serializeMultiset(w, diff.diffMultiset)
if err != nil {
return err
}
return nil
}
@@ -193,121 +156,95 @@ func serializeUTXOCollection(w io.Writer, collection utxoCollection) error {
return nil
}
// serializeMultiset serializes an ECMH multiset. The serialization
// is done by taking the (x,y) coordinates of the multiset point,
// padding each of them to 32 bytes (a coordinate is shorter only when
// it has leading zero bytes) and writing them one after the other.
func serializeMultiset(w io.Writer, ms *ecc.Multiset) error {
x, y := ms.Point()
xBytes := make([]byte, multisetPointSize)
copy(xBytes, x.Bytes())
yBytes := make([]byte, multisetPointSize)
copy(yBytes, y.Bytes())
err := binary.Write(w, byteOrder, xBytes)
if err != nil {
return err
}
err = binary.Write(w, byteOrder, yBytes)
if err != nil {
return err
}
return nil
}
// serializeUTXO serializes a utxo entry-outpoint pair
func serializeUTXO(w io.Writer, entry *UTXOEntry, outpoint *wire.Outpoint) error {
serializedOutpoint := *outpointKey(*outpoint)
err := wire.WriteVarInt(w, uint64(len(serializedOutpoint)))
err := serializeOutpoint(w, outpoint)
if err != nil {
return err
}
err = binary.Write(w, byteOrder, serializedOutpoint)
if err != nil {
return err
}
serializedUTXOEntry := serializeUTXOEntry(entry)
err = wire.WriteVarInt(w, uint64(len(serializedUTXOEntry)))
if err != nil {
return err
}
err = binary.Write(w, byteOrder, serializedUTXOEntry)
err = serializeUTXOEntry(w, entry)
if err != nil {
return err
}
return nil
}
// serializeUTXOEntry returns the entry serialized to a format that is suitable
// for long-term storage. The format is described in detail above.
func serializeUTXOEntry(entry *UTXOEntry) []byte {
// p2pkhUTXOEntrySerializeSize is the serialized size for a P2PKH UTXO entry.
// 8 bytes (header code) + 8 bytes (amount) + varint for script pub key length of 25 (for P2PKH) + 25 bytes for P2PKH script.
var p2pkhUTXOEntrySerializeSize = 8 + 8 + wire.VarIntSerializeSize(25) + 25
// serializeUTXOEntry encodes the entry to the given io.Writer.
// The serialization format is described in detail above.
func serializeUTXOEntry(w io.Writer, entry *UTXOEntry) error {
// Encode the header code.
headerCode := utxoEntryHeaderCode(entry)
// Calculate the size needed to serialize the entry.
size := serializeSizeVLQ(headerCode) +
compressedTxOutSize(uint64(entry.Amount()), entry.ScriptPubKey())
// Serialize the header code followed by the compressed unspent
// transaction output.
serialized := make([]byte, size)
offset := putVLQ(serialized, headerCode)
offset += putCompressedTxOut(serialized[offset:], uint64(entry.Amount()),
entry.ScriptPubKey())
return serialized
}
// deserializeOutpoint decodes an outpoint from the passed serialized byte
// slice into a new wire.Outpoint using a format that is suitable for long-
// term storage. this format is described in detail above.
func deserializeOutpoint(serialized []byte) (*wire.Outpoint, error) {
if len(serialized) <= daghash.HashSize {
return nil, errDeserialize("unexpected end of data")
err := binaryserializer.PutUint64(w, byteOrder, headerCode)
if err != nil {
return err
}
txID := daghash.TxID{}
txID.SetBytes(serialized[:daghash.HashSize])
index, _ := deserializeVLQ(serialized[daghash.HashSize:])
return wire.NewOutpoint(&txID, uint32(index)), nil
err = binaryserializer.PutUint64(w, byteOrder, entry.Amount())
if err != nil {
return err
}
err = wire.WriteVarInt(w, uint64(len(entry.ScriptPubKey())))
if err != nil {
return err
}
_, err = w.Write(entry.ScriptPubKey())
if err != nil {
return errors.WithStack(err)
}
return nil
}
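Taken together, the writes above produce a simple layout: an 8-byte header code, an 8-byte amount, a varint script length, and the script itself. A self-contained sketch of that layout, assuming the package's byteOrder is little-endian and using encoding/binary's unsigned varint as a stand-in for wire.WriteVarInt:
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// encodeEntry sketches the layout produced by serializeUTXOEntry:
// 8-byte header code, 8-byte amount, varint script length, script.
// binary.PutUvarint is only a stand-in for wire.WriteVarInt here.
func encodeEntry(headerCode, amount uint64, scriptPubKey []byte) []byte {
	var buf bytes.Buffer
	binary.Write(&buf, binary.LittleEndian, headerCode)
	binary.Write(&buf, binary.LittleEndian, amount)
	var varint [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(varint[:], uint64(len(scriptPubKey)))
	buf.Write(varint[:n])
	buf.Write(scriptPubKey)
	return buf.Bytes()
}

func main() {
	serialized := encodeEntry(7, 10000, make([]byte, 25))
	fmt.Println(len(serialized)) // 42, matching p2pkhUTXOEntrySerializeSize
}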
// deserializeUTXOEntry decodes a UTXO entry from the passed serialized byte
// slice into a new UTXOEntry using a format that is suitable for long-term
// storage. The format is described in detail above.
func deserializeUTXOEntry(serialized []byte) (*UTXOEntry, error) {
// deserializeUTXOEntry decodes a UTXO entry from the passed reader
// into a new UTXOEntry, according to the format that is described
// in detail above.
func deserializeUTXOEntry(r io.Reader) (*UTXOEntry, error) {
// Deserialize the header code.
code, offset := deserializeVLQ(serialized)
if offset >= len(serialized) {
return nil, errDeserialize("unexpected end of data after header")
headerCode, err := binaryserializer.Uint64(r, byteOrder)
if err != nil {
return nil, err
}
// Decode the header code.
//
// Bit 0 indicates whether the containing transaction is a coinbase.
// Bits 1-x encode blue score of the containing transaction.
isCoinbase := code&0x01 != 0
blockBlueScore := code >> 1
// Decode the compressed unspent transaction output.
amount, scriptPubKey, _, err := decodeCompressedTxOut(serialized[offset:])
if err != nil {
return nil, errDeserialize(fmt.Sprintf("unable to decode "+
"UTXO: %s", err))
}
isCoinbase := headerCode&0x01 != 0
blockBlueScore := headerCode >> 1
entry := &UTXOEntry{
amount: amount,
scriptPubKey: scriptPubKey,
blockBlueScore: blockBlueScore,
packedFlags: 0,
}
if isCoinbase {
entry.packedFlags |= tfCoinbase
}
entry.amount, err = binaryserializer.Uint64(r, byteOrder)
if err != nil {
return nil, err
}
scriptPubKeyLen, err := wire.ReadVarInt(r)
if err != nil {
return nil, err
}
entry.scriptPubKey = make([]byte, scriptPubKeyLen)
_, err = r.Read(entry.scriptPubKey)
if err != nil {
return nil, errors.WithStack(err)
}
return entry, nil
}
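The header code read at the top of this function packs two fields, as the comments above describe: bit 0 carries the coinbase flag, the remaining bits carry the blue score. A minimal round-trip of that packing:
package main

import "fmt"

// packHeaderCode packs the coinbase flag into bit 0 and the blue
// score into the remaining bits, the inverse of the decoding in
// deserializeUTXOEntry.
func packHeaderCode(blueScore uint64, isCoinbase bool) uint64 {
	code := blueScore << 1
	if isCoinbase {
		code |= 0x01
	}
	return code
}

func main() {
	code := packHeaderCode(1500, true)
	fmt.Println(code&0x01 != 0, code>>1) // true 1500
}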


@@ -2,12 +2,13 @@ package blockdag
import (
"fmt"
"github.com/pkg/errors"
"math"
"sort"
"strings"
"github.com/kaspanet/kaspad/ecc"
"github.com/pkg/errors"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/wire"
)
@@ -152,29 +153,16 @@ func (uc utxoCollection) clone() utxoCollection {
// UTXODiff represents a diff between two UTXO Sets.
type UTXODiff struct {
toAdd utxoCollection
toRemove utxoCollection
diffMultiset *ecc.Multiset
useMultiset bool
toAdd utxoCollection
toRemove utxoCollection
}
// NewUTXODiffWithoutMultiset creates a new, empty utxoDiff
// NewUTXODiff creates a new, empty utxoDiff
// without a multiset.
func NewUTXODiffWithoutMultiset() *UTXODiff {
return &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
useMultiset: false,
}
}
// NewUTXODiff creates a new, empty utxoDiff.
func NewUTXODiff() *UTXODiff {
return &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
useMultiset: true,
diffMultiset: ecc.NewMultiset(ecc.S256()),
toAdd: utxoCollection{},
toRemove: utxoCollection{},
}
}
@@ -208,9 +196,8 @@ func NewUTXODiff() *UTXODiff {
// diffFrom results in the UTXO being added to toAdd
func (d *UTXODiff) diffFrom(other *UTXODiff) (*UTXODiff, error) {
result := UTXODiff{
toAdd: make(utxoCollection, len(d.toRemove)+len(other.toAdd)),
toRemove: make(utxoCollection, len(d.toAdd)+len(other.toRemove)),
useMultiset: d.useMultiset,
toAdd: make(utxoCollection, len(d.toRemove)+len(other.toAdd)),
toRemove: make(utxoCollection, len(d.toAdd)+len(other.toRemove)),
}
// Note that the following cases are not accounted for, as they are impossible
@@ -224,6 +211,10 @@ func (d *UTXODiff) diffFrom(other *UTXODiff) (*UTXODiff, error) {
for outpoint, utxoEntry := range d.toAdd {
if !other.toAdd.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
result.toRemove.add(outpoint, utxoEntry)
} else if (d.toRemove.contains(outpoint) && !other.toRemove.contains(outpoint)) ||
(!d.toRemove.contains(outpoint) && other.toRemove.contains(outpoint)) {
return nil, errors.New(
"diffFrom: outpoint both in d.toAdd, other.toAdd, and only one of d.toRemove and other.toRemove")
}
if diffEntry, ok := other.toRemove.get(outpoint); ok {
// An exception is made for entries with unequal blue scores
@@ -243,6 +234,18 @@ func (d *UTXODiff) diffFrom(other *UTXODiff) (*UTXODiff, error) {
// If they are not in other.toRemove - should be added in result.toAdd
// If they are in other.toAdd - base utxoSet is not the same
for outpoint, utxoEntry := range d.toRemove {
diffEntry, ok := other.toRemove.get(outpoint)
if ok {
// If we have the same entry in d.toRemove - simply don't copy it,
// unless the existing entry has a different blue score, in which case this is an error
if utxoEntry.blockBlueScore != diffEntry.blockBlueScore {
return nil, errors.New("diffFrom: outpoint both in d.toRemove and other.toRemove with different " +
"blue scores, with no corresponding entry in d.toAdd")
}
} else { // if no existing entry - add to result.toAdd
result.toAdd.add(outpoint, utxoEntry)
}
if !other.toRemove.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
result.toAdd.add(outpoint, utxoEntry)
}
@@ -256,7 +259,7 @@ func (d *UTXODiff) diffFrom(other *UTXODiff) (*UTXODiff, error) {
other.toRemove.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore)) {
continue
}
return nil, errors.New("diffFrom: transaction both in d.toRemove and in other.toAdd")
return nil, errors.New("diffFrom: outpoint both in d.toRemove and in other.toAdd")
}
}
@@ -276,124 +279,72 @@ func (d *UTXODiff) diffFrom(other *UTXODiff) (*UTXODiff, error) {
}
}
if d.useMultiset {
// Create a new diffMultiset as the subtraction of the two diffs.
result.diffMultiset = other.diffMultiset.Subtract(d.diffMultiset)
}
return &result, nil
}
// WithDiff applies provided diff to this diff, creating a new utxoDiff, that would be the result if
// withDiffInPlace applies provided diff to this diff in-place, that would be the result if
// first d, and then diff were applied to the same base
//
// WithDiff follows a set of rules represented by the following 3 by 3 table:
//
//          |           | this      |           |
// ---------+-----------+-----------+-----------+-----------
//          |           | toAdd     | toRemove  | None
// ---------+-----------+-----------+-----------+-----------
// other    | toAdd     | X         | -         | toAdd
// ---------+-----------+-----------+-----------+-----------
//          | toRemove  | -         | X         | toRemove
// ---------+-----------+-----------+-----------+-----------
//          | None      | toAdd     | toRemove  | -
//
// Key:
// -        Don't add anything to the result
// X        Return an error
// toAdd    Add the UTXO into the toAdd collection of the result
// toRemove Add the UTXO into the toRemove collection of the result
//
// Examples:
// 1. This diff contains a UTXO in toAdd, and the other diff contains it in toRemove
// WithDiff results in nothing being added
// 2. This diff contains a UTXO in toRemove, and the other diff does not contain it
// WithDiff results in the UTXO being added to toRemove
func (d *UTXODiff) withDiffInPlace(diff *UTXODiff) error {
for outpoint, entryToRemove := range diff.toRemove {
if d.toAdd.containsWithBlueScore(outpoint, entryToRemove.blockBlueScore) {
// If already exists in toAdd with the same blueScore - remove from toAdd
d.toAdd.remove(outpoint)
continue
}
if d.toRemove.contains(outpoint) {
// If already exists - this is an error
return ruleError(ErrWithDiff, fmt.Sprintf(
"withDiffInPlace: outpoint %s both in d.toRemove and in diff.toRemove", outpoint))
}
// If it exists in neither toAdd nor toRemove - add to toRemove
d.toRemove.add(outpoint, entryToRemove)
}
for outpoint, entryToAdd := range diff.toAdd {
if d.toRemove.containsWithBlueScore(outpoint, entryToAdd.blockBlueScore) {
// If already exists in toRemove with the same blueScore - remove from toRemove
if d.toAdd.contains(outpoint) && !diff.toRemove.contains(outpoint) {
return ruleError(ErrWithDiff, fmt.Sprintf(
"withDiffInPlace: outpoint %s both in d.toAdd and in diff.toAdd with no "+
"corresponding entry in diff.toRemove", outpoint))
}
d.toRemove.remove(outpoint)
continue
}
if existingEntry, ok := d.toAdd.get(outpoint); ok &&
(existingEntry.blockBlueScore == entryToAdd.blockBlueScore ||
!diff.toRemove.containsWithBlueScore(outpoint, existingEntry.blockBlueScore)) {
// If already exists - this is an error
return ruleError(ErrWithDiff, fmt.Sprintf(
"withDiffInPlace: outpoint %s both in d.toAdd and in diff.toAdd", outpoint))
}
// If it exists in neither toAdd nor toRemove, or exists in toRemove with a different blueScore - add to toAdd
d.toAdd.add(outpoint, entryToAdd)
}
return nil
}
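Setting aside the blue-score exceptions, the rule table boils down to a small cancellation algorithm. A toy model over plain string outpoints (deliberately not the kaspad types) showing the three possible outcomes: cancel, error, and carry over:
package main

import (
	"errors"
	"fmt"
)

// toyDiff models a UTXO diff with plain string outpoints and no blue
// scores, just enough to demonstrate the rule table above.
type toyDiff struct {
	toAdd    map[string]bool
	toRemove map[string]bool
}

// withDiffInPlace applies the 3-by-3 rule table: an add meeting a
// remove cancels, a duplicate add or remove is an error (X), and
// anything unopposed is carried into d.
func (d *toyDiff) withDiffInPlace(diff *toyDiff) error {
	for op := range diff.toRemove {
		switch {
		case d.toAdd[op]: // toRemove meets toAdd: cancel
			delete(d.toAdd, op)
		case d.toRemove[op]: // removed on both sides: X
			return errors.New("outpoint removed twice")
		default:
			d.toRemove[op] = true
		}
	}
	for op := range diff.toAdd {
		switch {
		case d.toRemove[op]: // toAdd meets toRemove: cancel
			delete(d.toRemove, op)
		case d.toAdd[op]: // added on both sides: X
			return errors.New("outpoint added twice")
		default:
			d.toAdd[op] = true
		}
	}
	return nil
}

func main() {
	d := &toyDiff{toAdd: map[string]bool{"a": true}, toRemove: map[string]bool{}}
	diff := &toyDiff{toAdd: map[string]bool{}, toRemove: map[string]bool{"a": true}}
	err := d.withDiffInPlace(diff)
	fmt.Println(err, d.toAdd, d.toRemove) // <nil> map[] map[]
}
WithDiff below is then just this operation applied to a clone.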
// WithDiff applies provided diff to this diff, creating a new utxoDiff, that would be the result if
// first d, and then diff were applied to some base
func (d *UTXODiff) WithDiff(diff *UTXODiff) (*UTXODiff, error) {
result := UTXODiff{
toAdd: make(utxoCollection, len(d.toAdd)+len(diff.toAdd)),
toRemove: make(utxoCollection, len(d.toRemove)+len(diff.toRemove)),
useMultiset: d.useMultiset,
clone := d.clone()
err := clone.withDiffInPlace(diff)
if err != nil {
return nil, err
}
// All transactions in d.toAdd:
// If they are not in diff.toRemove - should be added in result.toAdd
// If they are in diff.toAdd - should throw an error
// Otherwise - should be ignored
for outpoint, utxoEntry := range d.toAdd {
if !diff.toRemove.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
result.toAdd.add(outpoint, utxoEntry)
}
if diffEntry, ok := diff.toAdd.get(outpoint); ok {
// An exception is made for entries with unequal blue scores
// as long as the appropriate entry exists in either d.toRemove
// or diff.toRemove.
// These are just "updates" to accepted blue score
if diffEntry.blockBlueScore != utxoEntry.blockBlueScore &&
(d.toRemove.containsWithBlueScore(outpoint, diffEntry.blockBlueScore) ||
diff.toRemove.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore)) {
continue
}
return nil, ruleError(ErrWithDiff, fmt.Sprintf("WithDiff: outpoint %s both in d.toAdd and in other.toAdd", outpoint))
}
}
// All transactions in d.toRemove:
// If they are not in diff.toAdd - should be added in result.toRemove
// If they are in diff.toRemove - should throw an error
// Otherwise - should be ignored
for outpoint, utxoEntry := range d.toRemove {
if !diff.toAdd.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
result.toRemove.add(outpoint, utxoEntry)
}
if diffEntry, ok := diff.toRemove.get(outpoint); ok {
// An exception is made for entries with unequal blue scores
// as long as the appropriate entry exists in either d.toAdd
// or diff.toAdd.
// These are just "updates" to accepted blue score
if diffEntry.blockBlueScore != utxoEntry.blockBlueScore &&
(d.toAdd.containsWithBlueScore(outpoint, diffEntry.blockBlueScore) ||
diff.toAdd.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore)) {
continue
}
return nil, ruleError(ErrWithDiff, "WithDiff: transaction both in d.toRemove and in other.toRemove")
}
}
// All transactions in diff.toAdd:
// If they are not in d.toRemove - should be added in result.toAdd
for outpoint, utxoEntry := range diff.toAdd {
if !d.toRemove.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
result.toAdd.add(outpoint, utxoEntry)
}
}
// All transactions in diff.toRemove:
// If they are not in d.toAdd - should be added in result.toRemove
for outpoint, utxoEntry := range diff.toRemove {
if !d.toAdd.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
result.toRemove.add(outpoint, utxoEntry)
}
}
// Apply diff.diffMultiset to d.diffMultiset
if d.useMultiset {
result.diffMultiset = d.diffMultiset.Union(diff.diffMultiset)
}
return &result, nil
return clone, nil
}
// clone returns a clone of this utxoDiff
func (d *UTXODiff) clone() *UTXODiff {
clone := &UTXODiff{
toAdd: d.toAdd.clone(),
toRemove: d.toRemove.clone(),
useMultiset: d.useMultiset,
}
if d.useMultiset {
clone.diffMultiset = d.diffMultiset.Clone()
toAdd: d.toAdd.clone(),
toRemove: d.toRemove.clone(),
}
return clone
}
@@ -410,14 +361,6 @@ func (d *UTXODiff) AddEntry(outpoint wire.Outpoint, entry *UTXOEntry) error {
} else {
d.toAdd.add(outpoint, entry)
}
if d.useMultiset {
newMs, err := addUTXOToMultiset(d.diffMultiset, entry, &outpoint)
if err != nil {
return err
}
d.diffMultiset = newMs
}
return nil
}
@@ -433,21 +376,10 @@ func (d *UTXODiff) RemoveEntry(outpoint wire.Outpoint, entry *UTXOEntry) error {
} else {
d.toRemove.add(outpoint, entry)
}
if d.useMultiset {
newMs, err := removeUTXOFromMultiset(d.diffMultiset, entry, &outpoint)
if err != nil {
return err
}
d.diffMultiset = newMs
}
return nil
}
func (d UTXODiff) String() string {
if d.useMultiset {
return fmt.Sprintf("toAdd: %s; toRemove: %s, Multiset-Hash: %s", d.toAdd, d.toRemove, d.diffMultiset.Hash())
}
return fmt.Sprintf("toAdd: %s; toRemove: %s", d.toAdd, d.toRemove)
}
@@ -472,8 +404,6 @@ type UTXOSet interface {
AddTx(tx *wire.MsgTx, blockBlueScore uint64) (ok bool, err error)
clone() UTXOSet
Get(outpoint wire.Outpoint) (*UTXOEntry, bool)
Multiset() *ecc.Multiset
WithTransactions(transactions []*wire.MsgTx, blockBlueScore uint64, ignoreDoubleSpends bool) (UTXOSet, error)
}
// diffFromTx is a common implementation for diffFromTx, that works
@@ -543,21 +473,19 @@ func diffFromAcceptedTx(u UTXOSet, tx *wire.MsgTx, acceptingBlueScore uint64) (*
// FullUTXOSet represents a full list of transaction outputs and their values
type FullUTXOSet struct {
utxoCollection
UTXOMultiset *ecc.Multiset
}
// NewFullUTXOSet creates a new utxoSet with full list of transaction outputs and their values
func NewFullUTXOSet() *FullUTXOSet {
return &FullUTXOSet{
utxoCollection: utxoCollection{},
UTXOMultiset: ecc.NewMultiset(ecc.S256()),
}
}
// newFullUTXOSetFromUTXOCollection converts a utxoCollection to a FullUTXOSet
func newFullUTXOSetFromUTXOCollection(collection utxoCollection) (*FullUTXOSet, error) {
var err error
multiset := ecc.NewMultiset(ecc.S256())
multiset := secp256k1.NewMultiset()
for outpoint, utxoEntry := range collection {
multiset, err = addUTXOToMultiset(multiset, utxoEntry, &outpoint)
if err != nil {
@@ -566,7 +494,6 @@ func newFullUTXOSetFromUTXOCollection(collection utxoCollection) (*FullUTXOSet,
}
return &FullUTXOSet{
utxoCollection: collection,
UTXOMultiset: multiset,
}, nil
}
@@ -603,22 +530,14 @@ func (fus *FullUTXOSet) AddTx(tx *wire.MsgTx, blueScore uint64) (isAccepted bool
}
for _, txIn := range tx.TxIn {
outpoint := *wire.NewOutpoint(&txIn.PreviousOutpoint.TxID, txIn.PreviousOutpoint.Index)
err := fus.removeAndUpdateMultiset(outpoint)
if err != nil {
return false, err
}
fus.remove(txIn.PreviousOutpoint)
}
}
for i, txOut := range tx.TxOut {
outpoint := *wire.NewOutpoint(tx.TxID(), uint32(i))
entry := NewUTXOEntry(txOut, isCoinbase, blueScore)
err := fus.addAndUpdateMultiset(outpoint, entry)
if err != nil {
return false, err
}
fus.add(outpoint, entry)
}
return true, nil
@@ -647,7 +566,7 @@ func (fus *FullUTXOSet) diffFromAcceptedTx(tx *wire.MsgTx, acceptingBlueScore ui
// clone returns a clone of this utxoSet
func (fus *FullUTXOSet) clone() UTXOSet {
return &FullUTXOSet{utxoCollection: fus.utxoCollection.clone(), UTXOMultiset: fus.UTXOMultiset.Clone()}
return &FullUTXOSet{utxoCollection: fus.utxoCollection.clone()}
}
// Get returns the UTXOEntry associated with the given Outpoint, and a boolean indicating if such entry was found
@@ -656,55 +575,6 @@ func (fus *FullUTXOSet) Get(outpoint wire.Outpoint) (*UTXOEntry, bool) {
return utxoEntry, ok
}
// Multiset returns the ecmh-Multiset of this utxoSet
func (fus *FullUTXOSet) Multiset() *ecc.Multiset {
return fus.UTXOMultiset
}
// addAndUpdateMultiset adds a UTXOEntry to this utxoSet and updates its multiset accordingly
func (fus *FullUTXOSet) addAndUpdateMultiset(outpoint wire.Outpoint, entry *UTXOEntry) error {
fus.add(outpoint, entry)
newMs, err := addUTXOToMultiset(fus.UTXOMultiset, entry, &outpoint)
if err != nil {
return err
}
fus.UTXOMultiset = newMs
return nil
}
// removeAndUpdateMultiset removes a UTXOEntry from this utxoSet and updates its multiset accordingly
func (fus *FullUTXOSet) removeAndUpdateMultiset(outpoint wire.Outpoint) error {
entry, ok := fus.Get(outpoint)
if !ok {
return errors.Errorf("Couldn't find outpoint %s", outpoint)
}
fus.remove(outpoint)
var err error
newMs, err := removeUTXOFromMultiset(fus.UTXOMultiset, entry, &outpoint)
if err != nil {
return err
}
fus.UTXOMultiset = newMs
return nil
}
// WithTransactions returns a new UTXO Set with the added transactions.
//
// This function MUST be called with the DAG lock held.
func (fus *FullUTXOSet) WithTransactions(transactions []*wire.MsgTx, blockBlueScore uint64, ignoreDoubleSpends bool) (UTXOSet, error) {
diffSet := NewDiffUTXOSet(fus, NewUTXODiff())
for _, tx := range transactions {
isAccepted, err := diffSet.AddTx(tx, blockBlueScore)
if err != nil {
return nil, err
}
if !ignoreDoubleSpends && !isAccepted {
return nil, errors.Errorf("Transaction %s is not valid with the current UTXO set", tx.TxID())
}
}
return UTXOSet(diffSet), nil
}
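A hypothetical in-package helper (not part of this change) showing the intended call pattern: build a throwaway view of the set with a block's transactions applied, letting a double spend surface as an error because ignoreDoubleSpends is false:
// applyBlockTransactions is a hypothetical helper; it simply names
// the common WithTransactions call pattern against the UTXOSet
// interface declared above.
func applyBlockTransactions(set UTXOSet, txs []*wire.MsgTx, blueScore uint64) (UTXOSet, error) {
	return set.WithTransactions(txs, blueScore, false)
}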
// DiffUTXOSet represents a utxoSet with a base fullUTXOSet and a UTXODiff
type DiffUTXOSet struct {
base *FullUTXOSet
@@ -765,12 +635,11 @@ func (dus *DiffUTXOSet) AddTx(tx *wire.MsgTx, blockBlueScore uint64) (bool, erro
func (dus *DiffUTXOSet) appendTx(tx *wire.MsgTx, blockBlueScore uint64, isCoinbase bool) error {
if !isCoinbase {
for _, txIn := range tx.TxIn {
outpoint := *wire.NewOutpoint(&txIn.PreviousOutpoint.TxID, txIn.PreviousOutpoint.Index)
entry, ok := dus.Get(outpoint)
entry, ok := dus.Get(txIn.PreviousOutpoint)
if !ok {
return errors.Errorf("Couldn't find entry for outpoint %s", outpoint)
return errors.Errorf("Couldn't find entry for outpoint %s", txIn.PreviousOutpoint)
}
err := dus.UTXODiff.RemoveEntry(outpoint, entry)
err := dus.UTXODiff.RemoveEntry(txIn.PreviousOutpoint, entry)
if err != nil {
return err
}
@@ -816,16 +685,7 @@ func (dus *DiffUTXOSet) meldToBase() error {
for outpoint, utxoEntry := range dus.UTXODiff.toAdd {
dus.base.add(outpoint, utxoEntry)
}
if dus.UTXODiff.useMultiset {
dus.base.UTXOMultiset = dus.base.UTXOMultiset.Union(dus.UTXODiff.diffMultiset)
}
if dus.UTXODiff.useMultiset {
dus.UTXODiff = NewUTXODiff()
} else {
dus.UTXODiff = NewUTXODiffWithoutMultiset()
}
dus.UTXODiff = NewUTXODiff()
return nil
}
@@ -840,7 +700,7 @@ func (dus *DiffUTXOSet) diffFromAcceptedTx(tx *wire.MsgTx, acceptingBlueScore ui
}
func (dus *DiffUTXOSet) String() string {
return fmt.Sprintf("{Base: %s, To Add: %s, To Remove: %s, Multiset-Hash:%s}", dus.base, dus.UTXODiff.toAdd, dus.UTXODiff.toRemove, dus.Multiset().Hash())
return fmt.Sprintf("{Base: %s, To Add: %s, To Remove: %s}", dus.base, dus.UTXODiff.toAdd, dus.UTXODiff.toRemove)
}
// clone returns a clone of this UTXO Set
@@ -865,42 +725,3 @@ func (dus *DiffUTXOSet) Get(outpoint wire.Outpoint) (*UTXOEntry, bool) {
txOut, ok := dus.UTXODiff.toAdd.get(outpoint)
return txOut, ok
}
// Multiset returns the ecmh-Multiset of this utxoSet
func (dus *DiffUTXOSet) Multiset() *ecc.Multiset {
return dus.base.UTXOMultiset.Union(dus.UTXODiff.diffMultiset)
}
// WithTransactions returns a new UTXO Set with the added transactions.
//
// If dus.UTXODiff.useMultiset is true, this function MUST be
// called with the DAG lock held.
func (dus *DiffUTXOSet) WithTransactions(transactions []*wire.MsgTx, blockBlueScore uint64, ignoreDoubleSpends bool) (UTXOSet, error) {
diffSet := NewDiffUTXOSet(dus.base, dus.UTXODiff.clone())
for _, tx := range transactions {
isAccepted, err := diffSet.AddTx(tx, blockBlueScore)
if err != nil {
return nil, err
}
if !ignoreDoubleSpends && !isAccepted {
return nil, errors.Errorf("Transaction %s is not valid with the current UTXO set", tx.TxID())
}
}
return UTXOSet(diffSet), nil
}
func addUTXOToMultiset(ms *ecc.Multiset, entry *UTXOEntry, outpoint *wire.Outpoint) (*ecc.Multiset, error) {
utxoMS, err := utxoMultiset(entry, outpoint)
if err != nil {
return nil, err
}
return ms.Union(utxoMS), nil
}
func removeUTXOFromMultiset(ms *ecc.Multiset, entry *UTXOEntry, outpoint *wire.Outpoint) (*ecc.Multiset, error) {
utxoMS, err := utxoMultiset(entry, outpoint)
if err != nil {
return nil, err
}
return ms.Subtract(utxoMS), nil
}


@@ -1,12 +1,12 @@
package blockdag
import (
"github.com/kaspanet/kaspad/util/subnetworkid"
"math"
"reflect"
"testing"
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
@@ -79,49 +79,40 @@ func TestUTXODiff(t *testing.T) {
utxoEntry0 := NewUTXOEntry(&wire.TxOut{ScriptPubKey: []byte{}, Value: 10}, true, 0)
utxoEntry1 := NewUTXOEntry(&wire.TxOut{ScriptPubKey: []byte{}, Value: 20}, false, 1)
for i := 0; i < 2; i++ {
withMultiset := i == 0
// Test utxoDiff creation
var diff *UTXODiff
if withMultiset {
diff = NewUTXODiff()
} else {
diff = NewUTXODiffWithoutMultiset()
}
if len(diff.toAdd) != 0 || len(diff.toRemove) != 0 {
t.Errorf("new diff is not empty")
}
// Test utxoDiff creation
err := diff.AddEntry(outpoint0, utxoEntry0)
if err != nil {
t.Fatalf("error adding entry to utxo diff: %s", err)
}
diff := NewUTXODiff()
err = diff.RemoveEntry(outpoint1, utxoEntry1)
if err != nil {
t.Fatalf("error adding entry to utxo diff: %s", err)
}
if len(diff.toAdd) != 0 || len(diff.toRemove) != 0 {
t.Errorf("new diff is not empty")
}
// Test utxoDiff cloning
clonedDiff := diff.clone()
if clonedDiff == diff {
t.Errorf("cloned diff is reference-equal to the original")
}
if !reflect.DeepEqual(clonedDiff, diff) {
t.Errorf("cloned diff not equal to the original"+
"Original: \"%v\", cloned: \"%v\".", diff, clonedDiff)
}
err := diff.AddEntry(outpoint0, utxoEntry0)
if err != nil {
t.Fatalf("error adding entry to utxo diff: %s", err)
}
// Test utxoDiff string representation
expectedDiffString := "toAdd: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ]; toRemove: [ (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20, blueScore: 1 ]"
if withMultiset {
expectedDiffString = "toAdd: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ]; toRemove: [ (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20, blueScore: 1 ], Multiset-Hash: 7cb61e48005b0c817211d04589d719bff87d86a6a6ce2454515f57265382ded7"
}
diffString := clonedDiff.String()
if diffString != expectedDiffString {
t.Errorf("unexpected diff string. "+
"Expected: \"%s\", got: \"%s\".", expectedDiffString, diffString)
}
err = diff.RemoveEntry(outpoint1, utxoEntry1)
if err != nil {
t.Fatalf("error adding entry to utxo diff: %s", err)
}
// Test utxoDiff cloning
clonedDiff := diff.clone()
if clonedDiff == diff {
t.Errorf("cloned diff is reference-equal to the original")
}
if !reflect.DeepEqual(clonedDiff, diff) {
t.Errorf("cloned diff not equal to the original"+
"Original: \"%v\", cloned: \"%v\".", diff, clonedDiff)
}
// Test utxoDiff string representation
expectedDiffString := "toAdd: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ]; toRemove: [ (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20, blueScore: 1 ]"
diffString := clonedDiff.String()
if diffString != expectedDiffString {
t.Errorf("unexpected diff string. "+
"Expected: \"%s\", got: \"%s\".", expectedDiffString, diffString)
}
}
@@ -136,8 +127,11 @@ func TestUTXODiffRules(t *testing.T) {
// For each of the following test cases, we will:
// this.diffFrom(other) and compare it to expectedDiffFromResult
// this.WithDiff(other) and compare it to expectedWithDiffResult
// this.withDiffInPlace(other) and compare it to expectedWithDiffResult
//
// Note: an expected nil result means that we expect the respective operation to fail
// See the following spreadsheet for a summary of all test-cases:
// https://docs.google.com/spreadsheets/d/1E8G3mp5y1-yifouwLLXRLueSRfXdDRwRKFieYE07buY/edit?usp=sharing
tests := []struct {
name string
this *UTXODiff
@@ -146,7 +140,7 @@ func TestUTXODiffRules(t *testing.T) {
expectedWithDiffResult *UTXODiff
}{
{
name: "one toAdd in this, one toAdd in other",
name: "first toAdd in this, first toAdd in other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{},
@@ -162,7 +156,23 @@ func TestUTXODiffRules(t *testing.T) {
expectedWithDiffResult: nil,
},
{
name: "one toAdd in this, one toRemove in other",
name: "first in toAdd in this, second in toAdd in other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{},
},
expectedDiffFromResult: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
expectedWithDiffResult: nil,
},
{
name: "first in toAdd in this, second in toRemove in other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{},
@@ -178,7 +188,36 @@ func TestUTXODiffRules(t *testing.T) {
},
},
{
name: "one toAdd in this, empty other",
name: "first in toAdd in this and other, second in toRemove in other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
expectedDiffFromResult: nil,
expectedWithDiffResult: nil,
},
{
name: "first in toAdd in this and toRemove in other, second in toAdd in other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
expectedDiffFromResult: nil,
expectedWithDiffResult: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{},
},
},
{
name: "first in toAdd in this, empty other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{},
@@ -197,7 +236,7 @@ func TestUTXODiffRules(t *testing.T) {
},
},
{
name: "one toRemove in this, one toAdd in other",
name: "first in toRemove in this and in toAdd in other",
this: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{outpoint0: utxoEntry1},
@@ -213,7 +252,23 @@ func TestUTXODiffRules(t *testing.T) {
},
},
{
name: "one toRemove in this, one toRemove in other",
name: "first in toRemove in this, second in toAdd in other",
this: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{},
},
expectedDiffFromResult: nil,
expectedWithDiffResult: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
},
{
name: "first in toRemove in this and other",
this: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{outpoint0: utxoEntry1},
@@ -229,7 +284,49 @@ func TestUTXODiffRules(t *testing.T) {
expectedWithDiffResult: nil,
},
{
name: "one toRemove in this, empty other",
name: "first in toRemove in this, second in toRemove in other",
this: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
other: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
expectedDiffFromResult: nil,
expectedWithDiffResult: nil,
},
{
name: "first in toRemove in this and toAdd in other, second in toRemove in other",
this: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
expectedDiffFromResult: nil,
expectedWithDiffResult: nil,
},
{
name: "first in toRemove in this and other, second in toAdd in other",
this: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
expectedDiffFromResult: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{},
},
expectedWithDiffResult: nil,
},
{
name: "first in toRemove in this, empty other",
this: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{outpoint0: utxoEntry1},
@@ -248,7 +345,116 @@ func TestUTXODiffRules(t *testing.T) {
},
},
{
name: "empty this, one toAdd in other",
name: "first in toAdd in this and other, second in toRemove in this",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{},
},
expectedDiffFromResult: nil,
expectedWithDiffResult: nil,
},
{
name: "first in toAdd in this, second in toRemove in this and toAdd in other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{},
},
expectedDiffFromResult: nil,
expectedWithDiffResult: nil,
},
{
name: "first in toAdd in this and toRemove in other, second in toRemove in this",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
other: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
expectedDiffFromResult: nil,
expectedWithDiffResult: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
},
{
name: "first in toAdd in this, second in toRemove in this and in other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
other: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
expectedDiffFromResult: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
expectedWithDiffResult: nil,
},
{
name: "first in toAdd and second in toRemove in both this and other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
expectedDiffFromResult: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
},
expectedWithDiffResult: nil,
},
{
name: "first in toAdd in this and toRemove in other, second in toRemove in this and toAdd in other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
expectedDiffFromResult: nil,
expectedWithDiffResult: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
},
},
{
name: "first in toAdd and second in toRemove in this, empty other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
other: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
},
expectedDiffFromResult: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
expectedWithDiffResult: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
},
{
name: "empty this, first in toAdd in other",
this: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
@@ -267,7 +473,7 @@ func TestUTXODiffRules(t *testing.T) {
},
},
{
name: "empty this, one toRemove in other",
name: "empty this, first in toRemove in other",
this: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
@@ -285,6 +491,25 @@ func TestUTXODiffRules(t *testing.T) {
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
},
{
name: "empty this, first in toAdd and second in toRemove in other",
this: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
expectedDiffFromResult: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
expectedWithDiffResult: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
},
{
name: "empty this, empty other",
this: &UTXODiff{
@@ -304,244 +529,104 @@ func TestUTXODiffRules(t *testing.T) {
toRemove: utxoCollection{},
},
},
{
name: "equal outpoints different blue scores: first in toAdd in this, second in toAdd in other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{},
},
expectedDiffFromResult: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
expectedWithDiffResult: nil,
},
{
name: "equal outpoints different blue scores: first in toRemove in this, second in toRemove in other",
this: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
other: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
expectedDiffFromResult: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
expectedWithDiffResult: nil,
},
{
name: "equal outpoints different blue scores: first in toAdd and second in toRemove in this, empty other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
other: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
},
expectedDiffFromResult: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
expectedWithDiffResult: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
},
{
name: "equal outpoints different blue scores: empty this, first in toAdd and second in toRemove in other",
this: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
expectedDiffFromResult: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
expectedWithDiffResult: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
},
{
name: "equal outpoints different blue scores: first in toAdd and second in toRemove in both this and other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
expectedDiffFromResult: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
},
expectedWithDiffResult: nil,
},
{
name: "equal outpoints different blue scores: first in toAdd in this and toRemove in other, second in toRemove in this and toAdd in other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
expectedDiffFromResult: nil,
expectedWithDiffResult: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
},
},
}
for _, test := range tests {
this := addMultisetToDiff(t, test.this)
other := addMultisetToDiff(t, test.other)
expectedDiffFromResult := addMultisetToDiff(t, test.expectedDiffFromResult)
expectedWithDiffResult := addMultisetToDiff(t, test.expectedWithDiffResult)
// diffFrom from this to other
diffResult, err := this.diffFrom(other)
// diffFrom from test.this to test.other
diffResult, err := test.this.diffFrom(test.other)
// Test whether diffFrom returned an error
isDiffFromOk := err == nil
expectedIsDiffFromOk := expectedDiffFromResult != nil
expectedIsDiffFromOk := test.expectedDiffFromResult != nil
if isDiffFromOk != expectedIsDiffFromOk {
t.Errorf("unexpected diffFrom error in test \"%s\". "+
"Expected: \"%t\", got: \"%t\".", test.name, expectedIsDiffFromOk, isDiffFromOk)
}
// If not error, test the diffFrom result
if isDiffFromOk && !expectedDiffFromResult.equal(diffResult) {
if isDiffFromOk && !test.expectedDiffFromResult.equal(diffResult) {
t.Errorf("unexpected diffFrom result in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, expectedDiffFromResult, diffResult)
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedDiffFromResult, diffResult)
}
// Make sure that WithDiff after diffFrom results in the original other
// Make sure that WithDiff after diffFrom results in the original test.other
if isDiffFromOk {
otherResult, err := this.WithDiff(diffResult)
otherResult, err := test.this.WithDiff(diffResult)
if err != nil {
t.Errorf("WithDiff unexpectedly failed in test \"%s\": %s", test.name, err)
}
if !other.equal(otherResult) {
if !test.other.equal(otherResult) {
t.Errorf("unexpected WithDiff result in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, other, otherResult)
"Expected: \"%v\", got: \"%v\".", test.name, test.other, otherResult)
}
}
// WithDiff from this to other
withDiffResult, err := this.WithDiff(other)
// WithDiff from test.this to test.other
withDiffResult, err := test.this.WithDiff(test.other)
// Test whether WithDiff returned an error
isWithDiffOk := err == nil
expectedIsWithDiffOk := expectedWithDiffResult != nil
expectedIsWithDiffOk := test.expectedWithDiffResult != nil
if isWithDiffOk != expectedIsWithDiffOk {
t.Errorf("unexpected WithDiff error in test \"%s\". "+
"Expected: \"%t\", got: \"%t\".", test.name, expectedIsWithDiffOk, isWithDiffOk)
}
// If not error, test the WithDiff result
if isWithDiffOk && !withDiffResult.equal(expectedWithDiffResult) {
if isWithDiffOk && !withDiffResult.equal(test.expectedWithDiffResult) {
t.Errorf("unexpected WithDiff result in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, expectedWithDiffResult, withDiffResult)
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedWithDiffResult, withDiffResult)
}
// Make sure that diffFrom after WithDiff results in the original other
// Repeat the WithDiff check, this time using withDiffInPlace
thisClone := test.this.clone()
err = thisClone.withDiffInPlace(test.other)
// Test whether withDiffInPlace returned an error
isWithDiffInPlaceOk := err == nil
expectedIsWithDiffInPlaceOk := test.expectedWithDiffResult != nil
if isWithDiffInPlaceOk != expectedIsWithDiffInPlaceOk {
t.Errorf("unexpected withDiffInPlace error in test \"%s\". "+
"Expected: \"%t\", got: \"%t\".", test.name, expectedIsWithDiffInPlaceOk, isWithDiffInPlaceOk)
}
// If not error, test the withDiffInPlace result
if isWithDiffInPlaceOk && !thisClone.equal(test.expectedWithDiffResult) {
t.Errorf("unexpected withDiffInPlace result in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedWithDiffResult, thisClone)
}
// Make sure that diffFrom after WithDiff results in the original test.other
if isWithDiffOk {
otherResult, err := this.diffFrom(withDiffResult)
otherResult, err := test.this.diffFrom(withDiffResult)
if err != nil {
t.Errorf("diffFrom unexpectedly failed in test \"%s\": %s", test.name, err)
}
if !other.equal(otherResult) {
if !test.other.equal(otherResult) {
t.Errorf("unexpected diffFrom result in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, other, otherResult)
"Expected: \"%v\", got: \"%v\".", test.name, test.other, otherResult)
}
}
}
}
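The loop above also exercises a round-trip property worth naming: whenever diffFrom succeeds, applying the resulting diff with WithDiff must reproduce the original operand. The same check, condensed into a hypothetical helper built on the package's unexported methods:
// checkRoundTrip is a hypothetical test helper restating the
// round-trip assertion made inside TestUTXODiffRules.
func checkRoundTrip(t *testing.T, this, other *UTXODiff) {
	diff, err := this.diffFrom(other)
	if err != nil {
		return // the pair is not diffable; nothing to round-trip
	}
	back, err := this.WithDiff(diff)
	if err != nil {
		t.Fatalf("WithDiff unexpectedly failed: %s", err)
	}
	if !other.equal(back) {
		t.Errorf("round-trip mismatch: got %v, want %v", back, other)
	}
}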
func areMultisetsEqual(a *ecc.Multiset, b *ecc.Multiset) bool {
aX, aY := a.Point()
bX, bY := b.Point()
return aX.Cmp(bX) == 0 && aY.Cmp(bY) == 0
}
func (d *UTXODiff) equal(other *UTXODiff) bool {
if d == nil || other == nil {
return d == other
}
return reflect.DeepEqual(d.toAdd, other.toAdd) &&
reflect.DeepEqual(d.toRemove, other.toRemove) &&
areMultisetsEqual(d.diffMultiset, other.diffMultiset)
reflect.DeepEqual(d.toRemove, other.toRemove)
}
func (fus *FullUTXOSet) equal(other *FullUTXOSet) bool {
return reflect.DeepEqual(fus.utxoCollection, other.utxoCollection) &&
areMultisetsEqual(fus.UTXOMultiset, other.UTXOMultiset)
return reflect.DeepEqual(fus.utxoCollection, other.utxoCollection)
}
func (dus *DiffUTXOSet) equal(other *DiffUTXOSet) bool {
return dus.base.equal(other.base) && dus.UTXODiff.equal(other.UTXODiff)
}
func addMultisetToDiff(t *testing.T, diff *UTXODiff) *UTXODiff {
if diff == nil {
return nil
}
diffWithMs := NewUTXODiff()
for outpoint, entry := range diff.toAdd {
err := diffWithMs.AddEntry(outpoint, entry)
if err != nil {
t.Fatalf("Error with diffWithMs.AddEntry: %s", err)
}
}
for outpoint, entry := range diff.toRemove {
err := diffWithMs.RemoveEntry(outpoint, entry)
if err != nil {
t.Fatalf("Error with diffWithMs.removeEntry: %s", err)
}
}
return diffWithMs
}
func addMultisetToFullUTXOSet(t *testing.T, fus *FullUTXOSet) *FullUTXOSet {
if fus == nil {
return nil
}
fusWithMs := NewFullUTXOSet()
for outpoint, entry := range fus.utxoCollection {
err := fusWithMs.addAndUpdateMultiset(outpoint, entry)
if err != nil {
t.Fatalf("Error with diffWithMs.AddEntry: %s", err)
}
}
return fusWithMs
}
func addMultisetToDiffUTXOSet(t *testing.T, diffSet *DiffUTXOSet) *DiffUTXOSet {
if diffSet == nil {
return nil
}
diffWithMs := addMultisetToDiff(t, diffSet.UTXODiff)
baseWithMs := addMultisetToFullUTXOSet(t, diffSet.base)
return NewDiffUTXOSet(baseWithMs, diffWithMs)
}
// TestFullUTXOSet makes sure that fullUTXOSet is working as expected.
func TestFullUTXOSet(t *testing.T) {
txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
@@ -552,10 +637,10 @@ func TestFullUTXOSet(t *testing.T) {
txOut1 := &wire.TxOut{ScriptPubKey: []byte{}, Value: 20}
utxoEntry0 := NewUTXOEntry(txOut0, true, 0)
utxoEntry1 := NewUTXOEntry(txOut1, false, 1)
diff := addMultisetToDiff(t, &UTXODiff{
diff := &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry0},
toRemove: utxoCollection{outpoint1: utxoEntry1},
})
}
// Test fullUTXOSet creation
emptySet := NewFullUTXOSet()
@@ -584,7 +669,7 @@ func TestFullUTXOSet(t *testing.T) {
} else if isAccepted {
t.Errorf("addTx unexpectedly succeeded")
}
emptySet = addMultisetToFullUTXOSet(t, &FullUTXOSet{utxoCollection: utxoCollection{outpoint0: utxoEntry0}})
emptySet = &FullUTXOSet{utxoCollection: utxoCollection{outpoint0: utxoEntry0}}
if isAccepted, err := emptySet.AddTx(transaction0, 0); err != nil {
t.Errorf("addTx unexpectedly failed. Error: %s", err)
} else if !isAccepted {
@@ -616,10 +701,10 @@ func TestDiffUTXOSet(t *testing.T) {
txOut1 := &wire.TxOut{ScriptPubKey: []byte{}, Value: 20}
utxoEntry0 := NewUTXOEntry(txOut0, true, 0)
utxoEntry1 := NewUTXOEntry(txOut1, false, 1)
diff := addMultisetToDiff(t, &UTXODiff{
diff := &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry0},
toRemove: utxoCollection{outpoint1: utxoEntry1},
})
}
// Test diffUTXOSet creation
emptySet := NewDiffUTXOSet(NewFullUTXOSet(), NewUTXODiff())
@@ -677,7 +762,7 @@ func TestDiffUTXOSet(t *testing.T) {
toRemove: utxoCollection{},
},
},
expectedString: "{Base: [ ], To Add: [ ], To Remove: [ ], Multiset-Hash:0000000000000000000000000000000000000000000000000000000000000000}",
expectedString: "{Base: [ ], To Add: [ ], To Remove: [ ]}",
expectedCollection: utxoCollection{},
},
{
@@ -696,7 +781,7 @@ func TestDiffUTXOSet(t *testing.T) {
toRemove: utxoCollection{},
},
},
expectedString: "{Base: [ ], To Add: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Remove: [ ], Multiset-Hash:da4768bd0359c3426268d6707c1fc17a68c45ef1ea734331b07568418234487f}",
expectedString: "{Base: [ ], To Add: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Remove: [ ]}",
expectedCollection: utxoCollection{outpoint0: utxoEntry0},
},
{
@@ -709,7 +794,7 @@ func TestDiffUTXOSet(t *testing.T) {
},
},
expectedMeldSet: nil,
expectedString: "{Base: [ ], To Add: [ ], To Remove: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], Multiset-Hash:046242cb1bb1e6d3fd91d0f181e1b2d4a597ac57fa2584fc3c2eb0e0f46c9369}",
expectedString: "{Base: [ ], To Add: [ ], To Remove: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ]}",
expectedCollection: utxoCollection{},
expectedMeldToBaseError: "Couldn't remove outpoint 0000000000000000000000000000000000000000000000000000000000000000:0 because it doesn't exist in the DiffUTXOSet base",
},
@@ -734,7 +819,7 @@ func TestDiffUTXOSet(t *testing.T) {
toRemove: utxoCollection{},
},
},
expectedString: "{Base: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Add: [ (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20, blueScore: 1 ], To Remove: [ ], Multiset-Hash:556cc61fd4d7e74d7807ca2298c5320375a6a20310a18920e54667220924baff}",
expectedString: "{Base: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Add: [ (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20, blueScore: 1 ], To Remove: [ ]}",
expectedCollection: utxoCollection{
outpoint0: utxoEntry0,
outpoint1: utxoEntry1,
@@ -758,24 +843,21 @@ func TestDiffUTXOSet(t *testing.T) {
toRemove: utxoCollection{},
},
},
expectedString: "{Base: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Add: [ ], To Remove: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], Multiset-Hash:0000000000000000000000000000000000000000000000000000000000000000}",
expectedString: "{Base: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Add: [ ], To Remove: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ]}",
expectedCollection: utxoCollection{},
},
}
for _, test := range tests {
diffSet := addMultisetToDiffUTXOSet(t, test.diffSet)
expectedMeldSet := addMultisetToDiffUTXOSet(t, test.expectedMeldSet)
// Test string representation
setString := diffSet.String()
setString := test.diffSet.String()
if setString != test.expectedString {
t.Errorf("unexpected string in test \"%s\". "+
"Expected: \"%s\", got: \"%s\".", test.name, test.expectedString, setString)
}
// Test meldToBase
meldSet := diffSet.clone().(*DiffUTXOSet)
meldSet := test.diffSet.clone().(*DiffUTXOSet)
err := meldSet.meldToBase()
errString := ""
if err != nil {
@@ -787,27 +869,27 @@ func TestDiffUTXOSet(t *testing.T) {
if err != nil {
continue
}
if !meldSet.equal(expectedMeldSet) {
if !meldSet.equal(test.expectedMeldSet) {
t.Errorf("unexpected melded set in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, expectedMeldSet, meldSet)
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedMeldSet, meldSet)
}
// Test collection
setCollection, err := diffSet.collection()
setCollection, err := test.diffSet.collection()
if err != nil {
t.Errorf("Error getting diffSet collection: %s", err)
t.Errorf("Error getting test.diffSet collection: %s", err)
} else if !reflect.DeepEqual(setCollection, test.expectedCollection) {
t.Errorf("unexpected set collection in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedCollection, setCollection)
}
// Test cloning
clonedSet := diffSet.clone().(*DiffUTXOSet)
if !reflect.DeepEqual(clonedSet, diffSet) {
clonedSet := test.diffSet.clone().(*DiffUTXOSet)
if !reflect.DeepEqual(clonedSet, test.diffSet) {
t.Errorf("unexpected set clone in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, diffSet, clonedSet)
"Expected: \"%v\", got: \"%v\".", test.name, test.diffSet, clonedSet)
}
if clonedSet == diffSet {
if clonedSet == test.diffSet {
t.Errorf("cloned set is reference-equal to the original")
}
}
@@ -1008,10 +1090,7 @@ func TestDiffUTXOSet_addTx(t *testing.T) {
testLoop:
for _, test := range tests {
startSet := addMultisetToDiffUTXOSet(t, test.startSet)
expectedSet := addMultisetToDiffUTXOSet(t, test.expectedSet)
diffSet := startSet.clone()
diffSet := test.startSet.clone()
// Apply all transactions to diffSet, in order, with the initial block height startHeight
for i, transaction := range test.toAdd {
@@ -1023,18 +1102,18 @@ testLoop:
}
}
// Make sure that the result diffSet equals to the expectedSet
if !diffSet.(*DiffUTXOSet).equal(expectedSet) {
// Make sure that the resulting diffSet equals test.expectedSet
if !diffSet.(*DiffUTXOSet).equal(test.expectedSet) {
t.Errorf("unexpected diffSet in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, expectedSet, diffSet)
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedSet, diffSet)
}
}
}
func TestDiffFromTx(t *testing.T) {
fus := addMultisetToFullUTXOSet(t, &FullUTXOSet{
fus := &FullUTXOSet{
utxoCollection: utxoCollection{},
})
}
txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
txIn0 := &wire.TxIn{SignatureScript: []byte{}, PreviousOutpoint: wire.Outpoint{TxID: *txID0, Index: math.MaxUint32}, Sequence: 0}
@@ -1090,10 +1169,10 @@ func TestDiffFromTx(t *testing.T) {
}
//Test that we get an error if the outpoint is inside diffUTXOSet's toRemove
diff2 := addMultisetToDiff(t, &UTXODiff{
diff2 := &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
})
}
dus := NewDiffUTXOSet(fus, diff2)
if isAccepted, err := dus.AddTx(tx, 2); err != nil {
t.Fatalf("AddTx unexpectedly failed. Error: %s", err)
@@ -1170,7 +1249,6 @@ func TestUTXOSetAddEntry(t *testing.T) {
}
for _, test := range tests {
expectedUTXODiff := addMultisetToDiff(t, test.expectedUTXODiff)
err := utxoDiff.AddEntry(*test.outpointToAdd, test.utxoEntryToAdd)
errString := ""
if err != nil {
@@ -1179,9 +1257,9 @@ func TestUTXOSetAddEntry(t *testing.T) {
if errString != test.expectedError {
t.Fatalf("utxoDiff.AddEntry: unexpected err in test \"%s\". Expected: %s but got: %s", test.name, test.expectedError, err)
}
if err == nil && !utxoDiff.equal(expectedUTXODiff) {
if err == nil && !utxoDiff.equal(test.expectedUTXODiff) {
t.Fatalf("utxoDiff.AddEntry: unexpected utxoDiff in test \"%s\". "+
"Expected: %v, got: %v", test.name, expectedUTXODiff, utxoDiff)
"Expected: %v, got: %v", test.name, test.expectedUTXODiff, utxoDiff)
}
}
}


@@ -435,7 +435,7 @@ func (dag *BlockDAG) checkBlockHeaderSanity(header *wire.BlockHeader, flags Beha
// the duration of time that should be waited before the block becomes valid.
// This check needs to be last as it does not return an error but rather marks the
// header as delayed (and valid).
maxTimestamp := dag.AdjustedTime().Add(time.Second *
maxTimestamp := dag.Now().Add(time.Second *
time.Duration(int64(dag.TimestampDeviationTolerance)*dag.targetTimePerBlock))
if header.Timestamp.After(maxTimestamp) {
return header.Timestamp.Sub(maxTimestamp), nil
@@ -446,9 +446,9 @@ func (dag *BlockDAG) checkBlockHeaderSanity(header *wire.BlockHeader, flags Beha
// checkBlockParentsOrder ensures that the block's parents are ordered by hash
func checkBlockParentsOrder(header *wire.BlockHeader) error {
sortedHashes := make([]*daghash.Hash, 0, header.NumParentBlocks())
for _, hash := range header.ParentHashes {
sortedHashes = append(sortedHashes, hash)
sortedHashes := make([]*daghash.Hash, header.NumParentBlocks())
for i, hash := range header.ParentHashes {
sortedHashes[i] = hash
}
sort.Slice(sortedHashes, func(i, j int) bool {
return daghash.Less(sortedHashes[i], sortedHashes[j])
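The comparison step falls outside this hunk, but the whole check amounts to sorting a copy of the parent hashes and verifying they were already in order. A standalone equivalent over raw 32-byte hashes (plain bytes.Compare here; the real ordering is defined by daghash.Less):
package main

import (
	"bytes"
	"fmt"
	"sort"
)

// parentsAreOrdered reports whether the hashes are already in
// ascending order, which is the property checkBlockParentsOrder
// enforces for a header's parents.
func parentsAreOrdered(hashes [][32]byte) bool {
	return sort.SliceIsSorted(hashes, func(i, j int) bool {
		return bytes.Compare(hashes[i][:], hashes[j][:]) < 0
	})
}

func main() {
	a, b := [32]byte{1}, [32]byte{2}
	fmt.Println(parentsAreOrdered([][32]byte{a, b})) // true
	fmt.Println(parentsAreOrdered([][32]byte{b, a})) // false
}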
@@ -598,41 +598,32 @@ func (dag *BlockDAG) validateDifficulty(header *wire.BlockHeader, bluestParent *
}
// validateParents validates that no parent is an ancestor of another parent, and no parent is finalized
func validateParents(blockHeader *wire.BlockHeader, parents blockSet) error {
minBlueScore := uint64(math.MaxUint64)
queue := newDownHeap()
visited := newSet()
for parent := range parents {
func (dag *BlockDAG) validateParents(blockHeader *wire.BlockHeader, parents blockSet) error {
for parentA := range parents {
// isFinalized might be false-negative because node finality status is
// updated in a separate goroutine. This is why later the block is
// checked more thoroughly on the finality rules in dag.checkFinalityRules.
if parent.isFinalized {
return ruleError(ErrFinality, fmt.Sprintf("block %s is a finalized parent of block %s", parent.hash, blockHeader.BlockHash()))
if parentA.isFinalized {
return ruleError(ErrFinality, fmt.Sprintf("block %s is a finalized "+
"parent of block %s", parentA.hash, blockHeader.BlockHash()))
}
if parent.blueScore < minBlueScore {
minBlueScore = parent.blueScore
}
for grandParent := range parent.parents {
if !visited.contains(grandParent) {
queue.Push(grandParent)
visited.add(grandParent)
for parentB := range parents {
if parentA == parentB {
continue
}
}
}
for queue.Len() > 0 {
current := queue.pop()
if parents.contains(current) {
return ruleError(ErrInvalidParentsRelation, fmt.Sprintf("block %s is both a parent of %s and an"+
" ancestor of another parent",
current.hash,
blockHeader.BlockHash()))
}
if current.blueScore > minBlueScore {
for parent := range current.parents {
if !visited.contains(parent) {
queue.Push(parent)
visited.add(parent)
}
isAncestorOf, err := dag.isAncestorOf(parentA, parentB)
if err != nil {
return err
}
if isAncestorOf {
return ruleError(ErrInvalidParentsRelation, fmt.Sprintf("block %s is both a parent of %s and an"+
" ancestor of another parent %s",
parentA.hash,
blockHeader.BlockHash(),
parentB.hash,
))
}
}
}
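The blue-score-bounded BFS is replaced by pairwise reachability queries. Stripped of the kaspad types, the shape of the new check is just this, with isAncestor standing in for dag.isAncestorOf:
package main

import "fmt"

// noParentIsAncestorOfAnother mirrors the pairwise rule in
// validateParents: every ordered pair of distinct parents is checked
// against an ancestry predicate.
func noParentIsAncestorOfAnother(parents []string, isAncestor func(a, b string) bool) error {
	for _, a := range parents {
		for _, b := range parents {
			if a != b && isAncestor(a, b) {
				return fmt.Errorf("parent %s is an ancestor of parent %s", a, b)
			}
		}
	}
	return nil
}

func main() {
	// Toy DAG in which "a" is an ancestor of "b".
	isAncestor := func(x, y string) bool { return x == "a" && y == "b" }
	fmt.Println(noParentIsAncestorOfAnother([]string{"a", "b"}, isAncestor)) // error
	fmt.Println(noParentIsAncestorOfAnother([]string{"b", "c"}, isAncestor)) // <nil>
}
For k parents this performs k(k-1) ancestry queries, presumably a good trade when the DAG can answer isAncestorOf directly instead of walking parents down to the minimum blue score.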
@@ -654,7 +645,7 @@ func (dag *BlockDAG) checkBlockContext(block *util.Block, parents blockSet, flag
bluestParent := parents.bluest()
fastAdd := flags&BFFastAdd == BFFastAdd
err := validateParents(&block.MsgBlock().Header, parents)
err := dag.validateParents(&block.MsgBlock().Header, parents)
if err != nil {
return err
}


@@ -5,6 +5,7 @@
package blockdag
import (
"github.com/pkg/errors"
"math"
"path/filepath"
"testing"
@@ -168,6 +169,7 @@ func TestCheckBlockSanity(t *testing.T) {
return
}
defer teardownFunc()
dag.timeSource = newFakeTimeSource(time.Now())
block := util.NewBlock(&Block100000)
if len(block.Transactions()) < 3 {
@@ -186,11 +188,12 @@ func TestCheckBlockSanity(t *testing.T) {
if err == nil {
t.Errorf("CheckBlockSanity: transactions disorder is not detected")
}
ruleErr, ok := err.(RuleError)
if !ok {
var ruleErr RuleError
if !errors.As(err, &ruleErr) {
t.Errorf("CheckBlockSanity: wrong error returned, expect RuleError, got %T", err)
} else if ruleErr.ErrorCode != ErrTransactionsNotSorted {
t.Errorf("CheckBlockSanity: wrong error returned, expect ErrTransactionsNotSorted, got %v, err %s", ruleErr.ErrorCode, err)
t.Errorf("CheckBlockSanity: wrong error returned, expect ErrTransactionsNotSorted, got"+
" %v, err %s", ruleErr.ErrorCode, err)
}
if delay != 0 {
t.Errorf("CheckBlockSanity: unexpected return %s delay", delay)
@@ -479,8 +482,10 @@ func TestCheckBlockSanity(t *testing.T) {
if err == nil {
t.Errorf("CheckBlockSanity: error is nil when it shouldn't be")
}
rError := err.(RuleError)
if rError.ErrorCode != ErrWrongParentsOrder {
var rError RuleError
if !errors.As(err, &rError) {
t.Fatalf("CheckBlockSanity: expected a RuleError, but got %s", err)
} else if rError.ErrorCode != ErrWrongParentsOrder {
t.Errorf("CheckBlockSanity: Expected error was ErrWrongParentsOrder but got %v", err)
}
if delay != 0 {
@@ -489,8 +494,8 @@ func TestCheckBlockSanity(t *testing.T) {
blockInTheFuture := Block100000
expectedDelay := 10 * time.Second
now := time.Unix(time.Now().Unix(), 0)
blockInTheFuture.Header.Timestamp = now.Add(time.Duration(dag.TimestampDeviationTolerance)*time.Second + expectedDelay)
deviationTolerance := time.Duration(dag.TimestampDeviationTolerance*uint64(dag.targetTimePerBlock)) * time.Second
blockInTheFuture.Header.Timestamp = dag.Now().Add(deviationTolerance + expectedDelay)
delay, err = dag.checkBlockSanity(util.NewBlock(&blockInTheFuture), BFNoPoWCheck)
if err != nil {
t.Errorf("CheckBlockSanity: %v", err)
@@ -509,14 +514,14 @@ func TestPastMedianTime(t *testing.T) {
for i := 0; i < 100; i++ {
blockTime = blockTime.Add(time.Second)
tip = newTestNode(dag, setFromSlice(tip),
tip = newTestNode(dag, blockSetFromSlice(tip),
blockVersion,
0,
blockTime)
}
// Checks that a block is valid if it has timestamp equals to past median time
node := newTestNode(dag, setFromSlice(tip),
node := newTestNode(dag, blockSetFromSlice(tip),
blockVersion,
dag.powMaxBits,
tip.PastMedianTime(dag))
@@ -529,7 +534,7 @@ func TestPastMedianTime(t *testing.T) {
}
// Checks that a block is valid if its timestamp is after past median time
node = newTestNode(dag, setFromSlice(tip),
node = newTestNode(dag, blockSetFromSlice(tip),
blockVersion,
dag.powMaxBits,
tip.PastMedianTime(dag).Add(time.Second))
@@ -542,7 +547,7 @@ func TestPastMedianTime(t *testing.T) {
}
// Checks that a block is invalid if its timestamp is before the past median time
node = newTestNode(dag, setFromSlice(tip),
node = newTestNode(dag, blockSetFromSlice(tip),
blockVersion,
0,
tip.PastMedianTime(dag).Add(-time.Second))
@@ -555,24 +560,23 @@ func TestPastMedianTime(t *testing.T) {
}
func TestValidateParents(t *testing.T) {
dag := newTestDAG(&dagconfig.SimnetParams)
genesisNode := dag.genesis
blockVersion := int32(0x10000000)
blockTime := genesisNode.Header().Timestamp
generateNode := func(parents ...*blockNode) *blockNode {
// The timestamp of each block is changed to prevent a situation where two blocks share the same hash
blockTime = blockTime.Add(time.Second)
return newTestNode(dag, setFromSlice(parents...),
blockVersion,
0,
blockTime)
// Create a new database and dag instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestCheckBlockSanity", Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
t.Errorf("Failed to setup dag instance: %v", err)
return
}
defer teardownFunc()
a := generateNode(genesisNode)
b := generateNode(a)
c := generateNode(genesisNode)
a := prepareAndProcessBlock(t, dag, dag.dagParams.GenesisBlock)
b := prepareAndProcessBlock(t, dag, a)
c := prepareAndProcessBlock(t, dag, dag.dagParams.GenesisBlock)
aNode := nodeByMsgBlock(t, dag, a)
bNode := nodeByMsgBlock(t, dag, b)
cNode := nodeByMsgBlock(t, dag, c)
fakeBlockHeader := &wire.BlockHeader{
HashMerkleRoot: &daghash.ZeroHash,
@@ -581,19 +585,19 @@ func TestValidateParents(t *testing.T) {
}
// Check direct parents relation
err := validateParents(fakeBlockHeader, setFromSlice(a, b))
err = dag.validateParents(fakeBlockHeader, blockSetFromSlice(aNode, bNode))
if err == nil {
t.Errorf("validateParents: `a` is a parent of `b`, so an error is expected")
}
// Check indirect parents relation
err = validateParents(fakeBlockHeader, setFromSlice(genesisNode, b))
err = dag.validateParents(fakeBlockHeader, blockSetFromSlice(dag.genesis, bNode))
if err == nil {
t.Errorf("validateParents: `genesis` and `b` are indirectly related, so an error is expected")
}
// Check parents with no relation
err = validateParents(fakeBlockHeader, setFromSlice(b, c))
err = dag.validateParents(fakeBlockHeader, blockSetFromSlice(bNode, cNode))
if err != nil {
t.Errorf("validateParents: unexpected error: %v", err)
}
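A pattern repeated across these hunks is swapping direct type assertions such as err.(RuleError) for errors.As. The distinction matters once an error gets wrapped: the assertion inspects only the outermost value, while errors.As walks the wrap chain. A minimal standalone sketch of the difference, using a stand-in RuleError rather than the repository's actual type:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// RuleError here is a stand-in for the blockdag type of the same name.
type RuleError struct{ ErrorCode int }

func (e RuleError) Error() string { return fmt.Sprintf("rule error %d", e.ErrorCode) }

func main() {
	wrapped := errors.Wrap(RuleError{ErrorCode: 7}, "while validating block")

	// A direct type assertion only inspects the outermost value, so it
	// misses the RuleError hidden behind the wrapper.
	if _, ok := wrapped.(RuleError); !ok {
		fmt.Println("type assertion: RuleError not found")
	}

	// errors.As walks the wrap chain and still finds it.
	var ruleErr RuleError
	if errors.As(wrapped, &ruleErr) {
		fmt.Println("errors.As: found code", ruleErr.ErrorCode)
	}
}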


@@ -32,22 +32,13 @@ func newVirtualBlock(dag *BlockDAG, tips blockSet) *virtualBlock {
var virtual virtualBlock
virtual.dag = dag
virtual.utxoSet = NewFullUTXOSet()
virtual.selectedParentChainSet = newSet()
virtual.selectedParentChainSet = newBlockSet()
virtual.selectedParentChainSlice = nil
virtual.setTips(tips)
return &virtual
}
// clone creates and returns a clone of the virtual block.
func (v *virtualBlock) clone() *virtualBlock {
return &virtualBlock{
utxoSet: v.utxoSet,
blockNode: v.blockNode,
selectedParentChainSet: v.selectedParentChainSet,
}
}
// setTips replaces the tips of the virtual block with the blocks in the
// given blockSet. This only differs from the exported version in that it
// is up to the caller to ensure the lock is held.
@@ -122,8 +113,8 @@ func (v *virtualBlock) updateSelectedParentSet(oldSelectedParent *blockNode) *ch
// This function is safe for concurrent access.
func (v *virtualBlock) SetTips(tips blockSet) {
v.mtx.Lock()
defer v.mtx.Unlock()
v.setTips(tips)
v.mtx.Unlock()
}
// addTip adds the given tip to the set of tips in the virtual block.


@@ -52,12 +52,12 @@ func TestVirtualBlock(t *testing.T) {
// \ X
// <- 4 <- 6
node0 := dag.genesis
node1 := buildNode(t, dag, setFromSlice(node0))
node2 := buildNode(t, dag, setFromSlice(node1))
node3 := buildNode(t, dag, setFromSlice(node0))
node4 := buildNode(t, dag, setFromSlice(node0))
node5 := buildNode(t, dag, setFromSlice(node3, node4))
node6 := buildNode(t, dag, setFromSlice(node3, node4))
node1 := buildNode(t, dag, blockSetFromSlice(node0))
node2 := buildNode(t, dag, blockSetFromSlice(node1))
node3 := buildNode(t, dag, blockSetFromSlice(node0))
node4 := buildNode(t, dag, blockSetFromSlice(node0))
node5 := buildNode(t, dag, blockSetFromSlice(node3, node4))
node6 := buildNode(t, dag, blockSetFromSlice(node3, node4))
// Given an empty VirtualBlock, each of the following test cases will:
// Set its tips to tipsToSet
@@ -75,28 +75,28 @@ func TestVirtualBlock(t *testing.T) {
name: "empty virtual",
tipsToSet: []*blockNode{},
tipsToAdd: []*blockNode{},
expectedTips: newSet(),
expectedTips: newBlockSet(),
expectedSelectedParent: nil,
},
{
name: "virtual with genesis tip",
tipsToSet: []*blockNode{node0},
tipsToAdd: []*blockNode{},
expectedTips: setFromSlice(node0),
expectedTips: blockSetFromSlice(node0),
expectedSelectedParent: node0,
},
{
name: "virtual with genesis tip, add child of genesis",
tipsToSet: []*blockNode{node0},
tipsToAdd: []*blockNode{node1},
expectedTips: setFromSlice(node1),
expectedTips: blockSetFromSlice(node1),
expectedSelectedParent: node1,
},
{
name: "empty virtual, add a full DAG",
tipsToSet: []*blockNode{},
tipsToAdd: []*blockNode{node0, node1, node2, node3, node4, node5, node6},
expectedTips: setFromSlice(node2, node5, node6),
expectedTips: blockSetFromSlice(node2, node5, node6),
expectedSelectedParent: node5,
},
}
@@ -106,7 +106,7 @@ func TestVirtualBlock(t *testing.T) {
virtual := newVirtualBlock(dag, nil)
// Set the tips. This will be the initial state
virtual.SetTips(setFromSlice(test.tipsToSet...))
virtual.SetTips(blockSetFromSlice(test.tipsToSet...))
// Add all blockNodes in tipsToAdd in order
for _, tipToAdd := range test.tipsToAdd {
@@ -147,9 +147,9 @@ func TestSelectedPath(t *testing.T) {
tip := dag.genesis
virtual.AddTip(tip)
initialPath := setFromSlice(tip)
initialPath := blockSetFromSlice(tip)
for i := 0; i < 5; i++ {
tip = buildNode(t, dag, setFromSlice(tip))
tip = buildNode(t, dag, blockSetFromSlice(tip))
initialPath.add(tip)
virtual.AddTip(tip)
}
@@ -157,7 +157,7 @@ func TestSelectedPath(t *testing.T) {
firstPath := initialPath.clone()
for i := 0; i < 5; i++ {
tip = buildNode(t, dag, setFromSlice(tip))
tip = buildNode(t, dag, blockSetFromSlice(tip))
firstPath.add(tip)
virtual.AddTip(tip)
}
@@ -175,7 +175,7 @@ func TestSelectedPath(t *testing.T) {
secondPath := initialPath.clone()
tip = initialTip
for i := 0; i < 100; i++ {
tip = buildNode(t, dag, setFromSlice(tip))
tip = buildNode(t, dag, blockSetFromSlice(tip))
secondPath.add(tip)
virtual.AddTip(tip)
}
@@ -193,7 +193,7 @@ func TestSelectedPath(t *testing.T) {
tip = initialTip
for i := 0; i < 3; i++ {
tip = buildNode(t, dag, setFromSlice(tip))
tip = buildNode(t, dag, blockSetFromSlice(tip))
virtual.AddTip(tip)
}
// Because we added a very short chain, the selected path should not be affected.
@@ -215,7 +215,7 @@ func TestSelectedPath(t *testing.T) {
t.Fatalf("updateSelectedParentSet didn't panic")
}
}()
virtual2.updateSelectedParentSet(buildNode(t, dag, setFromSlice()))
virtual2.updateSelectedParentSet(buildNode(t, dag, blockSetFromSlice()))
}
func TestChainUpdates(t *testing.T) {
@@ -236,23 +236,23 @@ func TestChainUpdates(t *testing.T) {
var toBeRemovedNodes []*blockNode
toBeRemovedTip := genesis
for i := 0; i < 5; i++ {
toBeRemovedTip = buildNode(t, dag, setFromSlice(toBeRemovedTip))
toBeRemovedTip = buildNode(t, dag, blockSetFromSlice(toBeRemovedTip))
toBeRemovedNodes = append(toBeRemovedNodes, toBeRemovedTip)
}
// Create a VirtualBlock with the toBeRemoved chain
virtual := newVirtualBlock(dag, setFromSlice(toBeRemovedNodes...))
virtual := newVirtualBlock(dag, blockSetFromSlice(toBeRemovedNodes...))
// Create a chain to be added
var toBeAddedNodes []*blockNode
toBeAddedTip := genesis
for i := 0; i < 8; i++ {
toBeAddedTip = buildNode(t, dag, setFromSlice(toBeAddedTip))
toBeAddedTip = buildNode(t, dag, blockSetFromSlice(toBeAddedTip))
toBeAddedNodes = append(toBeAddedNodes, toBeAddedTip)
}
// Set the virtual tip to be the tip of the toBeAdded chain
chainUpdates := virtual.setTips(setFromSlice(toBeAddedTip))
chainUpdates := virtual.setTips(blockSetFromSlice(toBeAddedTip))
// Make sure that the removed blocks are as expected (in reverse order)
if len(chainUpdates.removedChainBlockHashes) != len(toBeRemovedNodes) {


@@ -5,6 +5,7 @@
package main
import (
"github.com/pkg/errors"
"os"
"path/filepath"
"runtime"
@@ -16,28 +17,29 @@ import (
)
const (
// blockDbNamePrefix is the prefix for the kaspad block database.
blockDbNamePrefix = "blocks"
// blockDBNamePrefix is the prefix for the kaspad block database.
blockDBNamePrefix = "blocks"
)
var (
cfg *ConfigFlags
log logs.Logger
log *logs.Logger
spawn func(func())
)
// loadBlockDB opens the block database and returns a handle to it.
func loadBlockDB() (database.DB, error) {
// The database name is based on the database type.
dbName := blockDbNamePrefix + "_" + cfg.DbType
dbName := blockDBNamePrefix + "_" + cfg.DBType
dbPath := filepath.Join(cfg.DataDir, dbName)
log.Infof("Loading block database from '%s'", dbPath)
db, err := database.Open(cfg.DbType, dbPath, ActiveConfig().NetParams().Net)
db, err := database.Open(cfg.DBType, dbPath, ActiveConfig().NetParams().Net)
if err != nil {
// Return the error if it's not because the database doesn't
// exist.
if dbErr, ok := err.(database.Error); !ok || dbErr.ErrorCode !=
var dbErr database.Error
if ok := errors.As(err, &dbErr); !ok || dbErr.ErrorCode !=
database.ErrDbDoesNotExist {
return nil, err
@@ -48,7 +50,7 @@ func loadBlockDB() (database.DB, error) {
if err != nil {
return nil, err
}
db, err = database.Create(cfg.DbType, dbPath, ActiveConfig().NetParams().Net)
db, err = database.Create(cfg.DBType, dbPath, ActiveConfig().NetParams().Net)
if err != nil {
return nil, err
}


@@ -19,7 +19,7 @@ import (
)
const (
defaultDbType = "ffldb"
defaultDBType = "ffldb"
defaultDataFile = "bootstrap.dat"
defaultProgress = 10
)
@@ -40,12 +40,11 @@ func ActiveConfig() *ConfigFlags {
//
// See loadConfig for details on the configuration load process.
type ConfigFlags struct {
DataDir string `short:"b" long:"datadir" description:"Location of the kaspad data directory"`
DbType string `long:"dbtype" description:"Database backend to use for the Block DAG"`
InFile string `short:"i" long:"infile" description:"File containing the block(s)"`
TxIndex bool `long:"txindex" description:"Build a full hash-based transaction index which makes all transactions available via the getrawtransaction RPC"`
AddrIndex bool `long:"addrindex" description:"Build a full address-based transaction index which makes the searchrawtransactions RPC available"`
Progress int `short:"p" long:"progress" description:"Show a progress message each time this number of seconds have passed -- Use 0 to disable progress announcements"`
DataDir string `short:"b" long:"datadir" description:"Location of the kaspad data directory"`
DBType string `long:"dbtype" description:"Database backend to use for the Block DAG"`
InFile string `short:"i" long:"infile" description:"File containing the block(s)"`
Progress int `short:"p" long:"progress" description:"Show a progress message each time this number of seconds have passed -- Use 0 to disable progress announcements"`
AcceptanceIndex bool `long:"acceptanceindex" description:"Maintain a full hash-based acceptance index which makes the getChainFromBlock RPC available"`
config.NetworkFlags
}
@@ -75,7 +74,7 @@ func loadConfig() (*ConfigFlags, []string, error) {
// Default config.
activeConfig = &ConfigFlags{
DataDir: defaultDataDir,
DbType: defaultDbType,
DBType: defaultDBType,
InFile: defaultDataFile,
Progress: defaultProgress,
}
@@ -84,7 +83,8 @@ func loadConfig() (*ConfigFlags, []string, error) {
parser := flags.NewParser(&activeConfig, flags.Default)
remainingArgs, err := parser.Parse()
if err != nil {
if e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp {
var flagsErr *flags.Error
if ok := errors.As(err, &flagsErr); !ok || flagsErr.Type != flags.ErrHelp {
parser.WriteHelp(os.Stderr)
}
return nil, nil, err
@@ -96,10 +96,10 @@ func loadConfig() (*ConfigFlags, []string, error) {
}
// Validate database type.
if !validDbType(activeConfig.DbType) {
if !validDbType(activeConfig.DBType) {
str := "%s: The specified database type [%s] is invalid -- " +
"supported types %s"
err := errors.Errorf(str, "loadConfig", activeConfig.DbType, strings.Join(knownDbTypes, ", "))
err := errors.Errorf(str, "loadConfig", activeConfig.DBType, strings.Join(knownDbTypes, ", "))
fmt.Fprintln(os.Stderr, err)
parser.WriteHelp(os.Stderr)
return nil, nil, err


@@ -6,13 +6,13 @@ package main
import (
"encoding/binary"
"github.com/kaspanet/kaspad/blockdag/indexers"
"github.com/pkg/errors"
"io"
"sync"
"time"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/blockdag/indexers"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
@@ -101,14 +101,14 @@ func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
// Skip blocks that already exist.
blockHash := block.Hash()
if bi.dag.HaveBlock(blockHash) {
if bi.dag.IsKnownBlock(blockHash) {
return false, nil
}
// Don't bother trying to process orphans.
parentHashes := block.MsgBlock().Header.ParentHashes
if len(parentHashes) > 0 {
if !bi.dag.HaveBlocks(parentHashes) {
if !bi.dag.AreKnownBlocks(parentHashes) {
return false, errors.Errorf("import file contains block "+
"%v which does not link to the available "+
"block DAG", parentHashes)
@@ -288,28 +288,11 @@ func (bi *blockImporter) Import() chan *importResults {
// newBlockImporter returns a new importer for the provided file reader seeker
// and database.
func newBlockImporter(db database.DB, r io.ReadSeeker) (*blockImporter, error) {
// Create the transaction and address indexes if needed.
//
// CAUTION: the txindex needs to be first in the indexes array because
// the addrindex uses data from the txindex during catchup. If the
// addrindex is run first, it may not have the transactions from the
// current block indexed.
// Create the acceptance index if needed.
var indexes []indexers.Indexer
if cfg.TxIndex || cfg.AddrIndex {
// Enable transaction index if address index is enabled since it
// requires it.
if !cfg.TxIndex {
log.Infof("Transaction index enabled because it is " +
"required by the address index")
cfg.TxIndex = true
} else {
log.Info("Transaction index is enabled")
}
indexes = append(indexes, indexers.NewTxIndex())
}
if cfg.AddrIndex {
log.Info("Address index is enabled")
indexes = append(indexes, indexers.NewAddrIndex(ActiveConfig().NetParams()))
if cfg.AcceptanceIndex {
log.Info("Acceptance index is enabled")
indexes = append(indexes, indexers.NewAcceptanceIndex())
}
// Create an index manager if any of the optional indexes are enabled.
@@ -321,7 +304,7 @@ func newBlockImporter(db database.DB, r io.ReadSeeker) (*blockImporter, error) {
dag, err := blockdag.New(&blockdag.Config{
DB: db,
DAGParams: ActiveConfig().NetParams(),
TimeSource: blockdag.NewMedianTime(),
TimeSource: blockdag.NewTimeSource(),
IndexManager: indexManager,
})
if err != nil {


@@ -1,87 +0,0 @@
package main
import (
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/rpcclient"
"github.com/kaspanet/kaspad/rpcmodel"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/pkg/errors"
"time"
)
const (
getSubnetworkRetryDelay = 5 * time.Second
maxGetSubnetworkRetries = 12
)
func main() {
cfg, err := parseConfig()
if err != nil {
panic(errors.Errorf("error parsing command-line arguments: %s", err))
}
privateKey, addrPubKeyHash, err := decodeKeys(cfg)
if err != nil {
panic(errors.Errorf("error decoding public key: %s", err))
}
client, err := connect(cfg)
if err != nil {
panic(errors.Errorf("could not connect to RPC server: %s", err))
}
log.Infof("Connected to server %s", cfg.RPCServer)
fundingOutpoint, fundingTx, err := findUnspentTXO(cfg, client, addrPubKeyHash)
if err != nil {
panic(errors.Errorf("error finding unspent transactions: %s", err))
}
if fundingOutpoint == nil || fundingTx == nil {
panic(errors.Errorf("could not find any unspent transactions this for key"))
}
log.Infof("Found transaction to spend: %s:%d", fundingOutpoint.TxID, fundingOutpoint.Index)
registryTx, err := buildSubnetworkRegistryTx(cfg, fundingOutpoint, fundingTx, privateKey)
if err != nil {
panic(errors.Errorf("error building subnetwork registry tx: %s", err))
}
_, err = client.SendRawTransaction(registryTx, true)
if err != nil {
panic(errors.Errorf("failed sending subnetwork registry tx: %s", err))
}
log.Infof("Successfully sent subnetwork registry transaction")
subnetworkID, err := blockdag.TxToSubnetworkID(registryTx)
if err != nil {
panic(errors.Errorf("could not build subnetwork ID: %s", err))
}
err = waitForSubnetworkToBecomeAccepted(client, subnetworkID)
if err != nil {
panic(errors.Errorf("error waiting for subnetwork to become accepted: %s", err))
}
log.Infof("Subnetwork '%s' was successfully registered.", subnetworkID)
}
func waitForSubnetworkToBecomeAccepted(client *rpcclient.Client, subnetworkID *subnetworkid.SubnetworkID) error {
retries := 0
for {
_, err := client.GetSubnetwork(subnetworkID.String())
if err != nil {
if rpcError, ok := err.(*rpcmodel.RPCError); ok && rpcError.Code == rpcmodel.ErrRPCSubnetworkNotFound {
log.Infof("Subnetwork not found")
retries++
if retries == maxGetSubnetworkRetries {
return errors.Errorf("failed to get subnetwork %d times: %s", maxGetSubnetworkRetries, err)
}
log.Infof("Waiting %d seconds...", int(getSubnetworkRetryDelay.Seconds()))
<-time.After(getSubnetworkRetryDelay)
continue
}
return errors.Errorf("failed getting subnetwork: %s", err)
}
return nil
}
}
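Note the bound this loop implies: at most maxGetSubnetworkRetries (12) attempts spaced getSubnetworkRetryDelay (5 seconds) apart, i.e. roughly a minute of polling before registration is declared failed.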


@@ -1,71 +0,0 @@
package main
import (
"github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/config"
"github.com/pkg/errors"
)
var activeConfig *ConfigFlags
// ActiveConfig returns the active configuration struct
func ActiveConfig() *ConfigFlags {
return activeConfig
}
// ConfigFlags holds the configurations set by the command line argument
type ConfigFlags struct {
PrivateKey string `short:"k" long:"private-key" description:"Private key" required:"true"`
RPCUser string `short:"u" long:"rpcuser" description:"RPC username" required:"true"`
RPCPassword string `short:"P" long:"rpcpass" default-mask:"-" description:"RPC password" required:"true"`
RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to" required:"true"`
RPCCert string `short:"c" long:"rpccert" description:"RPC server certificate chain for validation"`
DisableTLS bool `long:"notls" description:"Disable TLS"`
GasLimit uint64 `long:"gaslimit" description:"The gas limit of the new subnetwork"`
RegistryTxFee uint64 `long:"regtxfee" description:"The fee for the subnetwork registry transaction"`
config.NetworkFlags
}
const (
defaultSubnetworkGasLimit = 1000
defaultRegistryTxFee = 3000
)
func parseConfig() (*ConfigFlags, error) {
activeConfig = &ConfigFlags{}
parser := flags.NewParser(activeConfig, flags.PrintErrors|flags.HelpFlag)
_, err := parser.Parse()
if err != nil {
return nil, err
}
if activeConfig.RPCCert == "" && !activeConfig.DisableTLS {
return nil, errors.New("--notls has to be disabled if --cert is used")
}
if activeConfig.RPCCert != "" && activeConfig.DisableTLS {
return nil, errors.New("--cert should be omitted if --notls is used")
}
err = activeConfig.ResolveNetwork(parser)
if err != nil {
return nil, err
}
if activeConfig.GasLimit < 0 {
return nil, errors.Errorf("gaslimit may not be smaller than 0")
}
if activeConfig.GasLimit == 0 {
activeConfig.GasLimit = defaultSubnetworkGasLimit
}
if activeConfig.RegistryTxFee < 0 {
return nil, errors.Errorf("regtxfee may not be smaller than 0")
}
if activeConfig.RegistryTxFee == 0 {
activeConfig.RegistryTxFee = defaultRegistryTxFee
}
return activeConfig, nil
}


@@ -1,37 +0,0 @@
package main
import (
"github.com/kaspanet/kaspad/rpcclient"
"github.com/pkg/errors"
"io/ioutil"
)
func connect(cfg *ConfigFlags) (*rpcclient.Client, error) {
var cert []byte
if !cfg.DisableTLS {
var err error
cert, err = ioutil.ReadFile(cfg.RPCCert)
if err != nil {
return nil, errors.Errorf("error reading certificates file: %s", err)
}
}
connCfg := &rpcclient.ConnConfig{
Host: cfg.RPCServer,
Endpoint: "ws",
User: cfg.RPCUser,
Pass: cfg.RPCPassword,
DisableTLS: cfg.DisableTLS,
}
if !cfg.DisableTLS {
connCfg.Certificates = cert
}
client, err := rpcclient.New(connCfg, nil)
if err != nil {
return nil, errors.Errorf("error connecting to address %s: %s", cfg.RPCServer, err)
}
return client, nil
}


@@ -1,19 +0,0 @@
package main
import (
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/base58"
)
func decodeKeys(cfg *ConfigFlags) (*ecc.PrivateKey, *util.AddressPubKeyHash, error) {
privateKeyBytes := base58.Decode(cfg.PrivateKey)
privateKey, _ := ecc.PrivKeyFromBytes(ecc.S256(), privateKeyBytes)
serializedPrivateKey := privateKey.PubKey().SerializeCompressed()
addr, err := util.NewAddressPubKeyHashFromPublicKey(serializedPrivateKey, ActiveConfig().NetParams().Prefix)
if err != nil {
return nil, nil, err
}
return privateKey, addr, nil
}


@@ -1,10 +0,0 @@
package main
import (
"github.com/kaspanet/kaspad/logs"
)
var (
backendLog = logs.NewBackend()
log = backendLog.Logger("ASUB")
)


@@ -1,29 +0,0 @@
package main
import (
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
)
func buildSubnetworkRegistryTx(cfg *ConfigFlags, fundingOutpoint *wire.Outpoint, fundingTx *wire.MsgTx, privateKey *ecc.PrivateKey) (*wire.MsgTx, error) {
txIn := &wire.TxIn{
PreviousOutpoint: *fundingOutpoint,
Sequence: wire.MaxTxInSequenceNum,
}
txOut := &wire.TxOut{
ScriptPubKey: fundingTx.TxOut[fundingOutpoint.Index].ScriptPubKey,
Value: fundingTx.TxOut[fundingOutpoint.Index].Value - cfg.RegistryTxFee,
}
registryTx := wire.NewRegistryMsgTx(1, []*wire.TxIn{txIn}, []*wire.TxOut{txOut}, cfg.GasLimit)
SignatureScript, err := txscript.SignatureScript(registryTx, 0, fundingTx.TxOut[fundingOutpoint.Index].ScriptPubKey,
txscript.SigHashAll, privateKey, true)
if err != nil {
return nil, errors.Errorf("failed to build signature script: %s", err)
}
txIn.SignatureScript = SignatureScript
return registryTx, nil
}


@@ -1,112 +0,0 @@
package main
import (
"bytes"
"encoding/hex"
"github.com/kaspanet/kaspad/rpcclient"
"github.com/kaspanet/kaspad/rpcmodel"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
)
const (
resultsCount = 1000
minConfirmations = 10
)
func findUnspentTXO(cfg *ConfigFlags, client *rpcclient.Client, addrPubKeyHash *util.AddressPubKeyHash) (*wire.Outpoint, *wire.MsgTx, error) {
txs, err := collectTransactions(client, addrPubKeyHash)
if err != nil {
return nil, nil, err
}
utxos := buildUTXOs(txs)
for outpoint, tx := range utxos {
// Skip TXOs that can't pay for registration
if tx.TxOut[outpoint.Index].Value < cfg.RegistryTxFee {
continue
}
return &outpoint, tx, nil
}
return nil, nil, nil
}
func collectTransactions(client *rpcclient.Client, addrPubKeyHash *util.AddressPubKeyHash) ([]*wire.MsgTx, error) {
txs := make([]*wire.MsgTx, 0)
skip := 0
for {
results, err := client.SearchRawTransactionsVerbose(addrPubKeyHash, skip, resultsCount, true, false, nil)
if err != nil {
// Break when there are no further txs
if rpcError, ok := err.(*rpcmodel.RPCError); ok && rpcError.Code == rpcmodel.ErrRPCNoTxInfo {
break
}
return nil, err
}
for _, result := range results {
// Mempool transactions bring about unnecessary complexity, so
// simply don't bother processing them
if result.IsInMempool {
continue
}
tx, err := parseRawTransactionResult(result)
if err != nil {
return nil, errors.Errorf("failed to process SearchRawTransactionResult: %s", err)
}
if tx == nil {
continue
}
if !isTxMatured(tx, *result.Confirmations) {
continue
}
txs = append(txs, tx)
}
skip += resultsCount
}
return txs, nil
}
func parseRawTransactionResult(result *rpcmodel.SearchRawTransactionsResult) (*wire.MsgTx, error) {
txBytes, err := hex.DecodeString(result.Hex)
if err != nil {
return nil, errors.Errorf("failed to decode transaction bytes: %s", err)
}
var tx wire.MsgTx
reader := bytes.NewReader(txBytes)
err = tx.Deserialize(reader)
if err != nil {
return nil, errors.Errorf("failed to deserialize transaction: %s", err)
}
return &tx, nil
}
func isTxMatured(tx *wire.MsgTx, confirmations uint64) bool {
if !tx.IsCoinBase() {
return confirmations >= minConfirmations
}
return confirmations >= ActiveConfig().NetParams().BlockCoinbaseMaturity
}
func buildUTXOs(txs []*wire.MsgTx) map[wire.Outpoint]*wire.MsgTx {
utxos := make(map[wire.Outpoint]*wire.MsgTx)
for _, tx := range txs {
for i := range tx.TxOut {
outpoint := wire.NewOutpoint(tx.TxID(), uint32(i))
utxos[*outpoint] = tx
}
}
for _, tx := range txs {
for _, input := range tx.TxIn {
delete(utxos, input.PreviousOutpoint)
}
}
return utxos
}


@@ -6,6 +6,7 @@ package main
import (
"fmt"
"github.com/pkg/errors"
"io/ioutil"
"os"
"path/filepath"
@@ -32,7 +33,8 @@ func main() {
parser := flags.NewParser(&cfg, flags.Default)
_, err := parser.Parse()
if err != nil {
if e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp {
var flagsErr *flags.Error
if ok := errors.As(err, &flagsErr); !ok || flagsErr.Type != flags.ErrHelp {
parser.WriteHelp(os.Stderr)
}
return


@@ -7,9 +7,9 @@ package main
import (
"fmt"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/version"
"github.com/pkg/errors"
"io/ioutil"
"net"
"os"
"path/filepath"
"regexp"
@@ -36,11 +36,6 @@ var (
activeConfig *ConfigFlags
)
// ActiveConfig returns the active configuration struct
func ActiveConfig() *ConfigFlags {
return activeConfig
}
// listCommands categorizes and lists all of the usable commands along with
// their one-line usage.
func listCommands() {
@@ -108,28 +103,6 @@ type ConfigFlags struct {
config.NetworkFlags
}
// normalizeAddress returns addr with the passed default port appended if
// there is not already a port specified.
func normalizeAddress(addr string, useTestnet, useSimnet, useDevnet bool) string {
_, _, err := net.SplitHostPort(addr)
if err != nil {
var defaultPort string
switch {
case useDevnet:
defaultPort = "16610"
case useTestnet:
defaultPort = "16210"
case useSimnet:
defaultPort = "16510"
default:
defaultPort = "16110"
}
return net.JoinHostPort(addr, defaultPort)
}
return addr
}
// cleanAndExpandPath expands environment variables and leading ~ in the
// passed path, cleans the result, and returns it.
func cleanAndExpandPath(path string) string {
@@ -172,7 +145,8 @@ func loadConfig() (*ConfigFlags, []string, error) {
preParser := flags.NewParser(preCfg, flags.HelpFlag)
_, err := preParser.Parse()
if err != nil {
if e, ok := err.(*flags.Error); ok && e.Type == flags.ErrHelp {
var flagsErr *flags.Error
if ok := errors.As(err, &flagsErr); ok && flagsErr.Type == flags.ErrHelp {
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, "")
fmt.Fprintln(os.Stderr, "The special parameter `-` "+
@@ -188,7 +162,7 @@ func loadConfig() (*ConfigFlags, []string, error) {
appName = strings.TrimSuffix(appName, filepath.Ext(appName))
usageMessage := fmt.Sprintf("Use %s -h to show options", appName)
if preCfg.ShowVersion {
fmt.Println(appName, "version", version())
fmt.Println(appName, "version", version.Version())
os.Exit(0)
}
@@ -216,7 +190,7 @@ func loadConfig() (*ConfigFlags, []string, error) {
parser := flags.NewParser(activeConfig, flags.Default)
err = flags.NewIniParser(parser).ParseFile(preCfg.ConfigFile)
if err != nil {
if _, ok := err.(*os.PathError); !ok {
if pErr := &(os.PathError{}); !errors.As(err, &pErr) {
fmt.Fprintf(os.Stderr, "Error parsing config file: %s\n",
err)
fmt.Fprintln(os.Stderr, usageMessage)
@@ -227,7 +201,8 @@ func loadConfig() (*ConfigFlags, []string, error) {
// Parse command line options again to ensure they take precedence.
remainingArgs, err := parser.Parse()
if err != nil {
if e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp {
var flagsErr *flags.Error
if ok := errors.As(err, &flagsErr); !ok || flagsErr.Type != flags.ErrHelp {
fmt.Fprintln(os.Stderr, usageMessage)
}
return nil, nil, err
@@ -242,8 +217,10 @@ func loadConfig() (*ConfigFlags, []string, error) {
// Add default port to RPC server based on --testnet and --simnet flags
// if needed.
activeConfig.RPCServer = normalizeAddress(activeConfig.RPCServer, activeConfig.Testnet,
activeConfig.Simnet, activeConfig.Devnet)
activeConfig.RPCServer, err = activeConfig.NetParams().NormalizeRPCServerAddress(activeConfig.RPCServer)
if err != nil {
return nil, nil, err
}
return activeConfig, remainingArgs, nil
}


@@ -95,11 +95,12 @@ func sendPostRequest(marshalledJSON []byte, cfg *ConfigFlags) ([]byte, error) {
}
// Read the raw bytes and close the response.
respBytes, err := ioutil.ReadAll(httpResponse.Body)
httpResponse.Body.Close()
respBytes, err := func() ([]byte, error) {
defer httpResponse.Body.Close()
return ioutil.ReadAll(httpResponse.Body)
}()
if err != nil {
err = errors.Errorf("error reading json reply: %s", err)
return nil, err
return nil, errors.Wrap(err, "error reading json reply")
}
// Handle unsuccessful HTTP responses
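The closure introduced above is a small but useful idiom: wrapping the read in its own function scope lets defer close the response body the moment reading finishes, while still returning both values to the caller. The same idiom factored into a helper, as a minimal runnable sketch:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// readAndClose drains r and closes it as soon as the read finishes --
// the defer fires when this helper returns, not at the end of the
// surrounding function.
func readAndClose(r io.ReadCloser) ([]byte, error) {
	defer r.Close()
	return ioutil.ReadAll(r)
}

func main() {
	b, _ := readAndClose(ioutil.NopCloser(strings.NewReader("hello")))
	fmt.Println(string(b))
}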


@@ -5,6 +5,7 @@ import (
"bytes"
"encoding/json"
"fmt"
"github.com/pkg/errors"
"io"
"os"
"path/filepath"
@@ -110,9 +111,10 @@ func main() {
// rpcmodel.Error as it realistically will always be since the
// NewCommand function is only supposed to return errors of that
// type.
if jerr, ok := err.(rpcmodel.Error); ok {
var rpcModelErr rpcmodel.Error
if ok := errors.As(err, &rpcModelErr); ok {
fmt.Fprintf(os.Stderr, "%s error: %s (command code: %s)\n",
method, err, jerr.ErrorCode)
method, err, rpcModelErr.ErrorCode)
commandUsage(method)
os.Exit(1)
}


@@ -1,75 +0,0 @@
// Copyright (c) 2013 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"fmt"
"strings"
)
// semanticAlphabet
const semanticAlphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-"
// These constants define the application version and follow the semantic
// versioning 2.0.0 spec (http://semver.org/).
const (
appMajor uint = 0
appMinor uint = 12
appPatch uint = 0
// appPreRelease MUST only contain characters from semanticAlphabet
// per the semantic versioning spec.
appPreRelease = "beta"
)
// appBuild is defined as a variable so it can be overridden during the build
// process with '-ldflags "-X main.appBuild foo"' if needed. It MUST only
// contain characters from semanticAlphabet per the semantic versioning spec.
var appBuild string
// version returns the application version as a properly formed string per the
// semantic versioning 2.0.0 spec (http://semver.org/).
func version() string {
// Start with the major, minor, and patch versions.
version := fmt.Sprintf("%d.%d.%d", appMajor, appMinor, appPatch)
// Append pre-release version if there is one. The hyphen called for
// by the semantic versioning spec is automatically appended and should
// not be contained in the pre-release string. The pre-release version
// is not appended if it contains invalid characters.
preRelease := normalizeVerString(appPreRelease)
if preRelease != "" {
version = fmt.Sprintf("%s-%s", version, preRelease)
}
// Append build metadata if there is any. The plus called for
// by the semantic versioning spec is automatically appended and should
// not be contained in the build metadata string. The build metadata
// string is not appended if it contains invalid characters.
build := normalizeVerString(appBuild)
if build != "" {
version = fmt.Sprintf("%s+%s", version, build)
}
return version
}
// normalizeVerString returns the passed string stripped of all characters which
// are not valid according to the semantic versioning guidelines for pre-release
// version and build metadata strings. In particular they MUST only contain
// characters in semanticAlphabet.
func normalizeVerString(str string) string {
var result bytes.Buffer
for _, r := range str {
if strings.ContainsRune(semanticAlphabet, r) {
// Ignoring the error here since it can only fail if
// the system is out of memory and there are much
// bigger issues at that point.
_, _ = result.WriteRune(r)
}
}
return result.String()
}


@@ -6,7 +6,6 @@ import (
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
"io/ioutil"
"net"
"time"
)
@@ -43,8 +42,13 @@ func connectToServer(cfg *configFlags) (*minerClient, error) {
return nil, err
}
rpcAddr, err := cfg.NetParams().NormalizeRPCServerAddress(cfg.RPCServer)
if err != nil {
return nil, err
}
connCfg := &rpcclient.ConnConfig{
Host: normalizeRPCServerAddress(cfg.RPCServer, cfg),
Host: rpcAddr,
Endpoint: "ws",
User: cfg.RPCUser,
Pass: cfg.RPCPassword,
@@ -63,16 +67,6 @@ func connectToServer(cfg *configFlags) (*minerClient, error) {
return client, nil
}
// normalizeRPCServerAddress returns addr with the current network default
// port appended if there is not already a port specified.
func normalizeRPCServerAddress(addr string, cfg *configFlags) string {
_, _, err := net.SplitHostPort(addr)
if err != nil {
return net.JoinHostPort(addr, cfg.NetParams().RPCPort)
}
return addr
}
func readCert(cfg *configFlags) ([]byte, error) {
if cfg.DisableTLS {
return nil, nil


@@ -2,16 +2,18 @@ package main
import (
"fmt"
"github.com/kaspanet/kaspad/config"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
"github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/cmd/kaspaminer/version"
"github.com/kaspanet/kaspad/version"
)
const (
@@ -37,6 +39,7 @@ type configFlags struct {
Verbose bool `long:"verbose" short:"v" description:"Enable logging of RPC requests"`
NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine. If omitted, will mine until the process is interrupted."`
BlockDelay uint64 `long:"block-delay" description:"Delay for block submission (in milliseconds). This is used only for testing purposes."`
Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
config.NetworkFlags
}
@@ -78,6 +81,13 @@ func parseConfig() (*configFlags, error) {
return nil, errors.New("--rpccert should be omitted if --notls is used")
}
if cfg.Profile != "" {
profilePort, err := strconv.Atoi(cfg.Profile)
if err != nil || profilePort < 1024 || profilePort > 65535 {
return nil, errors.New("The profile port must be between 1024 and 65535")
}
}
initLog(defaultLogFile, defaultErrLogFile)
return cfg, nil


@@ -1,5 +1,5 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.13-alpine AS build
FROM golang:1.14-alpine AS build
RUN mkdir -p /go/src/github.com/kaspanet/kaspad
@@ -20,7 +20,7 @@ WORKDIR /go/src/github.com/kaspanet/kaspad/cmd/kaspaminer
RUN GOFMT_RESULT=`go fmt ./...`; echo $GOFMT_RESULT; test -z "$GOFMT_RESULT"
RUN go vet ./...
RUN golint -set_exit_status ./...
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o kaspaminer .
RUN GOOS=linux go build -a -installsuffix cgo -o kaspaminer .
# --- multistage docker build: stage #2: runtime image
FROM alpine


@@ -28,7 +28,5 @@ func initLog(logFile, errLogFile string) {
}
func enableRPCLogging() {
rpclog := backendLog.Logger("RPCC")
rpclog.SetLevel(logs.LevelTrace)
rpcclient.UseLogger(rpclog)
rpcclient.UseLogger(backendLog, logs.LevelTrace)
}


@@ -2,17 +2,22 @@ package main
import (
"fmt"
"net"
"net/http"
"os"
"github.com/kaspanet/kaspad/version"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/cmd/kaspaminer/version"
_ "net/http/pprof"
"github.com/kaspanet/kaspad/signal"
"github.com/kaspanet/kaspad/util/panics"
)
func main() {
defer panics.HandlePanic(log, nil, nil)
defer panics.HandlePanic(log, nil)
interrupt := signal.InterruptListener()
cfg, err := parseConfig()
@@ -28,6 +33,17 @@ func main() {
enableRPCLogging()
}
// Enable http profiling server if requested.
if cfg.Profile != "" {
spawn(func() {
listenAddr := net.JoinHostPort("", cfg.Profile)
log.Infof("Profile server listening on %s", listenAddr)
profileRedirect := http.RedirectHandler("/debug/pprof", http.StatusSeeOther)
http.Handle("/", profileRedirect)
log.Errorf("%s", http.ListenAndServe(listenAddr, nil))
})
}
client, err := connectToServer(cfg)
if err != nil {
panic(errors.Wrap(err, "Error connecting to the RPC server"))
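Usage note: since the blank net/http/pprof import registers its handlers on http.DefaultServeMux, and ListenAndServe is called with a nil handler, the profiles are served under /debug/pprof once the miner is started with --profile. For example (port illustrative), go tool pprof http://localhost:6061/debug/pprof/profile fetches a CPU profile.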


@@ -7,6 +7,7 @@ import (
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/kaspanet/kaspad/rpcclient"
@@ -20,6 +21,9 @@ import (
)
var random = rand.New(rand.NewSource(time.Now().UnixNano()))
var hashesTried uint64
const logHashRateInterval = 10 * time.Second
func mineLoop(client *minerClient, numberOfBlocks uint64, blockDelay uint64) error {
errChan := make(chan error)
@@ -50,6 +54,8 @@ func mineLoop(client *minerClient, numberOfBlocks uint64, blockDelay uint64) err
doneChan <- struct{}{}
})
logHashRate()
select {
case err := <-errChan:
return err
@@ -58,10 +64,30 @@ func mineLoop(client *minerClient, numberOfBlocks uint64, blockDelay uint64) err
}
}
func logHashRate() {
spawn(func() {
lastCheck := time.Now()
for range time.Tick(logHashRateInterval) {
currentHashesTried := hashesTried
currentTime := time.Now()
kiloHashesTried := float64(currentHashesTried) / 1000.0
hashRate := kiloHashesTried / currentTime.Sub(lastCheck).Seconds()
log.Infof("Current hash rate is %.2f Khash/s", hashRate)
lastCheck = currentTime
// subtract from hashesTried the hashes we already sampled
atomic.AddUint64(&hashesTried, -currentHashesTried)
}
})
}
func mineNextBlock(client *minerClient, foundBlock chan *util.Block, templateStopChan chan struct{}, errChan chan error) {
newTemplateChan := make(chan *rpcmodel.GetBlockTemplateResult)
go templatesLoop(client, newTemplateChan, errChan, templateStopChan)
go solveLoop(newTemplateChan, foundBlock, errChan)
spawn(func() {
templatesLoop(client, newTemplateChan, errChan, templateStopChan)
})
spawn(func() {
solveLoop(newTemplateChan, foundBlock, errChan)
})
}
func handleFoundBlock(client *minerClient, block *util.Block) error {
@@ -131,6 +157,7 @@ func solveBlock(block *util.Block, stopChan chan struct{}, foundBlock chan *util
default:
msgBlock.Header.Nonce = i
hash := msgBlock.BlockHash()
atomic.AddUint64(&hashesTried, 1)
if daghash.HashToBig(hash).Cmp(targetDifficulty) <= 0 {
foundBlock <- block
return
@@ -193,7 +220,9 @@ func solveLoop(newTemplateChan chan *rpcmodel.GetBlockTemplateResult, foundBlock
return
}
go solveBlock(block, stopOldTemplateSolving, foundBlock)
spawn(func() {
solveBlock(block, stopOldTemplateSolving, foundBlock)
})
}
if stopOldTemplateSolving != nil {
close(stopOldTemplateSolving)
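The hash-rate logging added in this file follows a sample-and-drain pattern: each solver goroutine atomically increments a shared counter per attempted nonce, and a single sampler periodically turns the count into a rate and subtracts what it has already reported. A condensed, runnable sketch of the pattern (not the miner's exact code):

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

var hashesTried uint64

func worker() {
	for {
		// ... hash a candidate block here ...
		atomic.AddUint64(&hashesTried, 1)
	}
}

func sampleHashRate(interval time.Duration) {
	lastCheck := time.Now()
	for range time.Tick(interval) {
		tried := atomic.LoadUint64(&hashesTried)
		now := time.Now()
		rate := float64(tried) / 1000.0 / now.Sub(lastCheck).Seconds()
		fmt.Printf("Current hash rate is %.2f Khash/s\n", rate)
		lastCheck = now
		// Drain what was just reported; unary minus on a uint64 wraps,
		// which is exactly the decrement AddUint64 expects.
		atomic.AddUint64(&hashesTried, -tried)
	}
}

func main() {
	go worker()
	sampleHashRate(10 * time.Second)
}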


@@ -1,50 +0,0 @@
package version
import (
"fmt"
"strings"
)
// validCharacters is a list of characters valid in the appBuild string
const validCharacters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-"
const (
appMajor uint = 0
appMinor uint = 1
appPatch uint = 0
)
// appBuild is defined as a variable so it can be overridden during the build
// process with '-ldflags "-X github.com/kaspanet/kaspad/cmd/kaspaminer/version.appBuild=foo"' if needed.
// It MUST only contain characters from validCharacters.
var appBuild string
var version = "" // string used for memoization of version
// Version returns the application version as a properly formed string
func Version() string {
if version == "" {
// Start with the major, minor, and patch versions.
version = fmt.Sprintf("%d.%d.%d", appMajor, appMinor, appPatch)
// Append build metadata if there is any.
// Panic if any invalid characters are encountered
if appBuild != "" {
checkAppBuild(appBuild)
version = fmt.Sprintf("%s-%s", version, appBuild)
}
}
return version
}
// checkAppBuild verifies that appBuild does not contain any characters outside of validCharacters.
// In case of any invalid characters checkAppBuild panics
func checkAppBuild(appBuild string) {
for _, r := range appBuild {
if !strings.ContainsRune(validCharacters, r) {
panic(fmt.Errorf("appBuild string (%s) contains forbidden characters. Only alphanumeric characters and dashes are allowed", appBuild))
}
}
}


@@ -4,10 +4,11 @@ import (
"bytes"
"encoding/hex"
"fmt"
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
"os"
)
@@ -27,7 +28,11 @@ func main() {
printErrorAndExit(err, "Failed to decode transaction")
}
scriptPubKey, err := createScriptPubKey(privateKey.PubKey())
pubkey, err := privateKey.SchnorrPublicKey()
if err != nil {
printErrorAndExit(err, "Failed to generate a public key")
}
scriptPubKey, err := createScriptPubKey(pubkey)
if err != nil {
printErrorAndExit(err, "Failed to create scriptPubKey")
}
@@ -45,26 +50,38 @@ func main() {
fmt.Printf("Signed Transaction (hex): %s\n\n", serializedTransaction)
}
func parsePrivateKey(privateKeyHex string) (*ecc.PrivateKey, error) {
func parsePrivateKey(privateKeyHex string) (*secp256k1.PrivateKey, error) {
privateKeyBytes, err := hex.DecodeString(privateKeyHex)
privateKey, _ := ecc.PrivKeyFromBytes(ecc.S256(), privateKeyBytes)
return privateKey, err
if err != nil {
return nil, errors.Errorf("'%s' isn't a valid hex. err: '%s' ", privateKeyHex, err)
}
return secp256k1.DeserializePrivateKeyFromSlice(privateKeyBytes)
}
func parseTransaction(transactionHex string) (*wire.MsgTx, error) {
serializedTx, err := hex.DecodeString(transactionHex)
if err != nil {
return nil, errors.Wrap(err, "couldn't decode transaction hex")
}
var transaction wire.MsgTx
err = transaction.Deserialize(bytes.NewReader(serializedTx))
return &transaction, err
}
func createScriptPubKey(publicKey *ecc.PublicKey) ([]byte, error) {
p2pkhAddress, err := util.NewAddressPubKeyHashFromPublicKey(publicKey.SerializeCompressed(), ActiveConfig().NetParams().Prefix)
func createScriptPubKey(publicKey *secp256k1.SchnorrPublicKey) ([]byte, error) {
serializedKey, err := publicKey.SerializeCompressed()
if err != nil {
return nil, err
}
p2pkhAddress, err := util.NewAddressPubKeyHashFromPublicKey(serializedKey, ActiveConfig().NetParams().Prefix)
if err != nil {
return nil, err
}
scriptPubKey, err := txscript.PayToAddrScript(p2pkhAddress)
return scriptPubKey, err
}
func signTransaction(transaction *wire.MsgTx, privateKey *ecc.PrivateKey, scriptPubKey []byte) error {
func signTransaction(transaction *wire.MsgTx, privateKey *secp256k1.PrivateKey, scriptPubKey []byte) error {
for i, transactionInput := range transaction.TxIn {
signatureScript, err := txscript.SignatureScript(transaction, i, scriptPubKey, txscript.SigHashAll, privateKey, true)
if err != nil {


@@ -56,8 +56,6 @@ const (
DefaultMaxOrphanTxSize = 100000
defaultSigCacheMaxSize = 100000
sampleConfigFilename = "sample-kaspad.conf"
defaultTxIndex = false
defaultAddrIndex = false
defaultAcceptanceIndex = false
)
@@ -79,15 +77,6 @@ var activeConfig *Config
// to parse and execute service commands specified via the -s flag.
var RunServiceCommand func(string) error
// minUint32 is a helper function to return the minimum of two uint32s.
// This avoids a math import and the need to cast to floats.
func minUint32(a, b uint32) uint32 {
if a < b {
return a
}
return b
}
// Flags defines the configuration options for kaspad.
//
// See loadConfig for details on the configuration load process.
@@ -137,11 +126,7 @@ type Flags struct {
NoPeerBloomFilters bool `long:"nopeerbloomfilters" description:"Disable bloom filtering support"`
SigCacheMaxSize uint `long:"sigcachemaxsize" description:"The maximum number of entries in the signature verification cache"`
BlocksOnly bool `long:"blocksonly" description:"Do not accept transactions from remote peers."`
TxIndex bool `long:"txindex" description:"Maintain a full hash-based transaction index which makes all transactions available via the getrawtransaction RPC"`
DropTxIndex bool `long:"droptxindex" description:"Deletes the hash-based transaction index from the database on start up and then exits."`
AddrIndex bool `long:"addrindex" description:"Maintain a full address-based transaction index which makes the searchrawtransactions RPC available"`
DropAddrIndex bool `long:"dropaddrindex" description:"Deletes the address-based transaction index from the database on start up and then exits."`
AcceptanceIndex bool `long:"acceptanceindex" description:"Maintain a full hash-based acceptance index which makes the getChainByBlock RPC available"`
AcceptanceIndex bool `long:"acceptanceindex" description:"Maintain a full hash-based acceptance index which makes the getChainFromBlock RPC available"`
DropAcceptanceIndex bool `long:"dropacceptanceindex" description:"Deletes the hash-based acceptance index from the database on start up and then exits."`
RelayNonStd bool `long:"relaynonstd" description:"Relay non-standard transactions regardless of the default settings for the active network."`
RejectNonStd bool `long:"rejectnonstd" description:"Reject non-standard transactions regardless of the default settings for the active network."`
@@ -218,6 +203,12 @@ func ActiveConfig() *Config {
return activeConfig
}
// SetActiveConfig sets the active config
// to the given config.
func SetActiveConfig(cfg *Config) {
activeConfig = cfg
}
// loadConfig initializes and parses the config using a config file and command
// line options.
//
@@ -251,8 +242,6 @@ func loadConfig() (*Config, []string, error) {
MaxOrphanTxs: defaultMaxOrphanTransactions,
SigCacheMaxSize: defaultSigCacheMaxSize,
MinRelayTxFee: defaultMinRelayTxFee,
TxIndex: defaultTxIndex,
AddrIndex: defaultAddrIndex,
AcceptanceIndex: defaultAcceptanceIndex,
}
@@ -267,7 +256,8 @@ func loadConfig() (*Config, []string, error) {
preParser := newConfigParser(&preCfg, &serviceOpts, flags.HelpFlag)
_, err := preParser.Parse()
if err != nil {
if e, ok := err.(*flags.Error); ok && e.Type == flags.ErrHelp {
var flagsErr *flags.Error
if ok := errors.As(err, &flagsErr); ok && flagsErr.Type == flags.ErrHelp {
fmt.Fprintln(os.Stderr, err)
return nil, nil, err
}
@@ -313,7 +303,7 @@ func loadConfig() (*Config, []string, error) {
err := flags.NewIniParser(parser).ParseFile(preCfg.ConfigFile)
if err != nil {
if _, ok := err.(*os.PathError); !ok {
if pErr := &(os.PathError{}); !errors.As(err, &pErr) {
fmt.Fprintf(os.Stderr, "Error parsing config "+
"file: %s\n", err)
fmt.Fprintln(os.Stderr, usageMessage)
@@ -331,7 +321,8 @@ func loadConfig() (*Config, []string, error) {
// Parse command line options again to ensure they take precedence.
remainingArgs, err := parser.Parse()
if err != nil {
if e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp {
var flagsErr *flags.Error
if ok := errors.As(err, &flagsErr); !ok || flagsErr.Type != flags.ErrHelp {
fmt.Fprintln(os.Stderr, usageMessage)
}
return nil, nil, err
@@ -344,7 +335,8 @@ func loadConfig() (*Config, []string, error) {
// Show a nicer error message if it's because a symlink is
// linked to a directory that does not exist (probably because
// it's not mounted).
if e, ok := err.(*os.PathError); ok && os.IsExist(err) {
var e *os.PathError
if ok := errors.As(err, &e); ok && os.IsExist(err) {
if link, lerr := os.Readlink(e.Path); lerr == nil {
str := "is symlink %s -> %s mounted?"
err = errors.Errorf(str, e.Path, link)
@@ -588,7 +580,7 @@ func loadConfig() (*Config, []string, error) {
}
// Disallow 0 and negative min tx fees.
if activeConfig.MinRelayTxFee <= 0 {
if activeConfig.MinRelayTxFee == 0 {
str := "%s: The minrelaytxfee option must be greater than 0 -- parsed [%d]"
err := errors.Errorf(str, funcName, activeConfig.MinRelayTxFee)
fmt.Fprintln(os.Stderr, err)
@@ -631,38 +623,6 @@ func loadConfig() (*Config, []string, error) {
}
}
// --txindex and --droptxindex do not mix.
if activeConfig.TxIndex && activeConfig.DropTxIndex {
err := errors.Errorf("%s: the --txindex and --droptxindex "+
"options may not be activated at the same time",
funcName)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// --addrindex and --dropaddrindex do not mix.
if activeConfig.AddrIndex && activeConfig.DropAddrIndex {
err := errors.Errorf("%s: the --addrindex and --dropaddrindex "+
"options may not be activated at the same time",
funcName)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// --addrindex and --droptxindex do not mix.
if activeConfig.AddrIndex && activeConfig.DropTxIndex {
err := errors.Errorf("%s: the --addrindex and --droptxindex "+
"options may not be activated at the same time "+
"because the address index relies on the transaction "+
"index",
funcName)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// --acceptanceindex and --dropacceptanceindex do not mix.
if activeConfig.AcceptanceIndex && activeConfig.DropAcceptanceIndex {
err := errors.Errorf("%s: the --acceptanceindex and --dropacceptanceindex "+
@@ -705,13 +665,19 @@ func loadConfig() (*Config, []string, error) {
// Add default port to all listener addresses if needed and remove
// duplicate addresses.
activeConfig.Listeners = network.NormalizeAddresses(activeConfig.Listeners,
activeConfig.Listeners, err = network.NormalizeAddresses(activeConfig.Listeners,
activeConfig.NetParams().DefaultPort)
if err != nil {
return nil, nil, err
}
// Add default port to all rpc listener addresses if needed and remove
// duplicate addresses.
activeConfig.RPCListeners = network.NormalizeAddresses(activeConfig.RPCListeners,
activeConfig.RPCListeners, err = network.NormalizeAddresses(activeConfig.RPCListeners,
activeConfig.NetParams().RPCPort)
if err != nil {
return nil, nil, err
}
// Only allow TLS to be disabled if the RPC is bound to localhost
// addresses.
@@ -745,10 +711,17 @@ func loadConfig() (*Config, []string, error) {
// Add default port to all added peer addresses if needed and remove
// duplicate addresses.
activeConfig.AddPeers = network.NormalizeAddresses(activeConfig.AddPeers,
activeConfig.AddPeers, err = network.NormalizeAddresses(activeConfig.AddPeers,
activeConfig.NetParams().DefaultPort)
activeConfig.ConnectPeers = network.NormalizeAddresses(activeConfig.ConnectPeers,
if err != nil {
return nil, nil, err
}
activeConfig.ConnectPeers, err = network.NormalizeAddresses(activeConfig.ConnectPeers,
activeConfig.NetParams().DefaultPort)
if err != nil {
return nil, nil, err
}
// Setup dial and DNS resolution (lookup) functions depending on the
// specified options. The default is to use the standard
@@ -808,18 +781,14 @@ func createDefaultConfigFile(destinationPath string) error {
return err
}
generatedRPCUser := base64.StdEncoding.EncodeToString(randomBytes)
rpcUserString := fmt.Sprintf("rpcuser=%s\n", generatedRPCUser)
_, err = rand.Read(randomBytes)
if err != nil {
return err
}
generatedRPCPass := base64.StdEncoding.EncodeToString(randomBytes)
src, err := os.Open(sampleConfigPath)
if err != nil {
return err
}
defer src.Close()
rpcPassString := fmt.Sprintf("rpcpass=%s\n", generatedRPCPass)
dest, err := os.OpenFile(destinationPath,
os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
@@ -828,6 +797,23 @@ func createDefaultConfigFile(destinationPath string) error {
}
defer dest.Close()
// If the sample config file is missing because e.g. kaspad was
// installed using go install, simply create the destination
// file and write the RPC credentials into it as is.
if _, err := os.Stat(sampleConfigPath); os.IsNotExist(err) {
toWrite := rpcUserString + rpcPassString
if _, err := dest.WriteString(toWrite); err != nil {
return err
}
return nil
}
src, err := os.Open(sampleConfigPath)
if err != nil {
return err
}
defer src.Close()
// We copy every line from the sample config file to the destination,
// only replacing the two lines for rpcuser and rpcpass
reader := bufio.NewReader(src)
@@ -839,9 +825,9 @@ func createDefaultConfigFile(destinationPath string) error {
}
if strings.Contains(line, "rpcuser=") {
line = "rpcuser=" + generatedRPCUser + "\n"
line = rpcUserString
} else if strings.Contains(line, "rpcpass=") {
line = "rpcpass=" + generatedRPCPass + "\n"
line = rpcPassString
}
if _, err := dest.WriteString(line); err != nil {


@@ -1,7 +1,7 @@
connmgr
=======
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad/connmgr)
Package connmgr implements a generic Kaspa network connection manager.


@@ -89,8 +89,8 @@ type ConnReq struct {
// updateState updates the state of the connection request.
func (c *ConnReq) updateState(state ConnState) {
c.stateMtx.Lock()
defer c.stateMtx.Unlock()
c.state = state
c.stateMtx.Unlock()
}
// ID returns a unique identifier for the connection request.
@@ -101,8 +101,8 @@ func (c *ConnReq) ID() uint64 {
// State is the connection state of the requested connection.
func (c *ConnReq) State() ConnState {
c.stateMtx.RLock()
defer c.stateMtx.RUnlock()
state := c.state
c.stateMtx.RUnlock()
return state
}
@@ -234,7 +234,7 @@ func (cm *ConnManager) handleFailedConn(c *ConnReq, err error) {
}
spawnAfter(d, func() {
cm.Connect(c)
}, nil)
})
} else if cm.cfg.GetNewAddress != nil {
cm.failedAttempts++
if cm.failedAttempts >= maxFailedAttempts {
@@ -243,9 +243,9 @@ func (cm *ConnManager) handleFailedConn(c *ConnReq, err error) {
"-- retrying further connections every %s", maxFailedAttempts,
cm.cfg.RetryDuration)
}
spawnAfter(cm.cfg.RetryDuration, cm.NewConnReq, cm.handlePanic)
spawnAfter(cm.cfg.RetryDuration, cm.NewConnReq)
} else {
spawn(cm.NewConnReq, cm.handlePanic)
spawn(cm.NewConnReq)
}
}
}
@@ -336,7 +336,6 @@ out:
conns[connReq.id] = connReq
log.Debugf("Connected to %s", connReq)
connReq.retryCount = 0
cm.failedAttempts = 0
delete(pending, connReq.id)
@@ -376,7 +375,9 @@ out:
}
if cm.cfg.OnDisconnection != nil {
go cm.cfg.OnDisconnection(connReq)
spawn(func() {
cm.cfg.OnDisconnection(connReq)
})
}
// All internal state has been cleaned up, if
@@ -429,6 +430,13 @@ out:
log.Trace("Connection handler done")
}
// NotifyConnectionRequestComplete notifies the connection
// manager that a peer has been successfully connected and
// marked as good.
func (cm *ConnManager) NotifyConnectionRequestComplete() {
cm.failedAttempts = 0
}
// NewConnReq creates a new connection request and connects to the
// corresponding address.
func (cm *ConnManager) NewConnReq() {
@@ -566,7 +574,9 @@ func (cm *ConnManager) listenHandler(listener net.Listener) {
}
continue
}
go cm.cfg.OnAccept(conn)
spawn(func() {
cm.cfg.OnAccept(conn)
})
}
cm.wg.Done()
@@ -582,19 +592,24 @@ func (cm *ConnManager) Start() {
log.Trace("Connection manager started")
cm.wg.Add(1)
spawn(cm.connHandler, cm.handlePanic)
spawn(cm.connHandler)
// Start all the listeners so long as the caller requested them and
// provided a callback to be invoked when connections are accepted.
if cm.cfg.OnAccept != nil {
for _, listner := range cm.cfg.Listeners {
for _, listener := range cm.cfg.Listeners {
// Declaring this variable is necessary as it needs to be declared in the
// same scope as the anonymous function below it.
listenerCopy := listener
cm.wg.Add(1)
go cm.listenHandler(listner)
spawn(func() {
cm.listenHandler(listenerCopy)
})
}
}
for i := atomic.LoadUint64(&cm.connReqCount); i < uint64(cm.cfg.TargetOutbound); i++ {
spawn(cm.NewConnReq, cm.handlePanic)
spawn(cm.NewConnReq)
}
}
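The listenerCopy variable above works around the classic Go loop-variable capture pitfall (pre-Go 1.22 semantics): closures created inside the loop share one loop variable, so without the copy every spawned handler could end up serving the last listener. A minimal reproduction:

package main

import "fmt"

func main() {
	funcs := make([]func(), 0, 3)
	for _, v := range []int{1, 2, 3} {
		v := v // without this copy, pre-Go 1.22 closures all share one v
		funcs = append(funcs, func() { fmt.Println(v) })
	}
	for _, f := range funcs {
		f() // prints 1, 2, 3; without the copy it would print 3, 3, 3
	}
}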
@@ -603,10 +618,6 @@ func (cm *ConnManager) Wait() {
cm.wg.Wait()
}
func (cm *ConnManager) handlePanic() {
atomic.AddInt32(&cm.stop, 1)
}
// Stop gracefully shuts down the connection manager.
func (cm *ConnManager) Stop() {
if atomic.AddInt32(&cm.stop, 1) != 1 {


@@ -68,9 +68,9 @@ type DynamicBanScore struct {
// String returns the ban score as a human-readable string.
func (s *DynamicBanScore) String() string {
s.mtx.Lock()
defer s.mtx.Unlock()
r := fmt.Sprintf("persistent %d + transient %f at %d = %d as of now",
s.persistent, s.transient, s.lastUnix, s.Int())
s.mtx.Unlock()
return r
}
@@ -80,8 +80,8 @@ func (s *DynamicBanScore) String() string {
// This function is safe for concurrent access.
func (s *DynamicBanScore) Int() uint32 {
s.mtx.Lock()
defer s.mtx.Unlock()
r := s.int(time.Now())
s.mtx.Unlock()
return r
}
@@ -91,8 +91,8 @@ func (s *DynamicBanScore) Int() uint32 {
// This function is safe for concurrent access.
func (s *DynamicBanScore) Increase(persistent, transient uint32) uint32 {
s.mtx.Lock()
defer s.mtx.Unlock()
r := s.increase(persistent, transient, time.Now())
s.mtx.Unlock()
return r
}
@@ -101,10 +101,10 @@ func (s *DynamicBanScore) Increase(persistent, transient uint32) uint32 {
// This function is safe for concurrent access.
func (s *DynamicBanScore) Reset() {
s.mtx.Lock()
defer s.mtx.Unlock()
s.persistent = 0
s.transient = 0
s.lastUnix = 0
s.mtx.Unlock()
}
// int returns the ban score, the sum of the persistent and decaying scores at a
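All four hunks in this file make the same mechanical change: the manual s.mtx.Unlock() before each return becomes a defer s.mtx.Unlock() immediately after the Lock. A minimal sketch of why that shape is preferred, using a hypothetical counter type rather than anything from this codebase: the deferred unlock runs on every exit path, including panics, so a panic inside the critical section cannot leave the mutex held.

package main

import (
	"fmt"
	"sync"
)

type counter struct {
	mtx sync.Mutex
	n   int
}

func (c *counter) Increment() int {
	c.mtx.Lock()
	defer c.mtx.Unlock() // runs on every exit, including panics
	c.n++
	return c.n
}

func main() {
	var c counter
	fmt.Println(c.Increment()) // 1
}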


@@ -38,8 +38,8 @@ func TestDynamicBanScoreLifetime(t *testing.T) {
var bs DynamicBanScore
base := time.Now()
r := bs.increase(0, math.MaxUint32, base)
r = bs.int(base.Add(Lifetime * time.Second))
bs.increase(0, math.MaxUint32, base)
r := bs.int(base.Add(Lifetime * time.Second))
if r != 3 { // 3, not 4 due to precision loss and truncating 3.999...
t.Errorf("Pre max age check with MaxUint32 failed - %d", r)
}
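The "3, not 4" comment is straightforward decay arithmetic. Assuming the transient score halves every 60 seconds and Lifetime is 1800 seconds, the values this package inherits from btcd (an assumption here, since those constants sit outside the hunk), the score after one full lifetime is (2^32 - 1) * 2^(-1800/60) ≈ 3.999..., which truncates to 3:

package main

import (
	"fmt"
	"math"
)

func main() {
	const halflife = 60.0  // seconds, assumed half-life of the transient score
	const lifetime = 1800.0 // seconds, assumed value of Lifetime
	// MaxUint32 scaled by an exponential decay over one full lifetime.
	decayed := float64(math.MaxUint32) * math.Pow(2, -lifetime/halflife)
	fmt.Println(decayed)         // ≈ 3.9999999990686774
	fmt.Println(uint32(decayed)) // 3 after truncation
}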


@@ -10,5 +10,5 @@ import (
)
var log, _ = logger.Get(logger.SubsystemTags.CMGR)
var spawn = panics.GoroutineWrapperFuncWithPanicHandler(log)
var spawnAfter = panics.AfterFuncWrapperFuncWithPanicHandler(log)
var spawn = panics.GoroutineWrapperFunc(log)
var spawnAfter = panics.AfterFuncWrapperFunc(log)
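This commit swaps the WithPanicHandler wrapper variants for plain ones, so callers no longer pass a per-module panic callback. The real implementations live in util/panics; the following is only a sketch of the presumed shape, not the actual code: the returned spawn runs a function on a new goroutine and logs a recovered panic instead of letting the goroutine die silently (the real wrapper likely also dumps the stack and triggers shutdown).

package main

import (
	"log"
	"os"
	"sync"
)

// goroutineWrapperFunc is a toy stand-in for panics.GoroutineWrapperFunc
// (an assumption about its shape, not the real implementation).
func goroutineWrapperFunc(logger *log.Logger) func(f func()) {
	return func(f func()) {
		go func() {
			defer func() {
				if r := recover(); r != nil {
					logger.Printf("goroutine panicked: %v", r)
				}
			}()
			f()
		}()
	}
}

func main() {
	logger := log.New(os.Stderr, "", log.LstdFlags)
	spawn := goroutineWrapperFunc(logger)

	var wg sync.WaitGroup
	wg.Add(1)
	spawn(func() {
		defer wg.Done()
		panic("boom") // recovered and logged by the wrapper, not fatal
	})
	wg.Wait()
}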


@@ -66,7 +66,7 @@ func SeedFromDNS(dagParams *dagconfig.Params, reqServices wire.ServiceFlag, incl
}
}
go func(host string) {
spawn(func() {
randSource := mrand.New(mrand.NewSource(time.Now().UnixNano()))
seedpeers, err := lookupFn(host)
@@ -94,6 +94,6 @@ func SeedFromDNS(dagParams *dagconfig.Params, reqServices wire.ServiceFlag, incl
}
seedFn(addresses)
}(host)
})
}
}
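Each spawned seeder lookup builds its own math/rand source. The diff does not state the motivation, but a plausible one is that a *rand.Rand is not safe for concurrent use and a private source avoids contending on the global source's lock. A minimal sketch with hypothetical seeder hostnames:

package main

import (
	"fmt"
	mrand "math/rand"
	"sync"
	"time"
)

func main() {
	hosts := []string{"dnsseed1.example", "dnsseed2.example"} // hypothetical seeders
	var wg sync.WaitGroup
	for _, host := range hosts {
		hostCopy := host // fresh variable per iteration before capture
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Each goroutine owns its source: a *rand.Rand must not be
			// shared across goroutines without external locking.
			randSource := mrand.New(mrand.NewSource(time.Now().UnixNano()))
			fmt.Println(hostCopy, randSource.Intn(1000))
		}()
	}
	wg.Wait()
}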


@@ -1,7 +1,7 @@
dagconfig
========
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad/dagconfig)
Package dagconfig defines DAG configuration parameters for the standard


@@ -5,6 +5,7 @@
package dagconfig
import (
"github.com/kaspanet/kaspad/util/network"
"math"
"math/big"
"time"
@@ -47,9 +48,14 @@ var (
devnetPowMax = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 239), bigOne)
)
const ghostdagK = 10
const difficultyAdjustmentWindowSize = 2640
const timestampDeviationTolerance = 132
const (
ghostdagK = 10
difficultyAdjustmentWindowSize = 2640
timestampDeviationTolerance = 132
finalityDuration = 24 * time.Hour
targetTimePerBlock = 1 * time.Second
finalityInterval = uint64(finalityDuration / targetTimePerBlock)
)
// ConsensusDeployment defines details related to a specific consensus rule
// change that is voted in. This is part of BIP0009.
@@ -132,7 +138,7 @@ type Params struct {
TargetTimePerBlock time.Duration
// FinalityInterval is the interval that determines the finality window of the DAG.
FinalityInterval int
FinalityInterval uint64
// TimestampDeviationTolerance is the maximum offset a block timestamp
// is allowed to be in the future before it gets delayed
@@ -180,6 +186,12 @@ type Params struct {
HDCoinType uint32
}
// NormalizeRPCServerAddress returns addr with the current network default
// port appended if there is not already a port specified.
func (p *Params) NormalizeRPCServerAddress(addr string) (string, error) {
return network.NormalizeAddress(addr, p.RPCPort)
}
// MainnetParams defines the network parameters for the main Kaspa network.
var MainnetParams = Params{
K: ghostdagK,
@@ -195,8 +207,8 @@ var MainnetParams = Params{
PowMax: mainPowMax,
BlockCoinbaseMaturity: 100,
SubsidyReductionInterval: 210000,
TargetTimePerBlock: time.Second * 1, // 1 second
FinalityInterval: 1000,
TargetTimePerBlock: targetTimePerBlock,
FinalityInterval: finalityInterval,
DifficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize,
TimestampDeviationTolerance: timestampDeviationTolerance,
@@ -252,8 +264,8 @@ var RegressionNetParams = Params{
PowMax: regressionPowMax,
BlockCoinbaseMaturity: 100,
SubsidyReductionInterval: 150,
TargetTimePerBlock: time.Second * 1, // 1 second
FinalityInterval: 1000,
TargetTimePerBlock: targetTimePerBlock,
FinalityInterval: finalityInterval,
DifficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize,
TimestampDeviationTolerance: timestampDeviationTolerance,
@@ -307,8 +319,8 @@ var TestnetParams = Params{
PowMax: testnetPowMax,
BlockCoinbaseMaturity: 100,
SubsidyReductionInterval: 210000,
TargetTimePerBlock: time.Second * 1, // 1 second
FinalityInterval: 1000,
TargetTimePerBlock: targetTimePerBlock,
FinalityInterval: finalityInterval,
DifficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize,
TimestampDeviationTolerance: timestampDeviationTolerance,
@@ -368,8 +380,8 @@ var SimnetParams = Params{
PowMax: simnetPowMax,
BlockCoinbaseMaturity: 100,
SubsidyReductionInterval: 210000,
TargetTimePerBlock: time.Second * 1, // 1 second
FinalityInterval: 1000,
TargetTimePerBlock: targetTimePerBlock,
FinalityInterval: finalityInterval,
DifficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize,
TimestampDeviationTolerance: timestampDeviationTolerance,
@@ -421,8 +433,8 @@ var DevnetParams = Params{
PowMax: devnetPowMax,
BlockCoinbaseMaturity: 100,
SubsidyReductionInterval: 210000,
TargetTimePerBlock: time.Second * 1, // 1 second
FinalityInterval: 1000,
TargetTimePerBlock: targetTimePerBlock,
FinalityInterval: finalityInterval,
DifficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize,
TimestampDeviationTolerance: timestampDeviationTolerance,
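The parameter hunks above replace the literal FinalityInterval of 1000 with a value derived from the two time constants, making the relationship explicit: at one block per second, a 24-hour finality window is 86400 blocks. The derivation compiles as a constant expression:

package main

import (
	"fmt"
	"time"
)

const (
	finalityDuration   = 24 * time.Hour
	targetTimePerBlock = 1 * time.Second
	// Dividing two durations yields a dimensionless block count.
	finalityInterval = uint64(finalityDuration / targetTimePerBlock)
)

func main() {
	fmt.Println(finalityInterval) // 86400
}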


@@ -1,7 +1,7 @@
database
========
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad/database)
Package database provides a block and metadata storage database.


@@ -1,391 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"encoding/binary"
"github.com/pkg/errors"
"io"
"os"
"sync"
"time"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
)
// importCmd defines the configuration options for the insecureimport command.
type importCmd struct {
InFile string `short:"i" long:"infile" description:"File containing the block(s)"`
Progress int `short:"p" long:"progress" description:"Show a progress message each time this number of seconds have passed -- Use 0 to disable progress announcements"`
}
var (
// importCfg defines the configuration options for the command.
importCfg = importCmd{
InFile: "bootstrap.dat",
Progress: 10,
}
)
// importResults houses the stats and result of an import operation.
type importResults struct {
blocksProcessed int64
blocksImported int64
err error
}
// blockImporter houses information about an ongoing import from a block data
// file to the block database.
type blockImporter struct {
db database.DB
r io.ReadSeeker
processQueue chan []byte
doneChan chan bool
errChan chan error
quit chan struct{}
wg sync.WaitGroup
blocksProcessed int64
blocksImported int64
receivedLogBlocks int64
receivedLogTx int64
lastHeight int64
lastBlockTime time.Time
lastLogTime time.Time
}
// readBlock reads the next block from the input file.
func (bi *blockImporter) readBlock() ([]byte, error) {
// The block file format is:
// <network> <block length> <serialized block>
var net uint32
err := binary.Read(bi.r, binary.LittleEndian, &net)
if err != nil {
if err != io.EOF {
return nil, err
}
// No block and no error means there are no more blocks to read.
return nil, nil
}
if net != uint32(activeNetParams.Net) {
return nil, errors.Errorf("network mismatch -- got %x, want %x",
net, uint32(activeNetParams.Net))
}
// Read the block length and ensure it is sane.
var blockLen uint32
if err := binary.Read(bi.r, binary.LittleEndian, &blockLen); err != nil {
return nil, err
}
if blockLen > wire.MaxMessagePayload {
return nil, errors.Errorf("block payload of %d bytes is larger "+
"than the max allowed %d bytes", blockLen,
wire.MaxMessagePayload)
}
serializedBlock := make([]byte, blockLen)
if _, err := io.ReadFull(bi.r, serializedBlock); err != nil {
return nil, err
}
return serializedBlock, nil
}
// processBlock potentially imports the block into the database. It first
// deserializes the raw block while checking for errors. Already known blocks
// are skipped and orphan blocks are considered errors. Returns whether the
// block was imported along with any potential errors.
//
// NOTE: This is not a safe import as it does not verify DAG rules.
func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
// Deserialize the block which includes checks for malformed blocks.
block, err := util.NewBlockFromBytes(serializedBlock)
if err != nil {
return false, err
}
// update progress statistics
bi.lastBlockTime = block.MsgBlock().Header.Timestamp
bi.receivedLogTx += int64(len(block.MsgBlock().Transactions))
// Skip blocks that already exist.
var exists bool
err = bi.db.View(func(dbTx database.Tx) error {
exists, err = dbTx.HasBlock(block.Hash())
return err
})
if err != nil {
return false, err
}
if exists {
return false, nil
}
// Don't bother trying to process orphans.
parentHashes := block.MsgBlock().Header.ParentHashes
for _, parentHash := range parentHashes {
var exists bool
err := bi.db.View(func(dbTx database.Tx) error {
exists, err = dbTx.HasBlock(parentHash)
return err
})
if err != nil {
return false, err
}
if !exists {
return false, errors.Errorf("import file contains block "+
"%s which does not link to the available "+
"block DAG", parentHash)
}
}
// Put the blocks into the database with no checking of DAG rules.
err = bi.db.Update(func(dbTx database.Tx) error {
return dbTx.StoreBlock(block)
})
if err != nil {
return false, err
}
return true, nil
}
// readHandler is the main handler for reading blocks from the import file.
// This allows block processing to take place in parallel with block reads.
// It must be run as a goroutine.
func (bi *blockImporter) readHandler() {
out:
for {
// Read the next block from the file and if anything goes wrong
// notify the status handler with the error and bail.
serializedBlock, err := bi.readBlock()
if err != nil {
bi.errChan <- errors.Errorf("Error reading from input "+
"file: %s", err.Error())
break out
}
// A nil block with no error means we're done.
if serializedBlock == nil {
break out
}
// Send the block or quit if we've been signalled to exit by
// the status handler due to an error elsewhere.
select {
case bi.processQueue <- serializedBlock:
case <-bi.quit:
break out
}
}
// Close the processing channel to signal no more blocks are coming.
close(bi.processQueue)
bi.wg.Done()
}
// logProgress logs block progress as an information message. In order to
// prevent spam, it limits logging to one message every importCfg.Progress
// seconds with duration and totals included.
func (bi *blockImporter) logProgress() {
bi.receivedLogBlocks++
now := time.Now()
duration := now.Sub(bi.lastLogTime)
if duration < time.Second*time.Duration(importCfg.Progress) {
return
}
// Truncate the duration to 10s of milliseconds.
durationMillis := int64(duration / time.Millisecond)
tDuration := 10 * time.Millisecond * time.Duration(durationMillis/10)
// Log information about new block height.
blockStr := "blocks"
if bi.receivedLogBlocks == 1 {
blockStr = "block"
}
txStr := "transactions"
if bi.receivedLogTx == 1 {
txStr = "transaction"
}
log.Infof("Processed %d %s in the last %s (%d %s, height %d, %s)",
bi.receivedLogBlocks, blockStr, tDuration, bi.receivedLogTx,
txStr, bi.lastHeight, bi.lastBlockTime)
bi.receivedLogBlocks = 0
bi.receivedLogTx = 0
bi.lastLogTime = now
}
// processHandler is the main handler for processing blocks. This allows block
// processing to take place in parallel with block reads from the import file.
// It must be run as a goroutine.
func (bi *blockImporter) processHandler() {
out:
for {
select {
case serializedBlock, ok := <-bi.processQueue:
// We're done when the channel is closed.
if !ok {
break out
}
bi.blocksProcessed++
bi.lastHeight++
imported, err := bi.processBlock(serializedBlock)
if err != nil {
bi.errChan <- err
break out
}
if imported {
bi.blocksImported++
}
bi.logProgress()
case <-bi.quit:
break out
}
}
bi.wg.Done()
}
// statusHandler waits for updates from the import operation and notifies
// the passed doneChan with the results of the import. It also causes all
// goroutines to exit if an error is reported from any of them.
func (bi *blockImporter) statusHandler(resultsChan chan *importResults) {
select {
// An error from either of the goroutines means we're done so signal
// caller with the error and signal all goroutines to quit.
case err := <-bi.errChan:
resultsChan <- &importResults{
blocksProcessed: bi.blocksProcessed,
blocksImported: bi.blocksImported,
err: err,
}
close(bi.quit)
// The import finished normally.
case <-bi.doneChan:
resultsChan <- &importResults{
blocksProcessed: bi.blocksProcessed,
blocksImported: bi.blocksImported,
err: nil,
}
}
}
// Import is the core function which handles importing the blocks from the file
// associated with the block importer to the database. It returns a channel
// on which the results will be returned when the operation has completed.
func (bi *blockImporter) Import() chan *importResults {
// Start up the read and process handling goroutines. This setup allows
// blocks to be read from disk in parallel while being processed.
bi.wg.Add(2)
spawn(bi.readHandler)
spawn(bi.processHandler)
// Wait for the import to finish in a separate goroutine and signal
// the status handler when done.
spawn(func() {
bi.wg.Wait()
bi.doneChan <- true
})
// Start the status handler and return the result channel that it will
// send the results on when the import is done.
resultChan := make(chan *importResults)
go bi.statusHandler(resultChan)
return resultChan
}
// newBlockImporter returns a new importer for the provided file reader seeker
// and database.
func newBlockImporter(db database.DB, r io.ReadSeeker) *blockImporter {
return &blockImporter{
db: db,
r: r,
processQueue: make(chan []byte, 2),
doneChan: make(chan bool),
errChan: make(chan error),
quit: make(chan struct{}),
lastLogTime: time.Now(),
}
}
// Execute is the main entry point for the command. It's invoked by the parser.
func (cmd *importCmd) Execute(args []string) error {
// Setup the global config options and ensure they are valid.
if err := setupGlobalConfig(); err != nil {
return err
}
// Ensure the specified block file exists.
if !fileExists(cmd.InFile) {
str := "The specified block file [%s] does not exist"
return errors.Errorf(str, cmd.InFile)
}
// Load the block database.
db, err := loadBlockDB()
if err != nil {
return err
}
defer db.Close()
// Ensure the database is sync'd and closed on Ctrl+C.
addInterruptHandler(func() {
log.Infof("Gracefully shutting down the database...")
db.Close()
})
fi, err := os.Open(importCfg.InFile)
if err != nil {
return err
}
defer fi.Close()
// Create a block importer for the database and input file and start it.
// The results channel returned from start will contain an error if
// anything went wrong.
importer := newBlockImporter(db, fi)
// Perform the import asynchronously and signal the main goroutine when
// done. This allows blocks to be processed and read in parallel. The
// results channel returned from Import contains the statistics about
// the import including an error if something went wrong. This is done
// in a separate goroutine rather than waiting directly so the main
// goroutine can be signaled for shutdown by either completion, error,
// or from the main interrupt handler. This is necessary since the main
// goroutine must be kept running long enough for the interrupt handler
// goroutine to finish.
spawn(func() {
log.Info("Starting import")
resultsChan := importer.Import()
results := <-resultsChan
if results.err != nil {
dbErr, ok := results.err.(database.Error)
if !ok || ok && dbErr.ErrorCode != database.ErrDbNotOpen {
shutdownChannel <- results.err
return
}
}
log.Infof("Processed a total of %d blocks (%d imported, %d "+
"already known)", results.blocksProcessed,
results.blocksImported,
results.blocksProcessed-results.blocksImported)
shutdownChannel <- nil
})
// Wait for shutdown signal from either a normal completion or from the
// interrupt handler.
err = <-shutdownChannel
return err
}
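readBlock in the deleted importer documents the bootstrap file framing: a little-endian uint32 network magic, a little-endian uint32 block length, then the serialized block. For reference, a matching writer sketch; writeFramedBlock is a hypothetical helper and 0xdeadbeef a placeholder magic, not a real kaspad network value:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// writeFramedBlock emits one record in the format readBlock expects:
// <network (uint32 LE)> <block length (uint32 LE)> <serialized block>.
func writeFramedBlock(buf *bytes.Buffer, net uint32, serializedBlock []byte) error {
	if err := binary.Write(buf, binary.LittleEndian, net); err != nil {
		return err
	}
	if err := binary.Write(buf, binary.LittleEndian, uint32(len(serializedBlock))); err != nil {
		return err
	}
	_, err := buf.Write(serializedBlock)
	return err
}

func main() {
	var buf bytes.Buffer
	// 0xdeadbeef is a placeholder network magic for this sketch only.
	if err := writeFramedBlock(&buf, 0xdeadbeef, []byte{0x01, 0x02}); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", buf.Bytes()) // ef be ad de 02 00 00 00 01 02
}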


@@ -1,93 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"time"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/daghash"
)
// headersCmd defines the configuration options for the loadheaders command.
type headersCmd struct {
Bulk bool `long:"bulk" description:"Use bulk loading of headers instead of one at a time"`
}
var (
// headersCfg defines the configuration options for the command.
headersCfg = headersCmd{
Bulk: false,
}
)
// Execute is the main entry point for the command. It's invoked by the parser.
func (cmd *headersCmd) Execute(args []string) error {
// Setup the global config options and ensure they are valid.
if err := setupGlobalConfig(); err != nil {
return err
}
// Load the block database.
db, err := loadBlockDB()
if err != nil {
return err
}
defer db.Close()
// NOTE: This code will only work for ffldb. Ideally the package using
// the database would keep a metadata index of its own.
blockIdxName := []byte("ffldb-blockidx")
if !headersCfg.Bulk {
err = db.View(func(dbTx database.Tx) error {
totalHdrs := 0
blockIdxBucket := dbTx.Metadata().Bucket(blockIdxName)
blockIdxBucket.ForEach(func(k, v []byte) error {
totalHdrs++
return nil
})
log.Infof("Loading headers for %d blocks...", totalHdrs)
numLoaded := 0
startTime := time.Now()
blockIdxBucket.ForEach(func(k, v []byte) error {
var hash daghash.Hash
copy(hash[:], k)
_, err := dbTx.FetchBlockHeader(&hash)
if err != nil {
return err
}
numLoaded++
return nil
})
log.Infof("Loaded %d headers in %s", numLoaded,
time.Since(startTime))
return nil
})
return err
}
// Bulk load headers.
err = db.View(func(dbTx database.Tx) error {
blockIdxBucket := dbTx.Metadata().Bucket(blockIdxName)
hashes := make([]*daghash.Hash, 0, 500000)
blockIdxBucket.ForEach(func(k, v []byte) error {
var hash daghash.Hash
copy(hash[:], k)
hashes = append(hashes, &hash)
return nil
})
log.Infof("Loading headers for %d blocks...", len(hashes))
startTime := time.Now()
hdrs, err := dbTx.FetchBlockHeaders(hashes)
if err != nil {
return err
}
log.Infof("Loaded %d headers in %s", len(hdrs),
time.Since(startTime))
return nil
})
return err
}
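The two paths in the deleted loadheaders command differ only in batching: the default path issues one FetchBlockHeader call per hash, while --bulk collects every hash first and issues a single FetchBlockHeaders call, amortizing per-call overhead. A condensed sketch of the two shapes, with fetchOne and fetchMany as hypothetical stand-ins for those methods:

package main

import "fmt"

func loadOneAtATime(hashes [][]byte, fetchOne func([]byte) ([]byte, error)) error {
	for _, hash := range hashes {
		if _, err := fetchOne(hash); err != nil { // one lookup per header
			return err
		}
	}
	return nil
}

func loadBulk(hashes [][]byte, fetchMany func([][]byte) ([][]byte, error)) error {
	_, err := fetchMany(hashes) // a single batched lookup for all headers
	return err
}

func main() {
	hashes := [][]byte{{0x01}, {0x02}}
	fetchOne := func(h []byte) ([]byte, error) { return h, nil }
	fetchMany := func(hs [][]byte) ([][]byte, error) { return hs, nil }
	fmt.Println(loadOneAtATime(hashes, fetchOne), loadBulk(hashes, fetchMany))
}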

Some files were not shown because too many files have changed in this diff Show More