Compare commits

154 Commits

Author SHA1 Message Date
Ori Newman
34fb066590 [NOD-518] Implement getmempoolentry (#656) 2020-03-12 16:00:18 +02:00
stasatdaglabs
299826f392 [NOD-827] Get rid of dbtools insecureimport.go and loadheaders.go (#655)
* [NOD-827] Get rid of dbtools insecureimport.go and loadheaders.go

* [NOD-827] Remove commands from realMain().
2020-03-10 16:31:13 +02:00
stasatdaglabs
3d8dd8724d [NOD-816] Remove TxIndex and AddrIndex (#653)
* [NOD-816] Remove TxIndex.

* [NOD-816] Remove AddrIndex.

* [NOD-816] Remove mentions of TxIndex and AddrIndex.

* [NOD-816] Remove mentions of getrawtransaction.

* [NOD-816] Remove mentions of searchrawtransaction.

* [NOD-816] Remove cmd/addsubnetwork.

* [NOD-816] Fix a comment.

* [NOD-816] Fix a comment.

* [NOD-816] Implement BlockDAG.TxConfirmations.

* [NOD-816] Return confirmations in getTxOut.

* [NOD-816] Rename TxConfirmations to UTXOConfirmations.

* [NOD-816] Rename txConfirmations to utxoConfirmations.

* [NOD-816] Fix capitalization in variable names.

* [NOD-816] Add acceptance index to addblock.

* [NOD-816] Get rid of txrawresult-confirmations.

* [NOD-816] Fix config flag.
2020-03-10 16:09:31 +02:00
Svarog
b8a00f7519 [NOD-778] Optimize RestoreUTXO (#652)
* [NOD-778] Add WithDiffInPlace

* [NOD-778] Fix bug in WithDiffInPlace

* [NOD-778] Add comment to WithDiffInPlace

* [NOD-778] Add double dag.restoreUTXO to benchmark, to remove time for hard-disk loading

* [NOD-778] Also test WithDiffInPlace in TestUTXODiffRules

* [NOD-778] Add tests for all cases possible in TestUTXODiffRules

* [NOD-778] Fix test-case 'first in toAdd in this, second in toRemove in this and toAdd in other'

* [NOD-778] Fixed in WithDiffInPlace

* [NOD-778] Update error messages when diffFrom(withDiffResult) fails in TestUTXODiffRules

* [NOD-778] diffFrom: disallow utxos both in d.toAdd, other.toAdd, and only one of d.toRemove and other.toRemove

* [NOD-778] Fix expected value in 'first in toRemove in this, second in toRemove in other'

* [NOD-778] diffFrom: Disallow situations where utxo both in d.toRemove and other.toRemove with different blue scores and no corresponding utxo in d.toAdd

* [NOD-778] WithDiff: Fix faulty logic that allows updates to blue scores

* [NOD-778] Fix WithDiffInPlace to pass all tests

* [NOD-778] Deleted temporary prints

* [NOD-778] Sorted TestUTXODiffRules tests according to spreadsheet

* [NOD-778] Delete deeputxo_test.go

* [NOD-778] Updated comments

* [NOD-778] Re-order

* [NOD-778] Re-order test-cases to be according to spreadsheet

* [NOD-778] Simplified case when both d.toRemove and other.toRemove have the same outpoint in diffFrom

* [NOD-778] Change a few error messages that say 'transaction' instead of 'outpoint'

* [NOD-778] Rename: utxoToAdd/Remove -> entryToAdd/Remove

* [NOD-788] Remove redundant else

* [NOD-778] Rename: existingUTXO -> existingEntry + remove redundant else

* [NOD-778] Correct test name
2020-03-10 15:32:19 +02:00
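The NOD-778 commits above revolve around composing two UTXO diffs (toAdd/toRemove sets) in place instead of allocating a third diff. Below is a minimal sketch of that idea with invented types; the real kaspad diffs also carry blue scores per entry and return errors on the inconsistent cases the commit messages enumerate.

```go
// A toy utxoDiff: entries added by one diff and removed by the next cancel out.
package main

import "fmt"

type outpoint struct {
	txID  string
	index uint32
}

// utxoDiff tracks entries to add to and remove from a UTXO set.
type utxoDiff struct {
	toAdd    map[outpoint]struct{}
	toRemove map[outpoint]struct{}
}

// withDiffInPlace applies other on top of d without allocating a third diff.
func (d *utxoDiff) withDiffInPlace(other *utxoDiff) {
	for op := range other.toRemove {
		if _, ok := d.toAdd[op]; ok {
			// d added it and other removes it: the two cancel out.
			delete(d.toAdd, op)
		} else {
			d.toRemove[op] = struct{}{}
		}
	}
	for op := range other.toAdd {
		if _, ok := d.toRemove[op]; ok {
			delete(d.toRemove, op)
		} else {
			d.toAdd[op] = struct{}{}
		}
	}
}

func main() {
	d := &utxoDiff{
		toAdd:    map[outpoint]struct{}{{"a", 0}: {}},
		toRemove: map[outpoint]struct{}{},
	}
	other := &utxoDiff{
		toAdd:    map[outpoint]struct{}{{"b", 1}: {}},
		toRemove: map[outpoint]struct{}{{"a", 0}: {}},
	}
	d.withDiffInPlace(other)
	fmt.Println(len(d.toAdd), len(d.toRemove)) // 1 0: "a" cancelled, "b" added
}
```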
stasatdaglabs
4dfc8cf5b0 [NOD-816] Remove addsubnetwork. (#654) 2020-03-10 11:09:33 +02:00
Ori Newman
5a99e4d2f3 [NOD-806] Exit early after panic (#650)
* [NOD-806] After panic, gracefully stop logs, and then exit immediately

* [NOD-806] Convert non-kaspad applications to use the new spawn

* [NOD-806] Fix disabled log at rpcclient

* [NOD-806] Refactor HandlePanic

* [NOD-806] Cancel Logger interface

* [NOD-806] Remove redundant spawn checks from waitgroup_test.go

* [NOD-806] Use caller subsystem when logging panics

* [NOD-806] Fix go vet errors
2020-03-08 11:24:37 +02:00
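A sketch of the pattern NOD-806 describes: every goroutine is launched through a spawn wrapper so that a panic is logged and the process exits immediately instead of limping on. The names spawn and handlePanic mirror the commit messages, but the bodies here are illustrative, not kaspad's actual implementation.

```go
package main

import (
	"fmt"
	"os"
	"runtime/debug"
)

// spawn runs f in a goroutine and turns any panic into a logged, immediate
// exit, so a failing goroutine cannot leave the process half-broken.
func spawn(f func()) {
	go func() {
		defer handlePanic()
		f()
	}()
}

func handlePanic() {
	if err := recover(); err != nil {
		fmt.Fprintf(os.Stderr, "panic: %v\n%s", err, debug.Stack())
		// A real node would flush its log backend here before exiting.
		os.Exit(1)
	}
}

func main() {
	spawn(func() { panic("boom") })
	select {} // block forever; the panicking goroutine exits the process
}
```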
Svarog
606cd668ff [NOD-810] Fix error text in lookupParentNodes (#651) 2020-03-05 15:49:36 +02:00
Ori Newman
dd537f5143 [NOD-808] Use syndtr/goleveldb instead of btcsuite/goleveldb. (#649) 2020-03-05 12:26:48 +02:00
stasatdaglabs
a1c631be62 [NOD-798] Disconnect from a peer if a block received from it gets rejected (#648)
* [NOD-798] Disconnect from a peer if its block gets rejected.

* [NOD-798] Make a comment less ambiguous.
2020-03-03 09:47:22 +02:00
Ori Newman
707a728656 [NOD-552] Add NormalizeRPCServerAddress and use it where needed (#643)
* [NOD-552] Add NormalizeRPCServerAddress and use it where needed

* [NOD-552] Make NormalizeAddress return an error for an invalid address

* [NOD-552] Use longer lines for a comment
2020-03-01 16:37:26 +02:00
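What "normalize an RPC server address" plausibly means in practice: attach the network's default port when the user omits one, and return an error for unparseable input, per the second commit bullet. A hedged sketch using only the stdlib net.SplitHostPort/net.JoinHostPort; the function name is a stand-in, not kaspad's exported API.

```go
package main

import (
	"fmt"
	"net"
)

// normalizeRPCServerAddress appends defaultPort when addr has no port and
// errors out when the result still cannot be parsed as host:port.
func normalizeRPCServerAddress(addr, defaultPort string) (string, error) {
	_, _, err := net.SplitHostPort(addr)
	if err != nil {
		// No port (or malformed input): retry with the default port attached.
		withPort := net.JoinHostPort(addr, defaultPort)
		if _, _, err := net.SplitHostPort(withPort); err != nil {
			return "", fmt.Errorf("invalid address %q: %v", addr, err)
		}
		return withPort, nil
	}
	return addr, nil
}

func main() {
	addr, err := normalizeRPCServerAddress("localhost", "16110")
	fmt.Println(addr, err) // localhost:16110 <nil>
}
```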
stasatdaglabs
80b5631a48 [NOD-726] Only print "no sync peer" message when not current (#646)
* [NOD-726] Only print "no sync peer" message when not current.

* [NOD-726] Shorten duration in which "no sync peer" messages would not print.
2020-02-27 17:38:39 +02:00
Ori Newman
2373965551 [NOD-576] Rename NextHashes to ChildHashes in GetBlock/GetBlockHeaders rpc call (#645)
* [NOD-576] Rename NextHashes to ChildHashes in GetBlock/GetBlockHeaders rpc call

* [NOD-576] Fix typo
2020-02-27 17:34:38 +02:00
Ori Newman
65cbb6655b [NOD-661] Change BCDB subsystem tag (for logs) to KSDB (#644) 2020-02-27 17:30:08 +02:00
Ori Newman
cdd96d0670 [NOD-664] Remove version from everything inside kaspad/cmd - use kaspad version instead (#642)
* [NOD-664] Remove version from everything inside kaspad/cmd - use kaspad version instead

* [NOD-664] Fix broken import
2020-02-27 13:26:22 +02:00
Dan Aharoni
ad04bbde83 [NOD-782] Make sure errors.As gets parameter that implements error interface (#641)
* [NOD-782] Make sure errors.As gets parameter that implements error interface.

* [NOD-782] Pass pointer to errors.As
2020-02-27 12:27:38 +02:00
Ori Newman
5374d95416 [NOD-656] Log hashrate in kaspaminer (#632)
* [NOD-656] Log hashrate in kaspaminer

* [NOD-656] Measure hash rate in kilohashes

* [NOD-656] Show hash rate once in 10 seconds

* [NOD-656] Put hash rate logic in a separate function

* [NOD-656] Create logHashRateInterval constant
2020-02-24 11:59:02 +02:00
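A sketch of the hash-rate logging the NOD-656 commits describe: count attempted hashes on the mining loop and report kilohashes per second once every 10 seconds. logHashRateInterval is named after the commit message; everything else is illustrative rather than kaspaminer's actual code.

```go
package main

import (
	"log"
	"sync/atomic"
	"time"
)

const logHashRateInterval = 10 * time.Second

var hashesTried uint64

// logHashRate reports the rate measured over each interval, in kilohashes.
func logHashRate() {
	ticker := time.NewTicker(logHashRateInterval)
	defer ticker.Stop()
	last := uint64(0)
	for range ticker.C {
		total := atomic.LoadUint64(&hashesTried)
		kiloHashesPerSecond := float64(total-last) / logHashRateInterval.Seconds() / 1000
		log.Printf("Current hash rate is %.2f Kilohashes/s", kiloHashesPerSecond)
		last = total
	}
}

func main() {
	go logHashRate()
	for { // stand-in for the real mining loop
		atomic.AddUint64(&hashesTried, 1)
	}
}
```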
Ori Newman
de9aa39cc5 [NOD-721] Add defers (#638)
* [NOD-721] Defer unlocks

* [NOD-721] Add functions with locks to rpcmodel

* [NOD-721] Defer unlocks

* [NOD-721] Add filterDataWithLock function

* [NOD-721] Defer unlocks

* [NOD-721] Defer .Close()

* [NOD-721] Fix access to wsc.filterData without a lock

* [NOD-721] De-anonymize some anonymous functions

* [NOD-721] Remove redundant assignments

* [NOD-721] Remove redundant assignments

* [NOD-721] Remove redundant assignments

* [NOD-721] Get rid of submitOld, and break handleGetBlockTemplateLongPoll to smaller functions

* [NOD-721] Rename existsUnspentOutpoint->existsUnspentOutpointNoLock, existsUnspentOutpointWithLock->existsUnspentOutpoint

* [NOD-721] Rename filterDataWithLock->FilterData

* [NOD-721] Fixed comments
2020-02-24 09:19:44 +02:00
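The "defer unlocks" theme above is the standard Go idiom: pair Lock with an immediately deferred Unlock so every return path (including panics) releases the mutex, and expose locked accessors such as the renamed FilterData. A small sketch with invented types:

```go
package main

import (
	"fmt"
	"sync"
)

type wsClient struct {
	mu         sync.Mutex
	filterData map[string]struct{}
}

// FilterData copies the filter data under the lock, so callers never touch
// the map without holding mu (the bug the NOD-721 commits fixed).
func (c *wsClient) FilterData() map[string]struct{} {
	c.mu.Lock()
	defer c.mu.Unlock() // runs on every return path

	out := make(map[string]struct{}, len(c.filterData))
	for k := range c.filterData {
		out[k] = struct{}{}
	}
	return out
}

func main() {
	c := &wsClient{filterData: map[string]struct{}{"addr": {}}}
	fmt.Println(len(c.FilterData()))
}
```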
Ori Newman
98987f4a8f [NOD-603] Update validateParents to use reachability (#640)
* [NOD-603] Update validateParents to use reachability

* [NOD-603] Break a long line

* [NOD-721] Remove redundant check if block parent is a tip
2020-02-24 08:59:12 +02:00
Ori Newman
9745f31b69 [NOD-693] Update link to license (#639) 2020-02-20 17:12:53 +02:00
Ori Newman
ee08531a52 [NOD-610] Rename newSet->newBlockSet and setFromSlice->blockSetFromSlice (#635) 2020-02-20 16:19:28 +02:00
stasatdaglabs
61baf7b260 [NOD-769] Add a log for when a reachability reindex occurs (#637)
*  [NOD-719] Added defers to unlocks  (#618)

* [NOD-719] Added defers to unlocks

* [NOD-719] Added another defer to another Unlock

* [NOD-719] Added yet another defer to yet another Unlock

* [NOD-747] Change FinalityInterval to be 24 hours, isCurrent to be true if the DAG's time is less than 12 hours than the present, and change MaxInvPerMsg to be 1 << 17 (#625)

* [NOD-769] Add a log for when a reachability reindex occurs.

Co-authored-by: Svarog <feanorr@gmail.com>
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2020-02-19 13:39:45 +02:00
Ori Newman
650e4f735e [NOD-757] Readd addrmanager tests (#628) 2020-02-18 18:12:19 +02:00
Dan Aharoni
550b12b041 [NOD-772] Fix a bug where we ignore the return value of forAllOutboundPeers. (#636) 2020-02-18 18:02:15 +02:00
Ori Newman
a4bb070722 [NOD-754] Fix staticcheck errors (#627)
* [NOD-754] Fix staticcheck errors

* [NOD-754] Remove some unused exported functions

* [NOD-754] Fix staticcheck errors

* [NOD-754] Don't panic if out/in close fails

* [NOD-754] Wrap outside errors with custom message
2020-02-18 16:56:38 +02:00
Ori Newman
30fe0c279b [NOD-738] Move rpcmodel helper functions to pointers package (#629)
* [NOD-738] Move rpcmodel helper functions to copytopointer package

* [NOD-738] Rename copytopointer->pointers
2020-02-18 14:06:34 +02:00
stasatdaglabs
e405dd5981 [NOD-694] Fix requesting blocks that will surely be orphaned during netsync. (#630) 2020-02-18 12:12:34 +02:00
stasatdaglabs
243b4b8021 [NOD-765] Fix database corruption after restart in reachabilitystore and utxodiffstore. (#634) 2020-02-18 12:04:50 +02:00
Ori Newman
dd4c93e1ef [NOD-759] Merge v0.1.1-dev into v0.1.2-dev (#633)
*  [NOD-719] Added defers to unlocks  (#618)

* [NOD-719] Added defers to unlocks

* [NOD-719] Added another defer to another Unlock

* [NOD-719] Added yet another defer to yet another Unlock

* [NOD-747] Change FinalityInterval to be 24 hours, isCurrent to be true if the DAG's time is less than 12 hours than the present, and change MaxInvPerMsg to be 1 << 17 (#625)
2020-02-18 11:02:25 +02:00
Ori Newman
a07335d74d [NOD-737] Remove btc prefix from util file names (#631) 2020-02-17 13:11:24 +02:00
Dan Aharoni
7567cd4cb9 [NOD-744] Wrap go routines with spawn (#626)
* [NOD-744] Wrap go routines with spawn

* [NOD-747] Wrap some more go routines with spawn

* [NOD-744] Some more missing go routines

* [NOD-744] Break lines to make code more readable

* [NOD-744] Declare a local scope variable so the func would use it.

* [NOD-744] Fix type and update comment.

* [NOD-744] Declare local var so go routine would use it

* [NOD-744] Rename variable, use normal assignment;

* [NOD-744] Rename variable.
2020-02-13 13:10:07 +02:00
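"Declare a local scope variable so the func would use it" refers to the classic Go loop-variable capture pitfall (relevant before Go 1.22): shadow the loop variable so each wrapped goroutine closes over its own copy. A sketch:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	items := []string{"a", "b", "c"}
	var wg sync.WaitGroup
	for _, item := range items {
		item := item // shadow so each goroutine captures its own copy
		wg.Add(1)
		go func() { // in kaspad this launch would go through spawn()
			defer wg.Done()
			fmt.Println(item)
		}()
	}
	wg.Wait()
}
```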
stasatdaglabs
51ff9e2562 [NOD-571] Cover ghostdag in tests where possible (#613)
* [NOD-571] Cover reachabilityInterval split methods.

* [NOD-571] Cover reindexInterval.

* [NOD-571] Cover reachability String() methods.

* [NOD-571] Cover blueAnticoneSize.

* [NOD-571] Remove unnecessary error from setTreeNode.

* [NOD-571] Add TestGHOSTDAGErrors.

* [NOD-571] Use PrepareBlockForTest in TestBlueAnticoneSizeErrors.

* [NOD-571] Use PrepareBlockForTest in TestGHOSTDAGErrors.

* [NOD-571] Add substring checks to TestSplitFractionErrors.

* [NOD-571] Add substring checks to TestSplitExactErrors and TestSplitWithExponentialBiasErrors.

* [NOD-571] Add comments to TestReindexIntervalErrors.

* [NOD-571] Add additional info in some error messages.

* [NOD-571] Fix error messages.
2020-02-09 11:27:10 +02:00
stasatdaglabs
5b8ab63890 [NOD-717] Fix nodes getting stuck in an infinite loop in addrManager.getAddress (#624)
* [NOD-717] Fix nodes getting stuck in an infinite loop in addrManager.getAddress.

* [NOD-717] Rename ResetFailedAttempts -> NotifyConnectionRequestComplete.
2020-02-06 18:17:10 +02:00
Dan Aharoni
3dd7dc4496 [NOD-727] Do not allow delayed blocks from RPC. (#623)
* [NOD-727] Do not allow delayed blocks from RPC.

* [NOD-727] Refactor sentFromRPC -> DisallowDelay

* [NOD-727] Clarify comment; Clarify error message.

* [NOD-727] Change error message.
2020-02-05 11:14:26 +02:00
Ori Newman
d90a08ecfa [NOD-722] Fix processBlockMsg case in blockHandler to send only one response to msg.reply, and rename blockHandler->messageHandler (#622) 2020-02-04 18:10:15 +02:00
Ori Newman
45dc1a3e7b [NOD-545] Remove headers first related logic (#621)
* [NOD-545] Remove headers first related logic

* [NOD-545] Fix tests

* [NOD-545] Change getTopHeadersMaxHeaders to be equal to getHeadersMaxHeaders
2020-02-04 14:54:42 +02:00
Ori Newman
4ffb5daa37 [NOD-622] Fix populateTemplateFromCandidates to sort txsForBlockTemplate.txMasses and txsForBlockTemplate.txFees (#617)
* [NOD-622] Fix populateTemplateFromCandidates to sort txsForBlockTemplate.txMasses and txsForBlockTemplate.txFees

* [NOD-622] Sort transactions in PrepareBlockForTest

* [NOD-622] Remove duplicate append of selected transactions
2020-02-03 13:42:40 +02:00
Ori Newman
b9138b720d [NOD-597] Make BlockIndex clear its dirty entries only after it successfully written them to disk (#620) 2020-02-03 13:39:25 +02:00
Ori Newman
d8954f1339 [NOD-615] Make bluesAnticoneSizes a map with *blockNode as a key (#619) 2020-02-03 12:40:39 +02:00
Ori Newman
eb953286ec [NOD-641] Upgrade to github.com/pkg/errors v0.9.1 and use errors.As where needed (#614)
* [NOD-641] Upgrade to github.com/pkg/errors v0.9.1 and use errors.As where needed

* [NOD-641] Fix find and replace error

* [NOD-641] Use errors.As for error type checking

* [NOD-641] Fix errors.As for pointer types

* [NOD-641] Use errors.As where needed

* [NOD-641] Rename rErr->ruleErr

* [NOD-641] Rename derr->dbErr

* [NOD-641] e->flagsErr where necessary

* [NOD-641] change jerr to more appropriate name

* [NOD-641] Rename cerr->bdRuleErr

* [NOD-641] Rename serr->scriptErr

* [NOD-641] Use errors.Is instead of testutil.AreErrorsEqual in TestNewHashFromStr

* [NOD-641] Rename bdRuleErr->dagRuleErr

* [NOD-641] Rename mErr->msgErr

* [NOD-641] Rename dErr->deserializeErr
2020-02-03 12:38:33 +02:00
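The errors.As migration above follows the standard pattern: pass a pointer to a variable of the concrete error type, and the call reports whether any error in the chain matches. A self-contained sketch with an invented ruleError type; it uses the stdlib errors package, which github.com/pkg/errors v0.9.x is compatible with for this use.

```go
package main

import (
	"errors"
	"fmt"
)

type ruleError struct{ msg string }

func (e ruleError) Error() string { return e.msg }

func validate() error {
	// %w wraps the error so errors.As can find it through the chain.
	return fmt.Errorf("processing block: %w", ruleError{msg: "bad parent"})
}

func main() {
	err := validate()
	var dagRuleErr ruleError
	if errors.As(err, &dagRuleErr) { // pass a pointer, per NOD-782
		fmt.Println("rule violation:", dagRuleErr.msg)
	}
}
```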
Ori Newman
41c8178ad3 [NOD-648] Add TestProcessDelayedBlocks (#612)
* [NOD-648] Add TestProcessDelayedBlocks

* [NOD-648] Add one second to secondsUntilDelayedBlockIsValid to make sure the delayedBlock timestamp will be valid, and add comments

* [NOD-648] Remove redundant import

* [NOD-648] Use fakeTimeSource instead of time.Sleep

* [NOD-648] Rename dag.HaveBlock->dag.IsKnownBlock,  dag.BlockExists->dag.IsInDAG

* [NOD-648] Add comment

* [NOD-641] Rename HaveBlock->IsKnownBlock, BlockExists->IsInDAG
2020-02-03 11:30:03 +02:00
Ori Newman
aa74b51e6f [NOD-687] Remove -gcflags='-l' from all tests (#616) 2020-02-02 15:26:26 +02:00
Mike Zak
f7800eb5c4 Merge remote-tracking branch 'origin/v0.1.1-dev' into v0.1.2-dev 2020-02-02 15:17:25 +02:00
stasatdaglabs
193add502f [NOD-716] Fix a crash in GetTopHeaders. (#615) 2020-02-02 13:51:53 +02:00
Ori Newman
44c55900f8 [NOD-715] Replace testDbRoot with os.TempDir() (#611) 2020-01-30 12:54:15 +02:00
Ori Newman
4c0ea78026 [NOD-586] Remove subTreeSize from reachabilityTreeNode (#610)
* [NOD-586] Remove subTreeSize from reachabilityTreeNode

* [NOD-586] Convert else { if { ... } } to else if { ... }
2020-01-30 10:39:53 +02:00
stasatdaglabs
03a93fe51e [NOD-647] Create a default config file even if the sample default config file is missing (#609)
* [NOD-647] Create a default config file even if the sample default config file is missing.

* [NOD-647] Unfancify WriteString().
2020-01-29 17:40:59 +02:00
Mike Zak
eca0514465 Merge remote-tracking branch 'origin/v0.1.1-dev' into v0.1.2-dev 2020-01-28 14:39:22 +02:00
stasatdaglabs
aadbebb720 [NOD-691] Remove addTrying from AddrManager. (#608) 2020-01-28 13:57:02 +02:00
Mike Zak
5daab45947 Merge remote-tracking branch 'origin/v0.1.1-dev' into v0.1.2-dev 2020-01-28 12:25:52 +02:00
stasatdaglabs
607b838ded [NOD-702] Fix netsync slowing down significantly due to excessive allocs in serializeUTXO (#605)
* [NOD-702] Fix netsync slowing down significantly due to excessive allocs in serializeUTXO.

* [NOD-702] Fix bad make statement.

* [NOD-702] Move writeBuffer to flushToDB.
2020-01-28 12:24:09 +02:00
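The NOD-702 fix is an allocation pattern worth spelling out: serialize every UTXO entry through one buffer owned by the flush loop (the commit's writeBuffer), resetting it per entry, rather than allocating a fresh buffer per entry. A sketch with a stand-in serializer, not kaspad's actual encoding:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// serializeUTXOEntry writes a toy fixed-width encoding into w.
func serializeUTXOEntry(w *bytes.Buffer, amount, blueScore uint64) {
	var scratch [8]byte
	binary.LittleEndian.PutUint64(scratch[:], amount)
	w.Write(scratch[:])
	binary.LittleEndian.PutUint64(scratch[:], blueScore)
	w.Write(scratch[:])
}

func main() {
	writeBuffer := &bytes.Buffer{} // reused across the whole flush
	for i := uint64(0); i < 3; i++ {
		writeBuffer.Reset() // reuse backing storage instead of reallocating
		serializeUTXOEntry(writeBuffer, 100*i, i)
		fmt.Println(len(writeBuffer.Bytes()))
	}
}
```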
Mike Zak
25bdaeed31 Merge remote-tracking branch 'origin/v0.1.1-dev' into v0.1.2-dev 2020-01-28 11:35:21 +02:00
Dan Aharoni
8b2d3f07ce [NOD-636] Prevent db corruption on crash. (#607)
* [NOD-636] Scope err so defer anonymous defer function will get it.

* [NOD-636] Add comment to explain why this line is needed.

* [NOD-636] Edit comment.
2020-01-28 11:04:41 +02:00
Mike Zak
a3dc2f7da7 Update version to v0.1.2 2020-01-28 10:53:10 +02:00
Svarog
bf36f9ceb6 [NOD-704] Call dag.IsInSelectedParentChain in every iteration of dag.SelectedParentChain (#606) 2020-01-27 17:09:41 +02:00
stasatdaglabs
11de12304e [NOD-700] Convert blockSet to map[*blockNode]struct{} (#604)
* [NOD-700] Convert blockSet to map[*blockNode]struct{}.

* [NOD-700] Rename bluestNode to bluestBlock in bluest().

* [NOD-700] Make IsInSelectedParentChain not use the now-slower containsHash.

* [NOD-700] Rename block to node in blockset.go.

* [NOD-700] Remove containsHash and hashesEqual.

* [NOD-700] Add a comment to IsInSelectedParentChain about how it'll fail if the given blockHash is not within the block index.
2020-01-27 11:48:58 +02:00
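What converting blockSet to map[*blockNode]struct{} buys: membership becomes a single pointer-keyed map lookup, with no hashing or comparison of block hashes (hence dropping containsHash). In miniature, with simplified stand-in types:

```go
package main

import "fmt"

type blockNode struct{ blueScore uint64 }

// blockSet keys on node pointers; the empty struct value costs nothing.
type blockSet map[*blockNode]struct{}

func (bs blockSet) add(node *blockNode)           { bs[node] = struct{}{} }
func (bs blockSet) contains(node *blockNode) bool { _, ok := bs[node]; return ok }

func main() {
	a, b := &blockNode{1}, &blockNode{2}
	tips := blockSet{}
	tips.add(a)
	fmt.Println(tips.contains(a), tips.contains(b)) // true false
}
```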
Ori Newman
a10320ad7b [NOD-696] Handle panics on time.AfterFunc (#600)
* [NOD-696] Handle panics on time.AfterFunc

* [NOD-696] Fix comment

* [NOD-696] Rename afterFunc->spawnAfter
2020-01-27 11:12:23 +02:00
Ori Newman
fd2bbf3557 [NOD-698] Change confirmations to be selectedTip.blueScore-acceptingBlock.blueScore+1 (#602) 2020-01-27 11:10:27 +02:00
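The confirmation rule in the NOD-698 title, written out; it assumes the accepting block lies on the selected parent chain, and the names are illustrative:

```go
package main

import "fmt"

// confirmations = selectedTip.blueScore - acceptingBlock.blueScore + 1
func confirmations(selectedTipBlueScore, acceptingBlockBlueScore uint64) uint64 {
	return selectedTipBlueScore - acceptingBlockBlueScore + 1
}

func main() {
	// A transaction accepted by a block with blue score 95, tip at 100:
	fmt.Println(confirmations(100, 95)) // 6
}
```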
stasatdaglabs
7f9cf17274 [NOD-697] In blockLocator, return an error if lowHash blueScore >= highHash blueScore. (#601) 2020-01-26 15:44:58 +02:00
Ori Newman
ba0e239557 [NOD-692] Fix thresholdState to use blueBlockWindow instead of selected chain height (#599) 2020-01-23 17:17:56 +02:00
Dan Aharoni
ed606bfda3 [NOD-575] Change devent address prefix to kaspadev. (#598) 2020-01-23 15:36:40 +02:00
Dan Aharoni
c0463a8a68 [NOD-675] Replace start-hash/stop-hash with meaningful names (#597)
* [NOD-675] Rename startHash/stopHash to lowHash/highHash

* [NOD-675] Fix typo

* [NOD-675] Undo go.mod go.sum conflicts

* [NOD-675] revert back to startHash for getChainFromBlock.

* [NOD-675] Revert back to startHash in getChainFromBlock leftovers.

* [NOD-675] Fix test name.
2020-01-22 18:14:42 +02:00
Dan Aharoni
52e0a0967d [NOD-629] change GHOSTDAG k to uint8 (#594)
* [NOD-629] Change GHOSTDAG K to a single byte using type

* [NOD-629] Rename variable

* [NOD-629] Rename K to KSize

* [NOD-629] Remove redundant casting

* [NOD-629] Add test for KSize

* [NOD-629] Separate block serialization and db store

* [NOD-629] Make sure K is serialized as uint8

* [NOD-629] Rename KSize to KType

* [NOD-629] Comment for test

* [NOD-629] Change fail message

* [NOD-629] Remove newlines

* [NOD-629] Fix test

* [NOD-629] Do not use maxuint8, but !0 instead

* [NOD-629] Fix test

* [NOD-629] Merge conflict

* [NOD-629] Fix test; Update comment
2020-01-22 16:58:53 +02:00
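A sketch of pinning GHOSTDAG K to one byte, per the commits above: a dedicated KType serializes as exactly one byte, and its maximum is the bitwise complement ^KType(0) (presumably what "!0" in the commit message refers to) rather than a magic 255. The serialization helper is a stand-in.

```go
package main

import "fmt"

// KType is the one-byte type for the GHOSTDAG K parameter.
type KType = uint8

// serializeK always emits exactly one byte, regardless of platform.
func serializeK(k KType) []byte { return []byte{k} }

func main() {
	var maxK KType = ^KType(0) // 255, with no magic constant
	fmt.Println(maxK, serializeK(maxK))
}
```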
Ori Newman
29bcc271b5 [NOD-652] Add selected tip and get selected tip messages (#595)
* [NOD-652] Add selectedTip and getSelectedTip messages

* [NOD-652] Remove peerSyncState.isSelectedTipKnown

* [NOD-652] Do nothing on OnSelectedTip if the peer selected tip hasn't changed

* [NOD-652] Handle selected tip message with block handler

* [NOD-652] Add comments

* [NOD-652] go mod tidy

* [NOD-652] Fix TestVersion

* [NOD-652] Use dag.AdjustedTime instead of dag.timeSource.AdjustedTime

* [NOD-652] Create shouldQueryPeerSelectedTips and queueMsgGetSelectedTip functions

* [NOD-652] Change selectedTip to selectedTipHash where needed

* [NOD-652] add minDAGTimeDelay constant

* [NOD-652] add comments

* [NOD-652] Fix names and comments

* [NOD-652] Put msg.reply push in the right place

* [NOD-652] Fix comments and names
2020-01-22 16:34:21 +02:00
stasatdaglabs
94ec159147 [NOD-676] Remove blockLocator defaults. (#596) 2020-01-22 14:05:01 +02:00
stasatdaglabs
9d434de4a5 [NOD-640] Revamp blueBlocksBetween to return up to maxEntries from lowNode's antiPast to highNode's antiFuture (#593)
* [NOD-640] Revamp blueBlocksBetween to return up to maxEntries from highNode's antiFuture.

* [NOD-640] Fix bad traversal.

* [NOD-640] Use more accurate len.

* [NOD-640] Use more appropriate len in another place.

* [NOD-640] Remove the whole business with highNode's anticone.

* [NOD-640] Rename highNodeAntiFuture to candidateNodes.

* [NOD-640] Explain the highNode.blueScore-lowNode.blueScore+1 approximation.

* [NOD-640] UpHeap -> upHeap.

* [NOD-640] Fix off-by-one error.

* [NOD-640] Rename blueBlocksBetween to antiPastBetween.

* [NOD-640] upHeap -> up-heap.

* [NOD-640] Use a classic for to populate nodes.

* [NOD-640] Reworded a comment.

* [NOD-640] Clarify a comment.

* [NOD-640] Fix nodes declaration.
2020-01-22 12:13:55 +02:00
stasatdaglabs
49418f4222 [NOD-669] Rename start/endHash -> low/highHash (#591)
* [NOD-669] Remove the "get" from getBlueBlocksBetween.

* [NOD-669] Remove the "Get" from GetBlueBlocksHeadersBetween.

* [NOD-669] In blueBlocksBetween, rename startHash to lowHash and stopHash to highHash.

* [NOD-669] Rename startHash to lowHash and stopHash to highHash in blockLocator logic.

* [NOD-669] Remove zeroHash logic in blockLocator.

* [NOD-669] Finish renaming startHash and stopHash in blockdag.

* [NOD-669] Rename startHash and stopHash in blockdag where I previously missed it.

* [NOD-669] Rename startHash and stopHash in blockdag where I previously missed it some more.

* [NOD-669] Rename startHash and stopHash in blockdag where I previously missed it some more some more.

* [NOD-669] Fix bad grammar in method names.

* [NOD-669] Rename lowHash to blockHash in SelectedParentChain.

* [NOD-669] Fix a comment.
2020-01-20 12:47:16 +02:00
stasatdaglabs
38b4749f20 [NOD-669] Fix startSync sending a blockLocatorMsg with a zeroHash instead of the peer's selectedTip (#592)
* [NOD-669] Fix startSync sending a blockLocatorMsg with a zeroHash instead of the peer's selectedTip.

* [NOD-669] Rename bestPeer to syncPeer.

* [NOD-669] Fix comments.
2020-01-20 12:29:17 +02:00
stasatdaglabs
045984e6b9 [NOD-665] Initialize blockNode blueScore to be MaxUint64 by default (#590)
* [NOD-665] Initialize blockNode blueScore to be MaxUint64 by default.

* [NOD-665] Remove redundant err declaration.
2020-01-19 15:21:43 +02:00
Ori Newman
38883d1a98 [NOD-650] Remove CPU miner from the node and add kaspaminer in ./cmd (#587)
* [NOD-650] Add kaspaminer

* [NOD-650] Remove CPU miner

* [NOD-650] Fix comments and error messages

* [NOD-650] Remove redundant check for closing foundBlock

* [NOD-650] Submit block synchronically

* [NOD-650] Use ParseUint instead of ParseInt

* [NOD-650] Rearrange functions order in mineloop.go

* [NOD-650] Add block delay CLI argument to kaspaminer

* [NOD-650] Remove redundant spawn

* [NOD-650] Add Dockerfile for kaspaminer

* [NOD-650] Remove redundant comments

* [NOD-650] Remove tests from kaspaminer Dockerfile

* [NOD-650] Remove redundant argument on OnFilteredBlockAdded
2020-01-19 15:18:26 +02:00
stasatdaglabs
b5f365d282 [NOD-668] Rename height to blueScore in OnFilteredBlockAdded. (#589) 2020-01-16 14:10:26 +02:00
stasatdaglabs
a7d3a40465 [NOD-646] Fix initDAGState treating invalid blocks as genesis blocks. (#588) 2020-01-16 13:21:20 +02:00
stasatdaglabs
359b16fca9 [NOD-616] Remove blockNode.chainHeight (#586)
* [NOD-616] Remove unused methods from BlockDAG.

* [NOD-616] Remove Height from GetRawMempoolVerboseResult and TxDesc.

* [NOD-616] Replaced BlockDAG.ChainHeight with SelectedTipBlueScore.

* [NOD-616] Remove the unused BlockChainHeightByHash.

* [NOD-616] Remove the unused blockChainHeight from checkBlockHeaderContext.

* [NOD-616] Remove chainHeight from util.Block.

* [NOD-616] Remove TestChainHeight.

* [NOD-616] Update unknown rule activation warning to use blueScore.

* [NOD-616] Update thresholdState to use blueScore instead of chainHeight.

* [NOD-616] Update blockLocator to use blueScore instead of chainHeight.

* [NOD-616] Remove blockNode.chainHeight.

* [NOD-616] Fix comments and variable names.

* [NOD-616] Replace a weird for loop with a while loop.

* [NOD-616] Fix a comment.

* [NOD-616] Remove pre-allocation in blockLocator.

* [NOD-616] Coalesce checks that startHash and stopHash are not the same into the same condition.

* [NOD-616] Fix a comment.

* [NOD-616] Remove weird blueScore logic around childHashStrings.

* [NOD-616] Fix hash pointer comparison.

* [NOD-616] Fix a comment.

* [NOD-616] Add ban score to peers misusing GetBlockLocator.

* [NOD-616] Replace adding ban score with disconnecting.

* [NOD-616] Add blueScore to FilteredBlockAddedNtfn.
2020-01-16 13:09:16 +02:00
Mike Zak
8b8e73feb5 Merge branch 'v0.1.1-dev' of github.com:kaspanet/kaspad into v0.1.1-dev 2020-01-13 16:59:40 +02:00
Svarog
6044b6ac1a [NOD-643] Remove monkey patch (#585)
* [NOD-643] Removed any mentions of monkey.Patch

* [NOD-643] Removed monkey from go.mod
2020-01-13 16:59:16 +02:00
stasatdaglabs
a177ea4f15 [NOD-555] Remove build scripts. (#581) 2020-01-13 13:37:21 +02:00
Mike Zak
3a15aa4bae Update version to v0.1.1 2020-01-13 12:24:36 +02:00
Ori Newman
427185b6a8 [NOD-635] Change testnet maximum difficulty (#582) 2020-01-09 18:10:23 +02:00
Svarog
b282734a3f [NOD-626] Delete CHANGES file (#580) 2020-01-08 18:43:50 +02:00
Ori Newman
6d765f58ba [NOD-570] Separate genesis variables for different networks (#578)
* [NOD-570] Separate genesis variables for different networks

* [NOD-570] Make Testnet genesis

* [NOD-570] Make simnet and regtest genesis

* [NOD-570] Remake devnet genesis

* [NOD-570] Rename regNet -> regTest testnet->testNet

* [NOD-570] Change network names to one word instead of camel case

* [NOD-570] Change network names to one word instead of camel case

* [NOD-570] Fix test names

* [NOD-570] Fix TestGHOSTDAG

Co-authored-by: Dan Aharoni <dereeno@protonmail.com>
2020-01-08 18:42:47 +02:00
Svarog
20819ca4cd [NOD-505] Try reading the response in case of upnp error (#576) 2020-01-08 17:51:31 +02:00
Ori Newman
2174a0a7f2 [NOD-497] Implement GHOSTDAG (#575)
* [NOD-540] Implement reachability (#545)

* [NOD-540] Begin implementing reachability.

* [NOD-540] Finish implementing reachability.

* [NOD-540] Implement TestIsFutureBlock.

* [NOD-540] Implement TestInsertFutureBlock.

* [NOD-540] Add comments.

* [NOD-540] Add comment for interval in blockNode.

* [NOD-540] Updated comments over insertFutureBlock and isFutureBlock.

* [NOD-540] Implement interval splitting methods.

* [NOD-540] Begin implementing tree manipulation in blockNode.

* [NOD-540] Implement countSubtreesUp.

* [NOD-540] Add a comment explaining an impossible condition.

* [NOD-540] Implement applyIntervalDown.

* [NOD-540] Moved the reachability tree stuff into reachability.go.

* [NOD-540] Add some comments.

* [NOD-540] Add more comments, implement isInPast.

* [NOD-540] Fix comments.

* [NOD-540] Implement TestSplitFraction.

* [NOD-540] Implement TestSplitExact.

* [NOD-540] Implement TestSplit.

* [NOD-540] Add comments to structs.

* [NOD-540] Implement TestAddTreeChild.

* [NOD-540] Fix a comment.

* [NOD-540] Rename isInPast to isAncestorOf.

* [NOD-540] Rename futureBlocks to futureCoveringSet.

* [NOD-540] Rename isFutureBlock to isInFuture.

* [NOD-540] move reachabilityInterval to the top of reachability.go.

* [NOD-540] Change "s.t." to "such that" in a comment.

* [NOD-540] Fix indentation.

* [NOD-540] Fix a potential bug involving float inaccuracy.

* [NOD-540] Wrote a more descriptive error message.

* [NOD-540] Fix error message.

* [NOD-540] Fix the recursive countSubtreesUp.

* [NOD-540] Rename countSubtreesUp to countSubtrees and applyIntervalDown to propagateInterval.

* [NOD-540] Implement updating reachability for a valid new block.

* [NOD-540] Implement a disk storage for reachability data.

* [NOD-540] Fix not all tree nodes being written to the database.

* [NOD-540] Implement serialization for reachabilityData.

* [NOD-540] Implement some deserialization for reachabilityData.

* [NOD-540] Implement restoring the reachabilityStore on node restart.

* [NOD-540] Made interval and remainingInterval pointers.

* [NOD-540] Rename setTreeInterval to setInterval.

* [NOD-540] Rename reindexTreeIntervals to reindexIntervals and fixed the comment above it.

* [NOD-540] Expand the comment above reindexIntervals.

* [NOD-540] Fix comment above countSubtrees.

* [NOD-540] Fix comment above countSubtrees some more.

* [NOD-540] Fix comment above split.

* [NOD-540] Fix comment above isAncestorOf.

* [NOD-540] Fix comment above reachabilityTreeNode.

* [NOD-540] Fix weird condition in addTreeChild.

* [NOD-540] Rename addTreeChild to addChild.

* [NOD-540] Fix weird condition in splitFraction.

* [NOD-540] Reverse the lines in reachabilityTreeNode.String().

* [NOD-540] Renamed f to fraction and x to size.

* [NOD-540] Fix comment above bisect.

* [NOD-540] Implement rtn.isAncestorOf().

* [NOD-540] Use treeNode isAncestorOf instead of treeInterval isAncestorOf.

* [NOD-540] Use newReachabilityInterval instead of struct initialization.

* [NOD-540] Make reachabilityTreeNode.String() use strings.Join.

* [NOD-540] Use sync.RWMutex instead of locks.PriorityMutex.

* [NOD-540] Rename thisTreeNode to newTreeNode.

* [NOD-540] Rename setTreeNode to addTreeNode.

* [NOD-540] Extracted selectedParentAnticone to a separate function.

* [NOD-540] Rename node to this.

* [NOD-540] Move updateReachability and isAncestorOf from dag.go to reachability.go.

* [NOD-540] Add whitespace after multiline function signatures in reachability.go.

* [NOD-540] Make splitFraction return an error on empty interval.

* [NOD-540] Add a comment about rounding to splitFraction.

* [NOD-540] Replace sneaky tabs with spaces.

* [NOD-540] Rename split to splitExponential.

* [NOD-540] Extract exponentialFractions to a separate function.

* [NOD-540] Rename bisect to findIndex.

* [NOD-540] Add call to reachabilityStore.clearDirtyEntries at the end of saveChangesFromBlock.

* [NOD-540] Explain the dirty hack in reachabilityStore.init().

* [NOD-540] Split the function signature for deserializeReachabilityData to two lines.

* [NOD-540] Add a comment about float precision loss to exponentialFractions.

* [NOD-540] Corrected a comment about float precision loss to exponentialFractions.

* [NOD-540] Fixed a comment about float precision loss to exponentialFractions some more.

* [NOD-540] Added further comments above futureCoveringBlockSet.

* [NOD-540] Rename addTreeNode to setTreeNode.

* [NOD-540] Rename splitExponential to splitWithExponentialBias.

* [NOD-540] Fix object references in reachabilityData deserialization (#563)

* [NOD-540] Fix broken references in deserialization.

* [NOD-540] Fix broken references in futureCoveringSet deserialization. Also add comments.

* [NOD-540] Don't deserialize on the first pass in reachabilityStore.init().

* [NOD-540] Remove redundant assignment to loaded[hash].

* [NOD-540] Use NewHash instead of SetBytes. Rename data to destination.

* [NOD-540] Preallocate futureCoveringSet.

* [NOD-541] Implement GHOSTDAG (#560)

* [NOD-541] Implement GHOSTDAG

* [NOD-541] Replace the old PHANTOM variant with GHOSTDAG

* [NOD-541] Move dag.updateReachability to the top of dag.applyDAGChanges to update reachability before the virtual block is updated

* [NOD-541] Fix blueAnticoneSize

* [NOD-541] Initialize node.bluesAnticoneSizes

* [NOD-541] Fix pastUTXO and applyBlueBlocks blues order

* [NOD-541] Add serialization logic to node.bluesAnticoneSizes

* [NOD-541] Fix GHOSTDAG to not count the new block and the blue candidates anticone, add selected parent to blues, and save to node.bluesAnticoneSizes properly

* [NOD-541] Fix test names in inner strings

* [NOD-541] Writing TestGHOSTDAG

* [NOD-541] In blueAnticoneSize change node->current

* [NOD-541] name ghostdag return values

* [NOD-541] fix ghostdag to return slice

* [NOD-541] Split k-cluster violation rules

* [NOD-541] Add missing space

* [NOD-541] Add comment to ghostdag

* [NOD-541] In selectedParentAnticone rename past->selectedParentPast

* [NOD-541] Fix misreferences to TestChainUpdates

* [NOD-541] Fix ghostdag comment

* [NOD-541] Make PrepareBlockForTest in blockdag package

* [NOD-541] Make PrepareBlockForTest in blockdag package

* [NOD-541] Assign to selectedParentAnticone[i] instead of appending

* [NOD-541] Remove redundant forceTransactions arguments from PrepareBlockForTEST

* [NOD-541] Add non-selected parents to anticoneHeap

* [NOD-541] add test for ghostdag

* [NOD-541] Add comments

* [NOD-541] Use adjusted time for initializing blockNode

* [NOD-541] Rename isAncestorOf -> isAncestorOfBlueCandidate

* [NOD-541] Remove params from PrepareBlockForTest

* [NOD-541] Fix TestChainHeight

* [NOD-541] Remove recursive lock

* [NOD-541] Fix TestTxIndexConnectBlock

* [NOD-541] Fix TestBlueBlockWindow

* [NOD-541] Put prepareAndProcessBlock in common_test.go

* [NOD-541] Fix TestConfirmations

* [NOD-541] Fix TestAcceptingBlock

* [NOD-541] Fix TestDifficulty

* [NOD-541] Fix TestVirtualBlock

* [NOD-541] Fix TestSelectedPath

* [NOD-541] Fix TestChainUpdates

* [NOD-541] Shorten TestDifficulty test time

* [NOD-541] Make PrepareBlockForTest use minimal valid block time

* [NOD-541] Remove TODO comment

* [NOD-541] Move blockdag related mining functions to mining.go

* [NOD-541] Use NextBlockCoinbaseTransaction instead of NextBlockCoinbaseTransactionNoLock in NextCoinbaseFromAddress

* [NOD-541] Remove useMinimalTime from BlockForMining

* [NOD-541] Make MedianAdjustedTime a *BlockDAG method

* [NOD-541] Fix ghostdag to use anticone slice instead of heap

* [NOD-541] Fix NewBlockTemplate locks

* [NOD-541] Fix ghostdag comments

* [NOD-541] Convert MedianAdjustedTime to NextBlockTime

* [NOD-541] Fix ghostdag comment

* [NOD-541] Fix TestGHOSTDAG comment

* [NOD-541] Add comment before sanity check

* [NOD-541] Explicitly initialize .blues in ghostdag

* [NOD-541] Rename *blockNode.lessThan to *blockNode.less

* [NOD-541] Remove redundant check if block != chainBlock

* [NOD-541] Fix comment

* [NOD-541] Fix comment

* [NOD-497] Add comment; General refactoring

* [NOD-497] General refactoring.

* [NOD-497] Use isAncestor of the tree rather than the node

* [NOD-497] Remove reachability mutex lock as it is redundant (dag lock is held so no need); General refactoring.

* [NOD-497] Update comment

* [NOD-497] Undo test blocktimestamp

* [NOD-497] Update comments; Use BlockNode.less for blockset;

* [NOD-497] Change processBlock to return boolean and not the delay duration (merge conflict)

* [NOD-497] Undo change for bluest to use less; Change blocknode less to use daghash.Less

Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
Co-authored-by: Dan Aharoni <dereeno@protonmail.com>
2020-01-08 17:41:22 +02:00
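The reachability machinery built up across the NOD-540 commits rests on one idea: each node in the selected-parent tree owns an index interval, ancestry is interval containment, and a parent allocates child intervals by splitting its own (splitFraction and friends). A toy version, omitting reindexing and the futureCoveringSet; the rounding guard echoes the "comment about rounding" bullet above.

```go
package main

import "fmt"

type reachabilityInterval struct{ start, end uint64 }

func (ri *reachabilityInterval) size() uint64 {
	if ri.end < ri.start {
		return 0 // empty interval
	}
	return ri.end - ri.start + 1
}

// isAncestorOf: containment of intervals encodes tree ancestry.
func (ri *reachabilityInterval) isAncestorOf(other *reachabilityInterval) bool {
	return ri.start <= other.start && other.end <= ri.end
}

// splitFraction splits ri so the left part holds roughly fraction of it.
func (ri *reachabilityInterval) splitFraction(fraction float64) (left, right *reachabilityInterval, err error) {
	if ri.size() == 0 {
		return nil, nil, fmt.Errorf("cannot split an empty interval")
	}
	allocated := uint64(float64(ri.size()) * fraction)
	if allocated == 0 {
		allocated = 1 // guard against rounding down to nothing
	}
	left = &reachabilityInterval{ri.start, ri.start + allocated - 1}
	right = &reachabilityInterval{ri.start + allocated, ri.end}
	return left, right, nil
}

func main() {
	root := &reachabilityInterval{0, 99}
	child, rest, _ := root.splitFraction(0.5)
	fmt.Println(child, rest, root.isAncestorOf(child)) // &{0 49} &{50 99} true
}
```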
Dan Aharoni
ea6f7a28c2 [NOD-420] Process delayed blocks (#529)
* [NOD-420] Delay blocks with valid timestamp (non-delayed) that point to a delayed block.

* [NOD-420] Mark block as requested when setting as delayed.

* [NOD-420] Merge master; Use dag.timeSource.AdjustedTime() instead of time.Now;

* [NOD-420] Return nil when not expecting an error

* [NOD-420] Initialize delayed blocks mapping

* [NOD-420] Trigger delayed blocks processing every time we process a block.

* [NOD-420] Hold the read lock in processDelayedBlocks

* [NOD-420] Add delayed blocks heap sorted by their process time so we could process them in order.

* [NOD-420] Update debug log

* [NOD-420] Fix process blocks loop

* [NOD-420] Add comment

* [NOD-420] Log error message

* [NOD-420] Implement peek method for delayed block heap. Extract delayed block processing to another function.

* [NOD-420] Trigger process delayed blocks only in process block

* [NOD-420] Move delayed block addition to process block

* [NOD-420] Use process block to make sure we fully process the delayed block and deal with orphans.

* [NOD-420] Unexport functions when not needed; Return isDelayed boolean from ProcessBlock instead of the delay duration

* [NOD-420] Remove redundant delayedBlocksLock

* [NOD-420] Resolve merge conflict; Return delay 0 instead of boolean

* [NOD-420] Do not treat delayed block as orphan

* [NOD-420] Make sure block is not processed if we have already set it as delayed.

* [NOD-420] Process delayed block if parent is delayed to make sure it would not be treated as orphan.

* [NOD-420] Rename variable

* [NOD-420] Rename function. Move maxDelayOfParents to process.go

* [NOD-420] Fix typo

* [NOD-420] Handle errors from processDelayedBlocks properly

* [NOD-420] Return default values if err != nil from dag.addDelayedBlock

* [NOD-420] Return default values if err != nil from dag.addDelayedBlock in another place

Co-authored-by: Svarog <feanorr@gmail.com>
2020-01-08 15:28:52 +02:00
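The delayed-block queue described above, sketched with the stdlib container/heap: blocks are ordered by their process time, and a peek method lets the caller inspect the earliest block without popping it. The delayedBlock type is invented for the example.

```go
package main

import (
	"container/heap"
	"fmt"
	"time"
)

type delayedBlock struct {
	hash        string
	processTime time.Time
}

// delayedBlocksHeap implements heap.Interface ordered by processTime.
type delayedBlocksHeap []*delayedBlock

func (h delayedBlocksHeap) Len() int            { return len(h) }
func (h delayedBlocksHeap) Less(i, j int) bool  { return h[i].processTime.Before(h[j].processTime) }
func (h delayedBlocksHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *delayedBlocksHeap) Push(x interface{}) { *h = append(*h, x.(*delayedBlock)) }
func (h *delayedBlocksHeap) Pop() interface{} {
	old := *h
	n := len(old)
	item := old[n-1]
	*h = old[:n-1]
	return item
}

// peek returns the block with the earliest process time without removing it.
func (h delayedBlocksHeap) peek() *delayedBlock {
	if len(h) == 0 {
		return nil
	}
	return h[0]
}

func main() {
	h := &delayedBlocksHeap{}
	heap.Init(h)
	heap.Push(h, &delayedBlock{"b", time.Now().Add(2 * time.Second)})
	heap.Push(h, &delayedBlock{"a", time.Now().Add(1 * time.Second)})
	fmt.Println(h.peek().hash) // "a": due to be processed first
}
```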
Svarog
ac9aa74a75 [NOD-549] Use version.Version anywhere relevant + update related tests (#577)
* [NOD-549] Update version to 0.1.0 and allow injection of appBuild

* [NOD-549] Fixed peer tests

* [NOD-549] Fixed wire tests

* [NOD-549] Remove any mention of semVer.

* [NOD-549] Don't include appBuild at all if it includes invalid characters

* [NOD-549] Panic if appBuild contains invalid characters

* [NOD-549] Move checkAppBuild into

* [NOD-549] Update comment
2020-01-08 12:14:59 +02:00
Ori Newman
d46857677f [NOD-503] Remove Tor functionality (#573) 2020-01-08 10:45:27 +02:00
stasatdaglabs
cd719b1d5b [NOD-546] Report build failures to Discord instead of Telegram (#572)
* [NOD-546] Report build failures to Discord instead of Telegram.

* [NOD-546] Make a temporary compilation error.

* [NOD-546] Make a couple of temporary print outs.

* [NOD-546] Remove temporary debug stuff.

* [NOD-546] Make notify_discord() return early if Discord variables are not set.
2020-01-06 16:36:21 +02:00
Svarog
7cf15ac93b [NOD-549] Update version to 0.1.0 and allow injection of appBuild (#568)
* [NOD-549] Update version to 0.1.0 and allow injection of appBuild

* [NOD-549] Fixed peer tests

* [NOD-549] Fixed wire tests

* [NOD-549] Remove any mention of semVer.

* [NOD-549] Don't include appBuild at all if it includes invalid characters

* [NOD-549] Panic if appBuild contains invalid characters

* [NOD-549] Move checkAppBuild into
2020-01-06 15:30:00 +02:00
Svarog
d8e3191469 [NOD-619] Disable tor related cli flags (#571)
* [NOD-619] Disable tor related cli flags

* [NOD-619] Disabled --proxy and related flags

* [NOD-619] Added missing space
2020-01-06 14:50:32 +02:00
Svarog
784d3de4ca [NOD-618] Removed outdated docs folder (#570) 2020-01-06 10:48:41 +02:00
Ori Newman
733d06af5a [NOD-617] Remove test coverage from test.sh (#569)
* [NOD-617] Remove test coverage from test.sh

* [NOD-617] Log coverage
2020-01-06 10:31:09 +02:00
stasatdaglabs
df91643976 [NOD-608] Make the user agent read the version from the version package. (#566) 2020-01-05 16:47:58 +02:00
stasatdaglabs
ebf635e6ff [NOD-559] Remove cmd/genaddr. (#567) 2020-01-05 11:41:14 +02:00
stasatdaglabs
e41d9866c3 [NOD-613] Fix concurrent access to ecmh cache (#565)
* [NOD-613] Fix concurrent access to ecmh cache.

* [NOD-613] Localized dag.Lock().
2020-01-02 18:03:25 +02:00
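The NOD-613 fix pattern, generically: give the shared cache its own mutex and take it inside every accessor ("localized" locking), so no caller can race on the underlying map. The cache below is a stand-in, not the real ECMH multiset cache.

```go
package main

import (
	"fmt"
	"sync"
)

type cache struct {
	mu      sync.Mutex
	entries map[string][]byte
}

func (c *cache) get(key string) ([]byte, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	v, ok := c.entries[key]
	return v, ok
}

func (c *cache) put(key string, value []byte) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.entries[key] = value
}

func main() {
	c := &cache{entries: make(map[string][]byte)}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			c.put(fmt.Sprint(i), []byte{byte(i)}) // safe under concurrency
		}(i)
	}
	wg.Wait()
	v, ok := c.get("2")
	fmt.Println(v, ok)
}
```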
stasatdaglabs
d984151549 [NOD-517] Update doc.go files (#559)
* [NOD-517] Remove copyright notices from all doc.go.

* [NOD-517] Updated the root doc.go.

* [NOD-517] Remove all cov_report.sh and test_coverage.txt.

* [NOD-517] Make all doc.go use the same style of comment.

* [NOD-517] Update dagconfig doc.go.

* [NOD-517] Update blockdag doc.go.

* [NOD-517] Update doc.go in connmgr.

* [NOD-517] Update doc.go in fullblocktests.

* [NOD-517] Update doc.go in database.

* [NOD-517] Update doc.go in ecc.

* [NOD-517] Update doc.go in rpctest.

* [NOD-517] Removed superfluous license in logs.

* [NOD-517] Update doc.go in mempool.

* [NOD-517] Updated doc.go in peer.

* [NOD-517] Update doc.go in rpcclient.

* [NOD-517] Update doc.go in txscript.

* [NOD-517] Update doc.go in util.

* [NOD-517] Update doc.go in base58.

* [NOD-517] Update doc.go in bech32.

* [NOD-517] Update doc.go in txsort.

* [NOD-517] Update doc.go in wire.

* [NOD-517] Fix indentation.

* [NOD-517] Add a copyright notice to the main doc.go.

* [NOD-517] Add Conformal to the license notices.

* [NOD-517] Remove superfluous language from a doc.

* [NOD-517] Fix bad example.
2020-01-02 16:57:43 +02:00
stasatdaglabs
6099ce56bd [NOD-5] Remove TestFullBlocks and package fullblocktests (#564) 2019-12-31 16:16:10 +02:00
Svarog
e0b5c145f7 [NOD-543] Added dnsseeds to testnet and mainnet (#562) 2019-12-31 10:57:43 +02:00
Ori Newman
cf37f733ef [NOD-601] Omit nil selected parent in GetBlockVerboseResult (#561) 2019-12-30 18:44:17 +02:00
Ori Newman
66a92a243c [NOD-591] Add selected parent to GetBlockVerboseResult (#558)
* [NOD-591] Add selected parent to GetBlockVerboseResult

* [NOD-591] Add selected parent to GetBlockHeaderResult
2019-12-29 12:46:35 +02:00
Ori Newman
4a88eea57e [NOD-590] Export newLogClosure (#557) 2019-12-26 18:26:22 +02:00
aspect
fbaf360a42 Fix missing pkg/errors reference in windows-only file (#547)
* Fixing missing pkg/errors reference

* go fmt pass
2019-12-25 15:50:43 +02:00
Svarog
1346810af8 [NOD-583] Move InterruptListener() to beginning of main (#555) 2019-12-25 13:14:28 +02:00
Svarog
9cbab94264 [NOD-579] Remove erroneous dependency on github.com/prometheus/common/log (#554) 2019-12-25 11:54:36 +02:00
Svarog
48f29cc11f [NOD-401] Allow to pass PrefixUnknown to ParseAddress + add .Prefix() to addresses (#553)
* [NOD-401] Created CLI-Wallet base structure and new command

* [NOD-401] Switched to go-flags sub-command parsing

* [NOD-401] Added config for all sub-commands

* [NOD-401] Work in progress for send command in cli-wallet

* [NOD-401] Allow to pass PrefixUnknown to ParseAddress + add .Prefix() to addresses

* [NOD-401] Finished implementing all wallet commands

* [NOD-401] some refactorings to sendTx

* [NOD-401] Moved wallet to kasparov repo + updated tests with new prefixes
2019-12-24 15:38:47 +02:00
Ori Newman
e2b57e6231 [NOD-566] Do not accept coinbase transaction from the selected parent anticone (#552) 2019-12-19 18:05:55 +02:00
Ori Newman
f72afc8bbb [NOD-499] Change ports and network magics (#550)
* [NOD-499] Change network magics

* [NOD-499] Change default rpc ports

* [NOD-499] Change default p2p ports

* [NOD-499] Change port 18333 to 10433 everywhere

* [NOD-499] Change port 8333 to 10333 everywhere

* [NOD-499] Fix TestElementWire

* [NOD-499] Fix tests

* [NOD-499] Change port 10333->16111 and 10332->16110

* [NOD-499] Change port 10433->16211 and 10432->16210

* [NOD-499] Change port 10633->16511 and 10632->16510

* [NOD-499] Change port 10533->16611 and 10532->16610
2019-12-18 16:18:37 +02:00
stasatdaglabs
0d1f447cb7 [NOD-510] Fix comments so that they don't mention bitcoin. (#551) 2019-12-18 13:46:32 +02:00
Svarog
818f8c93eb [NOD-551] Move httpserverutils to kasparov (#549)
* [NOD-511] Move httpserverutils to kasparov repository
2019-12-18 12:51:05 +02:00
Svarog
264ffaae93 [NOD-495] Move out non-kaspad apps (#548)
* [NOD-495] Remove txgen to separate repository

* [NOD-495] Remove DNSSeeder to separate repository

* [NOD-495] Remove kasparov to separate repository

* [NOD-495] Remove miningsimulator to separate repository

* [NOD-495] httpserverutils should use kaspad logger package

* [NOD-495] Remove faucet to separate repository

* [NOD-495] httpserverutils should use kasparov logger
2019-12-18 12:14:07 +02:00
stasatdaglabs
03b7af9a13 [NOD-532] Replace "chain" with "DAG" where appropriate (#537)
* [NOD-532] Change chain to DAG in the root package.

* [NOD-532] Change chain to DAG in checkpoints.go.

* [NOD-532] Change chain to DAG in blockdag.

* [NOD-532] Change chain to DAG in cmd.

* [NOD-532] Change chain to DAG in dagconfig.

* [NOD-532] Change chain to DAG in database.

* [NOD-532] Change chain to DAG in mempool.

* [NOD-532] Change chain to DAG in mempool.

* [NOD-532] Change chain to DAG in netsync.

* [NOD-532] Change chain to DAG in rpcclient.

* [NOD-532] Change chain to DAG in server.

* [NOD-532] Change chain to DAG in txscript.

* [NOD-532] Change chain to DAG in util.

* [NOD-532] Change chain to DAG in wire.

* [NOD-532] Remove block heights in dagio.go examples.

* [NOD-532] Rename fakeChain to fakeDAG.

* [NOD-532] Fix comments, remove unused EnableBCInfoHacks flag.

* [NOD-532] Fix comments and variable names.

* [NOD-532] Fix comments.

* [NOD-532] Fix merge errors.

* [NOD-532] Formatted project.
2019-12-17 13:40:03 +02:00
Ori Newman
e3d7e83d44 [NOD-548] Remove default dns seed from devnet (#546)
* [NOD-390] Add faucet Dockerfile

* [NOD-390] Allow running migration without -api-server-url and --private-key arguments

* [NOD-390] Change kasparov-server to kasparovd in its Dockerfile

* [NOD-548] Remove default DNS seed from devnet
2019-12-17 12:26:27 +02:00
Ori Newman
07651e51c8 [NOD-390] Add faucet dockerfile (#544)
* [NOD-390] Add faucet Dockerfile

* [NOD-390] Allow running migration without -api-server-url and --private-key arguments

* [NOD-390] Change kasparov-server to kasparovd in its Dockerfile

* [NOD-390] Change API server and Kasparov server to kasparovd
2019-12-17 11:10:59 +02:00
Svarog
1cd2eb9308 [NOD-494] Update readmes (#543)
* [NOD-494] Updated main README.md

* [NOD-494] Updated blockdag/README.md

* [NOD-494] Aligned text length in main README.md

* [NOD-494] Updated most remaining packages READMEs + deleted util/coinset

* [NOD-494] Update integration README

* [NOD-494] Did a final pass over all readmes

* [NOD-494] Updated README for DNSSeeder with more info on how to create a functioning setup

* [NOD-494] Remove all double spaces from readmes

* [NOD-494] Minor fixes in READMEs + update license to kaspanet developers

* [NOD-494] Add backtick around ecc and util in hdkeychain README
2019-12-16 17:37:17 +02:00
stasatdaglabs
a140327dd2 [NOD-500] Remove checkpoints (#541)
* [NOD-502] Remove checkpoints.

* [NOD-502] Remove remaining references to checkpoints.

* [NOD-500] Split RejectFinality to RejectDifficulty.

* [NOD-500] Remove support for headers-first in p2p.

* [NOD-500] Panic in newHashFromStr in case of an error.
2019-12-16 17:22:10 +02:00
stasatdaglabs
c1f7ae72e0 [NOD-514] Change dagcoin to kaspa, dagtest to kaspatest, dagreg to kaspareg, and dagsim to kaspasim (#538)
* [NOD-514] Change dagcoin to kaspa, dagtest to kaspatest, etc.

* [NOD-514] Remove no-longer-relevant link in doc.
2019-12-15 18:24:15 +02:00
stasatdaglabs
3a12fe9b1d [NOD-516] Remove cmd/genesis. (#542) 2019-12-15 14:50:04 +02:00
stasatdaglabs
c25c9b25bd [NOD-502] Remove RPCQuirks. (#540) 2019-12-15 14:49:22 +02:00
stasatdaglabs
f46dec449d [NOD-510] Change all references to Bitcoin to Kaspa (#531)
* [NOD-510] Change coinbase flags to kaspad.

* [NOD-510] Removed superfluous spaces after periods in comments.

* [NOD-510] Rename btcd -> kaspad in the root folder.

* [NOD-510] Rename BtcEncode -> KaspaEncode and BtcDecode -> KaspaDecode.

* [NOD-510] Rename BtcEncode -> KaspaEncode and BtcDecode -> KaspaDecode.

* [NOD-510] Continue renaming btcd -> kaspad.

* [NOD-510] Rename btcjson -> kaspajson.

* [NOD-510] Rename file names inside kaspajson.

* [NOD-510] Rename kaspajson -> jsonrpc.

* [NOD-510] Finish renaming in addrmgr.

* [NOD-510] Rename package btcec to ecc.

* [NOD-510] Finish renaming stuff in blockdag.

* [NOD-510] Rename stuff in cmd.

* [NOD-510] Rename stuff in config.

* [NOD-510] Rename stuff in connmgr.

* [NOD-510] Rename stuff in dagconfig.

* [NOD-510] Rename stuff in database.

* [NOD-510] Rename stuff in docker.

* [NOD-510] Rename stuff in integration.

* [NOD-510] Rename jsonrpc to rpcmodel.

* [NOD-510] Rename stuff in limits.

* [NOD-510] Rename stuff in logger.

* [NOD-510] Rename stuff in mempool.

* [NOD-510] Rename stuff in mining.

* [NOD-510] Rename stuff in netsync.

* [NOD-510] Rename stuff in peer.

* [NOD-510] Rename stuff in release.

* [NOD-510] Rename stuff in rpcclient.

* [NOD-510] Rename stuff in server.

* [NOD-510] Rename stuff in signal.

* [NOD-510] Rename stuff in txscript.

* [NOD-510] Rename stuff in util.

* [NOD-510] Rename stuff in wire.

* [NOD-510] Fix failing tests.

* [NOD-510] Fix merge errors.

* [NOD-510] Fix go vet errors.

* [NOD-510] Remove merged file that's no longer relevant.

* [NOD-510] Add a comment above Op0.

* [NOD-510] Fix some comments referencing Bitcoin Core.

* [NOD-510] Fix some more comments referencing Bitcoin Core.

* [NOD-510] Fix bitcoin -> kaspa.

* [NOD-510] Fix more bitcoin -> kaspa.

* [NOD-510] Fix comments, remove DisconnectBlock in addrindex.

* [NOD-510] Rename KSPD to KASD.

* [NOD-510] Fix comments and user agent.
2019-12-12 15:21:41 +02:00
Ori Newman
60ab6330ff [NOD-521] Add blocks to Kasparov DB immediately after getblocks request (#532) 2019-12-12 10:14:37 +02:00
Dan Aharoni
89dee3e005 [NOD-533] Kaspad renaming in docker (#536)
* [NOD-533] Rename kasparov folder leftovers

* [NOD-533] Rename btcd to kaspad

* [NOD-533] Fix folder name

* [NOD-533] Add file name
2019-12-12 10:09:57 +02:00
Dan Aharoni
70d7009985 [NOD-533] Rename kasparov folder leftovers (#535) 2019-12-11 16:43:39 +02:00
Dan Aharoni
3322a892e9 [NOD-531] Add migrations to Kasparov docker files (#534)
* [NOD-531] Add migrations to kasparov docker files

* [NOD-531] Remove newline
2019-12-11 13:04:46 +02:00
stasatdaglabs
61d066e958 [NOD-525] Rename kasparovsync to kasparovsyncd. (#533) 2019-12-11 12:35:04 +02:00
Svarog
7b9ffc6c25 [NOD-525] renamed folders server and syncd to kasparovd and kasparovsync respectively (#530)
* [NOD-525] renamed folders server and syncd to kasparovserver and kasparovsync respectively

* [NOD-525] Fixed references to kasparov sub-apps

* [NOD-525] Renamed kasparovserver -> kasparovd
2019-12-11 10:28:21 +02:00
Ori Newman
7a163d4dd7 [NOD-471] Make AddTx return false for duplicate coinbase, and make pastUTXO return accepted transaction with the accepting block blue score (#523)
* [NOD-471] Make AddTx return false for duplicate coinbase, and make pastUTXO return accepted transaction with the accepting block blue score

* [NOD-471] Remove diffFromAcceptanceData

* [NOD-471] Make fetchBlueBlocks return also selected parent

* [NOD-471] Skip adding coinbase transactions on applyBlueBlocks

* [NOD-471] Use tx.IsCoinbase() instead of i == util.CoinbaseTransactionIndex
2019-12-10 14:02:10 +02:00
Svarog
189a3380a2 [NOD-340] Remove unimplemented RPC commands: getNetworkHashPS, estimatePriority, getChainTips, invalidateBlock, preciousBlock, reconsiderBlock (#528)
* [NOD-340] Remove GetNetworkHashPS cmd

* [NOD-340] Removed unimplemented commands: estimatePriority, getChainTips, invalidateBlock, preciousBlock, reconsiderBlock

* [NOD-340] Apply gofmt
2019-12-10 12:58:26 +02:00
Dan Aharoni
8680231e5a [NOD-339] Remove cfilters and cfindex (#527)
* [NOD-339] Remove cfilters and cfindex

* [NOD-339] Remove some leftovers
2019-12-10 10:13:49 +02:00
stasatdaglabs
30f0e95969 [NOD-506] Remove blockNode.height and any references to it (#522)
* [NOD-506] Remove blockNode.height.

* [NOD-506] Use blueScore instead of chainHeight in validateParents.
2019-12-09 14:27:53 +02:00
Dan Aharoni
c94becf144 [NOD-498] Disable the option to choose mainnet and activeNet. (#525) 2019-12-08 18:33:04 +02:00
Svarog
369ec449a8 [NOD-509] Change organization name to kaspanet (#524)
* [NOD-509] Change organization name to kaspanet

* [NOD-509] Reorganize imports
2019-12-08 17:33:42 +02:00
Svarog
f4c6859e51 [NOD-509] Updated repository and imports to github.com/daglabs/kaspad (#521) 2019-12-08 16:28:53 +02:00
stasatdaglabs
683dd52fcf [NOD-492] Split API-Server to syncer and frontend applications (#519)
* [NOD-492] Split ApiServer to server and syncd.

* [NOD-492] Add missing file.

* [NOD-492] Remove references to --migrate from common config.

* [NOD-492] Move MQTT to the sync daemon.

* [NOD-492] Fix server Dockerfile and create one for syncd.

* [NOD-492] Rename ApiServer to Kasparov.

* [NOD-492] Fix packages.

* [NOD-492] Fix more packages.

* [NOD-492] Fix comments and package names.

* [NOD-492] Move blank import packages to main.

* [NOD-492] Move common logging logic out of individual config.go files.

* [NOD-492] Move database models to a package called dbmodels.

* [NOD-492] Rename models package to apimodels.
2019-12-08 14:38:47 +02:00
stasatdaglabs
11e936d109 [NOD-493] Make GetBlock return an appropriate error if the hash is known to be of an invalid block. (#520) 2019-12-08 14:05:56 +02:00
stasatdaglabs
9adb105e37 [NOD-487] Implement a mechanism to gracefully shut down after a panic (#512)
* [NOD-487] Implement a mechanism to gracefully shut down after a panic.

* [NOD-487] Fixed bad log.

* [NOD-487] Removed unused import.

* [NOD-487] Convert panic handlers from anonymous functions to methods.
2019-12-05 12:29:39 +02:00
stasatdaglabs
7b6ed9a778 [NOD-412] Remove run-dev.sh-related stuff. (#518) 2019-12-04 18:35:50 +02:00
Ori Newman
3218fc5a04 [NOD-488] Make getBlueBlocksBetween return error if startHash is not in selected parent chain of stopHash (#514)
* [NOD-488] Make getBlueBlocksBetween return error if start hash is not in the selected parent chain of stop hash

* [NOD-488] Convert for to while style
2019-12-04 17:52:28 +02:00
Ori Newman
3f94f8ca4c [NOD-491] Withhold blocks in mining simulator (#517) 2019-12-04 17:36:27 +02:00
Ori Newman
0842778c2c [NOD-485] Remove redundant argument from saveChangesFromBlock (#516) 2019-12-04 17:24:20 +02:00
Ori Newman
1332e1aa68 [NOD-479] Fix unorphaning in API server initial sync (#513) 2019-12-04 17:06:20 +02:00
Ori Newman
e872ebc7b3 [NOD-417] Remove mempool and alert messages (#515) 2019-12-04 10:43:50 +02:00
Svarog
e68b242243 [NOD-489] Don't skip notification about transactions for orphan/non-current blocks (#511) 2019-12-03 14:18:32 +02:00
Ori Newman
9cc2a7260b [NOD-479] Separate max outbound connections and max inbound connections (#509)
* [NOD-479] Separate max outbound connections and max inbound connections

* [NOD-479] Fix merge

* [NOD-479] Renames and add function countinboundPeers

* [NOD-479] Remove redundant check on maximum outbound peers

* [NOD-479] Rename countinboundPeers -> countInboundPeers
2019-12-03 12:27:49 +02:00
Ori Newman
bcd73012de [NOD-428] Require RPC user and password, and do not create a default config file for btcctl if rpc login details were provided (#510)
* [NOD-428] Required RPC user and password, and do not create a default config file for btcctl if rpc login details were provided

* [NOD-428] Don't check rpc user and password if rpc is disabled

* [NOD-428] Fix error message
2019-12-03 11:18:28 +02:00
Dan Aharoni
1fea2a9421 [NOD-486] API Server TX posting: Forward error when RPC Error is received (#507)
* [NOD-486] Forward error when RPC Error is received

* [NOD-486] Rename variable

* [NOD-486] Rename variable

* [NOD-486] Rename Variable (again)
2019-12-02 18:44:39 +02:00
stasatdaglabs
bb7d68deda [NOD-484] Fix deadlock between p2p server and sync manager during shutdown (#508)
* [NOD-484] Fix deadlock between p2p server and sync manager during shutdown.

* [NOD-484] Fix quitWaitGroup.Wait() potentially not waiting in some scenarios.

* [NOD-484] Add a comment explaining quitWaitGroup.

* [NOD-484] Fix typo.

* [NOD-484] Add etc to comment.
2019-12-02 18:08:32 +02:00
Ori Newman
3ab861227d [NOD-443] Fix API server unorphaning (#501)
* [NOD-443] Immediately request missing parents in API server sync

* [NOD-443] Add rpc client log to api server

* [NOD-443] Fix wrong ordering of pendingHeaders queue

* [NOD-443] Fix error comparison at TestNewHashFromStr

* [NOD-443] Make a separate handleMissingParentHeaders function

* [NOD-443] Change log level

* [NOD-443] Put handleMissingParentHeaders next to handleBlockHeader

* [NOD-443] Make handleBlockAddedMsg function

* [NOD-443] Make reusable missingParentsHashesStr string

* [NOD-443] Remove redundant 's'

* [NOD-443] Refactor to first get all blocks and then add them to database

* [NOD-443] Rename variables and functions, and remove redundant logs

* [NOD-443] Make fetchBlockAndMissingAncestors use block hash as an argument

* [NOD-443] Add log only for first orphan block

* [NOD-443] Fix wrong order of adding blocks to pendingBlocks

* [NOD-443] Write logs for all orphans

* [NOD-443] Log only missing parents that are not already fetched

* [NOD-443] Rename rawVerboseBlockTuple -> rawVerboseBlock

* [NOD-443] Make fetchBlock return *rawVerboseBlock

* [NOD-443] Rename rawVerboseBlock -> rawAndVerboseBlock
2019-12-02 13:29:28 +02:00
stasatdaglabs
8f0d98ef9b [NOD-464] Fix error messages in GetBlocks. (#506) 2019-12-02 13:05:56 +02:00
Svarog
dbd8bf3d2c [NOD-478] Add buffer to newOutboundConnection channel (#502) 2019-12-01 17:58:05 +02:00
Dan Aharoni
1b6b02e0d2 [NOD-475] Return error when requesting limit=0 (#505) 2019-12-01 17:25:27 +02:00
Ori Newman
2402bae1ff [NOD-410] Add log level CLI argument to API server (#503)
* [NOD-410] Add log level CLI argument to API server

* [NOD-410] Add comments

* [NOD-410] Remove pre-allocation of one item
2019-12-01 17:24:12 +02:00
Dan Aharoni
3dcf8d88b8 [NOD-481] Fix /blocks limit error message for api server (#504) 2019-12-01 16:19:14 +02:00
stasatdaglabs
dbf9c09a2e [NOD-461] Fix error code and message in GetTransactionsByAddressHandler. (#499) 2019-11-28 17:32:34 +02:00
stasatdaglabs
5e9fc2defc [NOD-464] Fix error messages in GetBlocks. (#500) 2019-11-28 17:31:19 +02:00
Ori Newman
bdc3cbceaa [NOD-472] Don't fetch accepting block and tx confirmations for getBlocks (#498)
* [NOD-472] Don't fetch accepting block and tx confirmations for getBlocks

* [NOD-472] Don't fetch accepting block and tx confirmations in any block verbose result

* [NOD-472] Add stringPointerToString function
2019-11-28 13:04:03 +02:00
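The commit message names a stringPointerToString helper without showing it. A minimal guess at what such a nil-safe conversion might look like; the actual signature in the codebase may differ:

```go
package main

import "fmt"

// stringPointerToString is a hypothetical reconstruction of the helper
// named in the commit message; the real code may differ.
func stringPointerToString(s *string) string {
	if s == nil {
		return ""
	}
	return *s
}

func main() {
	var missing *string
	fmt.Printf("%q\n", stringPointerToString(missing)) // ""
}
```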
stasatdaglabs
a71528fefb [NOD-450] Fix netsync clogging its own request queue with orphans that it had just now processed (#497)
* [NOD-450] Fix netsync clogging its own request queue with orphans that it had just now processed.

* [NOD-450] Rename hash to orphanHash.
2019-11-28 11:30:50 +02:00
Dan Aharoni
6725742d2c [NOD-470] Pass string instead of hash to controller (#496) 2019-11-27 17:08:23 +02:00
718 changed files with 15118 additions and 46114 deletions

2
.gitignore vendored

@@ -2,7 +2,7 @@
*~
# Databases
btcd.db
kaspad.db
*-shm
*-wal

955
CHANGES

@@ -1,955 +0,0 @@
============================================================================
User visible changes for btcd
A full-node bitcoin implementation written in Go
============================================================================
Changes in 0.12.0 (Fri Nov 20 2015)
- Protocol and network related changes:
- Add a new checkpoint at block height 382320 (#555)
- Implement BIP0065 which includes support for version 4 blocks, a new
consensus opcode (OP_CHECKLOCKTIMEVERIFY) that enforces transaction
lock times, and a double-threshold switchover mechanism (#535, #459,
#455)
- Implement BIP0111 which provides a new bloom filter service flag and
hence provides support for protocol version 70011 (#499)
- Add a new parameter --nopeerbloomfilters to allow disabling bloom
filter support (#499)
- Reject non-canonically encoded variable length integers (#507)
- Add mainnet peer discovery DNS seed (seed.bitcoin.jonasschnelli.ch)
(#496)
- Correct reconnect handling for persistent peers (#463, #464)
- Ignore requests for block headers if not fully synced (#444)
- Add CLI support for specifying the zone id on IPv6 addresses (#538)
- Fix a couple of issues where the initial block sync could stall (#518,
#229, #486)
- Fix an issue which prevented the --onion option from working as
intended (#446)
- Transaction relay (memory pool) changes:
- Require transactions to only include signatures encoded with the
canonical 'low-s' encoding (#512)
- Add a new parameter --minrelaytxfee to allow the minimum transaction
fee in BTC/kB to be overridden (#520)
- Retain memory pool transactions when they redeem another one that is
removed when a block is accepted (#539)
- Do not send reject messages for a transaction if it is valid but
causes an orphan transaction which depends on it to be determined
as invalid (#546)
- Refrain from attempting to add orphans to the memory pool multiple
times when the transaction they redeem is added (#551)
- Modify minimum transaction fee calculations to scale based on bytes
instead of full kilobyte boundaries (#521, #537)
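The byte-based fee scaling mentioned in the last item above reduces to a one-line calculation. A minimal sketch, assuming a fee rate expressed in satoshi per kilobyte; a real implementation would also guard against overflow:

```go
package main

import "fmt"

// calcMinRequiredFee scales the minimum relay fee by serialized size
// in bytes instead of rounding up to whole-kilobyte boundaries.
func calcMinRequiredFee(serializedSize, minRelayTxFeePerKB int64) int64 {
	return serializedSize * minRelayTxFeePerKB / 1000
}

func main() {
	// A 250-byte transaction at 1000 satoshi/kB owes 250 satoshi,
	// not the 1000 a kilobyte-boundary calculation would charge.
	fmt.Println(calcMinRequiredFee(250, 1000))
}
```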
- Implement signature cache:
- Provides a limited memory cache of validated signatures which is a
huge optimization when verifying blocks for transactions that are
already in the memory pool (#506)
- Add a new parameter '--sigcachemaxsize' which allows the size of the
new cache to be manually changed if desired (#506)
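The signature cache described above is, at its core, a bounded concurrent set keyed by verified signatures. The following is a sketch of that idea under assumed names; the real cache's key type and eviction policy may differ:

```go
package main

import (
	"fmt"
	"sync"
)

// sigCacheKey stands in for whatever uniquely identifies a
// (sigHash, signature, pubkey) triple; a 32-byte hash here.
type sigCacheKey [32]byte

// SigCache is a bounded, concurrency-safe set of already-verified
// signatures, sketching the idea behind --sigcachemaxsize.
type SigCache struct {
	mtx        sync.RWMutex
	validSigs  map[sigCacheKey]struct{}
	maxEntries uint
}

func NewSigCache(maxEntries uint) *SigCache {
	return &SigCache{
		validSigs:  make(map[sigCacheKey]struct{}),
		maxEntries: maxEntries,
	}
}

// Exists reports whether a signature was verified before, letting
// block validation skip a costly ECDSA verification.
func (s *SigCache) Exists(k sigCacheKey) bool {
	s.mtx.RLock()
	defer s.mtx.RUnlock()
	_, ok := s.validSigs[k]
	return ok
}

// Add records a verified signature, evicting an arbitrary entry
// (Go map iteration order is random) once the cache is full.
func (s *SigCache) Add(k sigCacheKey) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	if uint(len(s.validSigs))+1 > s.maxEntries {
		for evict := range s.validSigs {
			delete(s.validSigs, evict)
			break
		}
	}
	s.validSigs[k] = struct{}{}
}

func main() {
	cache := NewSigCache(1)
	var k sigCacheKey
	cache.Add(k)
	fmt.Println(cache.Exists(k)) // true
}
```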
- Mining support changes:
- Notify getblocktemplate long polling clients when a block is pushed
via submitblock (#488)
- Speed up getblocktemplate by making use of the new signature cache
(#506)
- RPC changes:
- Implement getmempoolinfo command (#453)
- Implement getblockheader command (#461)
- Modify createrawtransaction command to accept a new optional parameter
'locktime' (#529)
- Modify listunspent result to include the 'spendable' field (#440)
- Modify getinfo command to include 'errors' field (#511)
- Add timestamps to blockconnected and blockdisconnected notifications
(#450)
- Several modifications to searchrawtransactions command:
- Accept a new optional parameter 'vinextra' which causes the results
to include information about the outputs referenced by a transaction's
inputs (#485, #487)
- Skip entries in the mempool too (#495)
- Accept a new optional parameter 'reverse' to return the results in
reverse order (most recent to oldest) (#497)
- Accept a new optional parameter 'filteraddrs' which causes the
results to only include inputs and outputs which involve the
provided addresses (#516)
- Change the notification order to notify clients about mined
transactions (recvtx, redeemingtx) before the blockconnected
notification (#449)
- Update verifymessage RPC to use the standard algorithm so it is
compatible with other implementations (#515)
- Improve ping statistics by pinging on an interval (#517)
- Websocket changes:
- Implement session command which returns a per-session unique id (#500,
#503)
- btcctl utility changes:
- Add getmempoolinfo command (#453)
- Add getblockheader command (#461)
- Add getwalletinfo command (#471)
- Notable developer-related package changes:
- Introduce a new peer package which acts as a common base for creating and
concurrently managing bitcoin network peers (#445)
- Various cleanup of the new peer package (#528, #531, #524, #534,
#549)
- Block heights now consistently use int32 everywhere (#481)
- The BlockHeader type in the wire package now provides the BtcDecode
and BtcEncode methods (#467)
- Update wire package to recognize BIP0064 (getutxo) service bit (#489)
- Export LockTimeThreshold constant from txscript package (#454)
- Export MaxDataCarrierSize constant from txscript package (#466)
- Provide new IsUnspendable function from the txscript package (#478)
- Export variable length string functions from the wire package (#514)
- Export DNS Seeds for each network from the chaincfg package (#544)
- Preliminary work towards separating the memory pool into a separate
package (#525, #548)
- Misc changes:
- Various documentation updates (#442, #462, #465, #460, #470, #473,
#505, #530, #545)
- Add installation instructions for gentoo (#542)
- Ensure an error is shown if OS limits can't be set at startup (#498)
- Tighten the standardness checks for multisig scripts (#526)
- Test coverage improvement (#468, #494, #527, #543, #550)
- Several optimizations (#457, #474, #475, #476, #508, #509)
- Minor code cleanup and refactoring (#472, #479, #482, #519, #540)
- Contributors (alphabetical order):
- Ben Echols
- Bruno Clermont
- danda
- Daniel Krawisz
- Dario Nieuwenhuis
- Dave Collins
- David Hill
- Javed Khan
- Jonathan Gillham
- Joseph Becher
- Josh Rickmar
- Justus Ranvier
- Mawuli Adzoe
- Olaoluwa Osuntokun
- Rune T. Aune
Changes in 0.11.1 (Wed May 27 2015)
- Protocol and network related changes:
- Use correct sub-command in reject message for rejected transactions
(#436, #437)
- Add a new parameter --torisolation which forces new circuits for each
connection when using tor (#430)
- Transaction relay (memory pool) changes:
- Reduce the default maximum number of allowed orphan transactions
to 1000 (#419)
- Add a new parameter --maxorphantx which allows the maximum number of
orphan transactions stored in the mempool to be specified (#419)
- RPC changes:
- Modify listtransactions result to include the 'involveswatchonly' and
'vout' fields (#427)
- Update getrawtransaction result to omit the 'confirmations' field
when it is 0 (#420, #422)
- Update signrawtransaction result to include errors (#423)
- btcctl utility changes:
- Add gettxoutproof command (#428)
- Add verifytxoutproof command (#428)
- Notable developer-related package changes:
- The btcec package now provides the ability to perform ECDH
encryption and decryption (#375)
- The block and header validation in the blockchain package has been
split to help pave the way toward concurrent downloads (#386)
- Misc changes:
- Minor peer optimization (#433)
- Contributors (alphabetical order):
- Dave Collins
- David Hill
- Federico Bond
- Ishbir Singh
- Josh Rickmar
Changes in 0.11.0 (Wed May 06 2015)
- Protocol and network related changes:
- **IMPORTANT: Update is required due to the following point**
- Correct a few corner cases in script handling which could result in
forking from the network on non-standard transactions (#425)
- Add a new checkpoint at block height 352940 (#418)
- Optimized script execution (#395, #400, #404, #409)
- Fix a case that could lead to stalled syncs (#138, #296)
- Network address manager changes:
- Implement eclipse attack countermeasures as proposed in
http://cs-people.bu.edu/heilman/eclipse (#370, #373)
- Optional address indexing changes:
- Fix an issue where a reorg could cause an orderly shutdown when the
address index is active (#340, #357)
- Transaction relay (memory pool) changes:
- Increase maximum allowed space for nulldata transactions to 80 bytes
(#331)
- Implement support for the following rules specified by BIP0062:
- The S value in ECDSA signature must be at most half the curve order
(rule 5) (#349)
- Script execution must result in a single non-zero value on the stack
(rule 6) (#347)
- NOTE: All 7 rules of BIP0062 are now implemented (a sketch of the
rule 5 low-S check follows this group)
- Use network adjusted time in finalized transaction checks to improve
consistency across nodes (#332)
- Process orphan transactions on acceptance of new transactions (#345)
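Rule 5 above (the low-S requirement) is a single big-integer comparison against half the secp256k1 group order. A self-contained sketch of that check:

```go
package main

import (
	"fmt"
	"math/big"
)

// curveOrder is the secp256k1 group order N.
var curveOrder, _ = new(big.Int).SetString(
	"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)

// isLowS implements BIP0062 rule 5: the S value of an ECDSA signature
// must be at most half the curve order. Sketch only.
func isLowS(s *big.Int) bool {
	halfOrder := new(big.Int).Rsh(curveOrder, 1)
	return s.Cmp(halfOrder) <= 0
}

func main() {
	fmt.Println(isLowS(big.NewInt(1)))  // true
	fmt.Println(isLowS(curveOrder))     // false: a high-S signature
}
```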
- RPC changes:
- Add support for a limited RPC user which is not allowed admin level
operations on the server (#363)
- Implement node command for more unified control over connected peers
(#79, #341)
- Implement generate command for regtest/simnet to support
deterministically mining a specified number of blocks (#362, #407)
- Update searchrawtransactions to return the matching transactions in
order (#354)
- Correct an issue with searchrawtransactions where it could return
duplicates (#346, #354)
- Increase precision of 'difficulty' field in getblock result to 8
(#414, #415)
- Omit 'nextblockhash' field from getblock result when it is empty
(#416, #417)
- Add 'id' and 'timeoffset' fields to getpeerinfo result (#335)
- Websocket changes:
- Implement new commands stopnotifyspent, stopnotifyreceived,
stopnotifyblocks, and stopnotifynewtransactions to allow clients to
cancel notification registrations (#122, #342)
- btcctl utility changes:
- A single dash can now be used as an argument to cause that argument to
be read from stdin (#348)
- Add generate command
- Notable developer-related package changes:
- The new version 2 btcjson package has now replaced the deprecated
version 1 package (#368)
- The btcec package now performs all signing using RFC6979 deterministic
signatures (#358, #360)
- The txscript package has been significantly cleaned up and had a few
API changes (#387, #388, #389, #390, #391, #392, #393, #395, #396,
#400, #403, #404, #405, #406, #408, #409, #410, #412)
- A new PkScriptLocs function has been added to the wire package MsgTx
type which provides callers that deal with scripts optimization
opportunities (#343)
- Misc changes:
- Minor wire hashing optimizations (#366, #367)
- Other minor internal optimizations
- Contributors (alphabetical order):
- Alex Akselrod
- Arne Brutschy
- Chris Jepson
- Daniel Krawisz
- Dave Collins
- David Hill
- Jimmy Song
- Jonas Nick
- Josh Rickmar
- Olaoluwa Osuntokun
- Oleg Andreev
Changes in 0.10.0 (Sun Mar 01 2015)
- Protocol and network related changes:
- Add a new checkpoint at block height 343185
- Implement BIP066 which includes support for version 3 blocks, a new
consensus rule which prevents non-DER encoded signatures, and a
double-threshold switchover mechanism
- Rather than announcing all known addresses on getaddr requests which
can possibly result in multiple messages, randomize the results and
limit them to the max allowed by a single message (1000 addresses)
- Add more reserved IP spaces to the address manager
- Transaction relay (memory pool) changes:
- Make transactions which contain reserved opcodes nonstandard
- No longer accept or relay free and low-fee transactions that have
insufficient priority to be mined in the next block
- Implement support for the following rules specified by BIP0062:
- ECDSA signature must use strict DER encoding (rule 1)
- The signature script must only contain push operations (rule 2)
- All push operations must use the smallest possible encoding (rule 3)
- All stack values interpreted as a number must be encoded using the
shortest possible form (rule 4)
- NOTE: Rule 1 was already enforced, however the entire script now
evaluates to false rather than only the signature verification as
required by BIP0062
- Allow transactions with nulldata transaction outputs to be treated as
standard
- Mining support changes:
- Modify the getblocktemplate RPC to generate and return block templates
for version 3 blocks which are compatible with BIP0066
- Allow getblocktemplate to serve blocks when the current time is
less than the minimum allowed time for a generated block template
(https://github.com/btcsuite/btcd/issues/209)
- Crypto changes:
- Optimize scalar multiplication by the base point by using a
pre-computed table which results in approximately a 35% speedup
(https://github.com/btcsuite/btcec/issues/2)
- Optimize general scalar multiplication by using the secp256k1
endomorphism which results in approximately a 17-20% speedup
(https://github.com/btcsuite/btcec/issues/1)
- Optimize general scalar multiplication by using non-adjacent form
which results in approximately an additional 8% speedup
(https://github.com/btcsuite/btcec/issues/3)
- Implement optional address indexing:
- Add a new parameter --addrindex which will enable the creation of an
address index which can be queried to determine all transactions which
involve a given address
(https://github.com/btcsuite/btcd/issues/190)
- Add a new logging subsystem for address index related operations
- Support new searchrawtransactions RPC
(https://github.com/btcsuite/btcd/issues/185)
- RPC changes:
- Require TLS version 1.2 as the minimum version for all TLS connections
(a configuration sketch follows this group)
- Provide support for disabling TLS when only listening on localhost
(https://github.com/btcsuite/btcd/pull/192)
- Modify help output for all commands to provide much more consistent
and detailed information
- Correct case in getrawtransaction which would refuse to serve certain
transactions with invalid scripts
(https://github.com/btcsuite/btcd/issues/210)
- Correct error handling in the getrawtransaction RPC which could lead
to a crash in rare cases
(https://github.com/btcsuite/btcd/issues/196)
- Update getinfo RPC to include the appropriate 'timeoffset' calculated
from the median network time
- Modify listreceivedbyaddress result type to include txids field so it
is compatible
- Add 'iswatchonly' field to validateaddress result
- Add 'startingpriority' and 'currentpriority' fields to getrawmempool
(https://github.com/btcsuite/btcd/issues/178)
- Don't omit the 'confirmations' field from getrawtransaction when it is
zero
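The TLS 1.2 floor mentioned in the first item of this group is expressed in Go's standard library with a single field. A minimal sketch; the address and certificate paths are placeholders:

```go
package main

import (
	"crypto/tls"
	"log"
	"net/http"
)

func main() {
	// Enforce TLS 1.2 as the minimum protocol version for the
	// listener, as described above. Paths are placeholders.
	server := &http.Server{
		Addr:      ":8334",
		TLSConfig: &tls.Config{MinVersion: tls.VersionTLS12},
	}
	log.Fatal(server.ListenAndServeTLS("rpc.cert", "rpc.key"))
}
```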
- Websocket changes:
- Modify the behavior of the rescan command to automatically register
for notifications about transactions paying to rescanned addresses
or spending outputs from the final rescan utxo set when the rescan
is through the best block in the chain
- btcctl utility changes:
- Make the list of commands available via the -l option rather than
dumping the entire list on usage errors
- Alphabetize and categorize the list of commands by chain and wallet
- Make the help option only show the help options instead of also
dumping all of the commands
- Make the usage syntax much more consistent and correct a few cases of
misnamed fields
(https://github.com/btcsuite/btcd/issues/305)
- Improve usage errors to show the specific parameter number, reason,
and error code
- Only show the usage for a specific command when a valid command
is provided with invalid parameters
- Add support for a SOCKS5 proxy
- Modify output for integer fields (such as timestamps) to display
normally instead of in scientific notation
- Add invalidateblock command
- Add reconsiderblock command
- Add createnewaccount command
- Add renameaccount command
- Add searchrawtransactions command
- Add importaddress command
- Add importpubkey command
- showblock utility changes:
- Remove utility in favor of the RPC getblock method
- Notable developer-related package changes:
- Many of the core packages have been relocated into the btcd repository
(https://github.com/btcsuite/btcd/issues/214)
- A new version of the btcjson package that has been completely
redesigned from the ground up based upon how the project has
evolved and lessons learned while using it since it was first written
is now available in the btcjson/v2/btcjson directory
- This will ultimately replace the current version so anyone making
use of this package will need to update their code accordingly
- The btcec package now provides better facilities for working directly
with its public and private keys without having to mix elements from
the ecdsa package
- Update the script builder to ensure all rules specified by BIP0062 are
adhered to when creating scripts
- The blockchain package now provides a MedianTimeSource interface and
concrete implementation for providing time samples from remote peers
and using that data to calculate an offset against the local time
- Misc changes:
- Fix a slow memory leak due to tickers not being stopped
(https://github.com/btcsuite/btcd/issues/189)
- Fix an issue where a mix of orphans and SPV clients could trigger a
condition where peers would no longer be served
(https://github.com/btcsuite/btcd/issues/231)
- The RPC username and password can now contain symbols which previously
conflicted with special symbols used in URLs
- Improve handling of obtaining random nonces to prevent cases where it
could error when not enough entropy was available
- Improve handling of home directory creation errors such as in the case
of unmounted symlinks (https://github.com/btcsuite/btcd/issues/193)
- Improve the error reporting for rejected transactions to include the
inputs which are missing and/or being double spent
- Update sample config file with new options and correct a comment
regarding the fact the RPC server only listens on localhost by default
(https://github.com/btcsuite/btcd/issues/218)
- Update the continuous integration builds to run several tools which
help keep code quality high
- Significant amount of internal code cleanup and improvements
- Other minor internal optimizations
- Code Contributors (alphabetical order):
- Beldur
- Ben Holden-Crowther
- Dave Collins
- David Evans
- David Hill
- Guilherme Salgado
- Javed Khan
- Jimmy Song
- John C. Vernaleo
- Jonathan Gillham
- Josh Rickmar
- Michael Ford
- Michail Kargakis
- kac
- Olaoluwa Osuntokun
Changes in 0.9.0 (Sat Sep 20 2014)
- Protocol and network related changes:
- Add a new checkpoint at block height 319400
- Add support for BIP0037 bloom filters
(https://github.com/conformal/btcd/issues/132)
- Implement BIP0061 reject handling and hence support for protocol
version 70002 (https://github.com/conformal/btcd/issues/133)
- Add testnet DNS seeds for peer discovery (testnet-seed.alexykot.me
and testnet-seed.bitcoin.schildbach.de)
- Add mainnet DNS seed for peer discovery (seeds.bitcoin.open-nodes.org)
- Make multisig transactions with non-null dummy data nonstandard
(https://github.com/conformal/btcd/issues/131)
- Make transactions with an excessive number of signature operations
nonstandard
- Perform initial DNS lookups concurrently which allows connections
to be established more quickly
- Improve the address manager to significantly reduce memory usage and
add tests
- Remove orphan transactions when they appear in a mined block
(https://github.com/conformal/btcd/issues/166)
- Apply incremental back off on connection retries for persistent peers
that give invalid replies to mirror the logic used for failed
connections (https://github.com/conformal/btcd/issues/103)
- Correct rate-limiting of free and low-fee transactions
- Mining support changes:
- Implement getblocktemplate RPC with the following support:
(https://github.com/conformal/btcd/issues/124)
- BIP0022 Non-Optional Sections
- BIP0022 Long Polling
- BIP0023 Basic Pool Extensions
- BIP0023 Mutation coinbase/append
- BIP0023 Mutations time, time/increment, and time/decrement
- BIP0023 Mutation transactions/add
- BIP0023 Mutations prevblock, coinbase, and generation
- BIP0023 Block Proposals
- Implement built-in concurrent CPU miner
(https://github.com/conformal/btcd/issues/137)
NOTE: CPU mining on mainnet is pointless. This has been provided
for testing purposes such as for the new simulation test network
- Add --generate flag to enable CPU mining
- Deprecate the --getworkkey flag in favor of --miningaddr which
specifies which addresses generated blocks will choose from to pay
the subsidy to
- RPC changes:
- Implement gettxout command
(https://github.com/conformal/btcd/issues/141)
- Implement validateaddress command
- Implement verifymessage command
- Mark getunconfirmedbalance RPC as wallet-only
- Mark getwalletinfo RPC as wallet-only
- Update getgenerate, setgenerate, gethashespersec, and getmininginfo
to return the appropriate information about new CPU mining status
- Modify getpeerinfo pingtime and pingwait field types to float64 so
they are compatible
- Improve disconnect handling for normal HTTP clients
- Make error code returns for invalid hex more consistent
- Websocket changes:
- Switch to a new more efficient websocket package
(https://github.com/conformal/btcd/issues/134)
- Add rescanfinished notification
- Modify the rescanprogress notification to include block hash as well
as height (https://github.com/conformal/btcd/issues/151)
- btcctl utility changes:
- Accept --simnet flag which automatically selects the appropriate port
and TLS certificates needed to communicate with btcd and btcwallet on
the simulation test network
- Fix createrawtransaction command to send amounts denominated in BTC
- Add estimatefee command
- Add estimatepriority command
- Add getmininginfo command
- Add getnetworkinfo command
- Add gettxout command
- Add lockunspent command
- Add signrawtransaction command
- addblock utility changes:
- Accept --simnet flag which automatically selects the appropriate port
and TLS certificates needed to communicate with btcd and btcwallet on
the simulation test network
- Notable developer-related package changes:
- Provide a new bloom package in btcutil which allows creating and
working with BIP0037 bloom filters (a usage sketch follows this group)
- Provide a new hdkeychain package in btcutil which allows working with
BIP0032 hierarchical deterministic key chains
- Introduce a new btcnet package which houses network parameters
- Provide new simnet network (--simnet) which is useful for private
simulation testing
- Enforce low S values in serialized signatures as detailed in BIP0062
- Return errors from all methods on the btcdb.Db interface
(https://github.com/conformal/btcdb/issues/5)
- Allow behavior flags to alter btcchain.ProcessBlock
(https://github.com/conformal/btcchain/issues/5)
- Provide a new SerializeSize API for blocks
(https://github.com/conformal/btcwire/issues/19)
- Several of the core packages now work with Google App Engine
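As referenced in the bloom package item above, creating and querying a BIP0037 filter is a short exercise. A usage sketch, assuming the btcsuite bloom package API of this era (bloom.NewFilter, Add, Matches); treat the parameters as illustrative:

```go
package main

import (
	"fmt"

	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btcutil/bloom"
)

func main() {
	// A filter sized for ~10 elements with a 0.01% false positive
	// rate, tweak 0, updating for all matched outpoints.
	filter := bloom.NewFilter(10, 0, 0.0001, wire.BloomUpdateAll)
	filter.Add([]byte("some script or pubkey data"))
	fmt.Println(filter.Matches([]byte("some script or pubkey data"))) // true
}
```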
- Misc changes:
- Correct an issue where the database could corrupt under certain
circumstances which would require a new chain download
- Slightly optimize deserialization
- Use the correct IP block for he.net
- Fix an issue where it was possible the block manager could hang on
shutdown
- Update sample config file so the comments are on a separate line
rather than the end of a line so they are not interpreted as settings
(https://github.com/conformal/btcd/issues/135)
- Correct an issue where getdata requests were not being properly
throttled which could lead to larger than necessary memory usage
- Always show help when given the help flag even when the config file
contains invalid entries
- General code cleanup and minor optimizations
Changes in 0.8.0-beta (Sun May 25 2014)
- Btcd is now Beta (https://github.com/conformal/btcd/issues/130)
- Add a new checkpoint at block height 300255
- Protocol and network related changes:
- Lower the minimum transaction relay fee to 1000 satoshi to match
recent reference client changes
(https://github.com/conformal/btcd/issues/100)
- Raise the maximum signature script size to support standard 15-of-15
multi-signature pay-to-script-hash transactions with compressed pubkeys
to remain compatible with the reference client
(https://github.com/conformal/btcd/issues/128)
- Reduce max bytes allowed for a standard nulldata transaction to 40 for
compatibility with the reference client
- Introduce a new btcnet package which houses all of the network params
for each network (mainnet, testnet, regtest) to ultimately enable
easier addition and tweaking of networks without needing to change
several packages
- Fix several script discrepancies found by reference client test data
- Add new DNS seed for peer discovery (seed.bitnodes.io)
- Reduce the max known inventory cache from 20000 items to 1000 items
- Fix an issue where unknown inventory types could lead to a hung peer
- Implement inventory rebroadcast handler for sendrawtransaction
(https://github.com/conformal/btcd/issues/99)
- Update user agent to fully support BIP0014
(https://github.com/conformal/btcwire/issues/10)
- Implement initial mining support:
- Add a new logging subsystem for mining related operations
- Implement infrastructure for creating block templates
- Provide options to control block template creation settings
- Support the getwork RPC
- Allow address identifiers to apply to more than one network since both
testnet and the regression test network unfortunately use the same
identifier
- RPC changes:
- Set the content type for HTTP POST RPC connections to application/json
(https://github.com/conformal/btcd/issues/121)
- Modified the RPC server startup so that it requires at least one valid
listen interface
- Correct an error path where it was possible certain errors would not
be returned
- Implement getwork command
(https://github.com/conformal/btcd/issues/125)
- Update sendrawtransaction command to reject orphans
- Update sendrawtransaction command to include the reason a transaction
was rejected
- Update getinfo command to populate connection count field
- Update getinfo command to include relay fee field
(https://github.com/conformal/btcd/issues/107)
- Allow transactions submitted with sendrawtransaction to bypass the
rate limiter
- Allow the getcurrentnet and getbestblock extensions to be accessed via
HTTP POST in addition to Websockets
(https://github.com/conformal/btcd/issues/127)
- Websocket changes:
- Rework notifications to ensure they are delivered in the order they
occur
- Rename notifynewtxs command to notifyreceived (funds received)
- Rename notifyallnewtxs command to notifynewtransactions
- Rename alltx notification to txaccepted
- Rename allverbosetx notification to txacceptedverbose
(https://github.com/conformal/btcd/issues/98)
- Add rescan progress notification
- Add recvtx notification
- Add redeemingtx notification
- Modify notifyspent command to accept an array of outpoints
(https://github.com/conformal/btcd/issues/123)
- Significantly optimize the rescan command to yield up to a 60x speed
increase
- btcctl utility changes:
- Add createencryptedwallet command
- Add getblockchaininfo command
- Add importwallet command
- Add addmultisigaddress command
- Add setgenerate command
- Accept --testnet and --wallet flags which automatically select
the appropriate port and TLS certificates needed to communicate
with btcd and btcwallet (https://github.com/conformal/btcd/issues/112)
- Allow path expansion from config file entries
(https://github.com/conformal/btcd/issues/113)
- Minor refactor to simplify handling of options
- addblock utility changes:
- Improve logging by making it consistent with the logging provided by
btcd (https://github.com/conformal/btcd/issues/90)
- Improve several package APIs for developers:
- Add new amount type for consistently handling monetary values
- Add new coin selector API
- Add new WIF (Wallet Import Format) API
- Add new crypto types for private keys and signatures
- Add new API to sign transactions including script merging and hash
types
- Expose function to extract all pushed data from a script
(https://github.com/conformal/btcscript/issues/8)
- Misc changes:
- Optimize address manager shuffling to do 67% less work on average
- Resolve a couple of benign data races found by the race detector
(https://github.com/conformal/btcd/issues/101)
- Add IP address to all peer related errors to clarify which peer is the
cause (https://github.com/conformal/btcd/issues/102)
- Fix a UPNP case issue that prevented the --upnp option from working
with some UPNP servers
- Update documentation in the sample config file regarding debug levels
- Adjust some logging levels to improve debug messages
- Improve the throughput of query messages to the block manager
- Several minor optimizations to reduce GC churn and enhance speed
- Other minor refactoring
- General code cleanup
Changes in 0.7.0 (Thu Feb 20 2014)
- Fix an issue when parsing scripts which contain a multi-signature script
which requires zero signatures such as testnet block
000000001881dccfeda317393c261f76d09e399e15e27d280e5368420f442632
(https://github.com/conformal/btcscript/issues/7)
- Add check to ensure all transactions accepted to mempool only contain
canonical data pushes (https://github.com/conformal/btcscript/issues/6)
- Fix an issue causing excessive memory consumption
- Significantly rework and improve the websocket notification system:
- Each client is now independent so slow clients no longer limit the
speed of other connected clients
- Potentially long-running operations such as rescans are now run in
their own handler and rate-limited to one operation at a time without
preventing simultaneous requests from the same client for the faster
requests or notifications
- A couple of scenarios which could cause shutdown to hang have been
resolved
- Update notifynewtx notifications to support all address types instead
of only pay-to-pubkey-hash
- Provide a --rpcmaxwebsockets option to allow limiting the number of
concurrent websocket clients
- Add a new websocket command notifyallnewtxs to request notifications
(https://github.com/conformal/btcd/issues/86) (thanks @flammit)
- Improve btcctl utility in the following ways:
- Add getnetworkhashps command
- Add gettransaction command (wallet-specific)
- Add signmessage command (wallet-specific)
- Update getwork command to accept
- Continue cleanup and work on implementing the RPC API:
- Implement getnettotals command
(https://github.com/conformal/btcd/issues/84)
- Implement networkhashps command
(https://github.com/conformal/btcd/issues/87)
- Update getpeerinfo to always include syncnode field even when false
- Remove help addenda for getpeerinfo now that it supports all fields
- Close standard RPC connections on auth failure
- Provide a --rpcmaxclients option to allow limiting the number of
concurrent RPC clients (https://github.com/conformal/btcd/issues/68);
a sketch of the limiting pattern follows this group
- Include IP address in RPC auth failure log messages
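The --rpcmaxclients limiting referenced above follows a standard Go pattern: a buffered channel used as a counting semaphore. A hedged sketch of that general pattern, not the btcd implementation:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

// limitClients caps concurrent requests: a buffered channel acts as a
// counting semaphore, rejecting clients once max slots are in use.
func limitClients(max int, h http.Handler) http.Handler {
	sem := make(chan struct{}, max)
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		select {
		case sem <- struct{}{}:
			defer func() { <-sem }()
			h.ServeHTTP(w, r)
		default:
			http.Error(w, "too many clients", http.StatusServiceUnavailable)
		}
	})
}

func main() {
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	})
	log.Fatal(http.ListenAndServe(":8080", limitClients(10, handler)))
}
```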
- Resolve a rather harmless data race found by the race detector
(https://github.com/conformal/btcd/issues/94)
- Increase block priority size and max standard transaction size to 50k
and 100k, respectively (https://github.com/conformal/btcd/issues/71)
- Add rate limiting of free transactions to the memory pool to prevent
penny flooding (https://github.com/conformal/btcd/issues/40)
- Provide a --logdir option (https://github.com/conformal/btcd/issues/95)
- Change the default log file path to include the network
- Add a new ScriptBuilder interface to btcscript to support creation of
custom scripts (https://github.com/conformal/btcscript/issues/5)
- General code cleanup
Changes in 0.6.0 (Tue Feb 04 2014)
- Fix an issue when parsing scripts which contain invalid signatures that
caused a chain fork on block
0000000000000001e4241fd0b3469a713f41c5682605451c05d3033288fb2244
- Correct an issue which could lead to an error in removeBlockNode
(https://github.com/conformal/btcchain/issues/4)
- Improve addblock utility as follows:
- Check imported blocks against all chain rules and checkpoints
- Skip blocks which are already known so you can stop and restart the
import or start the import after you have already downloaded a portion
of the chain
- Correct an issue where the utility did not shutdown cleanly after
processing all blocks
- Add error on attempt to import orphan blocks
- Improve error handling and reporting
- Display statistics after input file has been fully processed
- Rework, optimize, and improve headers-first mode:
- Resuming the chain sync from any point before the final checkpoint
will now use headers-first mode
(https://github.com/conformal/btcd/issues/69)
- Verify all checkpoints as opposed to only the final one
- Reduce and bound memory usage
- Rollback to the last known good point when a header does not match a
checkpoint
- Log information about what is happening with headers
- Improve btcctl utility in the following ways:
- Add getaddednodeinfo command
- Add getnettotals command
- Add getblocktemplate command (wallet-specific)
- Add getwork command (wallet-specific)
- Add getnewaddress command (wallet-specific)
- Add walletpassphrasechange command (wallet-specific)
- Add walletlock command (wallet-specific)
- Add sendfrom command (wallet-specific)
- Add sendmany command (wallet-specific)
- Add settxfee command (wallet-specific)
- Add listsinceblock command (wallet-specific)
- Add listaccounts command (wallet-specific)
- Add keypoolrefill command (wallet-specific)
- Add getreceivedbyaccount command (wallet-specific)
- Add getrawchangeaddress command (wallet-specific)
- Add gettxoutsetinfo command (wallet-specific)
- Add listaddressgroupings command (wallet-specific)
- Add listlockunspent command (wallet-specific)
- Add listlock command (wallet-specific)
- Add listreceivedbyaccount command (wallet-specific)
- Add validateaddress command (wallet-specific)
- Add verifymessage command (wallet-specific)
- Add sendtoaddress command (wallet-specific)
- Continue cleanup and work on implementing the RPC API:
- Implement submitblock command
(https://github.com/conformal/btcd/issues/61)
- Implement help command
- Implement ping command
- Implement getaddednodeinfo command
(https://github.com/conformal/btcd/issues/78)
- Implement getinfo command
- Update getpeerinfo to support bytesrecv and bytessent
(https://github.com/conformal/btcd/issues/83)
- Improve and correct several RPC server and websocket areas:
- Change the connection endpoint for websockets from /wallet to /ws
(https://github.com/conformal/btcd/issues/80)
- Implement an alternative authentication for websockets so clients
such as javascript from browsers that don't support setting HTTP
headers can authenticate (https://github.com/conformal/btcd/issues/77)
- Add an authentication deadline for RPC connections
(https://github.com/conformal/btcd/issues/68)
- Use standard authentication failure responses for RPC connections
- Make automatically generated certificate more standard so it works
from client such as node.js and Firefox
- Correct some minor issues which could prevent the RPC server from
shutting down in an orderly fashion
- Make all websocket notifications require registration
- Change the data sent over websockets to text since it is JSON-RPC
- Allow connections that do not have an Origin header set
- Expose and track the number of bytes read and written per peer
(https://github.com/conformal/btcwire/issues/6)
- Correct an issue with sendrawtransaction when invoked via websockets
which prevented a minedtx notification from being added
- Rescan operations issued from remote wallets are now stopped when
the wallet disconnects mid-operation
(https://github.com/conformal/btcd/issues/66)
- Several optimizations related to fetching block information from the
database
- General code cleanup
Changes in 0.5.0 (Mon Jan 13 2014)
- Optimize initial block download by introducing a new mode which
downloads the block headers first (up to the final checkpoint)
- Improve peer handling to remove the potential for slow peers to cause
sluggishness amongst all peers
(https://github.com/conformal/btcd/issues/63)
- Fix an issue where the initial block sync could stall when the sync peer
disconnects (https://github.com/conformal/btcd/issues/62)
- Correct an issue where --externalip was doing a DNS lookup on the full
host:port instead of just the host portion
(https://github.com/conformal/btcd/issues/38)
- Fix an issue which could lead to a panic on chain switches
(https://github.com/conformal/btcd/issues/70)
- Improve btcctl utility in the following ways:
- Show getdifficulty output as floating point to 6 digits of precision
- Show all JSON object replies formatted as standard JSON
- Allow btcctl getblock to accept optional params
- Add getaccount command (wallet-specific)
- Add getaccountaddress command (wallet-specific)
- Add sendrawtransaction command
- Continue cleanup and work on implementing RPC API calls
- Update getrawmempool to support new optional verbose flag
- Update getrawtransaction to match the reference client
- Update getblock to support new optional verbose flag
- Update raw transactions to fully match the reference client including
support for all transaction types and address types
- Correct getrawmempool fee field to return BTC instead of Satoshi
- Correct getpeerinfo service flag to return 8 digit string so it
matches the reference client
- Correct verifychain to return a boolean
- Implement decoderawtransaction command
- Implement createrawtransaction command
- Implement decodescript command
- Implement gethashespersec command
- Allow RPC handler overrides when invoked via a websocket versus
legacy connection
- Add new DNS seed for peer discovery
- Display user agent on new valid peer log message
(https://github.com/conformal/btcd/issues/64)
- Notify wallet when new transactions that pay to registered addresses
show up in the mempool before being mined into a block
- Support a tor-specific proxy in addition to a normal proxy
(https://github.com/conformal/btcd/issues/47)
- Remove deprecated sqlite3 imports from utilities
- Remove leftover profile write from addblock utility
- Quite a bit of code cleanup and refactoring to improve maintainability
Changes in 0.4.0 (Thu Dec 12 2013)
- Allow listen interfaces to be specified via --listen instead of only the
port (https://github.com/conformal/btcd/issues/33)
- Allow listen interfaces for the RPC server to be specified via
--rpclisten instead of only the port
(https://github.com/conformal/btcd/issues/34)
- Only disable listening when --connect or --proxy are used and no
--listen interfaces are specified
(https://github.com/conformal/btcd/issues/10)
- Add several new standard transaction checks to transaction memory pool:
- Support nulldata scripts as standard
- Only allow a max of one nulldata output per transaction
- Enforce a maximum of 3 public keys in multi-signature transactions
- The number of signatures in multi-signature transactions must not
exceed the number of public keys
- The number of inputs to a signature script must match the expected
number of inputs for the script type
- The number of inputs pushed onto the stack by a redeeming signature
script must match the number of inputs consumed by the referenced
public key script
- When a block is connected, remove any transactions from the memory pool
which are now double spends as a result of the newly connected
transactions
- Don't relay transactions resurrected during a chain switch since
other peers will also be switching chains and therefore already know
about them
- Cleanup a few cases where rejected transactions showed as an error
rather than as a rejected transaction
- Ignore the default configuration file when --regtest (regression test
mode) is specified
- Implement TLS support for RPC including automatic certificate generation
- Support HTTP authentication headers for web sockets
- Update address manager to recognize and properly work with Tor
addresses (https://github.com/conformal/btcd/issues/36) and
(https://github.com/conformal/btcd/issues/37)
- Improve btcctl utility in the following ways:
- Add the ability to specify a configuration file
- Add a default entry for the RPC cert to point to the location
it will likely be in the btcd home directory
- Implement --version flag
- Provide a --notls option to support non-TLS configurations
- Fix a couple of minor races found by the Go race detector
- Improve logging
- Allow logging level to be specified on a per subsystem basis
(https://github.com/conformal/btcd/issues/48)
- Allow logging levels to be dynamically changed via RPC
(https://github.com/conformal/btcd/issues/15)
- Implement a rolling log file with a max of 10MB per file and a
rotation size of 3 which results in a max logging size of 30 MB
- Correct a minor issue with the rescanning websocket call
(https://github.com/conformal/btcd/issues/54)
- Fix a race with pushing address messages that could lead to a panic
(https://github.com/conformal/btcd/issues/58)
- Improve which external IP address is reported to peers based on which
interface they are connected through
(https://github.com/conformal/btcd/issues/35)
- Add --externalip option to allow an external IP address to be specified
for cases such as tor hidden services or advanced network configurations
(https://github.com/conformal/btcd/issues/38)
- Add --upnp option to support automatic port mapping via UPnP
(https://github.com/conformal/btcd/issues/51)
- Update Ctrl+C interrupt handler to properly sync address manager and
remove the UPnP port mapping (if needed)
- Continue cleanup and work on implementing RPC API calls
- Add importprivkey (import private key) command to btcctl
- Update getrawtransaction to provide addresses properly, support
new verbose param, and match the reference implementation with the
exception of MULTISIG (thanks @flammit)
- Update getblock with new verbose flag (thanks @flammit)
- Add listtransactions command to btcctl
- Add getbalance command to btcctl
- Add basic support for btcd to run as a native Windows service
(https://github.com/conformal/btcd/issues/42)
- Package addblock utility with Windows MSIs
- Add support for TravisCI (continuous build integration)
- Cleanup some documentation and usage
- Several other minor bug fixes and general code cleanup
Changes in 0.3.3 (Wed Nov 13 2013)
- Significantly improve initial block chain download speed
(https://github.com/conformal/btcd/issues/20)
- Add a new checkpoint at block height 267300
- Optimize most recently used inventory handling
(https://github.com/conformal/btcd/issues/21)
- Optimize duplicate transaction input check
(https://github.com/conformal/btcchain/issues/2)
- Optimize transaction hashing
(https://github.com/conformal/btcd/issues/25)
- Rework and optimize wallet listener notifications
(https://github.com/conformal/btcd/issues/22)
- Optimize serialization and deserialization
(https://github.com/conformal/btcd/issues/27)
- Add support for minimum transaction fee to memory pool acceptance
(https://github.com/conformal/btcd/issues/29)
- Improve leveldb database performance by removing explicit GC call
- Fix an issue where Ctrl+C was not always finishing orderly database
shutdown
- Fix an issue in the script handling for OP_CHECKSIG
- Impose max limits on all variable length protocol entries to prevent
abuse from malicious peers
- Enforce DER signatures for transactions allowed into the memory pool
- Separate the debug profile http server from the RPC server
- Rework of the RPC code to improve performance and make the code cleaner
- The getrawtransaction RPC call now properly checks the memory pool
before consulting the db (https://github.com/conformal/btcd/issues/26)
- Add support for the following RPC calls: getpeerinfo, getconnectedcount,
addnode, verifychain
(https://github.com/conformal/btcd/issues/13)
(https://github.com/conformal/btcd/issues/17)
- Implement rescan websocket extension to allow wallet rescans
- Use correct paths for application data storage for all supported
operating systems (https://github.com/conformal/btcd/issues/30)
- Add a default redirect to the http profiling page when accessing the
http profile server
- Add a new --cpuprofile option which can be used to generate CPU
profiling data on platforms that support it
- Several other minor performance optimizations
- Other minor bug fixes and general code cleanup
Changes in 0.3.2 (Tue Oct 22 2013)
- Fix an issue that could cause the download of the block chain to stall
(https://github.com/conformal/btcd/issues/12)
- Remove deprecated sqlite as an available database backend
- Close sqlite compile issue as sqlite has now been removed
(https://github.com/conformal/btcd/issues/11)
- Change default RPC ports to 8334 (mainnet) and 18334 (testnet)
- Continue cleanup and work on implementing RPC API calls
- Add support for the following RPC calls: getrawmempool,
getbestblockhash, decoderawtransaction, getdifficulty,
getconnectioncount, getpeerinfo, and addnode
- Improve the btcctl utility that is used to issue JSON-RPC commands
- Fix an issue preventing btcd from cleanly shutting down with the RPC
stop command
- Add a number of database interface tests to ensure backends implement
the expected interface
- Expose some additional information from btcscript to be used for
identifying "standard" transactions
- Add support for plan9 - thanks @mischief
(https://github.com/conformal/btcd/pull/19)
- Other minor bug fixes and general code cleanup
Changes in 0.3.1-alpha (Tue Oct 15 2013)
- Change default database to leveldb
NOTE: This does mean you will have to redownload the block chain. Since we
are still in alpha, we didn't feel writing a converter was worth the time as
it would take away from more important issues at this stage
- Add a warning if there are multiple block chain databases of different types
- Fix issue with unexpected EOF in leveldb -- https://github.com/conformal/btcd/issues/18
- Fix issue preventing block 21066 on testnet -- https://github.com/conformal/btcchain/issues/1
- Fix issue preventing block 96464 on testnet -- https://github.com/conformal/btcscript/issues/1
- Optimize transaction lookups
- Correct a few cases of list removal that could result in improper cleanup
of no longer needed orphans
- Add functionality to increase ulimits on non-Windows platforms
- Add support for mempool command which allows remote peers to query the
transaction memory pool via the bitcoin protocol
- Clean up logging a bit
- Add a flag to disable checkpoints for developers
- Add a lot of useful debug logging such as message summaries
- Other minor bug fixes and general code cleanup
Initial Release 0.3.0-alpha (Sat Oct 05 2013):
- Initial release

10
Jenkinsfile vendored

@@ -1,10 +0,0 @@
node {
stage 'Checkout'
checkout scm
stage 'Version'
sh './deploy.sh version'
stage 'Build'
sh "./deploy.sh build"
}

LICENSE

@@ -1,8 +1,9 @@
ISC License
Copyright (c) 2018-2019 DAGLabs
Copyright (c) 2018-2019 The kaspanet developers
Copyright (c) 2013-2018 The btcsuite developers
Copyright (c) 2015-2016 The Decred developers
Copyright (c) 2013-2014 Conformal Systems LLC.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above

125
README.md

@@ -1,49 +1,24 @@
btcd
Kaspad
====
Warning: This is pre-alpha software. There's no guarantee anything works.
====
[![Build Status](https://travis-ci.org/btcsuite/btcd.png?branch=master)](https://travis-ci.org/btcsuite/btcd)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/daglabs/btcd)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad)
btcd is an alternative full node bitcoin implementation written in Go (golang).
Kaspad is the reference full node Kaspa implementation written in Go (golang).
This project is currently under active development and is in a Beta state. It
is extremely stable and has been in production use since October 2013.
It properly downloads, validates, and serves the block chain using the exact
rules (including consensus bugs) for block acceptance as Bitcoin Core. We have
taken great care to avoid btcd causing a fork to the block chain. It includes a
full block validation testing framework which contains all of the 'official'
block acceptance tests (and some additional ones) that is run on every pull
request to help ensure it properly follows consensus. Also, it passes all of
the JSON test data in the Bitcoin Core code.
It also properly relays newly mined blocks, maintains a transaction pool, and
relays individual transactions that have not yet made it into a block. It
ensures all individual transactions admitted to the pool follow the rules
required by the block chain and also includes more strict checks which filter
transactions based on miner requirements ("standard" transactions).
One key difference between btcd and Bitcoin Core is that btcd does *NOT* include
wallet functionality and this was a very intentional design decision. See the
blog entry [here](https://blog.conformal.com/btcd-not-your-moms-bitcoin-daemon)
for more details. This means you can't actually make or receive payments
directly with btcd. That functionality is provided by the
[btcwallet](https://github.com/btcsuite/btcwallet) and
[Paymetheus](https://github.com/btcsuite/Paymetheus) (Windows-only) projects
which are both under active development.
This project is currently under active development and is in a pre-Alpha state.
Some things still don't work and APIs are far from finalized. The code is provided for reference only.
## Requirements
[Go](http://golang.org) 1.8 or newer.
Latest version of [Go](http://golang.org) (currently 1.13).
## Installation
#### Windows - MSI Available
https://github.com/daglabs/btcd/releases
#### Linux/BSD/MacOSX/POSIX - Build from Source
#### Build from Source
- Install Go according to the installation instructions here:
http://golang.org/doc/install
@@ -55,92 +30,50 @@ $ go version
$ go env GOROOT GOPATH
```
NOTE: The `GOROOT` and `GOPATH` above must not be the same path. It is
NOTE: The `GOROOT` and `GOPATH` above must not be the same path. It is
recommended that `GOPATH` is set to a directory in your home directory such as
`~/goprojects` to avoid write permission issues. It is also recommended to add
`~/dev/go` to avoid write permission issues. It is also recommended to add
`$GOPATH/bin` to your `PATH` at this point.
- Run the following commands to obtain btcd, all dependencies, and install it:
- Run the following commands to obtain and install kaspad including all dependencies:
```bash
$ # Install dep: https://golang.github.io/dep/docs/installation.html
$ git clone https://github.com/daglabs/btcd $GOPATH/src/github.com/daglabs/btcd
$ cd $GOPATH/src/github.com/daglabs/btcd
$ dep ensure
$ git clone https://github.com/kaspanet/kaspad $GOPATH/src/github.com/kaspanet/kaspad
$ cd $GOPATH/src/github.com/kaspanet/kaspad
$ ./test.sh
$ go install . ./cmd/...
```
`./test.sh` tests can be skipped, but some things might not run correctly on your system if tests fail.
- btcd (and utilities) will now be installed in ```$GOPATH/bin```. If you did
- Kaspad (and utilities) should now be installed in `$GOPATH/bin`. If you did
not already add the bin directory to your system path during Go installation,
we recommend you do so now.
you are encouraged to do so now.
## Updating
#### Windows
Install a newer MSI
#### Linux/BSD/MacOSX/POSIX - Build from Source
- Run the following commands to update btcd, all dependencies, and install it:
```bash
$ cd $GOPATH/src/github.com/daglabs/btcd
$ git pull && dep ensure
$ go install . ./cmd/...
```
## Getting Started
btcd has several configuration options available to tweak how it runs, but all
of the basic operations described in the intro section work with zero
configuration.
#### Windows (Installed from MSI)
Launch btcd from your Start menu.
Kaspad has several configuration options available to tweak how it runs, but all
of the basic operations work with zero configuration.
#### Linux/BSD/POSIX/Source
```bash
$ ./btcd
$ ./kaspad
```
## IRC
- irc.freenode.net
- channel #btcd
- [webchat](https://webchat.freenode.net/?channels=btcd)
## Discord
Join our discord server using the following link: https://discord.gg/WmGhhzk
## Issue Tracker
The [integrated github issue tracker](https://github.com/daglabs/btcd/issues)
The [integrated github issue tracker](https://github.com/kaspanet/kaspad/issues)
is used for this project.
## Documentation
The documentation is a work-in-progress. It is located in the [docs](https://github.com/daglabs/btcd/tree/master/docs) folder.
## GPG Verification Key
All official release tags are signed by Conformal so users can ensure the code
has not been tampered with and is coming from the btcsuite developers. To
verify the signature perform the following:
- Download the Conformal public key:
https://raw.githubusercontent.com/btcsuite/btcd/master/release/GIT-GPG-KEY-conformal.txt
- Import the public key into your GPG keyring:
```bash
gpg --import GIT-GPG-KEY-conformal.txt
```
- Verify the release tag with the following command where `TAG_NAME` is a
placeholder for the specific tag:
```bash
git tag -v TAG_NAME
```
The documentation is a work-in-progress. It is located in the [docs](https://github.com/kaspanet/kaspad/tree/master/docs) folder.
## License
btcd is licensed under the [copyfree](http://copyfree.org) ISC License.
Kaspad is licensed under the copyfree [ISC License](https://choosealicense.com/licenses/isc/).

addrmgr/addrmanager.go

@@ -7,7 +7,6 @@ package addrmgr
import (
"container/list"
crand "crypto/rand" // for seeding
"encoding/base32"
"encoding/binary"
"encoding/json"
"github.com/pkg/errors"
@@ -17,22 +16,21 @@ import (
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/daglabs/btcd/util/subnetworkid"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
type newBucket [newBucketCount]map[string]*KnownAddress
type triedBucket [triedBucketCount]*list.List
// AddrManager provides a concurrency safe address manager for caching potential
// peers on the bitcoin network.
// peers on the Kaspa network.
type AddrManager struct {
mtx sync.Mutex
peersFile string
@@ -44,7 +42,6 @@ type AddrManager struct {
addrNewFullNodes newBucket
addrTried map[subnetworkid.SubnetworkID]*triedBucket
addrTriedFullNodes triedBucket
addrTrying map[*KnownAddress]bool
started int32
shutdown int32
wg sync.WaitGroup
@@ -309,9 +306,8 @@ func (a *AddrManager) updateAddrTried(bucket int, ka *KnownAddress) {
func (a *AddrManager) expireNew(bucket *newBucket, idx int, decrNewCounter func()) {
// First see if there are any entries that are so bad we can just throw
// them away. otherwise we throw away the oldest entry in the cache.
// Bitcoind here chooses four random and just throws the oldest of
// those away, but we keep track of oldest in the initial traversal and
// use that information instead.
// We keep track of oldest in the initial traversal and use that
// information instead.
var oldest *KnownAddress
for k, v := range bucket[idx] {
if v.isBad() {
@@ -357,8 +353,7 @@ func (a *AddrManager) expireNewFullNodes(bucket int) {
}
// pickTried selects an address from the tried bucket to be evicted.
// We just choose the eldest. Bitcoind selects 4 random entries and throws away
// the older of them.
// We just choose the eldest.
func (a *AddrManager) pickTried(subnetworkID *subnetworkid.SubnetworkID, bucket int) *list.Element {
var oldest *KnownAddress
var oldestElem *list.Element
@@ -380,7 +375,6 @@ func (a *AddrManager) pickTried(subnetworkID *subnetworkid.SubnetworkID, bucket
}
func (a *AddrManager) getNewBucket(netAddr, srcAddr *wire.NetAddress) int {
// bitcoind:
// doublesha256(key + sourcegroup + int64(doublesha256(key + group + sourcegroup))%bucket_per_source_group) % num_new_buckets
data1 := []byte{}
@@ -402,7 +396,6 @@ func (a *AddrManager) getNewBucket(netAddr, srcAddr *wire.NetAddress) int {
}
func (a *AddrManager) getTriedBucket(netAddr *wire.NetAddress) int {
// bitcoind hashes this as:
// doublesha256(key + group + truncate_to_64bits(doublesha256(key)) % buckets_per_group) % num_buckets
data1 := []byte{}
data1 = append(data1, a.key[:]...)
@@ -421,7 +414,7 @@ func (a *AddrManager) getTriedBucket(netAddr *wire.NetAddress) int {
return int(binary.LittleEndian.Uint64(hash2) % triedBucketCount)
}
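To make the bucket-selection comments in the two hunks above concrete: the quoted formulas boil down to an inner double-SHA256 that picks a slot within an address group and an outer double-SHA256 that picks the final bucket. A rough, self-contained sketch with made-up constants and simplified byte handling; the real addrmgr code differs in detail:

```go
package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// Illustrative constants; the real values live in addrmgr.
const (
	triedBucketsPerGroup = 8
	triedBucketCount     = 64
)

func doubleSha256(b []byte) []byte {
	first := sha256.Sum256(b)
	second := sha256.Sum256(first[:])
	return second[:]
}

// triedBucketFor sketches the derivation described above: an inner
// double-SHA256 selects a slot within the address's group, and an
// outer double-SHA256 over key+group+slot selects the final bucket.
func triedBucketFor(key, group []byte) int {
	inner := doubleSha256(append(append([]byte{}, key...), group...))
	slot := binary.LittleEndian.Uint64(inner) % triedBucketsPerGroup

	var slotBuf [8]byte
	binary.LittleEndian.PutUint64(slotBuf[:], slot)

	data := append(append(append([]byte{}, key...), group...), slotBuf[:]...)
	return int(binary.LittleEndian.Uint64(doubleSha256(data)) % triedBucketCount)
}

func main() {
	fmt.Println(triedBucketFor([]byte("random-key"), []byte("203.0.113.0/24")))
}
```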
// addressHandler is the main handler for the address manager. It must be run
// addressHandler is the main handler for the address manager. It must be run
// as a goroutine.
func (a *AddrManager) addressHandler() {
dumpAddressTicker := time.NewTicker(dumpAddressInterval)
@@ -537,7 +530,7 @@ func (a *AddrManager) savePeers() {
}
}
// loadPeers loads the known address from the saved file. If empty, missing, or
// malformed file, just don't load anything and start fresh
func (a *AddrManager) loadPeers() {
a.mtx.Lock()
@@ -741,8 +734,8 @@ func (a *AddrManager) Stop() error {
return nil
}
// AddAddresses adds new addresses to the address manager. It enforces a max
// number of addresses and silently ignores duplicate addresses. It is
// safe for concurrent access.
func (a *AddrManager) AddAddresses(addrs []*wire.NetAddress, srcAddr *wire.NetAddress, subnetworkID *subnetworkid.SubnetworkID) {
a.mtx.Lock()
@@ -753,8 +746,8 @@ func (a *AddrManager) AddAddresses(addrs []*wire.NetAddress, srcAddr *wire.NetAd
}
}
// AddAddress adds a new address to the address manager. It enforces a max
// number of addresses and silently ignores duplicate addresses. It is
// safe for concurrent access.
func (a *AddrManager) AddAddress(addr, srcAddr *wire.NetAddress, subnetworkID *subnetworkid.SubnetworkID) {
a.mtx.Lock()
@@ -827,7 +820,7 @@ func (a *AddrManager) NeedMoreAddresses() bool {
return allAddrs < needAddressThreshold
}
// AddressCache returns the current address cache. It must be treated as
// read-only (but since it is a copy now, this is not as dangerous).
func (a *AddrManager) AddressCache(includeAllSubnetworks bool, subnetworkID *subnetworkid.SubnetworkID) []*wire.NetAddress {
a.mtx.Lock()
@@ -871,7 +864,6 @@ func (a *AddrManager) AddressCache(includeAllSubnetworks bool, subnetworkID *sub
// reset resets the address manager by reinitialising the random source
// and allocating fresh empty bucket storage.
func (a *AddrManager) reset() {
a.addrIndex = make(map[string]*KnownAddress)
// fill key with bytes from a good random source.
@@ -890,28 +882,13 @@ func (a *AddrManager) reset() {
}
a.nNewFullNodes = 0
a.nTriedFullNodes = 0
a.addrTrying = make(map[*KnownAddress]bool)
}
// HostToNetAddress returns a netaddress given a host address. If the address
// is a Tor .onion address this will be taken care of. Else if the host is
// not an IP address it will be resolved (via Tor if required).
// HostToNetAddress returns a netaddress given a host address. If
// the host is not an IP address it will be resolved.
func (a *AddrManager) HostToNetAddress(host string, port uint16, services wire.ServiceFlag) (*wire.NetAddress, error) {
// Tor address is 16 char base32 + ".onion"
var ip net.IP
if len(host) == 22 && host[16:] == ".onion" {
// go base32 encoding uses capitals (as does the RFC), but Tor
// and bitcoind tend to use lowercase, so we switch case here.
data, err := base32.StdEncoding.DecodeString(
strings.ToUpper(host[:16]))
if err != nil {
return nil, err
}
prefix := []byte{0xfd, 0x87, 0xd8, 0x7e, 0xeb, 0x43}
ip = net.IP(append(prefix, data...))
} else if ip = net.ParseIP(host); ip == nil {
ip := net.ParseIP(host)
if ip == nil {
ips, err := a.lookupFunc(host)
if err != nil {
return nil, err
@@ -925,28 +902,15 @@ func (a *AddrManager) HostToNetAddress(host string, port uint16, services wire.S
return wire.NewNetAddressIPPort(ip, port, services), nil
}
// ipString returns a string for the ip from the provided NetAddress. If the
// ip is in the range used for Tor addresses then it will be transformed into
// the relevant .onion address.
func ipString(na *wire.NetAddress) string {
if IsOnionCatTor(na) {
// We know now that na.IP is long enough.
base32 := base32.StdEncoding.EncodeToString(na.IP[6:])
return strings.ToLower(base32) + ".onion"
}
return na.IP.String()
}
// NetAddressKey returns a string key in the form of ip:port for IPv4 addresses
// or [ip]:port for IPv6 addresses.
func NetAddressKey(na *wire.NetAddress) string {
port := strconv.FormatUint(uint64(na.Port), 10)
return net.JoinHostPort(ipString(na), port)
return net.JoinHostPort(na.IP.String(), port)
}
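Since NetAddressKey now delegates to `net.JoinHostPort`, the bracketing of IPv6 addresses comes for free. A small illustration, matching the naTest expectations in the test file further down:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// IPv4 keys come out as ip:port, IPv6 keys as [ip]:port.
	fmt.Println(net.JoinHostPort(net.ParseIP("127.0.0.1").String(), "16111")) // 127.0.0.1:16111
	fmt.Println(net.JoinHostPort(net.ParseIP("::1").String(), "16111"))       // [::1]:16111
}
```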
// GetAddress returns a single address that should be routable. It picks a
// random one from the possible addresses with preference given to ones that
// have not been used recently and should not pick 'close' addresses
// consecutively.
@@ -965,13 +929,6 @@ func (a *AddrManager) GetAddress() *KnownAddress {
a.addrNew[subnetworkID], a.nNew[subnetworkID])
}
if knownAddress != nil {
if a.addrTrying[knownAddress] {
return nil
}
a.addrTrying[knownAddress] = true
}
return knownAddress
}
@@ -1056,12 +1013,10 @@ func (a *AddrManager) Attempt(addr *wire.NetAddress) {
// set last tried time to now
ka.attempts++
ka.lastattempt = time.Now()
delete(a.addrTrying, ka)
}
// Connected Marks the given address as currently connected and working at the
// current time. The address must already be known to AddrManager else it will
// be ignored.
func (a *AddrManager) Connected(addr *wire.NetAddress) {
a.mtx.Lock()
@@ -1083,8 +1038,8 @@ func (a *AddrManager) Connected(addr *wire.NetAddress) {
}
}
// Good marks the given address as good. To be called after a successful
// connection and version exchange. If the address is unknown to the address
// manager it will be ignored.
func (a *AddrManager) Good(addr *wire.NetAddress, subnetworkID *subnetworkid.SubnetworkID) {
a.mtx.Lock()
@@ -1287,18 +1242,6 @@ func getReachabilityFrom(localAddr, remoteAddr *wire.NetAddress) int {
return Unreachable
}
if IsOnionCatTor(remoteAddr) {
if IsOnionCatTor(localAddr) {
return Private
}
if IsRoutable(localAddr) && IsIPv4(localAddr) {
return Ipv4
}
return Default
}
if IsRFC4380(remoteAddr) {
if !IsRoutable(localAddr) {
return Default
@@ -1376,7 +1319,7 @@ func (a *AddrManager) GetBestLocalAddress(remoteAddr *wire.NetAddress) *wire.Net
// Send something unroutable if nothing suitable.
var ip net.IP
if !IsIPv4(remoteAddr) && !IsOnionCatTor(remoteAddr) {
if !IsIPv4(remoteAddr) {
ip = net.IPv6zero
} else {
ip = net.IPv4zero
@@ -1388,7 +1331,7 @@ func (a *AddrManager) GetBestLocalAddress(remoteAddr *wire.NetAddress) *wire.Net
return bestAddress
}
// New returns a new bitcoin address manager.
// New returns a new Kaspa address manager.
// Use Start to begin processing asynchronous address updates.
func New(dataDir string, lookupFunc func(string) ([]net.IP, error), subnetworkID *subnetworkid.SubnetworkID) *AddrManager {
am := AddrManager{

View File

@@ -5,19 +5,18 @@
package addrmgr
import (
"bou.ke/monkey"
"fmt"
"github.com/daglabs/btcd/config"
"github.com/daglabs/btcd/dagconfig"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util/subnetworkid"
"net"
"reflect"
"testing"
"time"
"github.com/daglabs/btcd/util/subnetworkid"
"github.com/pkg/errors"
"github.com/daglabs/btcd/wire"
"github.com/kaspanet/kaspad/wire"
)
// naTest is used to describe a test to be performed against the NetAddressKey
@@ -38,59 +37,59 @@ var someIP = "173.194.115.66"
func addNaTests() {
// IPv4
// Localhost
addNaTest("127.0.0.1", 8333, "127.0.0.1:8333")
addNaTest("127.0.0.1", 8334, "127.0.0.1:8334")
addNaTest("127.0.0.1", 16111, "127.0.0.1:16111")
addNaTest("127.0.0.1", 16110, "127.0.0.1:16110")
// Class A
addNaTest("1.0.0.1", 8333, "1.0.0.1:8333")
addNaTest("2.2.2.2", 8334, "2.2.2.2:8334")
addNaTest("1.0.0.1", 16111, "1.0.0.1:16111")
addNaTest("2.2.2.2", 16110, "2.2.2.2:16110")
addNaTest("27.253.252.251", 8335, "27.253.252.251:8335")
addNaTest("123.3.2.1", 8336, "123.3.2.1:8336")
// Private Class A
addNaTest("10.0.0.1", 8333, "10.0.0.1:8333")
addNaTest("10.1.1.1", 8334, "10.1.1.1:8334")
addNaTest("10.0.0.1", 16111, "10.0.0.1:16111")
addNaTest("10.1.1.1", 16110, "10.1.1.1:16110")
addNaTest("10.2.2.2", 8335, "10.2.2.2:8335")
addNaTest("10.10.10.10", 8336, "10.10.10.10:8336")
// Class B
addNaTest("128.0.0.1", 8333, "128.0.0.1:8333")
addNaTest("129.1.1.1", 8334, "129.1.1.1:8334")
addNaTest("128.0.0.1", 16111, "128.0.0.1:16111")
addNaTest("129.1.1.1", 16110, "129.1.1.1:16110")
addNaTest("180.2.2.2", 8335, "180.2.2.2:8335")
addNaTest("191.10.10.10", 8336, "191.10.10.10:8336")
// Private Class B
addNaTest("172.16.0.1", 8333, "172.16.0.1:8333")
addNaTest("172.16.1.1", 8334, "172.16.1.1:8334")
addNaTest("172.16.0.1", 16111, "172.16.0.1:16111")
addNaTest("172.16.1.1", 16110, "172.16.1.1:16110")
addNaTest("172.16.2.2", 8335, "172.16.2.2:8335")
addNaTest("172.16.172.172", 8336, "172.16.172.172:8336")
// Class C
addNaTest("193.0.0.1", 8333, "193.0.0.1:8333")
addNaTest("200.1.1.1", 8334, "200.1.1.1:8334")
addNaTest("193.0.0.1", 16111, "193.0.0.1:16111")
addNaTest("200.1.1.1", 16110, "200.1.1.1:16110")
addNaTest("205.2.2.2", 8335, "205.2.2.2:8335")
addNaTest("223.10.10.10", 8336, "223.10.10.10:8336")
// Private Class C
addNaTest("192.168.0.1", 8333, "192.168.0.1:8333")
addNaTest("192.168.1.1", 8334, "192.168.1.1:8334")
addNaTest("192.168.0.1", 16111, "192.168.0.1:16111")
addNaTest("192.168.1.1", 16110, "192.168.1.1:16110")
addNaTest("192.168.2.2", 8335, "192.168.2.2:8335")
addNaTest("192.168.192.192", 8336, "192.168.192.192:8336")
// IPv6
// Localhost
addNaTest("::1", 8333, "[::1]:8333")
addNaTest("fe80::1", 8334, "[fe80::1]:8334")
addNaTest("::1", 16111, "[::1]:16111")
addNaTest("fe80::1", 16110, "[fe80::1]:16110")
// Link-local
addNaTest("fe80::1:1", 8333, "[fe80::1:1]:8333")
addNaTest("fe91::2:2", 8334, "[fe91::2:2]:8334")
addNaTest("fe80::1:1", 16111, "[fe80::1:1]:16111")
addNaTest("fe91::2:2", 16110, "[fe91::2:2]:16110")
addNaTest("fea2::3:3", 8335, "[fea2::3:3]:8335")
addNaTest("feb3::4:4", 8336, "[feb3::4:4]:8336")
// Site-local
addNaTest("fec0::1:1", 8333, "[fec0::1:1]:8333")
addNaTest("fed1::2:2", 8334, "[fed1::2:2]:8334")
addNaTest("fec0::1:1", 16111, "[fec0::1:1]:16111")
addNaTest("fed1::2:2", 16110, "[fed1::2:2]:16110")
addNaTest("fee2::3:3", 8335, "[fee2::3:3]:8335")
addNaTest("fef3::4:4", 8336, "[fef3::4:4]:8336")
}
@@ -116,15 +115,14 @@ func TestStartStop(t *testing.T) {
}
func TestAddAddressByIP(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer activeConfigPatch.Unpatch()
defer config.SetActiveConfig(originalActiveCfg)
fmtErr := errors.Errorf("")
addrErr := &net.AddrError{}
@@ -133,7 +131,7 @@ func TestAddAddressByIP(t *testing.T) {
err error
}{
{
someIP + ":8333",
someIP + ":16111",
nil,
},
{
@@ -154,15 +152,15 @@ func TestAddAddressByIP(t *testing.T) {
for i, test := range tests {
err := amgr.AddAddressByIP(test.addrIP, nil)
if test.err != nil && err == nil {
t.Errorf("TestGood test %d failed expected an error and got none", i)
t.Errorf("TestAddAddressByIP test %d failed expected an error and got none", i)
continue
}
if test.err == nil && err != nil {
t.Errorf("TestGood test %d failed expected no error and got one", i)
t.Errorf("TestAddAddressByIP test %d failed expected no error and got one", i)
continue
}
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
t.Errorf("TestGood test %d failed got %v, want %v", i,
t.Errorf("TestAddAddressByIP test %d failed got %v, want %v", i,
reflect.TypeOf(err), reflect.TypeOf(test.err))
continue
}
@@ -170,15 +168,14 @@ func TestAddAddressByIP(t *testing.T) {
}
func TestAddLocalAddress(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer activeConfigPatch.Unpatch()
defer config.SetActiveConfig(originalActiveCfg)
var tests = []struct {
address wire.NetAddress
@@ -233,15 +230,14 @@ func TestAddLocalAddress(t *testing.T) {
}
func TestAttempt(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer activeConfigPatch.Unpatch()
defer config.SetActiveConfig(originalActiveCfg)
n := New("testattempt", lookupFunc, nil)
@@ -265,15 +261,14 @@ func TestAttempt(t *testing.T) {
}
func TestConnected(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer activeConfigPatch.Unpatch()
defer config.SetActiveConfig(originalActiveCfg)
n := New("testconnected", lookupFunc, nil)
@@ -295,15 +290,14 @@ func TestConnected(t *testing.T) {
}
func TestNeedMoreAddresses(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer activeConfigPatch.Unpatch()
defer config.SetActiveConfig(originalActiveCfg)
n := New("testneedmoreaddresses", lookupFunc, nil)
addrsToAdd := 1500
@@ -337,15 +331,14 @@ func TestNeedMoreAddresses(t *testing.T) {
}
func TestGood(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer activeConfigPatch.Unpatch()
defer config.SetActiveConfig(originalActiveCfg)
n := New("testgood", lookupFunc, nil)
addrsToAdd := 64 * 64
@@ -394,15 +387,14 @@ func TestGood(t *testing.T) {
}
func TestGoodChangeSubnetworkID(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer activeConfigPatch.Unpatch()
defer config.SetActiveConfig(originalActiveCfg)
n := New("test_good_change_subnetwork_id", lookupFunc, nil)
addr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)
@@ -473,15 +465,14 @@ func TestGoodChangeSubnetworkID(t *testing.T) {
}
func TestGetAddress(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer activeConfigPatch.Unpatch()
defer config.SetActiveConfig(originalActiveCfg)
localSubnetworkID := &subnetworkid.SubnetworkID{0xff}
n := New("testgetaddress", lookupFunc, localSubnetworkID)
@@ -555,15 +546,14 @@ func TestGetAddress(t *testing.T) {
}
func TestGetBestLocalAddress(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer activeConfigPatch.Unpatch()
defer config.SetActiveConfig(originalActiveCfg)
localAddrs := []wire.NetAddress{
{IP: net.ParseIP("192.168.0.100")},
@@ -657,7 +647,6 @@ func TestGetBestLocalAddress(t *testing.T) {
// Add a Tor generated IP address
localAddr = wire.NetAddress{IP: net.ParseIP("fd87:d87e:eb43:25::1")}
amgr.AddLocalAddress(&localAddr, ManualPrio)
// Test against want3
for x, test := range tests {
got := amgr.GetBestLocalAddress(&test.remoteAddr)
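Every test above repeats the same save/set/restore dance around config.SetActiveConfig. A sketch of a helper that could factor out that repetition, assuming only the config.ActiveConfig/config.SetActiveConfig API shown in this diff and Go 1.14+ for t.Cleanup:

```go
package addrmgr

import (
	"testing"

	"github.com/kaspanet/kaspad/config"
)

// overrideActiveConfig applies the save/set/restore pattern used in each test
// and restores the original config automatically when the test finishes.
func overrideActiveConfig(t *testing.T, cfg *config.Config) {
	originalActiveCfg := config.ActiveConfig()
	config.SetActiveConfig(cfg)
	t.Cleanup(func() {
		config.SetActiveConfig(originalActiveCfg)
	})
}
```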

View File

@@ -1,17 +0,0 @@
#!/bin/sh
# This script uses gocov to generate a test coverage report.
# The gocov tool may be obtained with the following command:
# go get github.com/axw/gocov/gocov
#
# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH.
# Check for gocov.
type gocov >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo >&2 "This script requires the gocov tool."
echo >&2 "You may obtain it with the following command:"
echo >&2 "go get github.com/axw/gocov/gocov"
exit 1
fi
gocov test | gocov report

View File

@@ -1,38 +1,34 @@
// Copyright (c) 2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
/*
Package addrmgr implements concurrency safe Bitcoin address manager.
Package addrmgr implements a concurrency safe Kaspa address manager.
Address Manager Overview
In order maintain the peer-to-peer Bitcoin network, there needs to be a source
of addresses to connect to as nodes come and go. The Bitcoin protocol provides
In order to maintain the peer-to-peer Kaspa network, there needs to be a source
of addresses to connect to as nodes come and go. The Kaspa protocol provides
the getaddr and addr messages to allow peers to communicate known addresses with
each other. However, there needs to be a mechanism to store those results and
select peers from them. It is also important to note that remote peers can't
be trusted to send valid peers nor attempt to provide you with only peers they
control with malicious intent.
With that in mind, this package provides a concurrency safe address manager for
caching and selecting peers in a non-deterministic manner. The general idea is
the caller adds addresses to the address manager and notifies it when addresses
are connected, known good, and attempted. The caller also requests addresses as
it needs them.
The address manager internally segregates the addresses into groups and
non-deterministically selects groups in a cryptographically random manner. This
reduces the chances multiple addresses from the same nets are selected which
generally helps provide greater peer diversity, and perhaps more importantly,
drastically reduces the chances an attacker is able to coerce your peer into
only connecting to nodes they control.
The address manager also understands routability and Tor addresses and tries
hard to only return routable addresses. In addition, it uses the information
provided by the caller about connected, known good, and attempted addresses to
periodically purge peers which no longer appear to be good peers as well as
bias the selection toward known good peers. The general idea is to make a best
effort at only providing usable addresses.
The address manager also understands routability and tries hard to only return
routable addresses. In addition, it uses the information provided by the caller
about connected, known good, and attempted addresses to periodically purge
peers which no longer appear to be good peers as well as bias the selection
toward known good peers. The general idea is to make a best effort at only
providing usable addresses.
*/
package addrmgr
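The workflow this package comment describes (add addresses, report attempts and successes, request candidates) looks roughly like the sketch below. This is assembled only from signatures visible in this diff; a nil subnetwork ID is assumed to mean a full node, and KnownAddress.NetAddress is assumed from the coverage listing elsewhere in this diff:

```go
package main

import (
	"net"

	"github.com/kaspanet/kaspad/addrmgr"
	"github.com/kaspanet/kaspad/wire"
)

func main() {
	// New's signature is taken from the diff above.
	amgr := addrmgr.New("testdata", net.LookupIP, nil)
	amgr.Start()
	defer amgr.Stop()

	src := wire.NewNetAddressIPPort(net.ParseIP("173.194.115.66"), 16111, wire.SFNodeNetwork)
	addr := wire.NewNetAddressIPPort(net.ParseIP("1.0.0.1"), 16111, wire.SFNodeNetwork)
	amgr.AddAddress(addr, src, nil)

	// Report attempts and successes back, and ask for a candidate when needed.
	if ka := amgr.GetAddress(); ka != nil {
		amgr.Attempt(ka.NetAddress())
		amgr.Connected(ka.NetAddress())
		amgr.Good(ka.NetAddress(), nil)
	}
}
```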

View File

@@ -7,7 +7,7 @@ package addrmgr
import (
"time"
"github.com/daglabs/btcd/wire"
"github.com/kaspanet/kaspad/wire"
)
func TstKnownAddressIsBad(ka *KnownAddress) bool {

View File

@@ -7,9 +7,9 @@ package addrmgr
import (
"time"
"github.com/daglabs/btcd/util/subnetworkid"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/daglabs/btcd/wire"
"github.com/kaspanet/kaspad/wire"
)
// KnownAddress tracks information about a known network address that is used
@@ -41,7 +41,7 @@ func (ka *KnownAddress) LastAttempt() time.Time {
return ka.lastattempt
}
// chance returns the selection probability for a known address. The priority
// depends upon how recently the address has been seen, how recently it was last
// attempted and how often attempts to connect to it have failed.
func (ka *KnownAddress) chance() float64 {

View File

@@ -9,8 +9,8 @@ import (
"testing"
"time"
"github.com/daglabs/btcd/addrmgr"
"github.com/daglabs/btcd/wire"
"github.com/kaspanet/kaspad/addrmgr"
"github.com/kaspanet/kaspad/wire"
)
func TestChance(t *testing.T) {

View File

@@ -5,9 +5,9 @@
package addrmgr
import (
"github.com/daglabs/btcd/logger"
"github.com/daglabs/btcd/util/panics"
"github.com/kaspanet/kaspad/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log, _ = logger.Get(logger.SubsystemTags.ADXR)
var spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -5,12 +5,11 @@
package addrmgr
import (
"fmt"
"net"
"github.com/daglabs/btcd/config"
"github.com/kaspanet/kaspad/config"
"github.com/daglabs/btcd/wire"
"github.com/kaspanet/kaspad/wire"
)
var (
@@ -73,19 +72,6 @@ var (
// rfc6598Net specifies the IPv4 block as defined by RFC6598 (100.64.0.0/10)
rfc6598Net = ipNet("100.64.0.0", 10, 32)
// onionCatNet defines the IPv6 address block used to support Tor.
// bitcoind encodes a .onion address as a 16 byte number by decoding the
// address prior to the .onion (i.e. the key hash) base32 into a ten
// byte number. It then stores the first 6 bytes of the address as
// 0xfd, 0x87, 0xd8, 0x7e, 0xeb, 0x43.
//
// This is the same range used by OnionCat, which is part of the
// RFC4193 unique local IPv6 range.
//
// In summary the format is:
// { magic 6 bytes, 10 bytes base32 decode of key hash }
onionCatNet = ipNet("fd87:d87e:eb43::", 48, 128)
// zero4Net defines the IPv4 address block for addresses starting with 0
// (0.0.0.0/8).
zero4Net = ipNet("0.0.0.0", 8, 32)
@@ -111,14 +97,6 @@ func IsLocal(na *wire.NetAddress) bool {
return na.IP.IsLoopback() || zero4Net.Contains(na.IP)
}
// IsOnionCatTor returns whether or not the passed address is in the IPv6 range
// used by bitcoin to support Tor (fd87:d87e:eb43::/48). Note that this range
// is the same range used by OnionCat, which is part of the RFC4193 unique local
// IPv6 range.
func IsOnionCatTor(na *wire.NetAddress) bool {
return onionCatNet.Contains(na.IP)
}
// IsRFC1918 returns whether or not the passed address is part of the IPv4
// private network address space as defined by RFC1918 (10.0.0.0/8,
// 172.16.0.0/12, or 192.168.0.0/16).
@@ -210,7 +188,7 @@ func IsRFC6598(na *wire.NetAddress) bool {
return rfc6598Net.Contains(na.IP)
}
// IsValid returns whether or not the passed address is valid. The address is
// considered invalid under the following circumstances:
// IPv4: It is either a zero or all bits set address.
// IPv6: It is either a zero or RFC3849 documentation address.
@@ -222,7 +200,7 @@ func IsValid(na *wire.NetAddress) bool {
}
// IsRoutable returns whether or not the passed address is routable over
// the public internet. This is true as long as the address is valid and is not
// in any reserved ranges.
func IsRoutable(na *wire.NetAddress) bool {
if config.ActiveConfig().NetParams().AcceptUnroutable {
@@ -232,13 +210,12 @@ func IsRoutable(na *wire.NetAddress) bool {
return IsValid(na) && !(IsRFC1918(na) || IsRFC2544(na) ||
IsRFC3927(na) || IsRFC4862(na) || IsRFC3849(na) ||
IsRFC4843(na) || IsRFC5737(na) || IsRFC6598(na) ||
IsLocal(na) || (IsRFC4193(na) && !IsOnionCatTor(na)))
IsLocal(na) || (IsRFC4193(na)))
}
// GroupKey returns a string representing the network group an address is part
// of. This is the /16 for IPv4, the /32 (/36 for he.net) for IPv6, the string
// "local" for a local address, the string "tor:key" where key is the /4 of the
// onion address for Tor address, and the string "unroutable" for an unroutable
// of. This is the /16 for IPv4, the /32 (/36 for he.net) for IPv6, the string
// "local" for a local address, and the string "unroutable" for an unroutable
// address.
func GroupKey(na *wire.NetAddress) string {
if IsLocal(na) {
@@ -270,14 +247,10 @@ func GroupKey(na *wire.NetAddress) string {
}
return ip.Mask(net.CIDRMask(16, 32)).String()
}
if IsOnionCatTor(na) {
// group is keyed off the first 4 bits of the actual onion key.
return fmt.Sprintf("tor:%d", na.IP[6]&((1<<4)-1))
}
// OK, so now we know ourselves to be a IPv6 address.
// bitcoind uses /32 for everything, except for Hurricane Electric's
// (he.net) IP range, which it uses /36 for.
// We use /32 for everything, except for Hurricane Electric's
// (he.net) IP range, which we use /36 for.
bits := 32
if heNet.Contains(na.IP) {
bits = 36
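The /16 and /32 masks described above can be checked directly with the standard library; the IPv6 case matches the "2602:100::1" → "2602:100::" expectation in the test file below:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// IPv4 groups are the /16 network.
	ip4 := net.ParseIP("173.194.115.66").To4()
	fmt.Println(ip4.Mask(net.CIDRMask(16, 32)).String()) // 173.194.0.0

	// Ordinary IPv6 groups are the /32 network.
	ip6 := net.ParseIP("2602:100::1")
	fmt.Println(ip6.Mask(net.CIDRMask(32, 128)).String()) // 2602:100::
}
```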

View File

@@ -5,28 +5,26 @@
package addrmgr_test
import (
"bou.ke/monkey"
"github.com/daglabs/btcd/config"
"github.com/daglabs/btcd/dagconfig"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/dagconfig"
"net"
"testing"
"github.com/daglabs/btcd/addrmgr"
"github.com/daglabs/btcd/wire"
"github.com/kaspanet/kaspad/addrmgr"
"github.com/kaspanet/kaspad/wire"
)
// TestIPTypes ensures the various functions which determine the type of an IP
// address based on RFCs work as intended.
func TestIPTypes(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer activeConfigPatch.Unpatch()
defer config.SetActiveConfig(originalActiveCfg)
type ipTest struct {
in wire.NetAddress
@@ -52,7 +50,7 @@ func TestIPTypes(t *testing.T) {
rfc4193, rfc4380, rfc4843, rfc4862, rfc5737, rfc6052, rfc6145, rfc6598,
local, valid, routable bool) ipTest {
nip := net.ParseIP(ip)
na := *wire.NewNetAddressIPPort(nip, 8333, wire.SFNodeNetwork)
na := *wire.NewNetAddressIPPort(nip, 16111, wire.SFNodeNetwork)
test := ipTest{na, rfc1918, rfc2544, rfc3849, rfc3927, rfc3964, rfc4193, rfc4380,
rfc4843, rfc4862, rfc5737, rfc6052, rfc6145, rfc6598, local, valid, routable}
return test
@@ -158,15 +156,14 @@ func TestIPTypes(t *testing.T) {
// TestGroupKey tests the GroupKey function to ensure it properly groups various
// IP addresses.
func TestGroupKey(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer activeConfigPatch.Unpatch()
defer config.SetActiveConfig(originalActiveCfg)
tests := []struct {
name string
@@ -202,9 +199,9 @@ func TestGroupKey(t *testing.T) {
{name: "ipv6 rfc6145 translated ipv4", ip: "::ffff:0:0c01:0203", expected: "12.1.0.0"},
// Tor.
{name: "ipv6 tor onioncat", ip: "fd87:d87e:eb43:1234::5678", expected: "tor:2"},
{name: "ipv6 tor onioncat 2", ip: "fd87:d87e:eb43:1245::6789", expected: "tor:2"},
{name: "ipv6 tor onioncat 3", ip: "fd87:d87e:eb43:1345::6789", expected: "tor:3"},
{name: "ipv6 tor onioncat", ip: "fd87:d87e:eb43:1234::5678", expected: "unroutable"},
{name: "ipv6 tor onioncat 2", ip: "fd87:d87e:eb43:1245::6789", expected: "unroutable"},
{name: "ipv6 tor onioncat 3", ip: "fd87:d87e:eb43:1345::6789", expected: "unroutable"},
// IPv6 normal.
{name: "ipv6 normal", ip: "2602:100::1", expected: "2602:100::"},

View File

@@ -1,62 +0,0 @@
github.com/conformal/btcd/addrmgr/network.go GroupKey 100.00% (23/23)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.reset 100.00% (6/6)
github.com/conformal/btcd/addrmgr/network.go IsRFC5737 100.00% (4/4)
github.com/conformal/btcd/addrmgr/network.go IsRFC1918 100.00% (4/4)
github.com/conformal/btcd/addrmgr/addrmanager.go New 100.00% (3/3)
github.com/conformal/btcd/addrmgr/addrmanager.go NetAddressKey 100.00% (2/2)
github.com/conformal/btcd/addrmgr/network.go IsRFC4862 100.00% (1/1)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.numAddresses 100.00% (1/1)
github.com/conformal/btcd/addrmgr/log.go init 100.00% (1/1)
github.com/conformal/btcd/addrmgr/log.go DisableLog 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go ipNet 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsIPv4 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsLocal 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsOnionCatTor 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC2544 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC3849 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC3927 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC3964 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC4193 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC4380 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC4843 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC6052 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC6145 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC6598 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsValid 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRoutable 100.00% (1/1)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.GetBestLocalAddress 94.74% (18/19)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddLocalAddress 90.91% (10/11)
github.com/conformal/btcd/addrmgr/addrmanager.go getReachabilityFrom 51.52% (17/33)
github.com/conformal/btcd/addrmgr/addrmanager.go ipString 50.00% (2/4)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.GetAddress 9.30% (4/43)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.deserializePeers 0.00% (0/50)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Good 0.00% (0/44)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.savePeers 0.00% (0/39)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.updateAddress 0.00% (0/30)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.expireNew 0.00% (0/22)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddressCache 0.00% (0/16)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.HostToNetAddress 0.00% (0/15)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.getNewBucket 0.00% (0/15)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddAddressByIP 0.00% (0/14)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.getTriedBucket 0.00% (0/14)
github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.chance 0.00% (0/13)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.loadPeers 0.00% (0/11)
github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.isBad 0.00% (0/11)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Connected 0.00% (0/10)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.addressHandler 0.00% (0/9)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.pickTried 0.00% (0/8)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.DeserializeNetAddress 0.00% (0/7)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Stop 0.00% (0/7)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Attempt 0.00% (0/7)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Start 0.00% (0/6)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddAddresses 0.00% (0/4)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.NeedMoreAddresses 0.00% (0/3)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.NumAddresses 0.00% (0/3)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddAddress 0.00% (0/3)
github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.LastAttempt 0.00% (0/1)
github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.NetAddress 0.00% (0/1)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.find 0.00% (0/1)
github.com/conformal/btcd/addrmgr/log.go UseLogger 0.00% (0/1)
github.com/conformal/btcd/addrmgr --------------------------------- 21.04% (113/537)

View File

@@ -1,6 +0,0 @@
package apimodels
// RawTransaction represents a raw transaction posted to the API server
type RawTransaction struct {
RawTransaction string `json:"rawTransaction"`
}

View File

@@ -1,64 +0,0 @@
package apimodels
// TransactionResponse is a json representation of a transaction
type TransactionResponse struct {
TransactionHash string `json:"transactionHash"`
TransactionID string `json:"transactionId"`
AcceptingBlockHash *string `json:"acceptingBlockHash,omitempty"`
AcceptingBlockBlueScore *uint64 `json:"acceptingBlockBlueScore,omitempty"`
SubnetworkID string `json:"subnetworkId"`
LockTime uint64 `json:"lockTime"`
Gas uint64 `json:"gas,omitempty"`
PayloadHash string `json:"payloadHash,omitempty"`
Payload string `json:"payload,omitempty"`
Inputs []*TransactionInputResponse `json:"inputs"`
Outputs []*TransactionOutputResponse `json:"outputs"`
Mass uint64 `json:"mass"`
}
// TransactionOutputResponse is a json representation of a transaction output
type TransactionOutputResponse struct {
TransactionID string `json:"transactionId,omitempty"`
Value uint64 `json:"value"`
ScriptPubKey string `json:"scriptPubKey"`
Address string `json:"address,omitempty"`
AcceptingBlockHash *string `json:"acceptingBlockHash,omitempty"`
AcceptingBlockBlueScore uint64 `json:"acceptingBlockBlueScore,omitempty"`
Index uint32 `json:"index"`
IsCoinbase *bool `json:"isCoinbase,omitempty"`
IsSpendable *bool `json:"isSpendable,omitempty"`
Confirmations *uint64 `json:"confirmations,omitempty"`
}
// TransactionInputResponse is a json representation of a transaction input
type TransactionInputResponse struct {
TransactionID string `json:"transactionId,omitempty"`
PreviousTransactionID string `json:"previousTransactionId"`
PreviousTransactionOutputIndex uint32 `json:"previousTransactionOutputIndex"`
SignatureScript string `json:"signatureScript"`
Sequence uint64 `json:"sequence"`
Address string `json:"address"`
}
// BlockResponse is a json representation of a block
type BlockResponse struct {
BlockHash string `json:"blockHash"`
Version int32 `json:"version"`
HashMerkleRoot string `json:"hashMerkleRoot"`
AcceptedIDMerkleRoot string `json:"acceptedIDMerkleRoot"`
UTXOCommitment string `json:"utxoCommitment"`
Timestamp uint64 `json:"timestamp"`
Bits uint32 `json:"bits"`
Nonce uint64 `json:"nonce"`
AcceptingBlockHash *string `json:"acceptingBlockHash"`
BlueScore uint64 `json:"blueScore"`
IsChainBlock bool `json:"isChainBlock"`
Mass uint64 `json:"mass"`
}
// FeeEstimateResponse is a json representation of a fee estimate
type FeeEstimateResponse struct {
HighPriority float64 `json:"highPriority"`
NormalPriority float64 `json:"normalPriority"`
LowPriority float64 `json:"lowPriority"`
}

View File

@@ -1,98 +0,0 @@
package config
import (
"github.com/daglabs/btcd/apiserver/logger"
"github.com/daglabs/btcd/config"
"github.com/daglabs/btcd/util"
"github.com/jessevdk/go-flags"
"github.com/pkg/errors"
"path/filepath"
)
const (
defaultLogFilename = "apiserver.log"
defaultErrLogFilename = "apiserver_err.log"
)
var (
// Default configuration options
defaultLogDir = util.AppDataDir("apiserver", false)
defaultDBAddress = "localhost:3306"
defaultHTTPListen = "0.0.0.0:8080"
activeConfig *Config
)
// ActiveConfig returns the active configuration struct
func ActiveConfig() *Config {
return activeConfig
}
// Config defines the configuration options for the API server.
type Config struct {
LogDir string `long:"logdir" description:"Directory to log output."`
RPCUser string `short:"u" long:"rpcuser" description:"RPC username"`
RPCPassword string `short:"P" long:"rpcpass" default-mask:"-" description:"RPC password"`
RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
RPCCert string `short:"c" long:"rpccert" description:"RPC server certificate chain for validation"`
DisableTLS bool `long:"notls" description:"Disable TLS"`
DBAddress string `long:"dbaddress" description:"Database address"`
DBUser string `long:"dbuser" description:"Database user" required:"true"`
DBPassword string `long:"dbpass" description:"Database password" required:"true"`
DBName string `long:"dbname" description:"Database name" required:"true"`
HTTPListen string `long:"listen" description:"HTTP address to listen on (default: 0.0.0.0:8080)"`
Migrate bool `long:"migrate" description:"Migrate the database to the latest version. The server will not start when using this flag."`
MQTTBrokerAddress string `long:"mqttaddress" description:"MQTT broker address" required:"false"`
MQTTUser string `long:"mqttuser" description:"MQTT server user" required:"false"`
MQTTPassword string `long:"mqttpass" description:"MQTT server password" required:"false"`
config.NetworkFlags
}
// Parse parses the CLI arguments and returns a config struct.
func Parse() (*Config, error) {
activeConfig = &Config{
LogDir: defaultLogDir,
DBAddress: defaultDBAddress,
HTTPListen: defaultHTTPListen,
}
parser := flags.NewParser(activeConfig, flags.PrintErrors|flags.HelpFlag)
_, err := parser.Parse()
if err != nil {
return nil, err
}
if !activeConfig.Migrate {
if activeConfig.RPCUser == "" {
return nil, errors.New("--rpcuser is required if --migrate flag is not used")
}
if activeConfig.RPCPassword == "" {
return nil, errors.New("--rpcpass is required if --migrate flag is not used")
}
if activeConfig.RPCServer == "" {
return nil, errors.New("--rpcserver is required if --migrate flag is not used")
}
}
if activeConfig.RPCCert == "" && !activeConfig.DisableTLS {
return nil, errors.New("--notls has to be disabled if --cert is used")
}
if activeConfig.RPCCert != "" && activeConfig.DisableTLS {
return nil, errors.New("--cert should be omitted if --notls is used")
}
if (activeConfig.MQTTBrokerAddress != "" || activeConfig.MQTTUser != "" || activeConfig.MQTTPassword != "") &&
(activeConfig.MQTTBrokerAddress == "" || activeConfig.MQTTUser == "" || activeConfig.MQTTPassword == "") {
return nil, errors.New("--mqttaddress, --mqttuser, and --mqttpass must be passed all together")
}
err = activeConfig.ResolveNetwork(parser)
if err != nil {
return nil, err
}
logFile := filepath.Join(activeConfig.LogDir, defaultLogFilename)
errLogFile := filepath.Join(activeConfig.LogDir, defaultErrLogFilename)
logger.InitLog(logFile, errLogFile)
return activeConfig, nil
}

View File

@@ -1,108 +0,0 @@
package controllers
import (
"encoding/hex"
"net/http"
"github.com/daglabs/btcd/apiserver/apimodels"
"github.com/daglabs/btcd/apiserver/dbmodels"
"github.com/daglabs/btcd/httpserverutils"
"github.com/pkg/errors"
"github.com/daglabs/btcd/apiserver/database"
"github.com/daglabs/btcd/util/daghash"
)
const (
// OrderAscending is a parameter that can be used
// in a get list handler to get a list ordered
// in an ascending order.
OrderAscending = "asc"
// OrderDescending is a parameter that can be used
// in a get list handler to get a list ordered
// in a descending order.
OrderDescending = "desc"
)
const maxGetBlocksLimit = 100
// GetBlockByHashHandler returns a block by a given hash.
func GetBlockByHashHandler(blockHash string) (interface{}, error) {
if bytes, err := hex.DecodeString(blockHash); err != nil || len(bytes) != daghash.HashSize {
return nil, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity,
errors.Errorf("The given block hash is not a hex-encoded %d-byte hash.", daghash.HashSize))
}
db, err := database.DB()
if err != nil {
return nil, err
}
block := &dbmodels.Block{}
dbResult := db.Where(&dbmodels.Block{BlockHash: blockHash}).Preload("AcceptingBlock").First(block)
dbErrors := dbResult.GetErrors()
if httpserverutils.IsDBRecordNotFoundError(dbErrors) {
return nil, httpserverutils.NewHandlerError(http.StatusNotFound, errors.New("No block with the given block hash was found"))
}
if httpserverutils.HasDBError(dbErrors) {
return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading transactions from the database:",
dbResult.GetErrors())
}
return convertBlockModelToBlockResponse(block), nil
}
// GetBlocksHandler searches for all blocks
func GetBlocksHandler(order string, skip uint64, limit uint64) (interface{}, error) {
if limit > maxGetBlocksLimit {
return nil, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity, errors.Errorf("The maximum allowed value for the limit is %d", maxGetBlocksLimit))
}
blocks := []*dbmodels.Block{}
db, err := database.DB()
if err != nil {
return nil, err
}
query := db.
Limit(limit).
Offset(skip).
Preload("AcceptingBlock")
if order == OrderAscending {
query = query.Order("`id` ASC")
} else if order == OrderDescending {
query = query.Order("`id` DESC")
} else {
return nil, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity, errors.Errorf("'%s' is not a valid order", order))
}
query.Find(&blocks)
blockResponses := make([]*apimodels.BlockResponse, len(blocks))
for i, block := range blocks {
blockResponses[i] = convertBlockModelToBlockResponse(block)
}
return blockResponses, nil
}
// GetAcceptedTransactionIDsByBlockHashHandler returns an array of transaction IDs for a given block hash
func GetAcceptedTransactionIDsByBlockHashHandler(blockHash *daghash.Hash) ([]string, error) {
db, err := database.DB()
if err != nil {
return nil, err
}
var transactions []dbmodels.Transaction
dbResult := db.
Joins("LEFT JOIN `blocks` ON `blocks`.`id` = `transactions`.`accepting_block_id`").
Where("`blocks`.`block_hash` = ?", blockHash).
Find(&transactions)
dbErrors := dbResult.GetErrors()
if httpserverutils.HasDBError(dbErrors) {
return nil, httpserverutils.NewErrorFromDBErrors("Failed to find transactions: ", dbErrors)
}
result := make([]string, len(transactions))
for i, transaction := range transactions {
result[i] = transaction.TransactionID
}
return result, nil
}

View File

@@ -1,65 +0,0 @@
package controllers
import (
"encoding/hex"
"github.com/daglabs/btcd/apiserver/apimodels"
"github.com/daglabs/btcd/apiserver/dbmodels"
"github.com/daglabs/btcd/btcjson"
)
func convertTxDBModelToTxResponse(tx *dbmodels.Transaction) *apimodels.TransactionResponse {
txRes := &apimodels.TransactionResponse{
TransactionHash: tx.TransactionHash,
TransactionID: tx.TransactionID,
SubnetworkID: tx.Subnetwork.SubnetworkID,
LockTime: tx.LockTime,
Gas: tx.Gas,
PayloadHash: tx.PayloadHash,
Payload: hex.EncodeToString(tx.Payload),
Inputs: make([]*apimodels.TransactionInputResponse, len(tx.TransactionInputs)),
Outputs: make([]*apimodels.TransactionOutputResponse, len(tx.TransactionOutputs)),
Mass: tx.Mass,
}
if tx.AcceptingBlock != nil {
txRes.AcceptingBlockHash = &tx.AcceptingBlock.BlockHash
txRes.AcceptingBlockBlueScore = &tx.AcceptingBlock.BlueScore
}
for i, txOut := range tx.TransactionOutputs {
txRes.Outputs[i] = &apimodels.TransactionOutputResponse{
Value: txOut.Value,
ScriptPubKey: hex.EncodeToString(txOut.ScriptPubKey),
Address: txOut.Address.Address,
Index: txOut.Index,
}
}
for i, txIn := range tx.TransactionInputs {
txRes.Inputs[i] = &apimodels.TransactionInputResponse{
PreviousTransactionID: txIn.PreviousTransactionOutput.Transaction.TransactionID,
PreviousTransactionOutputIndex: txIn.PreviousTransactionOutput.Index,
SignatureScript: hex.EncodeToString(txIn.SignatureScript),
Sequence: txIn.Sequence,
Address: txIn.PreviousTransactionOutput.Address.Address,
}
}
return txRes
}
func convertBlockModelToBlockResponse(block *dbmodels.Block) *apimodels.BlockResponse {
blockRes := &apimodels.BlockResponse{
BlockHash: block.BlockHash,
Version: block.Version,
HashMerkleRoot: block.HashMerkleRoot,
AcceptedIDMerkleRoot: block.AcceptedIDMerkleRoot,
UTXOCommitment: block.UTXOCommitment,
Timestamp: uint64(block.Timestamp.Unix()),
Bits: block.Bits,
Nonce: block.Nonce,
BlueScore: block.BlueScore,
IsChainBlock: block.IsChainBlock,
Mass: block.Mass,
}
if block.AcceptingBlock != nil {
blockRes.AcceptingBlockHash = btcjson.String(block.AcceptingBlock.BlockHash)
}
return blockRes
}

View File

@@ -1,15 +0,0 @@
package controllers
import (
"github.com/daglabs/btcd/apiserver/apimodels"
)
// GetFeeEstimatesHandler returns the fee estimates for different priorities
// for accepting a transaction in the DAG.
func GetFeeEstimatesHandler() (interface{}, error) {
return &apimodels.FeeEstimateResponse{
HighPriority: 3,
NormalPriority: 2,
LowPriority: 1,
}, nil
}

View File

@@ -1,312 +0,0 @@
package controllers
import (
"bytes"
"encoding/hex"
"encoding/json"
"fmt"
"github.com/daglabs/btcd/util"
"net/http"
"github.com/daglabs/btcd/apiserver/apimodels"
"github.com/daglabs/btcd/apiserver/config"
"github.com/daglabs/btcd/apiserver/dbmodels"
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/httpserverutils"
"github.com/daglabs/btcd/util/subnetworkid"
"github.com/pkg/errors"
"github.com/daglabs/btcd/apiserver/database"
"github.com/daglabs/btcd/apiserver/jsonrpc"
"github.com/daglabs/btcd/btcjson"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
"github.com/jinzhu/gorm"
)
const maxGetTransactionsLimit = 1000
// GetTransactionByIDHandler returns a transaction by a given transaction ID.
func GetTransactionByIDHandler(txID string) (interface{}, error) {
if bytes, err := hex.DecodeString(txID); err != nil || len(bytes) != daghash.TxIDSize {
return nil, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity,
errors.Errorf("The given txid is not a hex-encoded %d-byte hash.", daghash.TxIDSize))
}
db, err := database.DB()
if err != nil {
return nil, err
}
tx := &dbmodels.Transaction{}
query := db.Where(&dbmodels.Transaction{TransactionID: txID})
dbResult := addTxPreloadedFields(query).First(&tx)
dbErrors := dbResult.GetErrors()
if httpserverutils.IsDBRecordNotFoundError(dbErrors) {
return nil, httpserverutils.NewHandlerError(http.StatusNotFound, errors.New("No transaction with the given txid was found"))
}
if httpserverutils.HasDBError(dbErrors) {
return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading transaction from the database:", dbErrors)
}
return convertTxDBModelToTxResponse(tx), nil
}
// GetTransactionByHashHandler returns a transaction by a given transaction hash.
func GetTransactionByHashHandler(txHash string) (interface{}, error) {
if bytes, err := hex.DecodeString(txHash); err != nil || len(bytes) != daghash.HashSize {
return nil, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity,
errors.Errorf("The given txhash is not a hex-encoded %d-byte hash.", daghash.HashSize))
}
db, err := database.DB()
if err != nil {
return nil, err
}
tx := &dbmodels.Transaction{}
query := db.Where(&dbmodels.Transaction{TransactionHash: txHash})
dbResult := addTxPreloadedFields(query).First(&tx)
dbErrors := dbResult.GetErrors()
if httpserverutils.IsDBRecordNotFoundError(dbErrors) {
return nil, httpserverutils.NewHandlerError(http.StatusNotFound, errors.Errorf("No transaction with the given txhash was found."))
}
if httpserverutils.HasDBError(dbErrors) {
return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading transaction from the database:", dbErrors)
}
return convertTxDBModelToTxResponse(tx), nil
}
// GetTransactionsByAddressHandler searches for all transactions
// where the given address is either an input or an output.
func GetTransactionsByAddressHandler(address string, skip uint64, limit uint64) (interface{}, error) {
if limit > maxGetTransactionsLimit {
return nil, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity,
errors.Errorf("The maximum allowed value for the limit is %d", maxGetTransactionsLimit))
}
db, err := database.DB()
if err != nil {
return nil, err
}
txs := []*dbmodels.Transaction{}
query := joinTxInputsTxOutputsAndAddresses(db).
Where("`out_addresses`.`address` = ?", address).
Or("`in_addresses`.`address` = ?", address).
Limit(limit).
Offset(skip).
Order("`transactions`.`id` ASC")
dbResult := addTxPreloadedFields(query).Find(&txs)
dbErrors := dbResult.GetErrors()
if httpserverutils.HasDBError(dbErrors) {
return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading transactions from the database:", dbErrors)
}
txResponses := make([]*apimodels.TransactionResponse, len(txs))
for i, tx := range txs {
txResponses[i] = convertTxDBModelToTxResponse(tx)
}
return txResponses, nil
}
func fetchSelectedTip() (*dbmodels.Block, error) {
db, err := database.DB()
if err != nil {
return nil, err
}
block := &dbmodels.Block{}
dbResult := db.Order("blue_score DESC").
Where(&dbmodels.Block{IsChainBlock: true}).
First(block)
dbErrors := dbResult.GetErrors()
if httpserverutils.HasDBError(dbErrors) {
return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading transactions from the database:", dbErrors)
}
return block, nil
}
func areTxsInBlock(blockID uint64, txIDs []uint64) (map[uint64]bool, error) {
db, err := database.DB()
if err != nil {
return nil, err
}
transactionBlocks := []*dbmodels.TransactionBlock{}
dbErrors := db.
Where(&dbmodels.TransactionBlock{BlockID: blockID}).
Where("transaction_id in (?)", txIDs).
Find(&transactionBlocks).GetErrors()
if len(dbErrors) > 0 {
return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading UTXOs from the database:", dbErrors)
}
isInBlock := make(map[uint64]bool)
for _, transactionBlock := range transactionBlocks {
isInBlock[transactionBlock.TransactionID] = true
}
return isInBlock, nil
}
// GetUTXOsByAddressHandler searches for all UTXOs that belong to a certain address.
func GetUTXOsByAddressHandler(address string) (interface{}, error) {
_, err := util.DecodeAddress(address, config.ActiveConfig().ActiveNetParams.Prefix)
if err != nil {
return nil, httpserverutils.NewHandlerErrorWithCustomClientMessage(http.StatusUnprocessableEntity,
errors.Wrap(err, "error decoding address"),
"The given address is not a well-formatted P2PKH or P2SH address.")
}
db, err := database.DB()
if err != nil {
return nil, err
}
var transactionOutputs []*dbmodels.TransactionOutput
dbErrors := db.
Joins("LEFT JOIN `addresses` ON `addresses`.`id` = `transaction_outputs`.`address_id`").
Where("`addresses`.`address` = ? AND `transaction_outputs`.`is_spent` = 0", address).
Preload("Transaction.AcceptingBlock").
Preload("Transaction.Subnetwork").
Find(&transactionOutputs).GetErrors()
if len(dbErrors) > 0 {
return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading UTXOs from the database:", dbErrors)
}
nonAcceptedTxIds := make([]uint64, len(transactionOutputs))
for i, txOut := range transactionOutputs {
if txOut.Transaction.AcceptingBlock == nil {
nonAcceptedTxIds[i] = txOut.TransactionID
}
}
var selectedTip *dbmodels.Block
var isTxInSelectedTip map[uint64]bool
if len(nonAcceptedTxIds) != 0 {
selectedTip, err = fetchSelectedTip()
if err != nil {
return nil, err
}
isTxInSelectedTip, err = areTxsInBlock(selectedTip.ID, nonAcceptedTxIds)
if err != nil {
return nil, err
}
}
activeNetParams := config.ActiveConfig().NetParams()
UTXOsResponses := make([]*apimodels.TransactionOutputResponse, len(transactionOutputs))
for i, transactionOutput := range transactionOutputs {
subnetworkID := &subnetworkid.SubnetworkID{}
err := subnetworkid.Decode(subnetworkID, transactionOutput.Transaction.Subnetwork.SubnetworkID)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("Couldn't decode subnetwork id %s", transactionOutput.Transaction.Subnetwork.SubnetworkID))
}
var acceptingBlockHash *string
var confirmations uint64
acceptingBlockBlueScore := blockdag.UnacceptedBlueScore
if isTxInSelectedTip[transactionOutput.ID] {
confirmations = 1
} else if transactionOutput.Transaction.AcceptingBlock != nil {
acceptingBlockHash = btcjson.String(transactionOutput.Transaction.AcceptingBlock.BlockHash)
acceptingBlockBlueScore = transactionOutput.Transaction.AcceptingBlock.BlueScore
confirmations = selectedTip.BlueScore - acceptingBlockBlueScore + 2
}
isCoinbase := subnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase)
UTXOsResponses[i] = &apimodels.TransactionOutputResponse{
TransactionID: transactionOutput.Transaction.TransactionID,
Value: transactionOutput.Value,
ScriptPubKey: hex.EncodeToString(transactionOutput.ScriptPubKey),
AcceptingBlockHash: acceptingBlockHash,
AcceptingBlockBlueScore: acceptingBlockBlueScore,
Index: transactionOutput.Index,
IsCoinbase: btcjson.Bool(isCoinbase),
Confirmations: btcjson.Uint64(confirmations),
IsSpendable: btcjson.Bool(!isCoinbase || confirmations >= activeNetParams.BlockCoinbaseMaturity),
}
}
return UTXOsResponses, nil
}
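The confirmation count above is plain blue-score arithmetic. With hypothetical scores, and assuming the +2 accounts for the accepting block and the selected tip itself:

```go
package main

import "fmt"

func main() {
	// A UTXO accepted at blue score 1000, with the selected tip at blue
	// score 1005, gets 1005 - 1000 + 2 = 7 confirmations.
	selectedTipBlueScore := uint64(1005)
	acceptingBlockBlueScore := uint64(1000)
	confirmations := selectedTipBlueScore - acceptingBlockBlueScore + 2
	fmt.Println(confirmations) // 7
}
```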
func joinTxInputsTxOutputsAndAddresses(query *gorm.DB) *gorm.DB {
return query.
Joins("LEFT JOIN `transaction_outputs` ON `transaction_outputs`.`transaction_id` = `transactions`.`id`").
Joins("LEFT JOIN `addresses` AS `out_addresses` ON `out_addresses`.`id` = `transaction_outputs`.`address_id`").
Joins("LEFT JOIN `transaction_inputs` ON `transaction_inputs`.`transaction_id` = `transactions`.`id`").
Joins("LEFT JOIN `transaction_outputs` AS `inputs_outs` ON `inputs_outs`.`id` = `transaction_inputs`.`previous_transaction_output_id`").
Joins("LEFT JOIN `addresses` AS `in_addresses` ON `in_addresses`.`id` = `inputs_outs`.`address_id`")
}
func addTxPreloadedFields(query *gorm.DB) *gorm.DB {
return query.Preload("AcceptingBlock").
Preload("Subnetwork").
Preload("TransactionOutputs").
Preload("TransactionOutputs.Address").
Preload("TransactionInputs.PreviousTransactionOutput.Transaction").
Preload("TransactionInputs.PreviousTransactionOutput.Address")
}
// PostTransaction forwards a raw transaction to the JSON-RPC API server
func PostTransaction(requestBody []byte) error {
client, err := jsonrpc.GetClient()
if err != nil {
return err
}
rawTx := &apimodels.RawTransaction{}
err = json.Unmarshal(requestBody, rawTx)
if err != nil {
return httpserverutils.NewHandlerErrorWithCustomClientMessage(http.StatusUnprocessableEntity,
errors.Wrap(err, "Error unmarshalling request body"),
"The request body is not json-formatted")
}
txBytes, err := hex.DecodeString(rawTx.RawTransaction)
if err != nil {
return httpserverutils.NewHandlerErrorWithCustomClientMessage(http.StatusUnprocessableEntity,
errors.Wrap(err, "Error decoding hex raw transaction"),
"The raw transaction is not a hex-encoded transaction")
}
txReader := bytes.NewReader(txBytes)
tx := &wire.MsgTx{}
err = tx.BtcDecode(txReader, 0)
if err != nil {
return httpserverutils.NewHandlerErrorWithCustomClientMessage(http.StatusUnprocessableEntity,
errors.Wrap(err, "Error decoding raw transaction"),
"Error decoding raw transaction")
}
_, err = client.SendRawTransaction(tx, true)
if err != nil {
if rpcErr, ok := err.(*btcjson.RPCError); ok && rpcErr.Code == btcjson.ErrRPCVerify {
return httpserverutils.NewHandlerError(http.StatusInternalServerError, err)
}
return err
}
return nil
}
// GetTransactionsByIDsHandler finds transactions by the given transactionIds.
func GetTransactionsByIDsHandler(transactionIds []string) ([]*apimodels.TransactionResponse, error) {
db, err := database.DB()
if err != nil {
return nil, err
}
var txs []*dbmodels.Transaction
query := joinTxInputsTxOutputsAndAddresses(db).
Where("`transactions`.`transaction_id` IN (?)", transactionIds)
dbResult := addTxPreloadedFields(query).Find(&txs)
dbErrors := dbResult.GetErrors()
if httpserverutils.HasDBError(dbErrors) {
return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading transactions from the database:", dbErrors)
}
txResponses := make([]*apimodels.TransactionResponse, len(txs))
for i, tx := range txs {
txResponses[i] = convertTxDBModelToTxResponse(tx)
}
return txResponses, nil
}


@@ -1,142 +0,0 @@
package database
import (
nativeerrors "errors"
"fmt"
"github.com/pkg/errors"
"os"
"github.com/daglabs/btcd/apiserver/config"
"github.com/golang-migrate/migrate/v4/source"
"github.com/jinzhu/gorm"
"github.com/golang-migrate/migrate/v4"
)
// db is the API server database.
var db *gorm.DB
// DB returns a reference to the database connection
func DB() (*gorm.DB, error) {
if db == nil {
return nil, errors.New("Database is not connected")
}
return db, nil
}
type gormLogger struct{}
func (l gormLogger) Print(v ...interface{}) {
str := fmt.Sprint(v...)
log.Errorf(str)
}
// Connect connects to the database specified in
// the active configuration.
func Connect() error {
connectionString := buildConnectionString()
migrator, driver, err := openMigrator(connectionString)
if err != nil {
return err
}
isCurrent, version, err := isCurrent(migrator, driver)
if err != nil {
return errors.Errorf("Error checking whether the database is current: %s", err)
}
if !isCurrent {
return errors.Errorf("Database is not current (version %d). Please migrate"+
" the database by running the server with --migrate flag and then run it again.", version)
}
db, err = gorm.Open("mysql", connectionString)
if err != nil {
return err
}
db.SetLogger(gormLogger{})
return nil
}
// Close closes the connection to the database
func Close() error {
if db == nil {
return nil
}
err := db.Close()
db = nil
return err
}
func buildConnectionString() string {
cfg := config.ActiveConfig()
return fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8&parseTime=True",
cfg.DBUser, cfg.DBPassword, cfg.DBAddress, cfg.DBName)
}
// isCurrent resolves whether the database is on the latest
// version of the schema.
func isCurrent(migrator *migrate.Migrate, driver source.Driver) (bool, uint, error) {
// Get the current version
version, isDirty, err := migrator.Version()
if nativeerrors.Is(err, migrate.ErrNilVersion) {
return false, 0, nil
}
if err != nil {
return false, 0, errors.WithStack(err)
}
if isDirty {
return false, 0, errors.Errorf("Database is dirty")
}
// The database is current if Next returns ErrNotExist
_, err = driver.Next(version)
if pathErr, ok := err.(*os.PathError); ok {
if pathErr.Err == os.ErrNotExist {
return true, version, nil
}
}
return false, version, err
}
func openMigrator(connectionString string) (*migrate.Migrate, source.Driver, error) {
driver, err := source.Open("file://migrations")
if err != nil {
return nil, nil, err
}
migrator, err := migrate.NewWithSourceInstance(
"migrations", driver, "mysql://"+connectionString)
if err != nil {
return nil, nil, err
}
return migrator, driver, nil
}
// Migrate migrates the database to the latest version.
func Migrate() error {
connectionString := buildConnectionString()
migrator, driver, err := openMigrator(connectionString)
if err != nil {
return err
}
isCurrent, version, err := isCurrent(migrator, driver)
if err != nil {
return errors.Errorf("Error checking whether the database is current: %s", err)
}
if isCurrent {
log.Infof("Database is already up-to-date (version %d)", version)
return nil
}
err = migrator.Up()
if err != nil {
return err
}
version, isDirty, err := migrator.Version()
if err != nil {
return err
}
if isDirty {
return errors.Errorf("error migrating database: database is dirty")
}
log.Infof("Migrated database to the latest version (version %d)", version)
return nil
}
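Putting Connect and Migrate together, the intended operator flow (per the error message in Connect) is to run the server once with the migration flag and then start it normally; the binary name here is an assumption:

```bash
# Bring the schema up to date, then start the server normally.
./apiserver --migrate
./apiserver
```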


@@ -1,9 +0,0 @@
package database
import "github.com/daglabs/btcd/util/panics"
import "github.com/daglabs/btcd/apiserver/logger"
var (
log = logger.BackendLog.Logger("DTBS")
spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)
)


@@ -1,111 +0,0 @@
package dbmodels
import (
"time"
)
// Block is the gorm model for the 'blocks' table
type Block struct {
ID uint64 `gorm:"primary_key"`
BlockHash string
AcceptingBlockID *uint64
AcceptingBlock *Block
Version int32
HashMerkleRoot string
AcceptedIDMerkleRoot string
UTXOCommitment string
Timestamp time.Time
Bits uint32
Nonce uint64
BlueScore uint64
IsChainBlock bool
Mass uint64
ParentBlocks []Block `gorm:"many2many:parent_blocks;"`
}
// ParentBlock is the gorm model for the 'parent_blocks' table
type ParentBlock struct {
BlockID uint64
Block Block
ParentBlockID uint64
ParentBlock Block
}
// RawBlock is the gorm model for the 'raw_blocks' table
type RawBlock struct {
BlockID uint64
Block Block
BlockData []byte
}
// Subnetwork is the gorm model for the 'subnetworks' table
type Subnetwork struct {
ID uint64 `gorm:"primary_key"`
SubnetworkID string
GasLimit *uint64
}
// Transaction is the gorm model for the 'transactions' table
type Transaction struct {
ID uint64 `gorm:"primary_key"`
AcceptingBlockID *uint64
AcceptingBlock *Block
TransactionHash string
TransactionID string
LockTime uint64
SubnetworkID uint64
Subnetwork Subnetwork
Gas uint64
PayloadHash string
Payload []byte
Mass uint64
Blocks []Block `gorm:"many2many:transactions_to_blocks;"`
TransactionOutputs []TransactionOutput
TransactionInputs []TransactionInput
}
// TransactionBlock is the gorm model for the 'transactions_to_blocks' table
type TransactionBlock struct {
TransactionID uint64
Transaction Transaction
BlockID uint64
Block Block
Index uint32
}
// TableName returns the table name associated to the
// TransactionBlock gorm model
func (TransactionBlock) TableName() string {
return "transactions_to_blocks"
}
// TransactionOutput is the gorm model for the 'transaction_outputs' table
type TransactionOutput struct {
ID uint64 `gorm:"primary_key"`
TransactionID uint64
Transaction Transaction
Index uint32
Value uint64
ScriptPubKey []byte
IsSpent bool
AddressID uint64
Address Address
}
// TransactionInput is the gorm model for the 'transaction_inputs' table
type TransactionInput struct {
ID uint64 `gorm:"primary_key"`
TransactionID uint64
Transaction Transaction
PreviousTransactionOutputID uint64
PreviousTransactionOutput TransactionOutput
Index uint32
SignatureScript []byte
Sequence uint64
}
// Address is the gorm model for the 'addresses' table
type Address struct {
ID uint64 `gorm:"primary_key"`
Address string
}


@@ -1,28 +0,0 @@
# --- multistage docker build: stage #1: build stage
FROM golang:1.13-alpine AS build
RUN mkdir -p /go/src/github.com/daglabs/btcd
WORKDIR /go/src/github.com/daglabs/btcd
RUN apk add --no-cache curl git
COPY go.mod .
COPY go.sum .
RUN go mod download
COPY . .
RUN cd apiserver && CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o apiserver .
# --- multistage docker build: stage #2: runtime image
FROM alpine
WORKDIR /app
RUN apk add --no-cache tini
COPY --from=build /go/src/github.com/daglabs/btcd/apiserver/ /app/
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["/app/apiserver"]


@@ -1,125 +0,0 @@
package jsonrpc
import (
"github.com/pkg/errors"
"io/ioutil"
"time"
"github.com/daglabs/btcd/apiserver/config"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/rpcclient"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/wire"
)
// Client represents a connection to the JSON-RPC API of a full node
type Client struct {
*rpcclient.Client
OnBlockAdded chan *BlockAddedMsg
OnChainChanged chan *ChainChangedMsg
}
var client *Client
// GetClient returns an instance of the JSON-RPC client, in case we have an active connection
func GetClient() (*Client, error) {
if client == nil {
return nil, errors.New("JSON-RPC is not connected")
}
return client, nil
}
// BlockAddedMsg defines the message received in onBlockAdded
type BlockAddedMsg struct {
ChainHeight uint64
Header *wire.BlockHeader
}
// ChainChangedMsg defines the message received in onChainChanged
type ChainChangedMsg struct {
RemovedChainBlockHashes []*daghash.Hash
AddedChainBlocks []*rpcclient.ChainBlock
}
// Close closes the connection to the JSON-RPC API server
func Close() {
if client == nil {
return
}
client.Disconnect()
client = nil
}
// Connect initiates a connection to the JSON-RPC API Server
func Connect() error {
cfg := config.ActiveConfig()
var cert []byte
if !cfg.DisableTLS {
var err error
cert, err = ioutil.ReadFile(cfg.RPCCert)
if err != nil {
return errors.Errorf("Error reading certificates file: %s", err)
}
}
connCfg := &rpcclient.ConnConfig{
Host: cfg.RPCServer,
Endpoint: "ws",
User: cfg.RPCUser,
Pass: cfg.RPCPassword,
DisableTLS: cfg.DisableTLS,
RequestTimeout: time.Second * 5,
}
if !cfg.DisableTLS {
connCfg.Certificates = cert
}
var err error
client, err = newClient(connCfg)
if err != nil {
return errors.Errorf("Error connecting to address %s: %s", cfg.RPCServer, err)
}
return nil
}
func newClient(connCfg *rpcclient.ConnConfig) (*Client, error) {
client = &Client{
OnBlockAdded: make(chan *BlockAddedMsg),
OnChainChanged: make(chan *ChainChangedMsg),
}
notificationHandlers := &rpcclient.NotificationHandlers{
OnFilteredBlockAdded: func(height uint64, header *wire.BlockHeader,
txs []*util.Tx) {
client.OnBlockAdded <- &BlockAddedMsg{
ChainHeight: height,
Header: header,
}
},
OnChainChanged: func(removedChainBlockHashes []*daghash.Hash,
addedChainBlocks []*rpcclient.ChainBlock) {
client.OnChainChanged <- &ChainChangedMsg{
RemovedChainBlockHashes: removedChainBlockHashes,
AddedChainBlocks: addedChainBlocks,
}
},
}
var err error
client.Client, err = rpcclient.New(connCfg, notificationHandlers)
if err != nil {
return nil, errors.Errorf("Error connecting to address %s: %s", connCfg.Host, err)
}
if err = client.NotifyBlocks(); err != nil {
return nil, errors.Errorf("Error while registering client %s for block notifications: %s", client.Host(), err)
}
if err = client.NotifyChainChanges(); err != nil {
return nil, errors.Errorf("Error while registering client %s for chain changes notifications: %s", client.Host(), err)
}
return client, nil
}
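Callers are expected to drain the two notification channels, since the handlers installed by newClient block on unbuffered sends. A minimal consumer sketch (the function and its logging calls are illustrative):

```go
func listenForNotifications() error {
	client, err := jsonrpc.GetClient()
	if err != nil {
		return err
	}
	for {
		select {
		case msg := <-client.OnBlockAdded:
			log.Infof("Block added at chain height %d", msg.ChainHeight)
		case msg := <-client.OnChainChanged:
			log.Infof("Chain changed: %d chain blocks added", len(msg.AddedChainBlocks))
		}
	}
}
```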


@@ -1,11 +0,0 @@
package main
import (
"github.com/daglabs/btcd/apiserver/logger"
"github.com/daglabs/btcd/util/panics"
)
var (
log = logger.BackendLog.Logger("APIS")
spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)
)


@@ -1,24 +0,0 @@
package logger
import (
"fmt"
"github.com/daglabs/btcd/logs"
"os"
)
// BackendLog is the logging backend used to create all subsystem loggers.
var BackendLog = logs.NewBackend()
// InitLog attaches log file and error log file to the backend log.
func InitLog(logFile, errLogFile string) {
err := BackendLog.AddLogFile(logFile, logs.LevelTrace)
if err != nil {
fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", logFile, logs.LevelTrace, err)
os.Exit(1)
}
err = BackendLog.AddLogFile(errLogFile, logs.LevelWarn)
if err != nil {
fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", errLogFile, logs.LevelWarn, err)
os.Exit(1)
}
}


@@ -1,81 +0,0 @@
package main
import (
"fmt"
"github.com/daglabs/btcd/apiserver/mqtt"
"github.com/pkg/errors"
"os"
"github.com/daglabs/btcd/apiserver/config"
"github.com/daglabs/btcd/apiserver/database"
"github.com/daglabs/btcd/apiserver/jsonrpc"
"github.com/daglabs/btcd/apiserver/server"
"github.com/daglabs/btcd/logger"
"github.com/daglabs/btcd/signal"
"github.com/daglabs/btcd/util/panics"
_ "github.com/golang-migrate/migrate/v4/database/mysql"
_ "github.com/golang-migrate/migrate/v4/source/file"
_ "github.com/jinzhu/gorm/dialects/mysql"
)
func main() {
defer panics.HandlePanic(log, logger.BackendLog, nil)
cfg, err := config.Parse()
if err != nil {
errString := fmt.Sprintf("Error parsing command-line arguments: %s", err)
_, fErr := fmt.Fprint(os.Stderr, errString)
if fErr != nil {
panic(errString)
}
return
}
if cfg.Migrate {
err := database.Migrate()
if err != nil {
panic(errors.Errorf("Error migrating database: %s", err))
}
return
}
err = database.Connect()
if err != nil {
panic(errors.Errorf("Error connecting to database: %s", err))
}
defer func() {
err := database.Close()
if err != nil {
panic(errors.Errorf("Error closing the database: %s", err))
}
}()
err = mqtt.Connect()
if err != nil {
panic(errors.Errorf("Error connecting to MQTT: %s", err))
}
defer mqtt.Close()
err = jsonrpc.Connect()
if err != nil {
panic(errors.Errorf("Error connecting to servers: %s", err))
}
defer jsonrpc.Close()
shutdownServer := server.Start(config.ActiveConfig().HTTPListen)
defer shutdownServer()
doneChan := make(chan struct{}, 1)
spawn(func() {
err := startSync(doneChan)
if err != nil {
panic(err)
}
})
interrupt := signal.InterruptListener()
<-interrupt
// Gracefully stop syncing
doneChan <- struct{}{}
}


@@ -1 +0,0 @@
DROP TABLE `blocks`;


@@ -1,23 +0,0 @@
CREATE TABLE `blocks`
(
`id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
`block_hash` CHAR(64) NOT NULL,
`accepting_block_id` BIGINT UNSIGNED NULL,
`version` INT NOT NULL,
`hash_merkle_root` CHAR(64) NOT NULL,
`accepted_id_merkle_root` CHAR(64) NOT NULL,
`utxo_commitment` CHAR(64) NOT NULL,
`timestamp` DATETIME NOT NULL,
`bits` INT UNSIGNED NOT NULL,
`nonce` BIGINT UNSIGNED NOT NULL,
`blue_score` BIGINT UNSIGNED NOT NULL,
`is_chain_block` TINYINT NOT NULL,
`mass` BIGINT NOT NULL,
PRIMARY KEY (`id`),
UNIQUE INDEX `idx_blocks_block_hash` (`block_hash`),
INDEX `idx_blocks_timestamp` (`timestamp`),
INDEX `idx_blocks_is_chain_block` (`is_chain_block`),
CONSTRAINT `fk_blocks_accepting_block_id`
FOREIGN KEY (`accepting_block_id`)
REFERENCES `blocks` (`id`)
);


@@ -1 +0,0 @@
DROP TABLE `parent_blocks`;


@@ -1,12 +0,0 @@
CREATE TABLE `parent_blocks`
(
`block_id` BIGINT UNSIGNED NOT NULL,
`parent_block_id` BIGINT UNSIGNED NOT NULL,
PRIMARY KEY (`block_id`, `parent_block_id`),
CONSTRAINT `fk_parent_blocks_block_id`
FOREIGN KEY (`block_id`)
REFERENCES `blocks` (`id`),
CONSTRAINT `fk_parent_blocks_parent_block_id`
FOREIGN KEY (`parent_block_id`)
REFERENCES `blocks` (`id`)
);


@@ -1 +0,0 @@
DROP TABLE `raw_blocks`;


@@ -1,9 +0,0 @@
CREATE TABLE `raw_blocks`
(
`block_id` BIGINT UNSIGNED NOT NULL,
`block_data` MEDIUMBLOB NOT NULL,
PRIMARY KEY (`block_id`),
CONSTRAINT `fk_raw_blocks_block_id`
FOREIGN KEY (`block_id`)
REFERENCES `blocks` (`id`)
);


@@ -1 +0,0 @@
DROP TABLE `subnetworks`;


@@ -1,8 +0,0 @@
CREATE TABLE `subnetworks`
(
`id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
`subnetwork_id` CHAR(64) NOT NULL,
`gas_limit` BIGINT UNSIGNED NULL,
PRIMARY KEY (`id`),
UNIQUE INDEX `idx_subnetworks_subnetwork_id` (`subnetwork_id`)
);


@@ -1 +0,0 @@
DROP TABLE `transactions`;


@@ -1,19 +0,0 @@
CREATE TABLE `transactions`
(
`id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
`accepting_block_id` BIGINT UNSIGNED NULL,
`transaction_hash` CHAR(64) NOT NULL,
`transaction_id` CHAR(64) NOT NULL,
`lock_time` BIGINT UNSIGNED NOT NULL,
`subnetwork_id` BIGINT UNSIGNED NOT NULL,
`gas` BIGINT UNSIGNED NOT NULL,
`payload_hash` CHAR(64) NOT NULL,
`payload` BLOB NOT NULL,
`mass` BIGINT NOT NULL,
PRIMARY KEY (`id`),
UNIQUE INDEX `idx_transactions_transaction_hash` (`transaction_hash`),
INDEX `idx_transactions_transaction_id` (`transaction_id`),
CONSTRAINT `fk_transactions_accepting_block_id`
FOREIGN KEY (`accepting_block_id`)
REFERENCES `blocks` (`id`)
);


@@ -1 +0,0 @@
DROP TABLE `transactions_to_blocks`;


@@ -1,14 +0,0 @@
CREATE TABLE `transactions_to_blocks`
(
`transaction_id` BIGINT UNSIGNED NOT NULL,
`block_id` BIGINT UNSIGNED NOT NULL,
`index` INT UNSIGNED NOT NULL,
PRIMARY KEY (`transaction_id`, `block_id`),
INDEX `idx_transactions_to_blocks_index` (`index`),
CONSTRAINT `fk_transactions_to_blocks_block_id`
FOREIGN KEY (`block_id`)
REFERENCES `blocks` (`id`),
CONSTRAINT `fk_transactions_to_blocks_transaction_id`
FOREIGN KEY (`transaction_id`)
REFERENCES `transactions` (`id`)
);


@@ -1 +0,0 @@
DROP TABLE `addresses`;


@@ -1,7 +0,0 @@
CREATE TABLE `addresses`
(
`id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
`address` CHAR(50) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE INDEX `idx_addresses_address` (`address`)
);


@@ -1 +0,0 @@
DROP TABLE `transaction_outputs`;


@@ -1,18 +0,0 @@
CREATE TABLE `transaction_outputs`
(
`id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
`transaction_id` BIGINT UNSIGNED NOT NULL,
`index` INT UNSIGNED NOT NULL,
`value` BIGINT UNSIGNED NOT NULL,
`script_pub_key` BLOB NOT NULL,
`is_spent` TINYINT NOT NULL,
`address_id` BIGINT UNSIGNED NOT NULL,
PRIMARY KEY (`id`),
INDEX `idx_transaction_outputs_transaction_id` (`transaction_id`),
CONSTRAINT `fk_transaction_outputs_transaction_id`
FOREIGN KEY (`transaction_id`)
REFERENCES `transactions` (`id`),
CONSTRAINT `fk_transaction_outputs_address_id`
FOREIGN KEY (`address_id`)
REFERENCES `addresses` (`id`)
);


@@ -1 +0,0 @@
DROP TABLE `transaction_inputs`;


@@ -1,18 +0,0 @@
CREATE TABLE `transaction_inputs`
(
`id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
`transaction_id` BIGINT UNSIGNED NULL,
`previous_transaction_output_id` BIGINT UNSIGNED NOT NULL,
`index` INT UNSIGNED NOT NULL,
`signature_script` BLOB NOT NULL,
`sequence` BIGINT UNSIGNED NOT NULL,
PRIMARY KEY (`id`),
INDEX `idx_transaction_inputs_transaction_id` (`transaction_id`),
INDEX `idx_transaction_inputs_previous_transaction_output_id` (`previous_transaction_output_id`),
CONSTRAINT `fk_transaction_inputs_transaction_id`
FOREIGN KEY (`transaction_id`)
REFERENCES `transactions` (`id`),
CONSTRAINT `fk_transaction_inputs_previous_transaction_output_id`
FOREIGN KEY (`previous_transaction_output_id`)
REFERENCES `transaction_outputs` (`id`)
);


@@ -1,9 +0,0 @@
package mqtt
import "github.com/daglabs/btcd/util/panics"
import "github.com/daglabs/btcd/apiserver/logger"
var (
log = logger.BackendLog.Logger("MQTT")
spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)
)


@@ -1,74 +0,0 @@
package mqtt
import (
"encoding/json"
"github.com/daglabs/btcd/apiserver/config"
mqtt "github.com/eclipse/paho.mqtt.golang"
"github.com/pkg/errors"
)
// client is an instance of the MQTT client, in case we have an active connection
var client mqtt.Client
const (
qualityOfService = 2
quiesceMilliseconds = 250
)
// GetClient returns an instance of the MQTT client, in case we have an active connection
func GetClient() (mqtt.Client, error) {
if client == nil {
return nil, errors.New("MQTT is not connected")
}
return client, nil
}
func isConnected() bool {
return client != nil
}
// Connect initiates a connection to the MQTT server, if defined
func Connect() error {
cfg := config.ActiveConfig()
if cfg.MQTTBrokerAddress == "" {
// MQTT broker not defined -- nothing to do
return nil
}
options := mqtt.NewClientOptions()
options.AddBroker(cfg.MQTTBrokerAddress)
options.SetUsername(cfg.MQTTUser)
options.SetPassword(cfg.MQTTPassword)
options.SetAutoReconnect(true)
newClient := mqtt.NewClient(options)
if token := newClient.Connect(); token.Wait() && token.Error() != nil {
return token.Error()
}
client = newClient
return nil
}
// Close closes the connection to the MQTT server, if previously connected
func Close() {
if client == nil {
return
}
client.Disconnect(quiesceMilliseconds)
client = nil
}
func publish(topic string, data interface{}) error {
payload, err := json.Marshal(data)
if err != nil {
return err
}
token := client.Publish(topic, qualityOfService, false, payload)
token.Wait()
if token.Error() != nil {
return errors.WithStack(token.Error())
}
return nil
}
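On the consuming side, a subscriber can watch the topics published by this package with the same paho client; the wildcard topic and handler body here are a sketch, not part of the package:

```go
func subscribeToTransactionNotifications(client mqtt.Client) error {
	// "transactions/#" matches the per-address topics published below.
	token := client.Subscribe("transactions/#", qualityOfService,
		func(_ mqtt.Client, msg mqtt.Message) {
			log.Infof("Received on %s: %s", msg.Topic(), msg.Payload())
		})
	token.Wait()
	return token.Error()
}
```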


@@ -1,19 +0,0 @@
package mqtt
import (
"github.com/daglabs/btcd/apiserver/controllers"
)
const selectedTipTopic = "dag/selected-tip"
// PublishSelectedTipNotification publishes a notification for a new selected tip
func PublishSelectedTipNotification(selectedTipHash string) error {
if !isConnected() {
return nil
}
block, err := controllers.GetBlockByHashHandler(selectedTipHash)
if err != nil {
return err
}
return publish(selectedTipTopic, block)
}


@@ -1,117 +0,0 @@
package mqtt
import (
"github.com/daglabs/btcd/apiserver/apimodels"
"github.com/daglabs/btcd/apiserver/controllers"
"github.com/daglabs/btcd/btcjson"
"github.com/daglabs/btcd/rpcclient"
"github.com/daglabs/btcd/util/daghash"
"path"
)
// PublishTransactionsNotifications publishes a notification for each transaction of the given block
func PublishTransactionsNotifications(rawTransactions []btcjson.TxRawResult) error {
if !isConnected() {
return nil
}
transactionIDs := make([]string, len(rawTransactions))
for i, tx := range rawTransactions {
transactionIDs[i] = tx.TxID
}
transactions, err := controllers.GetTransactionsByIDsHandler(transactionIDs)
if err != nil {
return err
}
for _, transaction := range transactions {
err = publishTransactionNotifications(transaction, "transactions")
if err != nil {
return err
}
}
return nil
}
func publishTransactionNotifications(transaction *apimodels.TransactionResponse, topic string) error {
addresses := uniqueAddressesForTransaction(transaction)
for _, address := range addresses {
err := publishTransactionNotificationForAddress(transaction, address, topic)
if err != nil {
return err
}
}
return nil
}
func uniqueAddressesForTransaction(transaction *apimodels.TransactionResponse) []string {
addressesMap := make(map[string]struct{})
addresses := []string{}
for _, output := range transaction.Outputs {
if _, exists := addressesMap[output.Address]; !exists {
addresses = append(addresses, output.Address)
addressesMap[output.Address] = struct{}{}
}
}
for _, input := range transaction.Inputs {
if _, exists := addressesMap[input.Address]; !exists {
addresses = append(addresses, input.Address)
addressesMap[input.Address] = struct{}{}
}
}
return addresses
}
func publishTransactionNotificationForAddress(transaction *apimodels.TransactionResponse, address string, topic string) error {
return publish(path.Join(topic, address), transaction)
}
// PublishAcceptedTransactionsNotifications publishes a notification for each accepted transaction of the given chain-block
func PublishAcceptedTransactionsNotifications(addedChainBlocks []*rpcclient.ChainBlock) error {
for _, addedChainBlock := range addedChainBlocks {
for _, acceptedBlock := range addedChainBlock.AcceptedBlocks {
transactionIDs := make([]string, len(acceptedBlock.AcceptedTxIDs))
for i, acceptedTxID := range acceptedBlock.AcceptedTxIDs {
transactionIDs[i] = acceptedTxID.String()
}
transactions, err := controllers.GetTransactionsByIDsHandler(transactionIDs)
if err != nil {
return err
}
for _, transaction := range transactions {
err = publishTransactionNotifications(transaction, "transactions/accepted")
if err != nil {
return err
}
}
}
}
return nil
}
// PublishUnacceptedTransactionsNotifications publishes a notification for each unaccepted transaction of the given chain-block
func PublishUnacceptedTransactionsNotifications(removedChainHashes []*daghash.Hash) error {
for _, removedHash := range removedChainHashes {
transactionIDs, err := controllers.GetAcceptedTransactionIDsByBlockHashHandler(removedHash)
if err != nil {
return err
}
transactions, err := controllers.GetTransactionsByIDsHandler(transactionIDs)
if err != nil {
return err
}
for _, transaction := range transactions {
err = publishTransactionNotifications(transaction, "transactions/unaccepted")
if err != nil {
return err
}
}
}
return nil
}


@@ -1,9 +0,0 @@
package server
import "github.com/daglabs/btcd/util/panics"
import "github.com/daglabs/btcd/apiserver/logger"
var (
log = logger.BackendLog.Logger("REST")
spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)
)


@@ -1,172 +0,0 @@
package server
import (
"fmt"
"github.com/daglabs/btcd/httpserverutils"
"github.com/pkg/errors"
"net/http"
"strconv"
"github.com/daglabs/btcd/apiserver/controllers"
"github.com/gorilla/mux"
)
const (
routeParamTxID = "txID"
routeParamTxHash = "txHash"
routeParamAddress = "address"
routeParamBlockHash = "blockHash"
)
const (
queryParamSkip = "skip"
queryParamLimit = "limit"
queryParamOrder = "order"
)
const (
defaultGetTransactionsLimit = 100
defaultGetBlocksLimit = 25
defaultGetBlocksOrder = controllers.OrderDescending
)
func mainHandler(_ *httpserverutils.ServerContext, _ *http.Request, _ map[string]string, _ map[string]string, _ []byte) (interface{}, error) {
return struct {
Message string `json:"message"`
}{
Message: "API server is running",
}, nil
}
func addRoutes(router *mux.Router) {
router.HandleFunc("/", httpserverutils.MakeHandler(mainHandler))
router.HandleFunc(
fmt.Sprintf("/transaction/id/{%s}", routeParamTxID),
httpserverutils.MakeHandler(getTransactionByIDHandler)).
Methods("GET")
router.HandleFunc(
fmt.Sprintf("/transaction/hash/{%s}", routeParamTxHash),
httpserverutils.MakeHandler(getTransactionByHashHandler)).
Methods("GET")
router.HandleFunc(
fmt.Sprintf("/transactions/address/{%s}", routeParamAddress),
httpserverutils.MakeHandler(getTransactionsByAddressHandler)).
Methods("GET")
router.HandleFunc(
fmt.Sprintf("/utxos/address/{%s}", routeParamAddress),
httpserverutils.MakeHandler(getUTXOsByAddressHandler)).
Methods("GET")
router.HandleFunc(
fmt.Sprintf("/block/{%s}", routeParamBlockHash),
httpserverutils.MakeHandler(getBlockByHashHandler)).
Methods("GET")
router.HandleFunc(
"/blocks",
httpserverutils.MakeHandler(getBlocksHandler)).
Methods("GET")
router.HandleFunc(
"/fee-estimates",
httpserverutils.MakeHandler(getFeeEstimatesHandler)).
Methods("GET")
router.HandleFunc(
"/transaction",
httpserverutils.MakeHandler(postTransactionHandler)).
Methods("POST")
}
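For reference, a few illustrative requests against the routes registered above (host and port are assumptions):

```bash
curl http://localhost:8080/transaction/id/<txID>
curl http://localhost:8080/utxos/address/<address>
curl 'http://localhost:8080/blocks?skip=0&limit=25'
curl http://localhost:8080/fee-estimates
```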
func convertQueryParamToInt(queryParams map[string]string, param string, defaultValue int) (int, error) {
if _, ok := queryParams[param]; ok {
intValue, err := strconv.Atoi(queryParams[param])
if err != nil {
return 0, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity, errors.Wrapf(err, "Couldn't parse the '%s' query parameter", param))
}
return intValue, nil
}
return defaultValue, nil
}
func getTransactionByIDHandler(_ *httpserverutils.ServerContext, _ *http.Request, routeParams map[string]string, _ map[string]string,
_ []byte) (interface{}, error) {
return controllers.GetTransactionByIDHandler(routeParams[routeParamTxID])
}
func getTransactionByHashHandler(_ *httpserverutils.ServerContext, _ *http.Request, routeParams map[string]string, _ map[string]string,
_ []byte) (interface{}, error) {
return controllers.GetTransactionByHashHandler(routeParams[routeParamTxHash])
}
func getTransactionsByAddressHandler(_ *httpserverutils.ServerContext, _ *http.Request, routeParams map[string]string, queryParams map[string]string,
_ []byte) (interface{}, error) {
skip, err := convertQueryParamToInt(queryParams, queryParamSkip, 0)
if err != nil {
return nil, err
}
limit, err := convertQueryParamToInt(queryParams, queryParamLimit, defaultGetTransactionsLimit)
if err != nil {
return nil, err
}
return controllers.GetTransactionsByAddressHandler(routeParams[routeParamAddress], uint64(skip), uint64(limit))
}
func getUTXOsByAddressHandler(_ *httpserverutils.ServerContext, _ *http.Request, routeParams map[string]string, _ map[string]string,
_ []byte) (interface{}, error) {
return controllers.GetUTXOsByAddressHandler(routeParams[routeParamAddress])
}
func getBlockByHashHandler(_ *httpserverutils.ServerContext, _ *http.Request, routeParams map[string]string, _ map[string]string,
_ []byte) (interface{}, error) {
return controllers.GetBlockByHashHandler(routeParams[routeParamBlockHash])
}
func getFeeEstimatesHandler(_ *httpserverutils.ServerContext, _ *http.Request, _ map[string]string, _ map[string]string,
_ []byte) (interface{}, error) {
return controllers.GetFeeEstimatesHandler()
}
func getBlocksHandler(_ *httpserverutils.ServerContext, _ *http.Request, _ map[string]string, queryParams map[string]string,
_ []byte) (interface{}, error) {
skip, err := convertQueryParamToInt(queryParams, queryParamSkip, 0)
if err != nil {
return nil, err
}
limit, err := convertQueryParamToInt(queryParams, queryParamLimit, defaultGetBlocksLimit)
if err != nil {
return nil, err
}
order := defaultGetBlocksOrder
if orderParamValue, ok := queryParams[queryParamOrder]; ok {
if orderParamValue != controllers.OrderAscending && orderParamValue != controllers.OrderDescending {
return nil, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity, errors.Errorf("'%s' is not a valid value for the '%s' query parameter", orderParamValue, queryParamOrder))
}
order = orderParamValue
}
return controllers.GetBlocksHandler(order, uint64(skip), uint64(limit))
}
func postTransactionHandler(_ *httpserverutils.ServerContext, _ *http.Request, _ map[string]string, _ map[string]string,
requestBody []byte) (interface{}, error) {
return nil, controllers.PostTransaction(requestBody)
}


@@ -1,40 +0,0 @@
package server
import (
"context"
"github.com/daglabs/btcd/httpserverutils"
"net/http"
"time"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
)
const gracefulShutdownTimeout = 30 * time.Second
// Start starts the HTTP REST server and returns a
// function that gracefully shuts it down.
func Start(listenAddr string) func() {
router := mux.NewRouter()
router.Use(httpserverutils.AddRequestMetadataMiddleware)
router.Use(httpserverutils.RecoveryMiddleware)
router.Use(httpserverutils.LoggingMiddleware)
router.Use(httpserverutils.SetJSONMiddleware)
addRoutes(router)
httpServer := &http.Server{
Addr: listenAddr,
Handler: handlers.CORS()(router),
}
spawn(func() {
log.Errorf("%s", httpServer.ListenAndServe())
})
return func() {
ctx, cancel := context.WithTimeout(context.Background(), gracefulShutdownTimeout)
defer cancel()
err := httpServer.Shutdown(ctx)
if err != nil {
log.Errorf("Error shutting down HTTP server: %s", err)
}
}
}
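Callers hold on to the returned closure and invoke it during shutdown, as main() does:

```go
shutdownServer := server.Start(config.ActiveConfig().HTTPListen)
defer shutdownServer()
```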

File diff suppressed because it is too large


@@ -1,33 +1,18 @@
blockchain
==========
[![Build Status](http://img.shields.io/travis/btcsuite/btcd.svg)](https://travis-ci.org/btcsuite/btcd)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/daglabs/btcd/blockchain)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad/blockchain)
Package blockchain implements bitcoin block handling and chain selection rules.
The test coverage is currently only around 60%, but will be increasing over
time. See `test_coverage.txt` for the gocov coverage report. Alternatively, if
you are running a POSIX OS, you can run the `cov_report.sh` script for a
real-time report. Package blockchain is licensed under the liberal ISC license.
Package blockdag implements Kaspa block handling, organization of the blockDAG,
block sorting and UTXO-set maintenance.
The test coverage is currently only around 75%, but will be increasing over
time.
There is an associated blog post about the release of this package
[here](https://blog.conformal.com/btcchain-the-bitcoin-chain-package-from-bctd/).
## Kaspad BlockDAG Processing Overview
This package has intentionally been designed so it can be used as a standalone
package for any projects needing to handle processing of blocks into the bitcoin
block chain.
## Installation and Updating
```bash
$ go get -u github.com/daglabs/btcd/blockchain
```
## Bitcoin Chain Processing Overview
Before a block is allowed into the block chain, it must go through an intensive
series of validation rules. The following list serves as a general outline of
Before a block is allowed into the block DAG, it must go through an intensive
series of validation rules. The following list serves as a general outline of
those rules to provide some intuition into what is going on under the hood, but
is by no means exhaustive:
@@ -35,69 +20,22 @@ is by no means exhaustive:
- Perform a series of sanity checks on the block and its transactions such as
verifying proof of work, timestamps, number and character of transactions,
transaction amounts, script complexity, and merkle root calculations
- Compare the block against predetermined checkpoints for expected timestamps
and difficulty based on elapsed time since the checkpoint
- Save the most recent orphan blocks for a limited time in case their parent
blocks become available
- Stop processing if the block is an orphan as the rest of the processing
depends on the block's position within the block chain
blocks become available.
- Save blocks from the future for delayed processing
- Stop processing if the block is an orphan or delayed as the rest of the
processing depends on the block's position within the block chain
- Make sure the block does not violate finality rules
- Perform a series of more thorough checks that depend on the block's position
within the block chain such as verifying block difficulties adhere to
within the blockDAG such as verifying block difficulties adhere to
difficulty retarget rules, timestamps are after the median of the last
several blocks, all transactions are finalized, checkpoint blocks match, and
block versions are in line with the previous blocks
- Determine how the block fits into the chain and perform different actions
accordingly in order to ensure any side chains which have higher difficulty
than the main chain become the new main chain
- When a block is being connected to the main chain (either through
reorganization of a side chain to the main chain or just extending the
main chain), perform further checks on the block's transactions such as
verifying transaction duplicates, script complexity for the combination of
connected scripts, coinbase maturity, double spends, and connected
transaction values
- Determine how the block fits into the DAG and perform different actions
accordingly
- Run the transaction scripts to verify the spender is allowed to spend the
coins
- Run GhostDAG to fit the block in a canonical sorting
- Build the block's UTXO Set, as well as update the global UTXO Set accordingly
- Insert the block into the block database
## Examples
* [ProcessBlock Example](http://godoc.org/github.com/daglabs/btcd/blockchain#example-BlockChain-ProcessBlock)
Demonstrates how to create a new chain instance and use ProcessBlock to
attempt to add a block to the chain. This example intentionally
attempts to insert a duplicate genesis block to illustrate how an invalid
block is handled.
* [CompactToBig Example](http://godoc.org/github.com/daglabs/btcd/blockchain#example-CompactToBig)
Demonstrates how to convert the compact "bits" in a block header which
represent the target difficulty to a big integer and display it using the
typical hex notation.
* [BigToCompact Example](http://godoc.org/github.com/daglabs/btcd/blockchain#example-BigToCompact)
Demonstrates how to convert a target difficulty into the
compact "bits" in a block header which represent that target difficulty.
## GPG Verification Key
All official release tags are signed by Conformal so users can ensure the code
has not been tampered with and is coming from the btcsuite developers. To
verify the signature perform the following:
- Download the public key from the Conformal website at
https://opensource.conformal.com/GIT-GPG-KEY-conformal.txt
- Import the public key into your GPG keyring:
```bash
gpg --import GIT-GPG-KEY-conformal.txt
```
- Verify the release tag with the following command where `TAG_NAME` is a
placeholder for the specific tag:
```bash
git tag -v TAG_NAME
```
## License
Package blockchain is licensed under the [copyfree](http://copyfree.org) ISC
License.


@@ -6,13 +6,14 @@ package blockdag
import (
"fmt"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
)
func (dag *BlockDAG) addNodeToIndexWithInvalidAncestor(block *util.Block) error {
blockHeader := &block.MsgBlock().Header
newNode := newBlockNode(blockHeader, newSet(), dag.dagParams.K)
newNode, _ := dag.newBlockNode(blockHeader, newBlockSet())
newNode.status = statusInvalidAncestor
dag.index.AddNode(newNode)
return dag.index.flushToDB()
@@ -23,14 +24,15 @@ func (dag *BlockDAG) addNodeToIndexWithInvalidAncestor(block *util.Block) error
// the block DAG before adding it. The block is expected to have already
// gone through ProcessBlock before calling this function with it.
//
// The flags are also passed to checkBlockContext and connectToDAG. See
// The flags are also passed to checkBlockContext and connectToDAG. See
// their documentation for how the flags modify their behavior.
//
// This function MUST be called with the dagLock held (for writes).
func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) error {
parents, err := lookupParentNodes(block, dag)
if err != nil {
if rErr, ok := err.(RuleError); ok && rErr.ErrorCode == ErrInvalidAncestorBlock {
var ruleErr RuleError
if ok := errors.As(err, &ruleErr); ok && ruleErr.ErrorCode == ErrInvalidAncestorBlock {
err := dag.addNodeToIndexWithInvalidAncestor(block)
if err != nil {
return err
@@ -47,17 +49,17 @@ func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) er
}
// Create a new block node for the block and add it to the node index.
newNode := newBlockNode(&block.MsgBlock().Header, parents, dag.dagParams.K)
newNode, selectedParentAnticone := dag.newBlockNode(&block.MsgBlock().Header, parents)
newNode.status = statusDataStored
dag.index.AddNode(newNode)
// Insert the block into the database if it's not already there. Even
// Insert the block into the database if it's not already there. Even
// though it is possible the block will ultimately fail to connect, it
// has already passed all proof-of-work and validity tests which means
// it would be prohibitively expensive for an attacker to fill up the
// disk with a bunch of blocks that fail to connect. This is necessary
// disk with a bunch of blocks that fail to connect. This is necessary
// since it allows block download to be decoupled from the much more
// expensive connection logic. It also has some other nice properties
// expensive connection logic. It also has some other nice properties
// such as making blocks that never become part of the DAG or
// blocks that fail to connect available for further analysis.
err = dag.db.Update(func(dbTx database.Tx) error {
@@ -80,17 +82,17 @@ func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) er
}
}
block.SetChainHeight(newNode.chainHeight)
block.SetBlueScore(newNode.blueScore)
// Connect the passed block to the DAG. This also handles validation of the
// transaction scripts.
chainUpdates, err := dag.addBlock(newNode, parents, block, flags)
chainUpdates, err := dag.addBlock(newNode, block, selectedParentAnticone, flags)
if err != nil {
return err
}
// Notify the caller that the new block was accepted into the block
// DAG. The caller would typically want to react by relaying the
// DAG. The caller would typically want to react by relaying the
// inventory to other peers.
dag.dagLock.Unlock()
dag.sendNotification(NTBlockAdded, &BlockAddedNotificationData{
@@ -112,14 +114,14 @@ func lookupParentNodes(block *util.Block, blockDAG *BlockDAG) (blockSet, error)
header := block.MsgBlock().Header
parentHashes := header.ParentHashes
nodes := newSet()
nodes := newBlockSet()
for _, parentHash := range parentHashes {
node := blockDAG.index.LookupNode(parentHash)
if node == nil {
str := fmt.Sprintf("parent block %s is unknown", parentHashes)
str := fmt.Sprintf("parent block %s is unknown", parentHash)
return nil, ruleError(ErrParentBlockUnknown, str)
} else if blockDAG.index.NodeStatus(node).KnownInvalid() {
str := fmt.Sprintf("parent block %s is known to be invalid", parentHashes)
str := fmt.Sprintf("parent block %s is known to be invalid", parentHash)
return nil, ruleError(ErrInvalidAncestorBlock, str)
}


@@ -1,21 +1,17 @@
package blockdag
import (
"github.com/pkg/errors"
"errors"
"path/filepath"
"strings"
"testing"
"bou.ke/monkey"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/kaspanet/kaspad/dagconfig"
)
func TestMaybeAcceptBlockErrors(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestMaybeAcceptBlockErrors", Config{
DAGParams: &dagconfig.SimNetParams,
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
t.Fatalf("TestMaybeAcceptBlockErrors: Failed to setup DAG instance: %v", err)
@@ -38,8 +34,8 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
"Expected: %s, got: <nil>", ErrParentBlockUnknown)
}
ruleErr, ok := err.(RuleError)
if !ok {
var ruleErr RuleError
if ok := errors.As(err, &ruleErr); !ok {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
"Expected RuleError but got %s", err)
} else if ruleErr.ErrorCode != ErrParentBlockUnknown {
@@ -57,11 +53,11 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
// Add a valid block and mark it as invalid
block1 := blocks[1]
isOrphan, delay, err := dag.ProcessBlock(block1, BFNone)
isOrphan, isDelayed, err := dag.ProcessBlock(block1, BFNone)
if err != nil {
t.Fatalf("TestMaybeAcceptBlockErrors: Valid block unexpectedly returned an error: %s", err)
}
if delay != 0 {
if isDelayed {
t.Fatalf("TestMaybeAcceptBlockErrors: block 1 is too far in the future")
}
if isOrphan {
@@ -76,8 +72,7 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are invalid: "+
"Expected: %s, got: <nil>", ErrInvalidAncestorBlock)
}
ruleErr, ok = err.(RuleError)
if !ok {
if ok := errors.As(err, &ruleErr); !ok {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are invalid: "+
"Expected RuleError but got %s", err)
} else if ruleErr.ErrorCode != ErrInvalidAncestorBlock {
@@ -96,8 +91,7 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block due to bad context: "+
"Expected: %s, got: <nil>", ErrUnexpectedDifficulty)
}
ruleErr, ok = err.(RuleError)
if !ok {
if ok := errors.As(err, &ruleErr); !ok {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block due to bad context: "+
"Expected RuleError but got %s", err)
} else if ruleErr.ErrorCode != ErrUnexpectedDifficulty {
@@ -107,37 +101,4 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
// Set block2's bits back to valid for next tests
block2.MsgBlock().Header.Bits = originalBits
// Test rejecting the node due to database error
databaseErrorMessage := "database error"
guard := monkey.Patch(dbStoreBlock, func(dbTx database.Tx, block *util.Block) error {
return errors.New(databaseErrorMessage)
})
defer guard.Unpatch()
err = dag.maybeAcceptBlock(block2, BFNone)
if err == nil {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the node due to database error: "+
"Expected: %s, got: <nil>", databaseErrorMessage)
}
if !strings.Contains(err.Error(), databaseErrorMessage) {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the node due to database error: "+
"Unexpected error. Want: %s, got: %s", databaseErrorMessage, err)
}
guard.Unpatch()
// Test rejecting the node due to index error
indexErrorMessage := "index error"
guard = monkey.Patch((*blockIndex).flushToDB, func(_ *blockIndex) error {
return errors.New(indexErrorMessage)
})
defer guard.Unpatch()
err = dag.maybeAcceptBlock(block2, BFNone)
if err == nil {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the node due to index error: "+
"Expected %s, got: <nil>", indexErrorMessage)
}
if !strings.Contains(err.Error(), indexErrorMessage) {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the node due to index error: "+
"Unexpected error. Want: %s, got: %s", indexErrorMessage, err)
}
}


@@ -2,8 +2,6 @@ package blockdag
import (
"container/heap"
"github.com/daglabs/btcd/util/daghash"
)
// baseHeap is an implementation for heap.Interface that sorts blocks by their height
@@ -28,22 +26,14 @@ func (h *baseHeap) Pop() interface{} {
type upHeap struct{ baseHeap }
func (h upHeap) Less(i, j int) bool {
if h.baseHeap[i].blueScore == h.baseHeap[j].blueScore {
return daghash.HashToBig(h.baseHeap[i].hash).Cmp(daghash.HashToBig(h.baseHeap[j].hash)) < 0
}
return h.baseHeap[i].blueScore < h.baseHeap[j].blueScore
return h.baseHeap[i].less(h.baseHeap[j])
}
// downHeap extends baseHeap to include Less operation that traverses from top to bottom
type downHeap struct{ baseHeap }
func (h downHeap) Less(i, j int) bool {
if h.baseHeap[i].blueScore == h.baseHeap[j].blueScore {
return daghash.HashToBig(h.baseHeap[i].hash).Cmp(daghash.HashToBig(h.baseHeap[j].hash)) > 0
}
return h.baseHeap[i].blueScore > h.baseHeap[j].blueScore
return !h.baseHeap[i].less(h.baseHeap[j])
}
// blockHeap represents a mutable heap of Blocks, sorted by their height
@@ -77,7 +67,7 @@ func (bh blockHeap) Push(block *blockNode) {
// pushSet pushes a blockset to the heap.
func (bh blockHeap) pushSet(bs blockSet) {
for _, block := range bs {
for block := range bs {
heap.Push(bh.impl, block)
}
}


@@ -3,19 +3,28 @@ package blockdag
import (
"testing"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/util/daghash"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util/daghash"
)
// TestBlockHeap tests pushing, popping, and determining the length of the heap.
func TestBlockHeap(t *testing.T) {
block0Header := dagconfig.MainNetParams.GenesisBlock.Header
block0 := newBlockNode(&block0Header, newSet(), dagconfig.MainNetParams.K)
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestBlockHeap", Config{
DAGParams: &dagconfig.MainnetParams,
})
if err != nil {
t.Fatalf("TestBlockHeap: Failed to setup DAG instance: %s", err)
}
defer teardownFunc()
block0Header := dagconfig.MainnetParams.GenesisBlock.Header
block0, _ := dag.newBlockNode(&block0Header, newBlockSet())
block100000Header := Block100000.Header
block100000 := newBlockNode(&block100000Header, setFromSlice(block0), dagconfig.MainNetParams.K)
block100000, _ := dag.newBlockNode(&block100000Header, blockSetFromSlice(block0))
block0smallHash := newBlockNode(&block0Header, newSet(), dagconfig.MainNetParams.K)
block0smallHash, _ := dag.newBlockNode(&block0Header, newBlockSet())
block0smallHash.hash = &daghash.Hash{}
tests := []struct {


@@ -1,8 +1,8 @@
package blockdag
import (
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util/daghash"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/pkg/errors"
)


@@ -7,16 +7,13 @@ package blockdag
import (
"sync"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util/daghash"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/daghash"
)
// blockIndex provides facilities for keeping track of an in-memory index of the
// block chain. Although the name block chain suggests a single chain of
// blocks, it is actually a tree-shaped structure where any node can have
// multiple children. However, there can only be one active branch which does
// indeed form a chain from the tip all the way back to the genesis block.
// block DAG.
type blockIndex struct {
// The following fields are set when the instance is created and can't
// be changed afterwards, so there is no need to protect them with a
@@ -29,7 +26,7 @@ type blockIndex struct {
dirty map[*blockNode]struct{}
}
// newBlockIndex returns a new empty instance of a block index. The index will
// newBlockIndex returns a new empty instance of a block index. The index will
// be dynamically populated as block nodes are loaded from the database and
// manually added.
func newBlockIndex(db database.DB, dagParams *dagconfig.Params) *blockIndex {
@@ -46,19 +43,19 @@ func newBlockIndex(db database.DB, dagParams *dagconfig.Params) *blockIndex {
// This function is safe for concurrent access.
func (bi *blockIndex) HaveBlock(hash *daghash.Hash) bool {
bi.RLock()
defer bi.RUnlock()
_, hasBlock := bi.index[*hash]
bi.RUnlock()
return hasBlock
}
// LookupNode returns the block node identified by the provided hash. It will
// LookupNode returns the block node identified by the provided hash. It will
// return nil if there is no entry for the hash.
//
// This function is safe for concurrent access.
func (bi *blockIndex) LookupNode(hash *daghash.Hash) *blockNode {
bi.RLock()
defer bi.RUnlock()
node := bi.index[*hash]
bi.RUnlock()
return node
}
@@ -68,9 +65,9 @@ func (bi *blockIndex) LookupNode(hash *daghash.Hash) *blockNode {
// This function is safe for concurrent access.
func (bi *blockIndex) AddNode(node *blockNode) {
bi.Lock()
defer bi.Unlock()
bi.addNode(node)
bi.dirty[node] = struct{}{}
bi.Unlock()
}
// addNode adds the provided node to the block index, but does not mark it as
@@ -86,8 +83,8 @@ func (bi *blockIndex) addNode(node *blockNode) {
// This function is safe for concurrent access.
func (bi *blockIndex) NodeStatus(node *blockNode) blockStatus {
bi.RLock()
defer bi.RUnlock()
status := node.status
bi.RUnlock()
return status
}
@@ -98,9 +95,9 @@ func (bi *blockIndex) NodeStatus(node *blockNode) blockStatus {
// This function is safe for concurrent access.
func (bi *blockIndex) SetStatusFlags(node *blockNode, flags blockStatus) {
bi.Lock()
defer bi.Unlock()
node.status |= flags
bi.dirty[node] = struct{}{}
bi.Unlock()
}
// UnsetStatusFlags flips the provided status flags on the block node to off,
@@ -109,9 +106,9 @@ func (bi *blockIndex) SetStatusFlags(node *blockNode, flags blockStatus) {
// This function is safe for concurrent access.
func (bi *blockIndex) UnsetStatusFlags(node *blockNode, flags blockStatus) {
bi.Lock()
defer bi.Unlock()
node.status &^= flags
bi.dirty[node] = struct{}{}
bi.Unlock()
}
// flushToDB writes all dirty block nodes to the database. If all writes
@@ -137,8 +134,9 @@ func (bi *blockIndex) flushToDBWithTx(dbTx database.Tx) error {
return err
}
}
// If write was successful, clear the dirty set.
bi.dirty = make(map[*blockNode]struct{})
return nil
}
func (bi *blockIndex) clearDirtyEntries() {
bi.dirty = make(map[*blockNode]struct{})
}


@@ -1,58 +1,27 @@
package blockdag
import (
"github.com/pkg/errors"
"strings"
"testing"
"time"
"bou.ke/monkey"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/database"
"github.com/kaspanet/kaspad/dagconfig"
)
func TestAncestorErrors(t *testing.T) {
node := newTestNode(newSet(), int32(0x10000000), 0, time.Unix(0, 0), dagconfig.MainNetParams.K)
node.chainHeight = 2
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimnetParams
dag, teardownFunc, err := DAGSetup("TestAncestorErrors", Config{
DAGParams: &params,
})
if err != nil {
t.Fatalf("TestAncestorErrors: Failed to setup DAG instance: %s", err)
}
defer teardownFunc()
node := newTestNode(dag, newBlockSet(), int32(0x10000000), 0, time.Unix(0, 0))
node.blueScore = 2
ancestor := node.SelectedAncestor(3)
if ancestor != nil {
t.Errorf("TestAncestorErrors: Ancestor() unexpectedly returned a node. Expected: <nil>")
}
}
func TestFlushToDBErrors(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestFlushToDBErrors", Config{
DAGParams: &dagconfig.SimNetParams,
})
if err != nil {
t.Fatalf("TestFlushToDBErrors: Failed to setup DAG instance: %s", err)
}
defer teardownFunc()
// Call flushToDB without anything to flush. This should succeed
err = dag.index.flushToDB()
if err != nil {
t.Errorf("TestFlushToDBErrors: flushToDB without anything to flush: "+
"Unexpected flushToDB error: %s", err)
}
// Mark the genesis block as dirty
dag.index.SetStatusFlags(dag.genesis, statusValid)
// Test flushToDB failure due to database error
databaseErrorMessage := "database error"
guard := monkey.Patch(dbStoreBlockNode, func(_ database.Tx, _ *blockNode) error {
return errors.New(databaseErrorMessage)
})
defer guard.Unpatch()
err = dag.index.flushToDB()
if err == nil {
t.Errorf("TestFlushToDBErrors: flushToDB failure due to database error: "+
"Expected: %s, got: <nil>", databaseErrorMessage)
}
if !strings.Contains(err.Error(), databaseErrorMessage) {
t.Errorf("TestFlushToDBErrors: flushToDB failure due to database error: "+
"Unexpected flushToDB error. Expected: %s, got: %s", databaseErrorMessage, err)
}
}


@@ -1,11 +1,11 @@
package blockdag
import (
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/pkg/errors"
)
// BlockLocator is used to help locate a specific block. The algorithm for
// BlockLocator is used to help locate a specific block. The algorithm for
// building the block locator is to add block hashes in reverse order on the
// block's selected parent chain until the desired stop block is reached.
// In order to keep the list of locator hashes to a reasonable number of entries,
@@ -21,100 +21,59 @@ import (
// [17 16 14 11 7 2 genesis]
type BlockLocator []*daghash.Hash
// BlockLocatorFromHashes returns a block locator from start and stop hash.
// BlockLocatorFromHashes returns a block locator from high and low hash.
// See BlockLocator for details on the algorithm used to create a block locator.
//
// In addition to the general algorithm referenced above, this function will
// return the block locator for the selected tip if the passed hash is not currently
// known.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) BlockLocatorFromHashes(startHash, stopHash *daghash.Hash) BlockLocator {
func (dag *BlockDAG) BlockLocatorFromHashes(highHash, lowHash *daghash.Hash) (BlockLocator, error) {
dag.dagLock.RLock()
defer dag.dagLock.RUnlock()
startNode := dag.index.LookupNode(startHash)
var stopNode *blockNode
if !stopHash.IsEqual(&daghash.ZeroHash) {
stopNode = dag.index.LookupNode(stopHash)
}
return dag.blockLocator(startNode, stopNode)
highNode := dag.index.LookupNode(highHash)
lowNode := dag.index.LookupNode(lowHash)
return dag.blockLocator(highNode, lowNode)
}
// LatestBlockLocator returns a block locator for the current tips of the DAG.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) LatestBlockLocator() BlockLocator {
dag.dagLock.RLock()
defer dag.dagLock.RUnlock()
return dag.blockLocator(nil, nil)
}
// blockLocator returns a block locator for the passed start and stop nodes.
// The default value for the start node is the selected tip, and the default
// values of the stop node is the genesis block.
//
// blockLocator returns a block locator for the passed high and low nodes.
// See the BlockLocator type comments for more details.
//
// This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) blockLocator(startNode, stopNode *blockNode) BlockLocator {
// Use the selected tip if requested.
if startNode == nil {
startNode = dag.virtual.selectedParent
}
if stopNode == nil {
stopNode = dag.genesis
}
// We use the selected parent of the start node, so the
// block locator won't contain the start node.
startNode = startNode.selectedParent
// If the start node or the stop node are not in the
// virtual's selected parent chain, we replace them with their
// closest selected parent that is part of the virtual's
// selected parent chain.
for !dag.IsInSelectedParentChain(stopNode.hash) {
stopNode = stopNode.selectedParent
}
for !dag.IsInSelectedParentChain(startNode.hash) {
startNode = startNode.selectedParent
}
// Calculate the max number of entries that will ultimately be in the
// block locator. See the description of the algorithm for how these
// numbers are derived.
// startNode.hash + stopNode.hash.
// Then floor(log2(startNode.chainHeight-stopNode.chainHeight)) entries for the skip portion.
maxEntries := 2 + util.FastLog2Floor(startNode.chainHeight-stopNode.chainHeight)
locator := make(BlockLocator, 0, maxEntries)
func (dag *BlockDAG) blockLocator(highNode, lowNode *blockNode) (BlockLocator, error) {
// We use the selected parent of the high node, so the
// block locator won't contain the high node.
highNode = highNode.selectedParent
node := highNode
step := uint64(1)
for node := startNode; node != nil; {
locator := make(BlockLocator, 0)
for node != nil {
locator = append(locator, node.hash)
// Nothing more to add once the stop node has been added.
if node.chainHeight == stopNode.chainHeight {
// Nothing more to add once the low node has been added.
if node.blueScore <= lowNode.blueScore {
if node != lowNode {
return nil, errors.Errorf("highNode and lowNode are " +
"not in the same selected parent chain.")
}
break
}
// Calculate chainHeight of previous node to include ensuring the
// final node is stopNode.
nextChainHeight := node.chainHeight - step
if nextChainHeight < stopNode.chainHeight {
nextChainHeight = stopNode.chainHeight
// Calculate blueScore of previous node to include ensuring the
// final node is lowNode.
nextBlueScore := node.blueScore - step
if nextBlueScore < lowNode.blueScore {
nextBlueScore = lowNode.blueScore
}
// Walk backwards through the nodes to the correct ancestor.
node = node.SelectedAncestor(nextChainHeight)
node = node.SelectedAncestor(nextBlueScore)
// Double the distance between included hashes.
step *= 2
}
return locator
return locator, nil
}
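The doubling step above spaces locator entries exponentially. Here is a minimal, self-contained sketch (a hypothetical helper over plain blue scores, not code from this change) that reproduces the spacing the loop produces:
package main
import "fmt"
// locatorBlueScores mirrors the walk in blockLocator, but over raw blue
// scores instead of blockNodes: append the current score, then double the
// gap each iteration, clamping the final entry to low.
func locatorBlueScores(high, low uint64) []uint64 {
	scores := []uint64{}
	step := uint64(1)
	for score := high; ; step *= 2 {
		scores = append(scores, score)
		if score <= low {
			break
		}
		if score-low < step {
			score = low // clamp so the last entry is exactly low
		} else {
			score -= step
		}
	}
	return scores
}
func main() {
	// Gaps of 1, 2, 4, 8, ... until the low bound is reached:
	fmt.Println(locatorBlueScores(17, 0)) // [17 16 14 10 2 0]
}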
// FindNextLocatorBoundaries returns the lowest unknown block locator, hash
@@ -123,21 +82,21 @@ func (dag *BlockDAG) blockLocator(startNode, stopNode *blockNode) BlockLocator {
// sync peer.
//
// This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) FindNextLocatorBoundaries(locator BlockLocator) (startHash, stopHash *daghash.Hash) {
func (dag *BlockDAG) FindNextLocatorBoundaries(locator BlockLocator) (highHash, lowHash *daghash.Hash) {
// Find the most recent locator block hash in the DAG. In the case none of
// the hashes in the locator are in the DAG, fall back to the genesis block.
stopNode := dag.genesis
lowNode := dag.genesis
nextBlockLocatorIndex := int64(len(locator) - 1)
for i, hash := range locator {
node := dag.index.LookupNode(hash)
if node != nil {
stopNode = node
lowNode = node
nextBlockLocatorIndex = int64(i) - 1
break
}
}
if nextBlockLocatorIndex < 0 {
return nil, stopNode.hash
return nil, lowNode.hash
}
return locator[nextBlockLocatorIndex], stopNode.hash
return locator[nextBlockLocatorIndex], lowNode.hash
}
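For clarity, a toy restatement of the boundary search above, using string "hashes" and a known-set map (purely illustrative, not part of the change):
package main
import "fmt"
// nextBoundaries scans the locator (ordered high to low) for the first hash
// we already know; the next request spans from the entry just above it down
// to the known one. "" stands in for nil when everything is already known.
func nextBoundaries(locator []string, known map[string]bool, genesis string) (high, low string) {
	lowest := genesis
	nextIndex := len(locator) - 1
	for i, hash := range locator {
		if known[hash] {
			lowest = hash
			nextIndex = i - 1
			break
		}
	}
	if nextIndex < 0 {
		return "", lowest // nothing left to request
	}
	return locator[nextIndex], lowest
}
func main() {
	locator := []string{"h5", "h4", "h2", "genesis"}
	known := map[string]bool{"h2": true, "genesis": true}
	fmt.Println(nextBoundaries(locator, known, "genesis")) // h4 h2
}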

View File

@@ -6,10 +6,13 @@ package blockdag
import (
"fmt"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/pkg/errors"
"math"
"time"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
// blockStatus is a bit field representing the validation state of the block.
@@ -28,11 +31,6 @@ const (
// statusInvalidAncestor indicates that one of the block's ancestors has
// failed validation, thus the block is also invalid.
statusInvalidAncestor
// statusNone indicates that the block has no validation state flags set.
//
// NOTE: This must be defined last in order to avoid influencing iota.
statusNone blockStatus = 0
)
// KnownValid returns whether the block is known to be valid. This will return
@@ -54,8 +52,8 @@ func (status blockStatus) KnownInvalid() bool {
type blockNode struct {
// NOTE: Additions, deletions, or modifications to the order of the
// definitions in this struct should not be changed without considering
// how it affects alignment on 64-bit platforms. The current order is
// specifically crafted to result in minimal padding. There will be
// hundreds of thousands of these in memory, so a few extra bytes of
// padding adds up.
@@ -75,19 +73,16 @@ type blockNode struct {
// blueScore is the count of all the blue blocks in this block's past
blueScore uint64
// bluesAnticoneSizes is a map holding the set of blues affected by this block and their
// modified blue anticone size.
bluesAnticoneSizes map[*blockNode]dagconfig.KType
// hash is the double sha 256 of the block.
hash *daghash.Hash
// height is the position in the block DAG.
height uint64
// chainHeight is the number of hops you need to go down the selected parent chain in order to get to the genesis block.
chainHeight uint64
// Some fields from block headers to aid in best chain selection and
// reconstructing headers from memory. These must be treated as
// immutable and are intentionally ordered to avoid padding on 64-bit
// platforms.
// Some fields from block headers to aid in reconstructing headers
// from memory. These must be treated as immutable and are intentionally
// ordered to avoid padding on 64-bit platforms.
version int32
bits uint32
nonce uint64
@@ -106,14 +101,17 @@ type blockNode struct {
isFinalized bool
}
// initBlockNode initializes a block node from the given header and parent nodes.
// This function is NOT safe for concurrent access. It must only be called when
// initially creating a node.
func initBlockNode(node *blockNode, blockHeader *wire.BlockHeader, parents blockSet, phantomK uint32) {
*node = blockNode{
parents: parents,
children: make(blockSet),
timestamp: time.Now().Unix(),
// newBlockNode returns a new block node for the given block header and parents, and the
// anticone of its selected parent (parent with highest blue score).
// selectedParentAnticone is used to update reachability data we store for future reachability queries.
// This function is NOT safe for concurrent access.
func (dag *BlockDAG) newBlockNode(blockHeader *wire.BlockHeader, parents blockSet) (node *blockNode, selectedParentAnticone []*blockNode) {
node = &blockNode{
parents: parents,
children: make(blockSet),
blueScore: math.MaxUint64, // Initialized to the max value to avoid collisions with the genesis block
timestamp: dag.AdjustedTime().Unix(),
bluesAnticoneSizes: make(map[*blockNode]dagconfig.KType),
}
// blockHeader is nil only for the virtual block
@@ -130,42 +128,34 @@ func initBlockNode(node *blockNode, blockHeader *wire.BlockHeader, parents block
node.hash = &daghash.ZeroHash
}
if len(parents) > 0 {
node.blues, node.selectedParent, node.blueScore = phantom(node, phantomK)
node.height = calculateNodeHeight(node)
node.chainHeight = calculateChainHeight(node)
if len(parents) == 0 {
// The genesis block is defined to have a blueScore of 0
node.blueScore = 0
return node, nil
}
}
func calculateNodeHeight(node *blockNode) uint64 {
if node.isGenesis() {
return 0
selectedParentAnticone, err := dag.ghostdag(node)
if err != nil {
panic(errors.Wrap(err, "unexpected error in GHOSTDAG"))
}
return node.parents.maxHeight() + 1
}
func calculateChainHeight(node *blockNode) uint64 {
if node.isGenesis() {
return 0
}
return node.selectedParent.chainHeight + 1
}
// newBlockNode returns a new block node for the given block header and parent
// nodes. This function is NOT safe for concurrent access.
func newBlockNode(blockHeader *wire.BlockHeader, parents blockSet, phantomK uint32) *blockNode {
var node blockNode
initBlockNode(&node, blockHeader, parents, phantomK)
return &node
return node, selectedParentAnticone
}
// updateParentsChildren updates the node's parents to point to new node
func (node *blockNode) updateParentsChildren() {
for _, parent := range node.parents {
for parent := range node.parents {
parent.children.add(node)
}
}
func (node *blockNode) less(other *blockNode) bool {
if node.blueScore == other.blueScore {
return daghash.Less(node.hash, other.hash)
}
return node.blueScore < other.blueScore
}
// Header constructs a block header from the node and returns it.
//
// This function is safe for concurrent access.
@@ -183,31 +173,31 @@ func (node *blockNode) Header() *wire.BlockHeader {
}
}
// SelectedAncestor returns the ancestor block node at the provided chain-height by following
// SelectedAncestor returns the ancestor block node at the provided blue score by following
// the selected-parents chain backwards from this node. The returned block will be nil when a
// height is requested that is after the height of the passed node.
// blue score is requested that is higher than the blue score of the passed node.
//
// This function is safe for concurrent access.
func (node *blockNode) SelectedAncestor(chainHeight uint64) *blockNode {
if chainHeight < 0 || chainHeight > node.chainHeight {
func (node *blockNode) SelectedAncestor(blueScore uint64) *blockNode {
if blueScore > node.blueScore {
return nil
}
n := node
for ; n != nil && n.chainHeight != chainHeight; n = n.selectedParent {
// Intentionally left blank
for n != nil && n.blueScore > blueScore {
n = n.selectedParent
}
return n
}
// RelativeAncestor returns the ancestor block node a relative 'distance' of
// chain-blocks before this node. This is equivalent to calling Ancestor with
// the node's chain-height minus provided distance.
// blue blocks before this node. This is equivalent to calling Ancestor with
// the node's blue score minus provided distance.
//
// This function is safe for concurrent access.
func (node *blockNode) RelativeAncestor(distance uint64) *blockNode {
return node.SelectedAncestor(node.chainHeight - distance)
return node.SelectedAncestor(node.blueScore - distance)
}
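A small sketch of the walk these two methods perform, using a stripped-down stand-in node type (illustrative only, not the real blockNode):
package main
import "fmt"
// toyNode is a minimal stand-in for blockNode.
type toyNode struct {
	blueScore      uint64
	selectedParent *toyNode
}
// selectedAncestor walks the selected-parent chain until it reaches the
// first ancestor whose blue score is at or below the requested one.
func (n *toyNode) selectedAncestor(blueScore uint64) *toyNode {
	if blueScore > n.blueScore {
		return nil
	}
	c := n
	for c != nil && c.blueScore > blueScore {
		c = c.selectedParent
	}
	return c
}
func main() {
	// Build a chain with blue scores 0 -> 3 -> 5 -> 9.
	genesis := &toyNode{blueScore: 0}
	a := &toyNode{blueScore: 3, selectedParent: genesis}
	b := &toyNode{blueScore: 5, selectedParent: a}
	tip := &toyNode{blueScore: 9, selectedParent: b}
	// Asking for blue score 4 lands on the first ancestor at or below it.
	fmt.Println(tip.selectedAncestor(4).blueScore) // 3
	// RelativeAncestor(distance) is SelectedAncestor(blueScore - distance).
	fmt.Println(tip.selectedAncestor(tip.blueScore - 4).blueScore) // 5
}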
// CalcPastMedianTime returns the median time of the previous few blocks

View File

@@ -1,86 +1,41 @@
package blockdag
import (
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util/daghash"
"testing"
)
func TestChainHeight(t *testing.T) {
phantomK := uint32(2)
buildNode := buildNodeGenerator(phantomK, true)
// This test ensures that bluesAnticoneSizes is serialized using the full size of KType.
// We verify that by serializing and deserializing a block node while making sure the value stays within the expected range.
func TestBlueAnticoneSizesSize(t *testing.T) {
dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizesSize", Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
t.Fatalf("TestBlueAnticoneSizesSize: Failed to setup DAG instance: %s", err)
}
defer teardownFunc()
node0 := buildNode(setFromSlice())
node1 := buildNode(setFromSlice(node0))
node2 := buildNode(setFromSlice(node0))
node3 := buildNode(setFromSlice(node0))
node4 := buildNode(setFromSlice(node1, node2, node3))
node5 := buildNode(setFromSlice(node1, node2, node3))
node6 := buildNode(setFromSlice(node1, node2, node3))
node7 := buildNode(setFromSlice(node0))
node8 := buildNode(setFromSlice(node7))
node9 := buildNode(setFromSlice(node8))
node10 := buildNode(setFromSlice(node9, node6))
k := dagconfig.KType(0)
k--
// Because nodes 7 & 8 were mined secretly, node10's selected
// parent will be node6, although node9 is higher. So in this
// case, node10.height and node10.chainHeight will be different
tests := []struct {
node *blockNode
expectedChainHeight uint64
}{
{
node: node0,
expectedChainHeight: 0,
},
{
node: node1,
expectedChainHeight: 1,
},
{
node: node2,
expectedChainHeight: 1,
},
{
node: node3,
expectedChainHeight: 1,
},
{
node: node4,
expectedChainHeight: 2,
},
{
node: node5,
expectedChainHeight: 2,
},
{
node: node6,
expectedChainHeight: 2,
},
{
node: node7,
expectedChainHeight: 1,
},
{
node: node8,
expectedChainHeight: 2,
},
{
node: node9,
expectedChainHeight: 3,
},
{
node: node10,
expectedChainHeight: 3,
},
if k < dagconfig.KType(0) {
t.Fatalf("KType must be unsigned")
}
for _, test := range tests {
if test.node.chainHeight != test.expectedChainHeight {
t.Errorf("block %v expected chain height %v but got %v", test.node, test.expectedChainHeight, test.node.chainHeight)
}
if calculateChainHeight(test.node) != test.expectedChainHeight {
t.Errorf("block %v expected calculated chain height %v but got %v", test.node, test.expectedChainHeight, test.node.chainHeight)
}
blockHeader := dagconfig.SimnetParams.GenesisBlock.Header
node, _ := dag.newBlockNode(&blockHeader, newBlockSet())
fakeBlue := &blockNode{hash: &daghash.Hash{1}}
dag.index.AddNode(fakeBlue)
// Setting maxKType to maximum value of KType.
// As we verify above that KType is unsigned we can be sure that maxKType is indeed the maximum value of KType.
maxKType := ^dagconfig.KType(0)
node.bluesAnticoneSizes[fakeBlue] = maxKType
serializedNode, _ := serializeBlockNode(node)
deserializedNode, _ := dag.deserializeBlockNode(serializedNode)
if deserializedNode.bluesAnticoneSizes[fakeBlue] != maxKType {
t.Fatalf("TestBlueAnticoneSizesSize: BlueAnticoneSize should not change when deserializing. Expected: %v but got %v",
maxKType, deserializedNode.bluesAnticoneSizes[fakeBlue])
}
}
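The ^dagconfig.KType(0) and k-- tricks above rely on unsigned wraparound. A standalone sketch with a local KType stand-in (assumed unsigned, as the test asserts):
package main
import "fmt"
// KType is a local stand-in for dagconfig.KType, assumed to be an unsigned
// integer type.
type KType uint8
func main() {
	// ^T(0) flips every bit of zero; for an unsigned type that is the
	// maximum representable value, without hard-coding the type's width.
	maxKType := ^KType(0)
	fmt.Println(maxKType) // 255 for a uint8-backed KType
	// Decrementing zero wraps around for unsigned types. If KType were ever
	// made signed, k would become -1 instead, which is what the test's
	// k < dagconfig.KType(0) guard is checking for.
	k := KType(0)
	k--
	fmt.Println(k) // 255
}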

View File

@@ -3,83 +3,72 @@ package blockdag
import (
"strings"
"github.com/daglabs/btcd/util/daghash"
"github.com/kaspanet/kaspad/util/daghash"
)
// blockSet implements a basic unsorted set of blocks
type blockSet map[daghash.Hash]*blockNode
type blockSet map[*blockNode]struct{}
// newSet creates a new, empty BlockSet
func newSet() blockSet {
return map[daghash.Hash]*blockNode{}
// newBlockSet creates a new, empty BlockSet
func newBlockSet() blockSet {
return map[*blockNode]struct{}{}
}
// setFromSlice converts a slice of blocks into an unordered set represented as map
func setFromSlice(blocks ...*blockNode) blockSet {
set := newSet()
for _, block := range blocks {
set.add(block)
// blockSetFromSlice converts a slice of blockNodes into an unordered set represented as map
func blockSetFromSlice(nodes ...*blockNode) blockSet {
set := newBlockSet()
for _, node := range nodes {
set.add(node)
}
return set
}
// maxHeight returns the height of the highest block in the block set
func (bs blockSet) maxHeight() uint64 {
var maxHeight uint64
for _, node := range bs {
if maxHeight < node.height {
maxHeight = node.height
}
}
return maxHeight
// add adds a blockNode to this BlockSet
func (bs blockSet) add(node *blockNode) {
bs[node] = struct{}{}
}
// add adds a block to this BlockSet
func (bs blockSet) add(block *blockNode) {
bs[*block.hash] = block
}
// remove removes a block from this BlockSet, if exists
// Does nothing if this set does not contain the block
func (bs blockSet) remove(block *blockNode) {
delete(bs, *block.hash)
// remove removes a blockNode from this BlockSet, if it exists
// Does nothing if this set does not contain the blockNode
func (bs blockSet) remove(node *blockNode) {
delete(bs, node)
}
// clone clones this block set
func (bs blockSet) clone() blockSet {
clone := newSet()
for _, block := range bs {
clone.add(block)
clone := newBlockSet()
for node := range bs {
clone.add(node)
}
return clone
}
// subtract returns the difference between the BlockSet and another BlockSet
func (bs blockSet) subtract(other blockSet) blockSet {
diff := newSet()
for _, block := range bs {
if !other.contains(block) {
diff.add(block)
diff := newBlockSet()
for node := range bs {
if !other.contains(node) {
diff.add(node)
}
}
return diff
}
// addSet adds all blocks in other set to this set
// addSet adds all blockNodes in other set to this set
func (bs blockSet) addSet(other blockSet) {
for _, block := range other {
bs.add(block)
for node := range other {
bs.add(node)
}
}
// addSlice adds provided slice to this set
func (bs blockSet) addSlice(slice []*blockNode) {
for _, block := range slice {
bs.add(block)
for _, node := range slice {
bs.add(node)
}
}
// union returns a BlockSet that contains all blocks included in this set,
// union returns a BlockSet that contains all blockNodes included in this set,
// the other set, or both
func (bs blockSet) union(other blockSet) blockSet {
union := bs.clone()
@@ -89,39 +78,16 @@ func (bs blockSet) union(other blockSet) blockSet {
return union
}
// contains returns true iff this set contains block
func (bs blockSet) contains(block *blockNode) bool {
_, ok := bs[*block.hash]
// contains returns true iff this set contains node
func (bs blockSet) contains(node *blockNode) bool {
_, ok := bs[node]
return ok
}
// containsHash returns true iff this set contains a block hash
func (bs blockSet) containsHash(hash *daghash.Hash) bool {
_, ok := bs[*hash]
return ok
}
// hashesEqual returns true if the given hashes are equal to the hashes
// of the blocks in this set.
// NOTE: The given hash slice must not contain duplicates.
func (bs blockSet) hashesEqual(hashes []*daghash.Hash) bool {
if len(hashes) != len(bs) {
return false
}
for _, hash := range hashes {
if _, wasFound := bs[*hash]; !wasFound {
return false
}
}
return true
}
// hashes returns the hashes of the blocks in this set.
// hashes returns the hashes of the blockNodes in this set.
func (bs blockSet) hashes() []*daghash.Hash {
hashes := make([]*daghash.Hash, 0, len(bs))
for _, node := range bs {
for node := range bs {
hashes = append(hashes, node.hash)
}
daghash.Sort(hashes)
@@ -130,27 +96,16 @@ func (bs blockSet) hashes() []*daghash.Hash {
func (bs blockSet) String() string {
nodeStrs := make([]string, 0, len(bs))
for _, node := range bs {
for node := range bs {
nodeStrs = append(nodeStrs, node.String())
}
return strings.Join(nodeStrs, ",")
}
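One consequence of keying the map by *blockNode instead of daghash.Hash is that membership is now decided by pointer identity rather than hash equality. A toy illustration (stand-in type, not the real blockNode):
package main
import "fmt"
type blockNode struct{ name string } // minimal stand-in for illustration
type blockSet map[*blockNode]struct{}
func (bs blockSet) add(node *blockNode)           { bs[node] = struct{}{} }
func (bs blockSet) contains(node *blockNode) bool { _, ok := bs[node]; return ok }
func main() {
	a := &blockNode{name: "a"}
	b := &blockNode{name: "b"}
	set := blockSet{}
	set.add(a)
	set.add(a) // adding the same pointer twice is a no-op
	set.add(b)
	fmt.Println(len(set), set.contains(a)) // 2 true
	// Membership is by pointer identity, not by hash: a second node value
	// equal to a is a distinct member.
	aCopy := &blockNode{name: "a"}
	fmt.Println(set.contains(aCopy)) // false
}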
// anyChildInSet returns true iff any child of block is contained within this set
func (bs blockSet) anyChildInSet(block *blockNode) bool {
for _, child := range block.children {
if bs.contains(child) {
return true
}
}
return false
}
func (bs blockSet) bluest() *blockNode {
var bluestNode *blockNode
var maxScore uint64
for _, node := range bs {
for node := range bs {
if bluestNode == nil ||
node.blueScore > maxScore ||
(node.blueScore == maxScore && daghash.Less(node.hash, bluestNode.hash)) {

View File

@@ -4,11 +4,11 @@ import (
"reflect"
"testing"
"github.com/daglabs/btcd/util/daghash"
"github.com/kaspanet/kaspad/util/daghash"
)
func TestHashes(t *testing.T) {
bs := setFromSlice(
bs := blockSetFromSlice(
&blockNode{
hash: &daghash.Hash{3},
},
@@ -49,33 +49,33 @@ func TestBlockSetSubtract(t *testing.T) {
}{
{
name: "both sets empty",
setA: setFromSlice(),
setB: setFromSlice(),
expectedResult: setFromSlice(),
setA: blockSetFromSlice(),
setB: blockSetFromSlice(),
expectedResult: blockSetFromSlice(),
},
{
name: "subtract an empty set",
setA: setFromSlice(node1),
setB: setFromSlice(),
expectedResult: setFromSlice(node1),
setA: blockSetFromSlice(node1),
setB: blockSetFromSlice(),
expectedResult: blockSetFromSlice(node1),
},
{
name: "subtract from empty set",
setA: setFromSlice(),
setB: setFromSlice(node1),
expectedResult: setFromSlice(),
setA: blockSetFromSlice(),
setB: blockSetFromSlice(node1),
expectedResult: blockSetFromSlice(),
},
{
name: "subtract unrelated set",
setA: setFromSlice(node1),
setB: setFromSlice(node2),
expectedResult: setFromSlice(node1),
setA: blockSetFromSlice(node1),
setB: blockSetFromSlice(node2),
expectedResult: blockSetFromSlice(node1),
},
{
name: "typical case",
setA: setFromSlice(node1, node2),
setB: setFromSlice(node2, node3),
expectedResult: setFromSlice(node1),
setA: blockSetFromSlice(node1, node2),
setB: blockSetFromSlice(node2, node3),
expectedResult: blockSetFromSlice(node1),
},
}
@@ -101,33 +101,33 @@ func TestBlockSetAddSet(t *testing.T) {
}{
{
name: "both sets empty",
setA: setFromSlice(),
setB: setFromSlice(),
expectedResult: setFromSlice(),
setA: blockSetFromSlice(),
setB: blockSetFromSlice(),
expectedResult: blockSetFromSlice(),
},
{
name: "add an empty set",
setA: setFromSlice(node1),
setB: setFromSlice(),
expectedResult: setFromSlice(node1),
setA: blockSetFromSlice(node1),
setB: blockSetFromSlice(),
expectedResult: blockSetFromSlice(node1),
},
{
name: "add to empty set",
setA: setFromSlice(),
setB: setFromSlice(node1),
expectedResult: setFromSlice(node1),
setA: blockSetFromSlice(),
setB: blockSetFromSlice(node1),
expectedResult: blockSetFromSlice(node1),
},
{
name: "add already added member",
setA: setFromSlice(node1, node2),
setB: setFromSlice(node1),
expectedResult: setFromSlice(node1, node2),
setA: blockSetFromSlice(node1, node2),
setB: blockSetFromSlice(node1),
expectedResult: blockSetFromSlice(node1, node2),
},
{
name: "typical case",
setA: setFromSlice(node1, node2),
setB: setFromSlice(node2, node3),
expectedResult: setFromSlice(node1, node2, node3),
setA: blockSetFromSlice(node1, node2),
setB: blockSetFromSlice(node2, node3),
expectedResult: blockSetFromSlice(node1, node2, node3),
},
}
@@ -153,33 +153,33 @@ func TestBlockSetAddSlice(t *testing.T) {
}{
{
name: "add empty slice to empty set",
set: setFromSlice(),
set: blockSetFromSlice(),
slice: []*blockNode{},
expectedResult: setFromSlice(),
expectedResult: blockSetFromSlice(),
},
{
name: "add an empty slice",
set: setFromSlice(node1),
set: blockSetFromSlice(node1),
slice: []*blockNode{},
expectedResult: setFromSlice(node1),
expectedResult: blockSetFromSlice(node1),
},
{
name: "add to empty set",
set: setFromSlice(),
set: blockSetFromSlice(),
slice: []*blockNode{node1},
expectedResult: setFromSlice(node1),
expectedResult: blockSetFromSlice(node1),
},
{
name: "add already added member",
set: setFromSlice(node1, node2),
set: blockSetFromSlice(node1, node2),
slice: []*blockNode{node1},
expectedResult: setFromSlice(node1, node2),
expectedResult: blockSetFromSlice(node1, node2),
},
{
name: "typical case",
set: setFromSlice(node1, node2),
set: blockSetFromSlice(node1, node2),
slice: []*blockNode{node2, node3},
expectedResult: setFromSlice(node1, node2, node3),
expectedResult: blockSetFromSlice(node1, node2, node3),
},
}
@@ -205,33 +205,33 @@ func TestBlockSetUnion(t *testing.T) {
}{
{
name: "both sets empty",
setA: setFromSlice(),
setB: setFromSlice(),
expectedResult: setFromSlice(),
setA: blockSetFromSlice(),
setB: blockSetFromSlice(),
expectedResult: blockSetFromSlice(),
},
{
name: "union against an empty set",
setA: setFromSlice(node1),
setB: setFromSlice(),
expectedResult: setFromSlice(node1),
setA: blockSetFromSlice(node1),
setB: blockSetFromSlice(),
expectedResult: blockSetFromSlice(node1),
},
{
name: "union from an empty set",
setA: setFromSlice(),
setB: setFromSlice(node1),
expectedResult: setFromSlice(node1),
setA: blockSetFromSlice(),
setB: blockSetFromSlice(node1),
expectedResult: blockSetFromSlice(node1),
},
{
name: "union with subset",
setA: setFromSlice(node1, node2),
setB: setFromSlice(node1),
expectedResult: setFromSlice(node1, node2),
setA: blockSetFromSlice(node1, node2),
setB: blockSetFromSlice(node1),
expectedResult: blockSetFromSlice(node1, node2),
},
{
name: "typical case",
setA: setFromSlice(node1, node2),
setB: setFromSlice(node2, node3),
expectedResult: setFromSlice(node1, node2, node3),
setA: blockSetFromSlice(node1, node2),
setB: blockSetFromSlice(node2, node3),
expectedResult: blockSetFromSlice(node1, node2, node3),
},
}
@@ -243,54 +243,3 @@ func TestBlockSetUnion(t *testing.T) {
}
}
}
func TestBlockSetHashesEqual(t *testing.T) {
node1 := &blockNode{hash: &daghash.Hash{10}}
node2 := &blockNode{hash: &daghash.Hash{20}}
tests := []struct {
name string
set blockSet
hashes []*daghash.Hash
expectedResult bool
}{
{
name: "empty set, no hashes",
set: setFromSlice(),
hashes: []*daghash.Hash{},
expectedResult: true,
},
{
name: "empty set, one hash",
set: setFromSlice(),
hashes: []*daghash.Hash{node1.hash},
expectedResult: false,
},
{
name: "set and hashes of different length",
set: setFromSlice(node1, node2),
hashes: []*daghash.Hash{node1.hash},
expectedResult: false,
},
{
name: "set equal to hashes",
set: setFromSlice(node1, node2),
hashes: []*daghash.Hash{node1.hash, node2.hash},
expectedResult: true,
},
{
name: "set equal to hashes, different order",
set: setFromSlice(node1, node2),
hashes: []*daghash.Hash{node2.hash, node1.hash},
expectedResult: true,
},
}
for _, test := range tests {
result := test.set.hashesEqual(test.hashes)
if result != test.expectedResult {
t.Errorf("blockSet.hashesEqual: unexpected result in test '%s'. "+
"Expected: %t, got: %t", test.name, test.expectedResult, result)
}
}
}

View File

@@ -1,7 +1,7 @@
package blockdag
import (
"github.com/daglabs/btcd/util"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
"math"
"math/big"
@@ -11,7 +11,7 @@ import (
type blockWindow []*blockNode
// blueBlockWindow returns a blockWindow of the given size that contains the
// blues in the past of startingNode, sorted by phantom order.
// blues in the past of startingNode, sorted by GHOSTDAG order.
// If the number of blues in the past of startingNode is less than windowSize,
// the window will be padded by genesis blocks to achieve a size of windowSize.
func blueBlockWindow(startingNode *blockNode, windowSize uint64) blockWindow {
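The padding contract can be sketched independently of the DAG (a hypothetical helper over plain IDs, matching the genesis-padded expectations in the test below):
package main
import "fmt"
// padWindow takes up to windowSize blues (already in GHOSTDAG order) and
// fills any remainder with genesis. Illustrative only; the real function
// walks actual blockNodes.
func padWindow(blues []string, genesis string, windowSize int) []string {
	window := make([]string, 0, windowSize)
	window = append(window, blues...)
	if len(window) > windowSize {
		window = window[:windowSize]
	}
	for len(window) < windowSize {
		window = append(window, genesis)
	}
	return window
}
func main() {
	// With only three blues and a window of five, genesis pads the tail.
	fmt.Println(padWindow([]string{"D", "C", "B"}, "A", 5)) // [D C B A A]
}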

View File

@@ -1,8 +1,8 @@
package blockdag
import (
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/util/daghash"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
"reflect"
"testing"
@@ -10,9 +10,17 @@ import (
)
func TestBlueBlockWindow(t *testing.T) {
params := dagconfig.SimNetParams
params := dagconfig.SimnetParams
params.K = 1
dag := newTestDAG(&params)
dag, teardownFunc, err := DAGSetup("TestBlueBlockWindow", Config{
DAGParams: &params,
})
if err != nil {
t.Fatalf("Failed to setup dag instance: %v", err)
}
defer teardownFunc()
resetExtraNonceForTest()
windowSize := uint64(10)
genesisNode := dag.genesis
@@ -21,7 +29,6 @@ func TestBlueBlockWindow(t *testing.T) {
idByBlockMap := make(map[*blockNode]string)
blockByIDMap["A"] = genesisNode
idByBlockMap[genesisNode] = "A"
blockVersion := int32(0x10000000)
blocksData := []*struct {
parents []string
@@ -46,12 +53,12 @@ func TestBlueBlockWindow(t *testing.T) {
{
parents: []string{"C", "D"},
id: "E",
expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"},
expectedWindowWithGenesisPadding: []string{"C", "D", "B", "A", "A", "A", "A", "A", "A", "A"},
},
{
parents: []string{"C", "D"},
id: "F",
expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"},
expectedWindowWithGenesisPadding: []string{"C", "D", "B", "A", "A", "A", "A", "A", "A", "A"},
},
{
parents: []string{"A"},
@@ -66,37 +73,37 @@ func TestBlueBlockWindow(t *testing.T) {
{
parents: []string{"H", "F"},
id: "I",
expectedWindowWithGenesisPadding: []string{"F", "D", "C", "B", "A", "A", "A", "A", "A", "A"},
expectedWindowWithGenesisPadding: []string{"F", "C", "D", "B", "A", "A", "A", "A", "A", "A"},
},
{
parents: []string{"I"},
id: "J",
expectedWindowWithGenesisPadding: []string{"I", "F", "D", "C", "B", "A", "A", "A", "A", "A"},
expectedWindowWithGenesisPadding: []string{"I", "F", "C", "D", "B", "A", "A", "A", "A", "A"},
},
{
parents: []string{"J"},
id: "K",
expectedWindowWithGenesisPadding: []string{"J", "I", "F", "D", "C", "B", "A", "A", "A", "A"},
expectedWindowWithGenesisPadding: []string{"J", "I", "F", "C", "D", "B", "A", "A", "A", "A"},
},
{
parents: []string{"K"},
id: "L",
expectedWindowWithGenesisPadding: []string{"K", "J", "I", "F", "D", "C", "B", "A", "A", "A"},
expectedWindowWithGenesisPadding: []string{"K", "J", "I", "F", "C", "D", "B", "A", "A", "A"},
},
{
parents: []string{"L"},
id: "M",
expectedWindowWithGenesisPadding: []string{"L", "K", "J", "I", "F", "D", "C", "B", "A", "A"},
expectedWindowWithGenesisPadding: []string{"L", "K", "J", "I", "F", "C", "D", "B", "A", "A"},
},
{
parents: []string{"M"},
id: "N",
expectedWindowWithGenesisPadding: []string{"M", "L", "K", "J", "I", "F", "D", "C", "B", "A"},
expectedWindowWithGenesisPadding: []string{"M", "L", "K", "J", "I", "F", "C", "D", "B", "A"},
},
{
parents: []string{"N"},
id: "O",
expectedWindowWithGenesisPadding: []string{"N", "M", "L", "K", "J", "I", "F", "D", "C", "B"},
expectedWindowWithGenesisPadding: []string{"N", "M", "L", "K", "J", "I", "F", "C", "D", "B"},
},
}
@@ -107,14 +114,26 @@ func TestBlueBlockWindow(t *testing.T) {
parent := blockByIDMap[parentID]
parents.add(parent)
}
node := newTestNode(parents, blockVersion, 0, blockTime, dag.dagParams.K)
node.hash = &daghash.Hash{} // It helps to predict hash order
for i, char := range blockData.id {
node.hash[i] = byte(char)
block, err := PrepareBlockForTest(dag, parents.hashes(), nil)
if err != nil {
t.Fatalf("block %v got unexpected error from PrepareBlockForTest: %v", blockData.id, err)
}
dag.index.AddNode(node)
node.updateParentsChildren()
utilBlock := util.NewBlock(block)
isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, BFNoPoWCheck)
if err != nil {
t.Fatalf("dag.ProcessBlock got unexpected error for block %v: %v", blockData.id, err)
}
if isDelayed {
t.Fatalf("block %s "+
"is too far in the future", blockData.id)
}
if isOrphan {
t.Fatalf("block %v was unexpectedly orphan", blockData.id)
}
node := dag.index.LookupNode(utilBlock.Hash())
blockByIDMap[blockData.id] = node
idByBlockMap[node] = blockData.id

View File

@@ -1,258 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"fmt"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/pkg/errors"
)
// CheckpointConfirmations is the number of blocks before the end of the current
// best block chain that a good checkpoint candidate must be.
const CheckpointConfirmations = 2016
// newHashFromStr converts the passed big-endian hex string into a
// daghash.Hash. It only differs from the one available in daghash in that
// it ignores the error since it will only (and must only) be called with
// hard-coded, and therefore known good, hashes.
func newHashFromStr(hexStr string) *daghash.Hash {
hash, _ := daghash.NewHashFromStr(hexStr)
return hash
}
// newTxIDFromStr converts the passed big-endian hex string into a
// daghash.TxID. It only differs from the one available in daghash in that
// it ignores the error since it will only (and must only) be called with
// hard-coded, and therefore known good, IDs.
func newTxIDFromStr(hexStr string) *daghash.TxID {
txID, _ := daghash.NewTxIDFromStr(hexStr)
return txID
}
// Checkpoints returns a slice of checkpoints (regardless of whether they are
// already known). When there are no checkpoints for the chain, it will return
// nil.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) Checkpoints() []dagconfig.Checkpoint {
return dag.checkpoints
}
// HasCheckpoints returns whether this BlockDAG has checkpoints defined.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) HasCheckpoints() bool {
return len(dag.checkpoints) > 0
}
// LatestCheckpoint returns the most recent checkpoint (regardless of whether it
// is already known). When there are no defined checkpoints for the active chain
// instance, it will return nil.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) LatestCheckpoint() *dagconfig.Checkpoint {
if !dag.HasCheckpoints() {
return nil
}
return &dag.checkpoints[len(dag.checkpoints)-1]
}
// verifyCheckpoint returns whether the passed block chain height and hash combination
// match the checkpoint data. It also returns true if there is no checkpoint
// data for the passed block chain height.
func (dag *BlockDAG) verifyCheckpoint(chainHeight uint64, hash *daghash.Hash) bool {
if !dag.HasCheckpoints() {
return true
}
// Nothing to check if there is no checkpoint data for the block chainHeight.
checkpoint, exists := dag.checkpointsByChainHeight[chainHeight]
if !exists {
return true
}
if !checkpoint.Hash.IsEqual(hash) {
return false
}
log.Infof("Verified checkpoint at chainHeight %d/block %s", checkpoint.ChainHeight,
checkpoint.Hash)
return true
}
// findPreviousCheckpoint finds the most recent checkpoint that is already
// available in the downloaded portion of the block chain and returns the
// associated block node. It returns nil if a checkpoint can't be found (this
// should really only happen for blocks before the first checkpoint).
//
// This function MUST be called with the DAG lock held (for reads).
func (dag *BlockDAG) findPreviousCheckpoint() (*blockNode, error) {
if !dag.HasCheckpoints() {
return nil, nil
}
// Perform the initial search to find and cache the latest known
// checkpoint if the best chain is not known yet or we haven't already
// previously searched.
checkpoints := dag.checkpoints
numCheckpoints := len(checkpoints)
if dag.checkpointNode == nil && dag.nextCheckpoint == nil {
// Loop backwards through the available checkpoints to find one
// that is already available.
for i := numCheckpoints - 1; i >= 0; i-- {
node := dag.index.LookupNode(checkpoints[i].Hash)
if node == nil {
continue
}
// Checkpoint found. Cache it for future lookups and
// set the next expected checkpoint accordingly.
dag.checkpointNode = node
if i < numCheckpoints-1 {
dag.nextCheckpoint = &checkpoints[i+1]
}
return dag.checkpointNode, nil
}
// No known latest checkpoint. This will only happen on blocks
// before the first known checkpoint. So, set the next expected
// checkpoint to the first checkpoint and return the fact there
// is no latest known checkpoint block.
dag.nextCheckpoint = &checkpoints[0]
return nil, nil
}
// At this point we've already searched for the latest known checkpoint,
// so when there is no next checkpoint, the current checkpoint lockin
// will always be the latest known checkpoint.
if dag.nextCheckpoint == nil {
return dag.checkpointNode, nil
}
// When there is a next checkpoint and the chain height of the current
// selected tip of the DAG does not exceed it, the current checkpoint
// lockin is still the latest known checkpoint.
if dag.selectedTip().chainHeight < dag.nextCheckpoint.ChainHeight {
return dag.checkpointNode, nil
}
// We've reached or exceeded the next checkpoint height. Note that
// once a checkpoint lockin has been reached, forks are prevented from
// any blocks before the checkpoint, so we don't have to worry about the
// checkpoint going away out from under us due to a chain reorganize.
// Cache the latest known checkpoint for future lookups. Note that if
// this lookup fails something is very wrong since the chain has already
// passed the checkpoint which was verified as accurate before inserting
// it.
checkpointNode := dag.index.LookupNode(dag.nextCheckpoint.Hash)
if checkpointNode == nil {
return nil, AssertError(fmt.Sprintf("findPreviousCheckpoint "+
"failed lookup of known good block node %s",
dag.nextCheckpoint.Hash))
}
dag.checkpointNode = checkpointNode
// Set the next expected checkpoint.
checkpointIndex := -1
for i := numCheckpoints - 1; i >= 0; i-- {
if checkpoints[i].Hash.IsEqual(dag.nextCheckpoint.Hash) {
checkpointIndex = i
break
}
}
dag.nextCheckpoint = nil
if checkpointIndex != -1 && checkpointIndex < numCheckpoints-1 {
dag.nextCheckpoint = &checkpoints[checkpointIndex+1]
}
return dag.checkpointNode, nil
}
// isNonstandardTransaction determines whether a transaction contains any
// scripts which are not one of the standard types.
func isNonstandardTransaction(tx *util.Tx) bool {
// Check all of the output public key scripts for non-standard scripts.
for _, txOut := range tx.MsgTx().TxOut {
scriptClass := txscript.GetScriptClass(txOut.ScriptPubKey)
if scriptClass == txscript.NonStandardTy {
return true
}
}
return false
}
// IsCheckpointCandidate returns whether or not the passed block is a good
// checkpoint candidate.
//
// The factors used to determine a good checkpoint are:
// - The block must be in the main chain
// - The block must be at least 'CheckpointConfirmations' blocks prior to the
// current end of the main chain
// - The timestamps for the blocks before and after the checkpoint must have
// timestamps which are also before and after the checkpoint, respectively
// (due to the median time allowance this is not always the case)
// - The block must not contain any strange transaction such as those with
// nonstandard scripts
//
// The intent is that candidates are reviewed by a developer to make the final
// decision and then manually added to the list of checkpoints for a network.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) IsCheckpointCandidate(block *util.Block) (bool, error) {
dag.dagLock.RLock()
defer dag.dagLock.RUnlock()
// A checkpoint must be in the DAG.
node := dag.index.LookupNode(block.Hash())
if node == nil {
return false, nil
}
// Ensure the chain height of the passed block and the entry for the block
// in the DAG match. This should always be the case unless the
// caller provided an invalid block.
if node.chainHeight != block.ChainHeight() {
return false, errors.Errorf("passed block chain height of %d does not "+
"match the its height in the DAG: %d", block.ChainHeight(),
node.chainHeight)
}
// A checkpoint must be at least CheckpointConfirmations blocks
// before the end of the main chain.
dagChainHeight := dag.selectedTip().chainHeight
if node.chainHeight > (dagChainHeight - CheckpointConfirmations) {
return false, nil
}
// A checkpoint must have at least one block after it.
//
// This should always succeed since the check above already made sure it
// is CheckpointConfirmations back, but be safe in case the constant
// changes.
if len(node.children) == 0 {
return false, nil
}
// A checkpoint must have at least one block before it.
if node.selectedParent == nil {
return false, nil
}
// A checkpoint must have transactions that only contain standard
// scripts.
for _, tx := range block.Transactions() {
if isNonstandardTransaction(tx) {
return false, nil
}
}
// All of the checks passed, so the block is a candidate.
return true, nil
}

View File

@@ -4,16 +4,16 @@ import (
"bufio"
"bytes"
"encoding/binary"
"github.com/daglabs/btcd/util/subnetworkid"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/pkg/errors"
"io"
"math"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/util/txsort"
"github.com/daglabs/btcd/wire"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/txsort"
"github.com/kaspanet/kaspad/wire"
)
// compactFeeData is a specialized data type to store a compact list of fees
@@ -257,7 +257,7 @@ func coinbaseInputAndOutputForBlueBlock(dag *BlockDAG, blueBlock *blockNode,
}
}
totalReward := CalcBlockSubsidy(blueBlock.height, dag.dagParams) + totalFees
totalReward := CalcBlockSubsidy(blueBlock.blueScore, dag.dagParams) + totalFees
if totalReward == 0 {
return txIn, nil, nil

View File

@@ -16,24 +16,13 @@ import (
"testing"
"time"
"github.com/daglabs/btcd/dagconfig"
_ "github.com/daglabs/btcd/database/ffldb"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
"github.com/kaspanet/kaspad/dagconfig"
_ "github.com/kaspanet/kaspad/database/ffldb"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
func loadBlocksWithLog(t *testing.T, filename string) ([]*util.Block, error) {
blocks, err := LoadBlocks(filename)
if err == nil {
t.Logf("Loaded %d blocks from file %s", len(blocks), filename)
for i, b := range blocks {
t.Logf("Block #%d: %s", i, b.Hash())
}
}
return blocks, err
}
// loadUTXOSet returns a utxo view loaded from a file.
func loadUTXOSet(filename string) (UTXOSet, error) {
// The utxostore file format is:
@@ -108,19 +97,14 @@ func (dag *BlockDAG) TestSetCoinbaseMaturity(maturity uint64) {
dag.dagParams.BlockCoinbaseMaturity = maturity
}
// newTestDAG returns a DAG that is usable for synthetic tests. It is
// important to note that this chain has no database associated with it, so
// important to note that this DAG has no database associated with it, so
// it is not usable with all functions and the tests must take care when making
// use of it.
func newTestDAG(params *dagconfig.Params) *BlockDAG {
// Create a genesis block node and block index index populated with it
// for use when creating the fake chain below.
node := newBlockNode(&params.GenesisBlock.Header, newSet(), params.K)
index := newBlockIndex(nil, params)
index.AddNode(node)
targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second)
return &BlockDAG{
dag := &BlockDAG{
dagParams: params,
timeSource: NewMedianTime(),
targetTimePerBlock: targetTimePerBlock,
@@ -128,16 +112,22 @@ func newTestDAG(params *dagconfig.Params) *BlockDAG {
TimestampDeviationTolerance: params.TimestampDeviationTolerance,
powMaxBits: util.BigToCompact(params.PowMax),
index: index,
virtual: newVirtualBlock(setFromSlice(node), params.K),
genesis: index.LookupNode(params.GenesisHash),
warningCaches: newThresholdCaches(vbNumBits),
deploymentCaches: newThresholdCaches(dagconfig.DefinedDeployments),
}
// Create a genesis block node and block index index populated with it
// on the above fake DAG.
dag.genesis, _ = dag.newBlockNode(&params.GenesisBlock.Header, newBlockSet())
index.AddNode(dag.genesis)
dag.virtual = newVirtualBlock(dag, blockSetFromSlice(dag.genesis))
return dag
}
// newTestNode creates a block node connected to the passed parents with the
// provided fields populated and fake values for the other fields.
func newTestNode(parents blockSet, blockVersion int32, bits uint32, timestamp time.Time, phantomK uint32) *blockNode {
func newTestNode(dag *BlockDAG, parents blockSet, blockVersion int32, bits uint32, timestamp time.Time) *blockNode {
// Make up a header and create a block node from it.
header := &wire.BlockHeader{
Version: blockVersion,
@@ -148,36 +138,16 @@ func newTestNode(parents blockSet, blockVersion int32, bits uint32, timestamp ti
AcceptedIDMerkleRoot: &daghash.ZeroHash,
UTXOCommitment: &daghash.ZeroHash,
}
return newBlockNode(header, parents, phantomK)
node, _ := dag.newBlockNode(header, parents)
return node
}
func addNodeAsChildToParents(node *blockNode) {
for _, parent := range node.parents {
for parent := range node.parents {
parent.children.add(node)
}
}
func buildNodeGenerator(phantomK uint32, withChildren bool) func(parents blockSet) *blockNode {
// For the purposes of these tests, we'll create blockNodes whose hashes are a
// series of numbers from 1 to 255.
hashCounter := byte(1)
buildNode := func(parents blockSet) *blockNode {
block := newBlockNode(nil, parents, phantomK)
block.hash = &daghash.Hash{hashCounter}
hashCounter++
return block
}
if withChildren {
return func(parents blockSet) *blockNode {
node := buildNode(parents)
addNodeAsChildToParents(node)
return node
}
}
return buildNode
}
// checkRuleError ensures the type of the two passed errors are of the
// same type (either both nil or both of type RuleError) and their error codes
// match when not nil.
@@ -198,7 +168,7 @@ func checkRuleError(gotErr, wantErr error) error {
return errors.Errorf("unexpected test error type %T", wantErr)
}
// Ensure the error codes match. It's safe to use a raw type assert
// here since the code above already proved they are the same type and
// the want error is a rule error.
gotErrorCode := gotErr.(RuleError).ErrorCode
@@ -209,3 +179,35 @@ func checkRuleError(gotErr, wantErr error) error {
return nil
}
func prepareAndProcessBlock(t *testing.T, dag *BlockDAG, parents ...*wire.MsgBlock) *wire.MsgBlock {
parentHashes := make([]*daghash.Hash, len(parents))
for i, parent := range parents {
parentHashes[i] = parent.BlockHash()
}
daghash.Sort(parentHashes)
block, err := PrepareBlockForTest(dag, parentHashes, nil)
if err != nil {
t.Fatalf("error in PrepareBlockForTest: %s", err)
}
utilBlock := util.NewBlock(block)
isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, BFNoPoWCheck)
if err != nil {
t.Fatalf("unexpected error in ProcessBlock: %s", err)
}
if isDelayed {
t.Fatalf("block is too far in the future")
}
if isOrphan {
t.Fatalf("block was unexpectedly orphan")
}
return block
}
func nodeByMsgBlock(t *testing.T, dag *BlockDAG, block *wire.MsgBlock) *blockNode {
node := dag.index.LookupNode(block.BlockHash())
if node == nil {
t.Fatalf("couldn't find block node with hash %s", block.BlockHash())
}
return node
}

View File

@@ -5,22 +5,22 @@
package blockdag
import (
"github.com/daglabs/btcd/btcec"
"github.com/daglabs/btcd/txscript"
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/kaspad/txscript"
)
// -----------------------------------------------------------------------------
// A variable length quantity (VLQ) is an encoding that uses an arbitrary number
// of binary octets to represent an arbitrarily large integer. The scheme
// employs a most significant byte (MSB) base-128 encoding where the high bit in
// each byte indicates whether or not the byte is the final one. In addition,
// to ensure there are no redundant encodings, an offset is subtracted every
// time a group of 7 bits is shifted out. Therefore each integer can be
// represented in exactly one way, and each representation stands for exactly
// one integer.
//
// Another nice property of this encoding is that it provides a compact
// representation of values that are typically used to indicate sizes. For
// example, the values 0 - 127 are represented with a single byte, 128 - 16511
// with two bytes, and 16512 - 2113663 with three bytes.
//
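A self-contained sketch of the scheme described above (illustrative appendVLQ/readVLQ helpers; the real putVLQ/deserializeVLQ operate against pre-sized byte slices):
package main
import "fmt"
// appendVLQ encodes n as MSB base-128 with the continuation bit in the high
// bit and an offset subtracted per 7-bit group, so every integer has exactly
// one encoding.
func appendVLQ(target []byte, n uint64) []byte {
	var buf [10]byte // enough for any uint64
	i := len(buf) - 1
	buf[i] = byte(n & 0x7f)
	for n > 0x7f {
		n = (n >> 7) - 1 // subtract the offset per shifted group
		i--
		buf[i] = byte(n&0x7f) | 0x80 // set the continuation bit
	}
	return append(target, buf[i:]...)
}
// readVLQ decodes the value and reports how many bytes were consumed.
func readVLQ(serialized []byte) (uint64, int) {
	var n uint64
	for i, val := range serialized {
		n = n<<7 | uint64(val&0x7f)
		if val&0x80 == 0 {
			return n, i + 1
		}
		n++ // re-add the offset for each continuation byte
	}
	return n, len(serialized)
}
func main() {
	// 0..127 take one byte, 128..16511 two, 16512.. three, as stated above.
	for _, v := range []uint64{0, 127, 128, 16511, 16512} {
		encoded := appendVLQ(nil, v)
		decoded, _ := readVLQ(encoded)
		fmt.Println(v, len(encoded), decoded == v)
	}
}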
@@ -60,7 +60,7 @@ func serializeSizeVLQ(n uint64) int {
// putVLQ serializes the provided number to a variable-length quantity according
// to the format described above and returns the number of bytes of the encoded
// value. The result is placed directly into the passed byte slice which must
// be at least large enough to handle the number of bytes returned by the
// serializeSizeVLQ function or it will panic.
func putVLQ(target []byte, n uint64) int {
@@ -88,7 +88,7 @@ func putVLQ(target []byte, n uint64) int {
}
// deserializeVLQ deserializes the provided variable-length quantity according
// to the format described above. It also returns the number of bytes
// deserialized.
func deserializeVLQ(serialized []byte) (uint64, int) {
var n uint64
@@ -108,8 +108,7 @@ func deserializeVLQ(serialized []byte) (uint64, int) {
// -----------------------------------------------------------------------------
// In order to reduce the size of stored scripts, a domain specific compression
// algorithm is used which recognizes standard scripts and stores them using
// less bytes than the original script. The compression algorithm used here was
// obtained from Bitcoin Core, so all credits for the algorithm go to it.
// fewer bytes than the original script.
//
// The general serialized format is:
//
@@ -147,22 +146,22 @@ const (
cstPayToScriptHash = 1
// cstPayToPubKeyComp2 identifies a compressed pay-to-pubkey script to
// a compressed pubkey. Bit 0 specifies which y-coordinate to use
// to reconstruct the full uncompressed pubkey.
cstPayToPubKeyComp2 = 2
// cstPayToPubKeyComp3 identifies a compressed pay-to-pubkey script to
// a compressed pubkey. Bit 0 specifies which y-coordinate to use
// to reconstruct the full uncompressed pubkey.
cstPayToPubKeyComp3 = 3
// cstPayToPubKeyUncomp4 identifies a compressed pay-to-pubkey script to
// an uncompressed pubkey. Bit 0 specifies which y-coordinate to use
// to reconstruct the full uncompressed pubkey.
cstPayToPubKeyUncomp4 = 4
// cstPayToPubKeyUncomp5 identifies a compressed pay-to-pubkey script to
// an uncompressed pubkey. Bit 0 specifies which y-coordinate to use
// to reconstruct the full uncompressed pubkey.
cstPayToPubKeyUncomp5 = 5
@@ -206,8 +205,8 @@ func isScriptHash(script []byte) (bool, []byte) {
// key along with the serialized pubkey it is paying to if it is.
//
// NOTE: This function ensures the public key is actually valid since the
// compression algorithm requires valid pubkeys. It does not support hybrid
// pubkeys. This means that even if the script has the correct form for a
// pay-to-pubkey script, this function will only return true when it is paying
// to a valid compressed or uncompressed pubkey.
func isPubKey(script []byte) (bool, []byte) {
@@ -218,7 +217,7 @@ func isPubKey(script []byte) (bool, []byte) {
// Ensure the public key is valid.
serializedPubKey := script[1:34]
_, err := btcec.ParsePubKey(serializedPubKey, btcec.S256())
_, err := ecc.ParsePubKey(serializedPubKey, ecc.S256())
if err == nil {
return true, serializedPubKey
}
@@ -230,7 +229,7 @@ func isPubKey(script []byte) (bool, []byte) {
// Ensure the public key is valid.
serializedPubKey := script[1:66]
_, err := btcec.ParsePubKey(serializedPubKey, btcec.S256())
_, err := ecc.ParsePubKey(serializedPubKey, ecc.S256())
if err == nil {
return true, serializedPubKey
}
@@ -293,7 +292,7 @@ func decodeCompressedScriptSize(serialized []byte) int {
// putCompressedScript compresses the passed script according to the domain
// specific compression algorithm described above directly into the passed
// target byte slice. The target byte slice must be at least large enough to
// handle the number of bytes returned by the compressedScriptSize function or
// it will panic.
func putCompressedScript(target, scriptPubKey []byte) int {
@@ -343,7 +342,7 @@ func putCompressedScript(target, scriptPubKey []byte) int {
//
// NOTE: The script parameter must already have been proven to be long enough
// to contain the number of bytes returned by decodeCompressedScriptSize or it
// will panic. This is acceptable since it is only an internal function.
func decompressScript(compressedScriptPubKey []byte) []byte {
// In practice this function will not be called with a zero-length or
// nil script since the nil script encoding includes the length, however
@@ -357,7 +356,7 @@ func decompressScript(compressedScriptPubKey []byte) []byte {
// Decode the script size and examine it for the special cases.
encodedScriptSize, bytesRead := deserializeVLQ(compressedScriptPubKey)
switch encodedScriptSize {
// Pay-to-pubkey-hash script. The resulting script is:
// <OP_DUP><OP_HASH160><20 byte hash><OP_EQUALVERIFY><OP_CHECKSIG>
case cstPayToPubKeyHash:
scriptPubKey := make([]byte, 25)
@@ -369,7 +368,7 @@ func decompressScript(compressedScriptPubKey []byte) []byte {
scriptPubKey[24] = txscript.OpCheckSig
return scriptPubKey
// Pay-to-script-hash script. The resulting script is:
// <OP_HASH160><20 byte script hash><OP_EQUAL>
case cstPayToScriptHash:
scriptPubKey := make([]byte, 23)
@@ -379,7 +378,7 @@ func decompressScript(compressedScriptPubKey []byte) []byte {
scriptPubKey[22] = txscript.OpEqual
return scriptPubKey
// Pay-to-compressed-pubkey script. The resulting script is:
// <OP_DATA_33><33 byte compressed pubkey><OP_CHECKSIG>
case cstPayToPubKeyComp2, cstPayToPubKeyComp3:
scriptPubKey := make([]byte, 35)
@@ -389,17 +388,17 @@ func decompressScript(compressedScriptPubKey []byte) []byte {
scriptPubKey[34] = txscript.OpCheckSig
return scriptPubKey
// Pay-to-uncompressed-pubkey script. The resulting script is:
// <OP_DATA_65><65 byte uncompressed pubkey><OP_CHECKSIG>
case cstPayToPubKeyUncomp4, cstPayToPubKeyUncomp5:
// Change the leading byte to the appropriate compressed pubkey
// identifier (0x02 or 0x03) so it can be decoded as a
// compressed pubkey. This really should never fail since the
// encoding ensures it is valid before compressing to this type.
compressedKey := make([]byte, 33)
compressedKey[0] = byte(encodedScriptSize - 2)
copy(compressedKey[1:], compressedScriptPubKey[1:])
key, err := btcec.ParsePubKey(compressedKey, btcec.S256())
key, err := ecc.ParsePubKey(compressedKey, ecc.S256())
if err != nil {
return nil
}
@@ -423,39 +422,38 @@ func decompressScript(compressedScriptPubKey []byte) []byte {
// -----------------------------------------------------------------------------
// In order to reduce the size of stored amounts, a domain specific compression
// algorithm is used which relies on there typically being a lot of zeroes at
// end of the amounts. The compression algorithm used here was obtained from
// Bitcoin Core, so all credits for the algorithm go to it.
// the end of the amounts.
//
// While this is simply exchanging one uint64 for another, the resulting value
// for typical amounts has a much smaller magnitude which results in fewer bytes
// when encoded as variable length quantity. For example, consider the amount
// of 0.1 BTC which is 10000000 satoshi. Encoding 10000000 as a VLQ would take
// of 0.1 KAS which is 10000000 sompi. Encoding 10000000 as a VLQ would take
// 4 bytes while encoding the compressed value of 8 as a VLQ only takes 1 byte.
//
// Essentially the compression is achieved by splitting the value into an
// exponent in the range [0-9] and a digit in the range [1-9], when possible,
// and encoding them in a way that can be decoded. More specifically, the
// encoding is as follows:
// - 0 is 0
// - Find the exponent, e, as the largest power of 10 that evenly divides the
// value up to a maximum of 9
// - When e < 9, the final digit can't be 0 so store it as d and remove it by
// dividing the value by 10 (call the result n). The encoded value is thus:
// 1 + 10*(9*n + d-1) + e
// - When e==9, the only thing known is the amount is not 0. The encoded value
// is thus:
// 1 + 10*(n-1) + e == 10 + 10*(n-1)
//
// Example encodings:
// (The numbers in parenthesis are the number of bytes when serialized as a VLQ)
// 0 (1) -> 0 (1) * 0.00000000 BTC
// 1000 (2) -> 4 (1) * 0.00001000 BTC
// 10000 (2) -> 5 (1) * 0.00010000 BTC
// 12345678 (4) -> 111111101(4) * 0.12345678 BTC
// 50000000 (4) -> 48 (1) * 0.50000000 BTC
// 100000000 (4) -> 9 (1) * 1.00000000 BTC
// 500000000 (5) -> 49 (1) * 5.00000000 BTC
// 1000000000 (5) -> 10 (1) * 10.00000000 BTC
// 0 (1) -> 0 (1) * 0.00000000 KAS
// 1000 (2) -> 4 (1) * 0.00001000 KAS
// 10000 (2) -> 5 (1) * 0.00010000 KAS
// 12345678 (4) -> 111111101(4) * 0.12345678 KAS
// 50000000 (4) -> 48 (1) * 0.50000000 KAS
// 100000000 (4) -> 9 (1) * 1.00000000 KAS
// 500000000 (5) -> 49 (1) * 5.00000000 KAS
// 1000000000 (5) -> 10 (1) * 10.00000000 KAS
// -----------------------------------------------------------------------------
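A sketch of the compression described above (names are illustrative; the in-tree function is compressTxOutAmount), reproducing the example encodings — note it yields 48 for 50000000, in agreement with the test table below:
package main
import "fmt"
// compressAmount splits the value into an exponent e (largest power of 10
// dividing it, capped at 9) and, when e < 9, the final non-zero digit d,
// then packs them as 1 + 10*(9*n + d-1) + e.
func compressAmount(amount uint64) uint64 {
	if amount == 0 {
		return 0
	}
	exponent := uint64(0)
	for amount%10 == 0 && exponent < 9 {
		amount /= 10
		exponent++
	}
	if exponent < 9 {
		lastDigit := amount % 10 // d in [1, 9], since e is maximal
		amount /= 10             // n
		return 1 + 10*(9*amount+lastDigit-1) + exponent
	}
	// e == 9: all we know is that the amount is non-zero.
	return 10 + 10*(amount-1)
}
func main() {
	// 1000 -> 4, 10000 -> 5, 12345678 -> 111111101, 50000000 -> 48,
	// 100000000 -> 9.
	for _, v := range []uint64{1000, 10000, 12345678, 50000000, 100000000} {
		fmt.Println(v, "->", compressAmount(v))
	}
}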
// compressTxOutAmount compresses the passed amount according to the domain
@@ -550,7 +548,7 @@ func compressedTxOutSize(amount uint64, scriptPubKey []byte) int {
// putCompressedTxOut compresses the passed amount and script according to their
// domain specific compression algorithms and encodes them directly into the
// passed target byte slice with the format described above. The target byte
// passed target byte slice with the format described above. The target byte
// slice must be at least large enough to handle the number of bytes returned by
// the compressedTxOutSize function or it will panic.
func putCompressedTxOut(target []byte, amount uint64, scriptPubKey []byte) int {

View File

@@ -11,7 +11,7 @@ import (
)
// hexToBytes converts the passed hex string into bytes and will panic if there
// is an error. This is only provided for the hard-coded constants so errors in
// the source code can be detected. It will only (and must only) be called with
// hard-coded values.
func hexToBytes(s string) []byte {
@@ -260,47 +260,47 @@ func TestAmountCompression(t *testing.T) {
compressed uint64
}{
{
name: "0 BTC",
name: "0 KAS",
uncompressed: 0,
compressed: 0,
},
{
name: "546 Satoshi (current network dust value)",
name: "546 Sompi (current network dust value)",
uncompressed: 546,
compressed: 4911,
},
{
name: "0.00001 BTC (typical transaction fee)",
name: "0.00001 KAS (typical transaction fee)",
uncompressed: 1000,
compressed: 4,
},
{
name: "0.0001 BTC (typical transaction fee)",
name: "0.0001 KAS (typical transaction fee)",
uncompressed: 10000,
compressed: 5,
},
{
name: "0.12345678 BTC",
name: "0.12345678 KAS",
uncompressed: 12345678,
compressed: 111111101,
},
{
name: "0.5 BTC",
name: "0.5 KAS",
uncompressed: 50000000,
compressed: 48,
},
{
name: "1 BTC",
name: "1 KAS",
uncompressed: 100000000,
compressed: 9,
},
{
name: "5 BTC",
name: "5 KAS",
uncompressed: 500000000,
compressed: 49,
},
{
name: "21000000 BTC (max minted coins)",
name: "21000000 KAS (max minted coins)",
uncompressed: 2100000000000000,
compressed: 21000000,
},
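As a sanity check on the table above, 546 compresses as follows: 546 is not divisible by 10, so e = 0; the final digit is d = 6, leaving n = 54; the encoded value is therefore 1 + 10*(9*54 + 6-1) + 0 = 1 + 10*491 = 4911, matching the expected compressed value.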
@@ -345,7 +345,7 @@ func TestCompressedTxOut(t *testing.T) {
compressed: hexToBytes("a52f001018853670f9f3b0582c5b9ee8ce93764ac32b93"),
},
{
name: "pay-to-pubkey uncompressed 1 BTC",
name: "pay-to-pubkey uncompressed 1 KAS",
amount: 100000000,
scriptPubKey: hexToBytes("4104192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b40d45264838c0bd96852662ce6a847b197376830160c6d2eb5e6a4c44d33f453eac"),
compressed: hexToBytes("0904192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -9,20 +9,21 @@ import (
"encoding/binary"
"encoding/json"
"fmt"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/pkg/errors"
"io"
"sync"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/binaryserializer"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/util/subnetworkid"
"github.com/daglabs/btcd/wire"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/binaryserializer"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/wire"
)
const (
// blockHdrSize is the size of a block header. This is simply the
// constant from wire and is only provided here for convenience since
// wire.MaxBlockHeaderPayload is quite long.
blockHdrSize = wire.MaxBlockHeaderPayload
@@ -33,7 +34,7 @@ const (
)
var (
// blockIndexBucketName is the name of the db bucket used to house to the
// blockIndexBucketName is the name of the database bucket used to house the
// block headers and contextual information.
blockIndexBucketName = []byte("blockheaderidx")
@@ -45,15 +46,19 @@ var (
// version of the utxo set currently in the database.
utxoSetVersionKeyName = []byte("utxosetversion")
// utxoSetBucketName is the name of the db bucket used to house the
// utxoSetBucketName is the name of the database bucket used to house the
// unspent transaction output set.
utxoSetBucketName = []byte("utxoset")
// utxoDiffsBucketName is the name of the db bucket used to house the
// utxoDiffsBucketName is the name of the database bucket used to house the
// diffs and diff children of blocks.
utxoDiffsBucketName = []byte("utxodiffs")
// subnetworksBucketName is the name of the db bucket used to store the
// reachabilityDataBucketName is the name of the database bucket used to house the
// reachability tree nodes and future covering sets of blocks.
reachabilityDataBucketName = []byte("reachability")
// subnetworksBucketName is the name of the database bucket used to store the
// subnetwork registry.
subnetworksBucketName = []byte("subnetworks")
@@ -78,8 +83,8 @@ func (e errNotInDAG) Error() string {
// isNotInDAGErr returns whether or not the passed error is an
// errNotInDAG error.
func isNotInDAGErr(err error) bool {
_, ok := err.(errNotInDAG)
return ok
var notInDAGErr errNotInDAG
return errors.As(err, &notInDAGErr)
}
// errDeserialize signifies that a problem was encountered when deserializing
@@ -94,12 +99,12 @@ func (e errDeserialize) Error() string {
// isDeserializeErr returns whether or not the passed error is an errDeserialize
// error.
func isDeserializeErr(err error) bool {
_, ok := err.(errDeserialize)
return ok
var deserializeErr errDeserialize
return errors.As(err, &deserializeErr)
}
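The change from a direct type assertion to errors.As in the two helpers above matters because this codebase wraps errors with github.com/pkg/errors: errors.As walks the wrap chain looking for a matching type, while a plain assertion only sees the outermost error. A minimal illustration (assumed, not part of this diff):

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

type errDeserialize string

func (e errDeserialize) Error() string { return string(e) }

func main() {
	// Wrapping hides the concrete type from a plain type assertion...
	err := errors.Wrap(errDeserialize("bad utxo entry"), "loading UTXO set")
	_, ok := err.(errDeserialize)
	fmt.Println(ok) // false

	// ...but errors.As unwraps until it finds a match.
	var target errDeserialize
	fmt.Println(errors.As(err, &target)) // true
}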
// dbPutVersion uses an existing database transaction to update the provided
// key in the metadata bucket to the given version. It is primarily used to
// track versions on entities such as buckets.
func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error {
var serialized [4]byte
@@ -110,10 +115,9 @@ func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error {
// -----------------------------------------------------------------------------
// The unspent transaction output (UTXO) set consists of an entry for each
// unspent output using a format that is optimized to reduce space using domain
// specific compression algorithms. This format is a slightly modified version
// of the format used in Bitcoin Core.
// specific compression algorithms.
//
// Each entry is keyed by an outpoint as specified below. It is important to
// note that the key encoding uses a VLQ, which employs an MSB encoding so
// iteration of UTXOs when doing byte-wise comparisons will produce them in
// order.
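The VLQ in question is the MSB-first base-128 variant used by btcd-derived code: the high bit of every byte except the last is set, and an extra increment per continuation byte keeps the encoding both minimal and byte-wise sortable. A decoder sketch under those assumptions (not shown in this diff):

package vlq

// deserializeVLQ decodes an MSB-first base-128 VLQ from the front of
// serialized and returns the value along with the number of bytes read.
func deserializeVLQ(serialized []byte) (uint64, int) {
	var n uint64
	var size int
	for _, val := range serialized {
		size++
		n = (n << 7) | uint64(val&0x7f)
		if val&0x80 != 0x80 {
			break
		}
		n++ // the +1 per continuation byte keeps encodings unique and sortable
	}
	return n, size
}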
@@ -140,8 +144,7 @@ func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error {
// bits 1-x - height of the block that contains the unspent txout
//
// Example 1:
// From tx in main blockchain:
// Blk 1, b7c3332bc138e2c9429818f5fed500bcc1746544218772389054dc8047d7cd3f:0
// b7c3332bc138e2c9429818f5fed500bcc1746544218772389054dc8047d7cd3f:0
//
// 03320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52
// <><------------------------------------------------------------------>
@@ -150,13 +153,12 @@ func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error {
//
// - header code: 0x03 (coinbase, height 1)
// - compressed txout:
// - 0x32: VLQ-encoded compressed amount for 5000000000 (50 BTC)
// - 0x32: VLQ-encoded compressed amount for 5000000000 (50 KAS)
// - 0x04: special script type pay-to-pubkey
// - 0x96...52: x-coordinate of the pubkey
//
// Example 2:
// From tx in main blockchain:
// Blk 113931, 4a16969aa4764dd7507fc1de7f0baa4850a246de90c45e59a3207f9a26b5036f:2
// 4a16969aa4764dd7507fc1de7f0baa4850a246de90c45e59a3207f9a26b5036f:2
//
// 8cf316800900b8025be1b3efc63b0ad48e7f9f10e87544528d58
// <----><------------------------------------------>
@@ -165,13 +167,12 @@ func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error {
//
// - header code: 0x8cf316 (not coinbase, height 113931)
// - compressed txout:
// - 0x8009: VLQ-encoded compressed amount for 15000000 (0.15 BTC)
// - 0x8009: VLQ-encoded compressed amount for 15000000 (0.15 KAS)
// - 0x00: special script type pay-to-pubkey-hash
// - 0xb8...58: pubkey hash
//
// Example 3:
// From tx in main blockchain:
// Blk 338156, 1b02d1c8cfef60a189017b9a420c682cf4a0028175f2f563209e4ff61c8c3620:22
// 1b02d1c8cfef60a189017b9a420c682cf4a0028175f2f563209e4ff61c8c3620:22
//
// a8a2588ba5b9e763011dd46a006572d820e448e12d2bbb38640bc718e6
// <----><-------------------------------------------------->
@@ -180,7 +181,7 @@ func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error {
//
// - header code: 0xa8a258 (not coinbase, height 338156)
// - compressed txout:
// - 0x8ba5b9e763: VLQ-encoded compressed amount for 366875659 (3.66875659 BTC)
// - 0x8ba5b9e763: VLQ-encoded compressed amount for 366875659 (3.66875659 KAS)
// - 0x01: special script type pay-to-script-hash
// - 0x1d...e6: script hash
// -----------------------------------------------------------------------------
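Putting the header-code rules above into code: bit 0 carries the coinbase flag and the remaining bits carry the blue score, so packing is a shift plus a mask, and the packed value is then VLQ-serialized. The sketch below uses hypothetical helper names; it reproduces Example 1 (0x03 = coinbase, height 1), and VLQ-decoding Example 2's 0x8cf316 likewise yields 227862 == 113931<<1.

package main

import "fmt"

// packHeaderCode packs the coinbase flag into bit 0 and the blue score
// into the remaining bits, per the format comment above.
func packHeaderCode(isCoinbase bool, blueScore uint64) uint64 {
	code := blueScore << 1
	if isCoinbase {
		code |= 0x01
	}
	return code
}

// unpackHeaderCode is the inverse.
func unpackHeaderCode(code uint64) (isCoinbase bool, blueScore uint64) {
	return code&0x01 != 0, code >> 1
}

func main() {
	fmt.Printf("%#x\n", packHeaderCode(true, 1)) // 0x3, matching Example 1
	isCoinbase, blueScore := unpackHeaderCode(0x03)
	fmt.Println(isCoinbase, blueScore) // true 1
}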
@@ -199,8 +200,8 @@ var outpointKeyPool = sync.Pool{
}
// outpointKey returns a key suitable for use as a database key in the UTXO set
// while making use of a free list. A new buffer is allocated if there are not
// already any available on the free list. The returned byte slice should be
// returned to the free list by using the recycleOutpointKey function when the
// caller is done with it _unless_ the slice will need to live for longer than
// the caller can calculate such as when used to write to the database.
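The free list described here is a sync.Pool of byte-slice pointers: Get reuses a recycled buffer when one is available and otherwise falls back to the pool's New function. A generic sketch of the pattern (the buffer capacity and names are hypothetical; the real outpointKeyPool sizes buffers for a hash plus a VLQ-encoded index):

package main

import (
	"fmt"
	"sync"
)

// Storing *[]byte rather than []byte avoids an extra allocation when
// slices pass through the pool's interface{} values.
var keyPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 0, 64) // hypothetical capacity
		return &b
	},
}

func borrowKey() *[]byte { return keyPool.Get().(*[]byte) }

func recycleKey(key *[]byte) { keyPool.Put(key) }

func main() {
	key := borrowKey()
	*key = append((*key)[:0], "some-db-key"...)
	fmt.Println(string(*key))
	recycleKey(key) // only safe once the database is done with the buffer
}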
@@ -223,7 +224,7 @@ func recycleOutpointKey(key *[]byte) {
}
// dbPutUTXODiff uses an existing database transaction to update the UTXO set
// in the database based on the provided UTXO view contents and state. In
// particular, only the entries that have been marked as modified are written
// to the database.
func dbPutUTXODiff(dbTx database.Tx, diff *UTXODiff) error {
@@ -244,7 +245,7 @@ func dbPutUTXODiff(dbTx database.Tx, diff *UTXODiff) error {
key := outpointKey(outpoint)
err := utxoBucket.Put(*key, serialized)
// NOTE: The key is intentionally not recycled here since the
// database interface contract prohibits modifications. It will
// be garbage collected normally when the database is done with
// it.
if err != nil {
@@ -295,7 +296,7 @@ func dbPutDAGState(dbTx database.Tx, state *dagState) error {
}
// createDAGState initializes both the database and the DAG state to the
// genesis block. This includes creating the necessary buckets, so it
// must only be called on an uninitialized database.
func (dag *BlockDAG) createDAGState() error {
// Create the initial the database DAG state including creating the
@@ -321,6 +322,11 @@ func (dag *BlockDAG) createDAGState() error {
return err
}
_, err = meta.CreateBucket(reachabilityDataBucketName)
if err != nil {
return err
}
err = dbPutVersion(dbTx, utxoSetVersionKeyName,
latestUTXOSetBucketVersion)
if err != nil {
@@ -371,6 +377,11 @@ func (dag *BlockDAG) removeDAGState() error {
return err
}
err = meta.DeleteBucket(reachabilityDataBucketName)
if err != nil {
return err
}
err = dbTx.Metadata().Delete(utxoSetVersionKeyName)
if err != nil {
return err
@@ -403,7 +414,7 @@ func dbPutLocalSubnetworkID(dbTx database.Tx, subnetworkID *subnetworkid.Subnetw
}
// initDAGState attempts to load and initialize the DAG state from the
// database. When the db does not yet contain any DAG state, both it and the
// DAG state are initialized to the genesis block.
func (dag *BlockDAG) initDAGState() error {
// Determine the state of the DAG database. We may need to initialize
@@ -419,10 +430,10 @@ func (dag *BlockDAG) initDAGState() error {
localSubnetworkID.SetBytes(localSubnetworkIDBytes)
}
if !localSubnetworkID.IsEqual(dag.subnetworkID) {
return errors.Errorf("Cannot start btcd with subnetwork ID %s because"+
return errors.Errorf("Cannot start kaspad with subnetwork ID %s because"+
" its database is already built with subnetwork ID %s. If you"+
" want to switch to a new database, please reset the"+
" database by starting btcd with --reset-db flag", dag.subnetworkID, localSubnetworkID)
" database by starting kaspad with --reset-db flag", dag.subnetworkID, localSubnetworkID)
}
}
return nil
@@ -433,7 +444,7 @@ func (dag *BlockDAG) initDAGState() error {
if !initialized {
// At this point the database has not already been initialized, so
// initialize both it and the chain state to the genesis block.
// initialize both it and the DAG state to the genesis block.
return dag.createDAGState()
}
@@ -451,7 +462,7 @@ func (dag *BlockDAG) initDAGState() error {
}
// Load all of the headers from the data for the known DAG
// and construct the block index accordingly. Since the
// number of nodes are already known, perform a single alloc
// for them versus a whole bunch of little ones to reduce
// pressure on the GC.
@@ -459,8 +470,6 @@ func (dag *BlockDAG) initDAGState() error {
blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
var i int32
var lastNode *blockNode
var unprocessedBlockNodes []*blockNode
cursor := blockIndexBucket.Cursor()
for ok := cursor.First(); ok; ok = cursor.Next() {
@@ -476,7 +485,14 @@ func (dag *BlockDAG) initDAGState() error {
continue
}
if lastNode == nil {
// If the node is known to be invalid add it as-is to the block
// index and continue.
if node.status.KnownInvalid() {
dag.index.addNode(node)
continue
}
if dag.blockCount == 0 {
if !node.hash.IsEqual(dag.dagParams.GenesisHash) {
return AssertError(fmt.Sprintf("initDAGState: Expected "+
"first entry in block index to be genesis block, "+
@@ -494,16 +510,11 @@ func (dag *BlockDAG) initDAGState() error {
node.updateParentsChildren()
dag.index.addNode(node)
if node.status.KnownValid() {
dag.blockCount++
}
lastNode = node
i++
dag.blockCount++
}
// Load all of the known UTXO entries and construct the full
// UTXO set accordingly. Since the number of entries is already
// known, perform a single alloc for them versus a whole bunch
// of little ones to reduce pressure on the GC.
log.Infof("Loading UTXO set...")
@@ -553,6 +564,12 @@ func (dag *BlockDAG) initDAGState() error {
fullUTXOCollection[*outpoint] = entry
}
// Initialize the reachability store
err = dag.reachabilityStore.init(dbTx)
if err != nil {
return err
}
// Apply the loaded utxoCollection to the virtual block.
dag.virtual.utxoSet, err = newFullUTXOSetFromUTXOCollection(fullUTXOCollection)
if err != nil {
@@ -560,7 +577,7 @@ func (dag *BlockDAG) initDAGState() error {
}
// Apply the stored tips to the virtual block.
tips := newSet()
tips := newBlockSet()
for _, tipHash := range state.TipHashes {
tip := dag.index.LookupNode(tipHash)
if tip == nil {
@@ -591,7 +608,10 @@ func (dag *BlockDAG) initDAGState() error {
// Attempt to accept the block.
block, err := dbFetchBlockByNode(dbTx, node)
isOrphan, delay, err := dag.ProcessBlock(block, BFWasStored)
if err != nil {
return err
}
isOrphan, isDelayed, err := dag.ProcessBlock(block, BFWasStored)
if err != nil {
log.Warnf("Block %s, which was not previously processed, "+
"failed to be accepted to the DAG: %s", node.hash, err)
@@ -605,7 +625,7 @@ func (dag *BlockDAG) initDAGState() error {
"previously processed, turned out to be an orphan, which is "+
"impossible.", node.hash))
}
if delay != 0 {
if isDelayed {
return AssertError(fmt.Sprintf("Block %s, which was not "+
"previously processed, turned out to be delayed, which is "+
"impossible.", node.hash))
@@ -637,8 +657,8 @@ func (dag *BlockDAG) deserializeBlockNode(blockRow []byte) (*blockNode, error) {
utxoCommitment: header.UTXOCommitment,
}
node.children = newSet()
node.parents = newSet()
node.children = newBlockSet()
node.parents = newBlockSet()
for _, hash := range header.ParentHashes {
parent := dag.index.LookupNode(hash)
@@ -684,8 +704,27 @@ func (dag *BlockDAG) deserializeBlockNode(blockRow []byte) (*blockNode, error) {
node.blues[i] = dag.index.LookupNode(hash)
}
node.height = calculateNodeHeight(node)
node.chainHeight = calculateChainHeight(node)
bluesAnticoneSizesLen, err := wire.ReadVarInt(buffer)
if err != nil {
return nil, err
}
node.bluesAnticoneSizes = make(map[*blockNode]dagconfig.KType)
for i := uint64(0); i < bluesAnticoneSizesLen; i++ {
hash := &daghash.Hash{}
if _, err := io.ReadFull(buffer, hash[:]); err != nil {
return nil, err
}
bluesAnticoneSize, err := binaryserializer.Uint8(buffer)
if err != nil {
return nil, err
}
blue := dag.index.LookupNode(hash)
if blue == nil {
return nil, errors.Errorf("couldn't find block with hash %s", hash)
}
node.bluesAnticoneSizes[blue] = dagconfig.KType(bluesAnticoneSize)
}
return node, nil
}
@@ -708,20 +747,17 @@ func dbFetchBlockByNode(dbTx database.Tx, node *blockNode) (*util.Block, error)
return block, nil
}
// dbStoreBlockNode stores the block node data into the block
// index bucket. This overwrites the current entry if there exists one.
func dbStoreBlockNode(dbTx database.Tx, node *blockNode) error {
// Serialize block data to be stored.
func serializeBlockNode(node *blockNode) ([]byte, error) {
w := bytes.NewBuffer(make([]byte, 0, blockHdrSize+1))
header := node.Header()
err := header.Serialize(w)
if err != nil {
return err
return nil, err
}
err = w.WriteByte(byte(node.status))
if err != nil {
return err
return nil, err
}
// Because genesis doesn't have selected parent, it's serialized as zero hash
@@ -731,32 +767,55 @@ func dbStoreBlockNode(dbTx database.Tx, node *blockNode) error {
}
_, err = w.Write(selectedParentHash[:])
if err != nil {
return err
return nil, err
}
err = binaryserializer.PutUint64(w, byteOrder, node.blueScore)
if err != nil {
return err
return nil, err
}
err = wire.WriteVarInt(w, uint64(len(node.blues)))
if err != nil {
return err
return nil, err
}
for _, blue := range node.blues {
_, err = w.Write(blue.hash[:])
if err != nil {
return err
return nil, err
}
}
value := w.Bytes()
err = wire.WriteVarInt(w, uint64(len(node.bluesAnticoneSizes)))
if err != nil {
return nil, err
}
for blue, blueAnticoneSize := range node.bluesAnticoneSizes {
_, err = w.Write(blue.hash[:])
if err != nil {
return nil, err
}
err = binaryserializer.PutUint8(w, uint8(blueAnticoneSize))
if err != nil {
return nil, err
}
}
return w.Bytes(), nil
}
// dbStoreBlockNode stores the block node data into the block
// index bucket. This overwrites the current entry if there exists one.
func dbStoreBlockNode(dbTx database.Tx, node *blockNode) error {
serializedNode, err := serializeBlockNode(node)
if err != nil {
return err
}
// Write block header data to block index bucket.
blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
key := BlockIndexKey(node.hash, node.blueScore)
return blockIndexBucket.Put(key, value)
return blockIndexBucket.Put(key, serializedNode)
}
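Read together with deserializeBlockNode above, serializeBlockNode produces the following on-disk layout, consumed in the same order:

	<block header> <status: 1 byte> <selected parent hash: 32 bytes>
	<blue score: 8 bytes> <varint: blues count> <blue hashes: 32 bytes each>
	<varint: bluesAnticoneSizes count> <pairs of hash (32 bytes) + anticone size (1 byte)>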
// dbStoreBlock stores the provided block in the database if it is not already
@@ -794,7 +853,7 @@ func (dag *BlockDAG) BlockByHash(hash *daghash.Hash) (*util.Block, error) {
// Lookup the block hash in block index and ensure it is in the DAG
node := dag.index.LookupNode(hash)
if node == nil {
str := fmt.Sprintf("block %s is not in the main chain", hash)
str := fmt.Sprintf("block %s is not in the DAG", hash)
return nil, errNotInDAG(str)
}
@@ -808,33 +867,33 @@ func (dag *BlockDAG) BlockByHash(hash *daghash.Hash) (*util.Block, error) {
return block, err
}
// BlockHashesFrom returns a slice of blocks starting from startHash
// ordered by blueScore. If startHash is nil then the genesis block is used.
// BlockHashesFrom returns a slice of blocks starting from lowHash
// ordered by blueScore. If lowHash is nil then the genesis block is used.
//
// This method MUST be called with the DAG lock held
func (dag *BlockDAG) BlockHashesFrom(startHash *daghash.Hash, limit int) ([]*daghash.Hash, error) {
func (dag *BlockDAG) BlockHashesFrom(lowHash *daghash.Hash, limit int) ([]*daghash.Hash, error) {
blockHashes := make([]*daghash.Hash, 0, limit)
if startHash == nil {
startHash = dag.genesis.hash
if lowHash == nil {
lowHash = dag.genesis.hash
// If we're starting from the beginning we should include the
// genesis hash in the result
blockHashes = append(blockHashes, dag.genesis.hash)
}
if !dag.BlockExists(startHash) {
return nil, errors.Errorf("block %s not found", startHash)
if !dag.IsInDAG(lowHash) {
return nil, errors.Errorf("block %s not found", lowHash)
}
blueScore, err := dag.BlueScoreByBlockHash(startHash)
blueScore, err := dag.BlueScoreByBlockHash(lowHash)
if err != nil {
return nil, err
}
err = dag.index.db.View(func(dbTx database.Tx) error {
blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
startKey := BlockIndexKey(startHash, blueScore)
lowKey := BlockIndexKey(lowHash, blueScore)
cursor := blockIndexBucket.Cursor()
cursor.Seek(startKey)
cursor.Seek(lowKey)
for ok := cursor.Next(); ok; ok = cursor.Next() {
key := cursor.Key()
blockHash, err := blockHashFromBlockIndexKey(key)
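BlockIndexKey itself does not appear in this hunk; for cursor.Seek and byte-wise cursor iteration to walk blocks in blueScore order, the key must sort on blue score first. A plausible, explicitly hypothetical reconstruction:

package main

import (
	"encoding/binary"
	"fmt"
)

const hashSize = 32 // daghash.HashSize

// blockIndexKey (hypothetical): big-endian blue score first, so byte-wise
// key order equals blue-score order, with the hash as a tiebreaker.
func blockIndexKey(blockHash *[hashSize]byte, blueScore uint64) []byte {
	key := make([]byte, 8+hashSize)
	binary.BigEndian.PutUint64(key[:8], blueScore)
	copy(key[8:], blockHash[:])
	return key
}

func main() {
	var hash [hashSize]byte
	fmt.Printf("%x\n", blockIndexKey(&hash, 1)[:8]) // 0000000000000001
}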

View File

@@ -10,8 +10,8 @@ import (
"reflect"
"testing"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util/daghash"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/daghash"
)
// TestErrNotInDAG ensures the functions related to errNotInDAG work
@@ -46,8 +46,6 @@ func TestUtxoSerialization(t *testing.T) {
entry *UTXOEntry
serialized []byte
}{
// From tx in main blockchain:
// b7c3332bc138e2c9429818f5fed500bcc1746544218772389054dc8047d7cd3f:0
{
name: "blue score 1, coinbase",
entry: &UTXOEntry{
@@ -58,8 +56,6 @@ func TestUtxoSerialization(t *testing.T) {
},
serialized: hexToBytes("03320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52"),
},
// From tx in main blockchain:
// 8131ffb0a2c945ecaf9b9063e59558784f9c3a74741ce6ae2a18d0571dac15bb:1
{
name: "blue score 100001, not coinbase",
entry: &UTXOEntry{
@@ -252,15 +248,28 @@ func TestDAGStateDeserializeErrors(t *testing.T) {
test.name, err, test.errType)
continue
}
if derr, ok := err.(database.Error); ok {
var dbErr database.Error
if ok := errors.As(err, &dbErr); ok {
tderr := test.errType.(database.Error)
if derr.ErrorCode != tderr.ErrorCode {
if dbErr.ErrorCode != tderr.ErrorCode {
t.Errorf("deserializeDAGState (%s): "+
"wrong error code got: %v, want: %v",
test.name, derr.ErrorCode,
test.name, dbErr.ErrorCode,
tderr.ErrorCode)
continue
}
}
}
}
// newHashFromStr converts the passed big-endian hex string into a
// daghash.Hash. It only differs from the one available in daghash in that
// it panics in case of an error since it will only (and must only) be
// called with hard-coded, and therefore known good, hashes.
func newHashFromStr(hexStr string) *daghash.Hash {
hash, err := daghash.NewHashFromStr(hexStr)
if err != nil {
panic(err)
}
return hash
}

View File

@@ -0,0 +1,73 @@
package blockdag
import (
"container/heap"
)
type baseDelayedBlocksHeap []*delayedBlock
func (h baseDelayedBlocksHeap) Len() int {
return len(h)
}
func (h baseDelayedBlocksHeap) Swap(i, j int) {
h[i], h[j] = h[j], h[i]
}
func (h *baseDelayedBlocksHeap) Push(x interface{}) {
*h = append(*h, x.(*delayedBlock))
}
func (h *baseDelayedBlocksHeap) Pop() interface{} {
oldHeap := *h
oldLength := len(oldHeap)
popped := oldHeap[oldLength-1]
*h = oldHeap[0 : oldLength-1]
return popped
}
func (h baseDelayedBlocksHeap) peek() interface{} {
if h.Len() > 0 {
return h[h.Len()-1]
}
return nil
}
func (h baseDelayedBlocksHeap) Less(i, j int) bool {
return h[j].processTime.After(h[i].processTime)
}
type delayedBlocksHeap struct {
baseDelayedBlocksHeap *baseDelayedBlocksHeap
impl heap.Interface
}
// newDelayedBlocksHeap initializes and returns a new delayedBlocksHeap
func newDelayedBlocksHeap() delayedBlocksHeap {
baseHeap := &baseDelayedBlocksHeap{}
h := delayedBlocksHeap{impl: baseHeap, baseDelayedBlocksHeap: baseHeap}
heap.Init(h.impl)
return h
}
// pop removes the block with lowest height from this heap and returns it
func (dbh delayedBlocksHeap) pop() *delayedBlock {
return heap.Pop(dbh.impl).(*delayedBlock)
}
// Push pushes the block onto the heap
func (dbh delayedBlocksHeap) Push(block *delayedBlock) {
heap.Push(dbh.impl, block)
}
// Len returns the length of this heap
func (dbh delayedBlocksHeap) Len() int {
return dbh.impl.Len()
}
// peek returns the topmost element in the queue without popping it
func (dbh delayedBlocksHeap) peek() *delayedBlock {
if dbh.baseDelayedBlocksHeap.peek() == nil {
return nil
}
return dbh.baseDelayedBlocksHeap.peek().(*delayedBlock)
}
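A usage sketch for the heap above: blocks pushed in any order come back from pop in ascending processTime order, since Less orders by processTime. Only the processTime field of delayedBlock is implied by this file; the struct literal below assumes that field name.

package blockdag

import "time"

func delayedHeapExample(now time.Time) *delayedBlock {
	h := newDelayedBlocksHeap()
	h.Push(&delayedBlock{processTime: now.Add(2 * time.Second)})
	h.Push(&delayedBlock{processTime: now.Add(1 * time.Second)})
	return h.pop() // the block whose processTime comes first (now+1s)
}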

View File

@@ -8,7 +8,7 @@ import (
"math/big"
"time"
"github.com/daglabs/btcd/util"
"github.com/kaspanet/kaspad/util"
)
// requiredDifficulty calculates the required difficulty for a

View File

@@ -5,14 +5,12 @@
package blockdag
import (
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
"github.com/kaspanet/kaspad/dagconfig"
"math/big"
"testing"
"time"
"github.com/daglabs/btcd/util"
"github.com/kaspanet/kaspad/util"
)
// TestBigToCompact ensures BigToCompact converts big integers to the expected
@@ -81,65 +79,77 @@ func TestCalcWork(t *testing.T) {
}
func TestDifficulty(t *testing.T) {
params := dagconfig.SimNetParams
params := dagconfig.SimnetParams
params.K = 1
dag := newTestDAG(&params)
nonce := uint64(0)
params.DifficultyAdjustmentWindowSize = 264
dag, teardownFunc, err := DAGSetup("TestDifficulty", Config{
DAGParams: &params,
})
if err != nil {
t.Fatalf("Failed to setup DAG instance: %v", err)
}
defer teardownFunc()
zeroTime := time.Unix(0, 0)
addNode := func(parents blockSet, blockTime time.Time) *blockNode {
bluestParent := parents.bluest()
if blockTime == zeroTime {
blockTime = time.Unix(bluestParent.timestamp+1, 0)
}
header := &wire.BlockHeader{
ParentHashes: parents.hashes(),
Bits: dag.requiredDifficulty(bluestParent, blockTime),
Nonce: nonce,
Timestamp: blockTime,
HashMerkleRoot: &daghash.ZeroHash,
AcceptedIDMerkleRoot: &daghash.ZeroHash,
UTXOCommitment: &daghash.ZeroHash,
block, err := PrepareBlockForTest(dag, parents.hashes(), nil)
if err != nil {
t.Fatalf("unexpected error in PrepareBlockForTest: %s", err)
}
node := newBlockNode(header, parents, dag.dagParams.K)
node.updateParentsChildren()
nonce++
return node
block.Header.Timestamp = blockTime
block.Header.Bits = dag.requiredDifficulty(bluestParent, blockTime)
isOrphan, isDelayed, err := dag.ProcessBlock(util.NewBlock(block), BFNoPoWCheck)
if err != nil {
t.Fatalf("unexpected error in ProcessBlock: %s", err)
}
if isDelayed {
t.Fatalf("block is too far in the future")
}
if isOrphan {
t.Fatalf("block was unexpectedly orphan")
}
return dag.index.LookupNode(block.BlockHash())
}
tip := dag.genesis
for i := uint64(0); i < dag.difficultyAdjustmentWindowSize; i++ {
tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
if tip.bits != dag.genesis.bits {
t.Fatalf("As long as the bluest parent's blue score is less then the difficulty adjustment window size, the difficulty should be the same as genesis'")
}
}
for i := uint64(0); i < dag.difficultyAdjustmentWindowSize+1000; i++ {
tip = addNode(setFromSlice(tip), zeroTime)
for i := uint64(0); i < dag.difficultyAdjustmentWindowSize+100; i++ {
tip = addNode(blockSetFromSlice(tip), zeroTime)
if tip.bits != dag.genesis.bits {
t.Fatalf("As long as the block rate remains the same, the difficulty shouldn't change")
}
}
nodeInThePast := addNode(setFromSlice(tip), tip.PastMedianTime(dag))
nodeInThePast := addNode(blockSetFromSlice(tip), tip.PastMedianTime(dag))
if nodeInThePast.bits != tip.bits {
t.Fatalf("The difficulty should only change when nodeInThePast is in the past of a block bluest parent")
}
tip = nodeInThePast
tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
if tip.bits != nodeInThePast.bits {
t.Fatalf("The difficulty should only change when nodeInThePast is in the past of a block bluest parent")
}
tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
if compareBits(tip.bits, nodeInThePast.bits) >= 0 {
t.Fatalf("tip.bits should be smaller than nodeInThePast.bits because nodeInThePast increased the block rate, so the difficulty should increase as well")
}
expectedBits := uint32(0x207ff395)
expectedBits := uint32(0x207f83df)
if tip.bits != expectedBits {
t.Errorf("tip.bits was expected to be %x but got %x", expectedBits, tip.bits)
}
// Increase block rate to increase difficulty
for i := uint64(0); i < dag.difficultyAdjustmentWindowSize; i++ {
tip = addNode(setFromSlice(tip), tip.PastMedianTime(dag))
tip = addNode(blockSetFromSlice(tip), tip.PastMedianTime(dag))
if compareBits(tip.bits, tip.parents.bluest().bits) > 0 {
t.Fatalf("Because we're increasing the block rate, the difficulty can't decrease")
}
@@ -149,7 +159,7 @@ func TestDifficulty(t *testing.T) {
lastBits := tip.bits
sameBitsCount := uint64(0)
for sameBitsCount < dag.difficultyAdjustmentWindowSize+1 {
tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
if tip.bits == lastBits {
sameBitsCount++
} else {
@@ -157,35 +167,35 @@ func TestDifficulty(t *testing.T) {
sameBitsCount = 0
}
}
slowNode := addNode(setFromSlice(tip), time.Unix(tip.timestamp+2, 0))
slowNode := addNode(blockSetFromSlice(tip), time.Unix(tip.timestamp+2, 0))
if slowNode.bits != tip.bits {
t.Fatalf("The difficulty should only change when slowNode is in the past of a block bluest parent")
}
tip = slowNode
tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
if tip.bits != slowNode.bits {
t.Fatalf("The difficulty should only change when slowNode is in the past of a block bluest parent")
}
tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
if compareBits(tip.bits, slowNode.bits) <= 0 {
t.Fatalf("tip.bits should be smaller than slowNode.bits because slowNode decreased the block rate, so the difficulty should decrease as well")
}
splitNode := addNode(setFromSlice(tip), zeroTime)
splitNode := addNode(blockSetFromSlice(tip), zeroTime)
tip = splitNode
for i := 0; i < 100; i++ {
tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
}
blueTip := tip
redChainTip := splitNode
for i := 0; i < 10; i++ {
redChainTip = addNode(setFromSlice(redChainTip), redChainTip.PastMedianTime(dag))
redChainTip = addNode(blockSetFromSlice(redChainTip), redChainTip.PastMedianTime(dag))
}
tipWithRedPast := addNode(setFromSlice(redChainTip, blueTip), zeroTime)
tipWithoutRedPast := addNode(setFromSlice(blueTip), zeroTime)
tipWithRedPast := addNode(blockSetFromSlice(redChainTip, blueTip), zeroTime)
tipWithoutRedPast := addNode(blockSetFromSlice(blueTip), zeroTime)
if tipWithoutRedPast.bits != tipWithRedPast.bits {
t.Fatalf("tipWithoutRedPast.bits should be the same as tipWithRedPast.bits because red blocks shouldn't affect the difficulty")
}

View File

@@ -1,35 +1,26 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
/*
Package blockdag implements bitcoin block handling and chain selection rules.
Package blockdag implements kaspa block handling and DAG selection rules.
The bitcoin block handling and chain selection rules are an integral, and quite
likely the most important, part of bitcoin. Unfortunately, at the time of
this writing, these rules are also largely undocumented and had to be
ascertained from the bitcoind source code. At its core, bitcoin is a
distributed consensus of which blocks are valid and which ones will comprise the
main block chain (public ledger) that ultimately determines accepted
transactions, so it is extremely important that fully validating nodes agree on
all rules.
The kaspa block handling and DAG selection rules are an integral, and quite
likely the most important, part of kaspa. At its core, kaspa is a distributed
consensus of which blocks are valid and which ones will comprise the DAG
(public ledger) that ultimately determines accepted transactions, so it is
extremely important that fully validating nodes agree on all rules.
At a high level, this package provides support for inserting new blocks into
the block chain according to the aforementioned rules. It includes
functionality such as rejecting duplicate blocks, ensuring blocks and
transactions follow all rules, orphan handling, and best chain selection along
with reorganization.
the block DAG according to the aforementioned rules. It includes functionality
such as rejecting duplicate blocks, ensuring blocks and transactions follow all
rules, orphan handling, and DAG order along with reorganization.
Since this package does not deal with other bitcoin specifics such as network
communication or wallets, it provides a notification system which gives the
caller a high level of flexibility in how they want to react to certain events
such as orphan blocks which need their parents requested and newly connected
main chain blocks which might result in wallet updates.
Since this package does not deal with other kaspa specifics such as network
communication, it provides a notification system which gives the caller a high
level of flexibility in how they want to react to certain events such as orphan
blocks which need their parents requested and newly connected DAG blocks.
Bitcoin Chain Processing Overview
Kaspa DAG Processing Overview
Before a block is allowed into the block chain, it must go through an intensive
series of validation rules. The following list serves as a general outline of
Before a block is allowed into the block DAG, it must go through an intensive
series of validation rules. The following list serves as a general outline of
those rules to provide some intuition into what is going on under the hood, but
is by no means exhaustive:
@@ -37,26 +28,19 @@ is by no means exhaustive:
- Perform a series of sanity checks on the block and its transactions such as
verifying proof of work, timestamps, number and character of transactions,
transaction amounts, script complexity, and merkle root calculations
- Compare the block against predetermined checkpoints for expected timestamps
and difficulty based on elapsed time since the checkpoint
- Save the most recent orphan blocks for a limited time in case their parent
blocks become available
- Stop processing if the block is an orphan as the rest of the processing
depends on the block's position within the block chain
depends on the block's position within the block DAG
- Perform a series of more thorough checks that depend on the block's position
within the block chain such as verifying block difficulties adhere to
within the block DAG such as verifying block difficulties adhere to
difficulty retarget rules, timestamps are after the median of the last
several blocks, all transactions are finalized, checkpoint blocks match, and
several blocks, all transactions are finalized, and
block versions are in line with the previous blocks
- Determine how the block fits into the chain and perform different actions
accordingly in order to ensure any side chains which have higher difficulty
than the main chain become the new main chain
- When a block is being connected to the main chain (either through
reorganization of a side chain to the main chain or just extending the
main chain), perform further checks on the block's transactions such as
verifying transaction duplicates, script complexity for the combination of
connected scripts, coinbase maturity, double spends, and connected
transaction values
- When a block is being connected to the DAG, perform further checks on the
block's transactions such as verifying transaction duplicates, script
complexity for the combination of connected scripts, coinbase maturity,
double spends, and connected transaction values
- Run the transaction scripts to verify the spender is allowed to spend the
coins
- Insert the block into the block database
@@ -64,18 +48,10 @@ is by no means exhaustive:
Errors
Errors returned by this package are either the raw errors provided by underlying
calls or of type blockchain.RuleError. This allows the caller to differentiate
calls or of type blockdag.RuleError. This allows the caller to differentiate
between unexpected errors, such as database errors, versus errors due to rule
violations through type assertions. In addition, callers can programmatically
determine the specific rule violation by examining the ErrorCode field of the
type asserted blockchain.RuleError.
Bitcoin Improvement Proposals
This package includes spec changes outlined by the following BIPs:
BIP0016 (https://en.bitcoin.it/wiki/BIP_0016)
BIP0030 (https://en.bitcoin.it/wiki/BIP_0030)
BIP0034 (https://en.bitcoin.it/wiki/BIP_0034)
type asserted blockdag.RuleError.
*/
package blockdag
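As this Errors section describes (and as the tests later in this diff do), callers unwrap to blockdag.RuleError and branch on its ErrorCode. A minimal sketch of that pattern:

package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/blockdag"
	"github.com/pkg/errors"
)

// classify reports whether err is a rule violation and, if so, which one.
func classify(err error) string {
	var ruleErr blockdag.RuleError
	if errors.As(err, &ruleErr) {
		return fmt.Sprintf("rule violation: %s", ruleErr.ErrorCode)
	}
	return "unexpected error (e.g. a database failure)"
}

func main() {
	fmt.Println(classify(errors.New("disk read failed")))
}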

View File

@@ -47,13 +47,12 @@ const (
ErrBlockVersionTooOld
// ErrInvalidTime indicates the time in the passed block has a precision
// that is more than one second. The chain consensus rules require
// that is more than one second. The DAG consensus rules require
// timestamps to have a maximum precision of one second.
ErrInvalidTime
// ErrTimeTooOld indicates the time is either before the median time of
// the last several blocks per the chain consensus rules or prior to the
// most recent checkpoint.
// the last several blocks per the DAG consensus rules.
ErrTimeTooOld
// ErrTimeTooNew indicates the time is too far in the future as compared
@@ -67,7 +66,7 @@ const (
ErrWrongParentsOrder
// ErrDifficultyTooLow indicates the difficulty for the block is lower
// than the difficulty required by the most recent checkpoint.
// than the difficulty required.
ErrDifficultyTooLow
// ErrUnexpectedDifficulty indicates specified bits do not align with
@@ -88,20 +87,16 @@ const (
// the expected value.
ErrBadUTXOCommitment
// ErrBadCheckpoint indicates a block that is expected to be at a
// checkpoint height does not match the expected one.
ErrBadCheckpoint
// ErrFinalityPointTimeTooOld indicates a block has a timestamp before the
// last finality point.
ErrFinalityPointTimeTooOld
// ErrNoTransactions indicates the block does not have at least one
// transaction. A valid block must have at least the coinbase
// transaction.
ErrNoTransactions
// ErrNoTxInputs indicates a transaction does not have any inputs. A
// valid transaction must have at least one input.
ErrNoTxInputs
@@ -131,7 +126,7 @@ const (
ErrUnfinalizedTx
// ErrDuplicateTx indicates a block contains an identical transaction
// (or at least two transactions which hash to the same value). A
// valid block may only contain unique transactions.
ErrDuplicateTx
@@ -172,12 +167,12 @@ const (
ErrBadCoinbaseTransaction
// ErrScriptMalformed indicates a transaction script is malformed in
// some way. For example, it might be longer than the maximum allowed
// length or fail to parse.
ErrScriptMalformed
// ErrScriptValidation indicates the result of executing transaction
// script failed. The error covers any failure when executing scripts
// such signature verification failures and execution past the end of
// the stack.
ErrScriptValidation
@@ -222,6 +217,10 @@ const (
// ErrInvalidParentsRelation indicates that one of the parents of a block
// is also an ancestor of another parent
ErrInvalidParentsRelation
// ErrDelayedBlockIsNotAllowed indicates that a block with a delayed timestamp was
// submitted with BFDisallowDelay flag raised.
ErrDelayedBlockIsNotAllowed
)
// Map of ErrorCode values back to their constant names for pretty printing.
@@ -238,7 +237,6 @@ var errorCodeStrings = map[ErrorCode]string{
ErrUnexpectedDifficulty: "ErrUnexpectedDifficulty",
ErrHighHash: "ErrHighHash",
ErrBadMerkleRoot: "ErrBadMerkleRoot",
ErrBadCheckpoint: "ErrBadCheckpoint",
ErrFinalityPointTimeTooOld: "ErrFinalityPointTimeTooOld",
ErrNoTransactions: "ErrNoTransactions",
ErrNoTxInputs: "ErrNoTxInputs",
@@ -270,6 +268,7 @@ var errorCodeStrings = map[ErrorCode]string{
ErrInvalidPayload: "ErrInvalidPayload",
ErrInvalidPayloadHash: "ErrInvalidPayloadHash",
ErrInvalidParentsRelation: "ErrInvalidParentsRelation",
ErrDelayedBlockIsNotAllowed: "ErrDelayedBlockIsNotAllowed",
}
// String returns the ErrorCode as a human-readable name.
@@ -280,9 +279,9 @@ func (e ErrorCode) String() string {
return fmt.Sprintf("Unknown ErrorCode (%d)", int(e))
}
// RuleError identifies a rule violation. It is used to indicate that
// processing of a block or transaction failed due to one of the many validation
// rules. The caller can use type assertions to determine if a failure was
// specifically due to a rule violation and access the ErrorCode field to
// ascertain the specific reason for the rule violation.
type RuleError struct {

View File

@@ -27,7 +27,6 @@ func TestErrorCodeStringer(t *testing.T) {
{ErrUnexpectedDifficulty, "ErrUnexpectedDifficulty"},
{ErrHighHash, "ErrHighHash"},
{ErrBadMerkleRoot, "ErrBadMerkleRoot"},
{ErrBadCheckpoint, "ErrBadCheckpoint"},
{ErrFinalityPointTimeTooOld, "ErrFinalityPointTimeTooOld"},
{ErrNoTransactions, "ErrNoTransactions"},
{ErrNoTxInputs, "ErrNoTxInputs"},
@@ -35,7 +34,6 @@ func TestErrorCodeStringer(t *testing.T) {
{ErrBadTxOutValue, "ErrBadTxOutValue"},
{ErrDuplicateTxInputs, "ErrDuplicateTxInputs"},
{ErrBadTxInput, "ErrBadTxInput"},
{ErrBadCheckpoint, "ErrBadCheckpoint"},
{ErrMissingTxOut, "ErrMissingTxOut"},
{ErrUnfinalizedTx, "ErrUnfinalizedTx"},
{ErrDuplicateTx, "ErrDuplicateTx"},
@@ -60,6 +58,7 @@ func TestErrorCodeStringer(t *testing.T) {
{ErrInvalidPayload, "ErrInvalidPayload"},
{ErrInvalidPayloadHash, "ErrInvalidPayloadHash"},
{ErrInvalidParentsRelation, "ErrInvalidParentsRelation"},
{ErrDelayedBlockIsNotAllowed, "ErrDelayedBlockIsNotAllowed"},
{0xffff, "Unknown ErrorCode (65535)"},
}

View File

@@ -6,17 +6,17 @@ import (
"math"
"testing"
"github.com/daglabs/btcd/util/subnetworkid"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/util/testtools"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/testtools"
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/mining"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/wire"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/mining"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
)
// TestFinality checks that the finality mechanism works as expected.
@@ -37,7 +37,7 @@ import (
// gets rejected because it doesn't have the last finality point in
// its selected parent chain.
func TestFinality(t *testing.T) {
params := dagconfig.SimNetParams
params := dagconfig.SimnetParams
params.K = 1
params.FinalityInterval = 100
dag, teardownFunc, err := blockdag.DAGSetup("TestFinality", blockdag.Config{
@@ -54,11 +54,11 @@ func TestFinality(t *testing.T) {
}
block := util.NewBlock(msgBlock)
isOrphan, delay, err := dag.ProcessBlock(block, blockdag.BFNoPoWCheck)
isOrphan, isDelayed, err := dag.ProcessBlock(block, blockdag.BFNoPoWCheck)
if err != nil {
return nil, err
}
if delay != 0 {
if isDelayed {
return nil, errors.Errorf("ProcessBlock: block " +
"is too far in the future")
}
@@ -73,7 +73,7 @@ func TestFinality(t *testing.T) {
currentNode := genesis
// First we build a chain of params.FinalityInterval blocks for future use
for i := 0; i < params.FinalityInterval; i++ {
for i := uint64(0); i < params.FinalityInterval; i++ {
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@@ -85,7 +85,7 @@ func TestFinality(t *testing.T) {
// Now we build a new chain of 2 * params.FinalityInterval blocks, pointed to genesis, and
// we expect the block with height 1 * params.FinalityInterval to be the last finality point
currentNode = genesis
for i := 0; i < params.FinalityInterval; i++ {
for i := uint64(0); i < params.FinalityInterval; i++ {
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@@ -94,7 +94,7 @@ func TestFinality(t *testing.T) {
expectedFinalityPoint := currentNode
for i := 0; i < params.FinalityInterval; i++ {
for i := uint64(0); i < params.FinalityInterval; i++ {
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@@ -137,10 +137,10 @@ func TestFinality(t *testing.T) {
if err == nil {
t.Errorf("TestFinality: buildNodeToDag expected an error but got <nil>")
}
rErr, ok := err.(blockdag.RuleError)
if ok {
if rErr.ErrorCode != blockdag.ErrFinality {
t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, rErr.ErrorCode)
var ruleErr blockdag.RuleError
if errors.As(err, &ruleErr) {
if ruleErr.ErrorCode != blockdag.ErrFinality {
t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, ruleErr.ErrorCode)
}
} else {
t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", err)
@@ -152,13 +152,12 @@ func TestFinality(t *testing.T) {
if err == nil {
t.Errorf("TestFinality: buildNodeToDag expected an error but got <nil>")
}
rErr, ok = err.(blockdag.RuleError)
if ok {
if rErr.ErrorCode != blockdag.ErrFinality {
t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, rErr.ErrorCode)
if errors.As(err, &ruleErr) {
if ruleErr.ErrorCode != blockdag.ErrFinality {
t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, ruleErr.ErrorCode)
}
} else {
t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", rErr)
t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", ruleErr)
}
}
@@ -167,15 +166,23 @@ func TestFinality(t *testing.T) {
// a getblocks message it should always be able to send
// all the necessary invs.
func TestFinalityInterval(t *testing.T) {
params := dagconfig.SimNetParams
if params.FinalityInterval > wire.MaxInvPerMsg {
t.Errorf("dagconfig.SimNetParams.FinalityInterval should be lower or equal to wire.MaxInvPerMsg")
netParams := []*dagconfig.Params{
&dagconfig.MainnetParams,
&dagconfig.TestnetParams,
&dagconfig.DevnetParams,
&dagconfig.RegressionNetParams,
&dagconfig.SimnetParams,
}
for _, params := range netParams {
if params.FinalityInterval > wire.MaxInvPerMsg {
t.Errorf("FinalityInterval in %s should be lower or equal to wire.MaxInvPerMsg", params.Name)
}
}
}
// TestSubnetworkRegistry tests the full subnetwork registry flow
func TestSubnetworkRegistry(t *testing.T) {
params := dagconfig.SimNetParams
params := dagconfig.SimnetParams
params.K = 1
params.BlockCoinbaseMaturity = 0
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", blockdag.Config{
@@ -201,7 +208,7 @@ func TestSubnetworkRegistry(t *testing.T) {
}
func TestChainedTransactions(t *testing.T) {
params := dagconfig.SimNetParams
params := dagconfig.SimnetParams
params.BlockCoinbaseMaturity = 0
// Create a new database and dag instance to run tests against.
dag, teardownFunc, err := blockdag.DAGSetup("TestChainedTransactions", blockdag.Config{
@@ -216,11 +223,11 @@ func TestChainedTransactions(t *testing.T) {
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
isOrphan, delay, err := dag.ProcessBlock(util.NewBlock(block1), blockdag.BFNoPoWCheck)
isOrphan, isDelayed, err := dag.ProcessBlock(util.NewBlock(block1), blockdag.BFNoPoWCheck)
if err != nil {
t.Fatalf("ProcessBlock: %v", err)
}
if delay != 0 {
if isDelayed {
t.Fatalf("ProcessBlock: block1 " +
"is too far in the future")
}
@@ -266,17 +273,20 @@ func TestChainedTransactions(t *testing.T) {
}
//Checks that dag.ProcessBlock fails because we don't allow a transaction to spend another transaction from the same block
isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(block2), blockdag.BFNoPoWCheck)
isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(block2), blockdag.BFNoPoWCheck)
if err == nil {
t.Errorf("ProcessBlock expected an error")
} else if rErr, ok := err.(blockdag.RuleError); ok {
if rErr.ErrorCode != blockdag.ErrMissingTxOut {
t.Errorf("ProcessBlock expected an %v error code but got %v", blockdag.ErrMissingTxOut, rErr.ErrorCode)
}
} else {
t.Errorf("ProcessBlock expected a blockdag.RuleError but got %v", err)
var ruleErr blockdag.RuleError
if ok := errors.As(err, &ruleErr); ok {
if ruleErr.ErrorCode != blockdag.ErrMissingTxOut {
t.Errorf("ProcessBlock expected an %v error code but got %v", blockdag.ErrMissingTxOut, ruleErr.ErrorCode)
}
} else {
t.Errorf("ProcessBlock expected a blockdag.RuleError but got %v", err)
}
}
if delay != 0 {
if isDelayed {
t.Fatalf("ProcessBlock: block2 " +
"is too far in the future")
}
@@ -301,11 +311,11 @@ func TestChainedTransactions(t *testing.T) {
}
//Checks that dag.ProcessBlock doesn't fail because all of its transactions are dependent on transactions from previous blocks
isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(block3), blockdag.BFNoPoWCheck)
isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(block3), blockdag.BFNoPoWCheck)
if err != nil {
t.Errorf("ProcessBlock: %v", err)
}
if delay != 0 {
if isDelayed {
t.Fatalf("ProcessBlock: block3 " +
"is too far in the future")
}
@@ -319,8 +329,8 @@ func TestChainedTransactions(t *testing.T) {
// before txB.
func TestOrderInDiffFromAcceptanceData(t *testing.T) {
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimNetParams
params.K = math.MaxUint32
params := dagconfig.SimnetParams
params.K = math.MaxUint8
dag, teardownFunc, err := blockdag.DAGSetup("TestOrderInDiffFromAcceptanceData", blockdag.Config{
DAGParams: &params,
})
@@ -359,11 +369,11 @@ func TestOrderInDiffFromAcceptanceData(t *testing.T) {
// Add the block to the DAG
newBlock := util.NewBlock(msgBlock)
isOrphan, delay, err := dag.ProcessBlock(newBlock, blockdag.BFNoPoWCheck)
isOrphan, isDelayed, err := dag.ProcessBlock(newBlock, blockdag.BFNoPoWCheck)
if err != nil {
t.Errorf("TestOrderInDiffFromAcceptanceData: %s", err)
}
if delay != 0 {
if isDelayed {
t.Fatalf("TestOrderInDiffFromAcceptanceData: block is too far in the future")
}
if isOrphan {
@@ -388,7 +398,7 @@ func TestOrderInDiffFromAcceptanceData(t *testing.T) {
// TestGasLimit tests the gas limit rules
func TestGasLimit(t *testing.T) {
params := dagconfig.SimNetParams
params := dagconfig.SimnetParams
params.K = 1
params.BlockCoinbaseMaturity = 0
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", blockdag.Config{
@@ -412,11 +422,11 @@ func TestGasLimit(t *testing.T) {
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
isOrphan, delay, err := dag.ProcessBlock(util.NewBlock(fundsBlock), blockdag.BFNoPoWCheck)
isOrphan, isDelayed, err := dag.ProcessBlock(util.NewBlock(fundsBlock), blockdag.BFNoPoWCheck)
if err != nil {
t.Fatalf("ProcessBlock: %v", err)
}
if delay != 0 {
if isDelayed {
t.Fatalf("ProcessBlock: the funds block " +
"is too far in the future")
}
@@ -464,17 +474,17 @@ func TestGasLimit(t *testing.T) {
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
isOrphan, delay, err := dag.ProcessBlock(util.NewBlock(overLimitBlock), blockdag.BFNoPoWCheck)
isOrphan, isDelayed, err := dag.ProcessBlock(util.NewBlock(overLimitBlock), blockdag.BFNoPoWCheck)
if err == nil {
t.Fatalf("ProcessBlock expected to have an error in block that exceeds gas limit")
}
rErr, ok := err.(blockdag.RuleError)
if !ok {
var ruleErr blockdag.RuleError
if !errors.As(err, &ruleErr) {
t.Fatalf("ProcessBlock expected a RuleError, but got %v", err)
} else if rErr.ErrorCode != blockdag.ErrInvalidGas {
t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, rErr.ErrorCode)
} else if ruleErr.ErrorCode != blockdag.ErrInvalidGas {
t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, ruleErr.ErrorCode)
}
if delay != 0 {
if isDelayed {
t.Fatalf("ProcessBlock: overLimitBlock " +
"is too far in the future")
}
@@ -499,19 +509,22 @@ func TestGasLimit(t *testing.T) {
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(overflowGasBlock), blockdag.BFNoPoWCheck)
isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(overflowGasBlock), blockdag.BFNoPoWCheck)
if err == nil {
t.Fatalf("ProcessBlock expected to have an error")
}
rErr, ok = err.(blockdag.RuleError)
if !ok {
if !errors.As(err, &ruleErr) {
t.Fatalf("ProcessBlock expected a RuleError, but got %v", err)
} else if rErr.ErrorCode != blockdag.ErrInvalidGas {
t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, rErr.ErrorCode)
} else if ruleErr.ErrorCode != blockdag.ErrInvalidGas {
t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, ruleErr.ErrorCode)
}
if isOrphan {
t.Fatalf("ProcessBlock: overLimitBlock got unexpectedly orphan")
}
if isDelayed {
t.Fatalf("ProcessBlock: overflowGasBlock " +
"is too far in the future")
}
nonExistentSubnetwork := &subnetworkid.SubnetworkID{123}
nonExistentSubnetworkTxIn := &wire.TxIn{
@@ -532,23 +545,30 @@ func TestGasLimit(t *testing.T) {
}
// Here we check that we can't process a block with a transaction from a non-existent subnetwork
isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(nonExistentSubnetworkBlock), blockdag.BFNoPoWCheck)
isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(nonExistentSubnetworkBlock), blockdag.BFNoPoWCheck)
expectedErrStr := fmt.Sprintf("Error getting gas limit for subnetworkID '%s': subnetwork '%s' not found",
nonExistentSubnetwork, nonExistentSubnetwork)
if err.Error() != expectedErrStr {
t.Fatalf("ProcessBlock expected error \"%v\" but got \"%v\"", expectedErrStr, err)
}
if isDelayed {
t.Fatalf("ProcessBlock: nonExistentSubnetworkBlock " +
"is too far in the future")
}
if isOrphan {
t.Fatalf("ProcessBlock: nonExistentSubnetworkBlock got unexpectedly orphan")
}
// Here we check that we can process a block with a transaction that doesn't exceed the gas limit
validBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1}, true)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(validBlock), blockdag.BFNoPoWCheck)
isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(validBlock), blockdag.BFNoPoWCheck)
if err != nil {
t.Fatalf("ProcessBlock: %v", err)
}
if delay != 0 {
if isDelayed {
t.Fatalf("ProcessBlock: overLimitBlock " +
"is too far in the future")
}

View File

@@ -1,318 +0,0 @@
// Copyright (c) 2016 The Decred developers
// Copyright (c) 2016-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag_test
import (
"bytes"
"github.com/pkg/errors"
"os"
"path/filepath"
"testing"
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/blockdag/fullblocktests"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/database"
_ "github.com/daglabs/btcd/database/ffldb"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
)
const (
// testDbType is the database backend type to use for the tests.
testDbType = "ffldb"
// testDbRoot is the root directory used to create all test databases.
testDbRoot = "testdbs"
// blockDataNet is the expected network in the test block data.
blockDataNet = wire.MainNet
)
// filesExists returns whether or not the named file or directory exists.
func fileExists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}
// isSupportedDbType returns whether or not the passed database type is
// currently supported.
func isSupportedDbType(dbType string) bool {
supportedDrivers := database.SupportedDrivers()
for _, driver := range supportedDrivers {
if dbType == driver {
return true
}
}
return false
}
// DAGSetup is used to create a new db and chain instance with the genesis
// block already inserted. In addition to the new chain instance, it returns
// a teardown function the caller should invoke when done testing to clean up.
func DAGSetup(dbName string, params *dagconfig.Params) (*blockdag.BlockDAG, func(), error) {
if !isSupportedDbType(testDbType) {
return nil, nil, errors.Errorf("unsupported db type %v", testDbType)
}
// Handle memory database specially since it doesn't need the disk
// specific handling.
var db database.DB
var teardown func()
if testDbType == "memdb" {
ndb, err := database.Create(testDbType)
if err != nil {
return nil, nil, errors.Errorf("error creating db: %v", err)
}
db = ndb
// Setup a teardown function for cleaning up. This function is
// returned to the caller to be invoked when it is done testing.
teardown = func() {
db.Close()
}
} else {
// Create the root directory for test databases.
if !fileExists(testDbRoot) {
if err := os.MkdirAll(testDbRoot, 0700); err != nil {
err := errors.Errorf("unable to create test db "+
"root: %v", err)
return nil, nil, err
}
}
// Create a new database to store the accepted blocks into.
dbPath := filepath.Join(testDbRoot, dbName)
_ = os.RemoveAll(dbPath)
ndb, err := database.Create(testDbType, dbPath, blockDataNet)
if err != nil {
return nil, nil, errors.Errorf("error creating db: %v", err)
}
db = ndb
// Setup a teardown function for cleaning up. This function is
// returned to the caller to be invoked when it is done testing.
teardown = func() {
db.Close()
os.RemoveAll(dbPath)
os.RemoveAll(testDbRoot)
}
}
// Copy the chain params to ensure any modifications the tests do to
// the DAG parameters do not affect the global instance.
paramsCopy := *params
// Create the main chain instance.
chain, err := blockdag.New(&blockdag.Config{
DB: db,
DAGParams: &paramsCopy,
Checkpoints: nil,
TimeSource: blockdag.NewMedianTime(),
SigCache: txscript.NewSigCache(1000),
})
if err != nil {
teardown()
err := errors.Errorf("failed to create chain instance: %v", err)
return nil, nil, err
}
return chain, teardown, nil
}
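// Typical usage (an illustrative sketch only; the test name "mytest" is
// hypothetical and the pattern simply mirrors TestFullBlocks below):
//
//	dag, teardownFunc, err := DAGSetup("mytest", &dagconfig.RegressionNetParams)
//	if err != nil {
//		t.Fatalf("Failed to setup DAG instance: %v", err)
//	}
//	defer teardownFunc()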
// TestFullBlocks ensures all tests generated by the fullblocktests package
// have the expected result when processed via ProcessBlock.
func TestFullBlocks(t *testing.T) {
// TODO: (Stas) This test was disabled for until we have implemented Phantom
// Ticket: https://daglabs.atlassian.net/browse/DEV-60
t.SkipNow()
tests, err := fullblocktests.Generate(false)
if err != nil {
t.Fatalf("failed to generate tests: %v", err)
}
// Create a new database and chain instance to run tests against.
dag, teardownFunc, err := DAGSetup("fullblocktest",
&dagconfig.RegressionNetParams)
if err != nil {
t.Errorf("Failed to setup chain instance: %v", err)
return
}
defer teardownFunc()
// testAcceptedBlock attempts to process the block in the provided test
// instance and ensures that it was accepted according to the flags
// specified in the test.
testAcceptedBlock := func(item fullblocktests.AcceptedBlock) {
blockHeight := item.Height
block := util.NewBlock(item.Block)
block.SetChainHeight(blockHeight)
t.Logf("Testing block %s (hash %s, height %d)",
item.Name, block.Hash(), blockHeight)
isOrphan, delay, err := dag.ProcessBlock(block,
blockdag.BFNone)
if err != nil {
t.Fatalf("block %q (hash %s, height %d) should "+
"have been accepted: %v", item.Name,
block.Hash(), blockHeight, err)
}
if delay != item.Delay {
t.Fatalf("block %q (hash %s, height %d) unexpected "+
"delay -- got %v, want %v", item.Name,
block.Hash(), blockHeight, delay,
item.Delay)
}
if isOrphan != item.IsOrphan {
t.Fatalf("block %q (hash %s, height %d) unexpected "+
"orphan flag -- got %v, want %v", item.Name,
block.Hash(), blockHeight, isOrphan,
item.IsOrphan)
}
}
// testRejectedBlock attempts to process the block in the provided test
// instance and ensures that it was rejected with the reject code
// specified in the test.
testRejectedBlock := func(item fullblocktests.RejectedBlock) {
blockHeight := item.Height
block := util.NewBlock(item.Block)
block.SetChainHeight(blockHeight)
t.Logf("Testing block %s (hash %s, height %d)",
item.Name, block.Hash(), blockHeight)
_, _, err := dag.ProcessBlock(block, blockdag.BFNone)
if err == nil {
t.Fatalf("block %q (hash %s, height %d) should not "+
"have been accepted", item.Name, block.Hash(),
blockHeight)
}
// Ensure the error code is of the expected type and the reject
// code matches the value specified in the test instance.
rerr, ok := err.(blockdag.RuleError)
if !ok {
t.Fatalf("block %q (hash %s, height %d) returned "+
"unexpected error type -- got %T, want "+
"blockchain.RuleError", item.Name, block.Hash(),
blockHeight, err)
}
if rerr.ErrorCode != item.RejectCode {
t.Fatalf("block %q (hash %s, height %d) does not have "+
"expected reject code -- got %v, want %v",
item.Name, block.Hash(), blockHeight,
rerr.ErrorCode, item.RejectCode)
}
}
// testRejectedNonCanonicalBlock attempts to decode the block in the
// provided test instance and ensures that it failed to decode with a
// message error.
testRejectedNonCanonicalBlock := func(item fullblocktests.RejectedNonCanonicalBlock) {
headerLen := len(item.RawBlock)
if headerLen > 80 {
headerLen = 80
}
blockHash := daghash.DoubleHashH(item.RawBlock[0:headerLen])
blockHeight := item.Height
t.Logf("Testing block %s (hash %s, height %d)", item.Name,
blockHash, blockHeight)
// Ensure there is an error due to deserializing the block.
var msgBlock wire.MsgBlock
err := msgBlock.BtcDecode(bytes.NewReader(item.RawBlock), 0)
if _, ok := err.(*wire.MessageError); !ok {
t.Fatalf("block %q (hash %s, height %d) should have "+
"failed to decode", item.Name, blockHash,
blockHeight)
}
}
// testOrphanOrRejectedBlock attempts to process the block in the
// provided test instance and ensures that it was either accepted as an
// orphan or rejected with a rule violation.
testOrphanOrRejectedBlock := func(item fullblocktests.OrphanOrRejectedBlock) {
blockHeight := item.Height
block := util.NewBlock(item.Block)
block.SetChainHeight(blockHeight)
t.Logf("Testing block %s (hash %s, height %d)",
item.Name, block.Hash(), blockHeight)
isOrphan, delay, err := dag.ProcessBlock(block, blockdag.BFNone)
if err != nil {
// Ensure the error code is of the expected type.
if _, ok := err.(blockdag.RuleError); !ok {
t.Fatalf("block %q (hash %s, height %d) "+
"returned unexpected error type -- "+
"got %T, want blockchain.RuleError",
item.Name, block.Hash(), blockHeight,
err)
}
}
if delay != 0 {
t.Fatalf("block %q (hash %s, height %d) "+
"is too far in the future",
item.Name, block.Hash(), blockHeight)
}
if !isOrphan {
t.Fatalf("block %q (hash %s, height %d) was accepted, "+
"but is not considered an orphan", item.Name,
block.Hash(), blockHeight)
}
}
// testExpectedTip ensures the current tip of the blockchain is the
// block specified in the provided test instance.
testExpectedTip := func(item fullblocktests.ExpectedTip) {
blockHeight := item.Height
block := util.NewBlock(item.Block)
block.SetChainHeight(blockHeight)
t.Logf("Testing tip for block %s (hash %s, height %d)",
item.Name, block.Hash(), blockHeight)
// Ensure hash and height match.
if dag.SelectedTipHash() != item.Block.BlockHash() ||
dag.ChainHeight() != blockHeight { //TODO: (Ori) the use of dag.ChainHeight() and virtualBlock.HighestTipHash() is wrong, and was done only for compilation
t.Fatalf("block %q (hash %s, height %d) should be "+
"the current tip -- got (hash %s, height %d)",
item.Name, block.Hash(), blockHeight, dag.SelectedTipHash(),
dag.ChainHeight()) //TODO: (Ori) the use of dag.ChainHeight() and virtualBlock.HighestTipHash() is wrong, and was done only for compilation
}
}
for testNum, test := range tests {
for itemNum, item := range test {
switch item := item.(type) {
case fullblocktests.AcceptedBlock:
testAcceptedBlock(item)
case fullblocktests.RejectedBlock:
testRejectedBlock(item)
case fullblocktests.RejectedNonCanonicalBlock:
testRejectedNonCanonicalBlock(item)
case fullblocktests.OrphanOrRejectedBlock:
testOrphanOrRejectedBlock(item)
case fullblocktests.ExpectedTip:
testExpectedTip(item)
default:
t.Fatalf("test #%d, item #%d is not one of "+
"the supported test instance types -- "+
"got type: %T", testNum, itemNum, item)
}
}
}
}


@@ -1,29 +0,0 @@
fullblocktests
==============
[![Build Status](http://img.shields.io/travis/btcsuite/btcd.svg)](https://travis-ci.org/btcsuite/btcd)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/daglabs/btcd/blockchain/fullblocktests)
Package fullblocktests provides a set of full block tests to be used for testing
the consensus validation rules. The tests are intended to be flexible enough to
allow both unit-style tests directly against the blockchain code as well as
integration style tests over the peer-to-peer network. To achieve that goal,
each test contains additional information about the expected result, however
that information can be ignored when doing comparison tests between two
independent versions over the peer-to-peer network.
This package has intentionally been designed so it can be used as a standalone
package for any projects needing to test their implementation against a full set
of blocks that exercise the consensus validation rules.
## Installation and Updating
```bash
$ go get -u github.com/daglabs/btcd/blockchain/fullblocktests
```
## License
Package fullblocktests is licensed under the [copyfree](http://copyfree.org) ISC
License.


@@ -1,20 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
/*
Package fullblocktests provides a set of block consensus validation tests.
All of the generated test instances involve full blocks that are to be used for
testing the consensus validation rules. The tests are intended to be flexible
enough to allow both unit-style tests directly against the blockchain code as
well as integration style tests over the peer-to-peer network. To achieve that
goal, each test contains additional information about the expected result,
however that information can be ignored when doing comparison tests between two
independent versions over the peer-to-peer network.
This package has intentionally been designed so it can be used as a standalone
package for any projects needing to test their implementation against a full set
of blocks that exercise the consensus validation rules.
*/
package fullblocktests

File diff suppressed because it is too large


@@ -1,143 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package fullblocktests
import (
"encoding/hex"
"math"
"math/big"
"time"
"github.com/daglabs/btcd/util/hdkeychain"
"github.com/daglabs/btcd/util/subnetworkid"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
)
// newHashFromStr converts the passed big-endian hex string into a
// daghash.Hash. It only differs from the one available in daghash in that
// it panics on an error since it will only (and must only) be called with
// hard-coded, and therefore known good, hashes.
func newHashFromStr(hexStr string) *daghash.Hash {
hash, err := daghash.NewHashFromStr(hexStr)
if err != nil {
panic(err)
}
return hash
}
// newTxIDFromStr converts the passed big-endian hex string into a
// daghash.TxID. It only differs from the one available in daghash in that
// it panics on an error since it will only (and must only) be called with
// hard-coded, and therefore known good, hashes.
func newTxIDFromStr(hexStr string) *daghash.TxID {
txID, err := daghash.NewTxIDFromStr(hexStr)
if err != nil {
panic(err)
}
return txID
}
// fromHex converts the passed hex string into a byte slice and will panic if
// there is an error. This is only provided for the hard-coded constants so
// errors in the source code can be detected. It will only (and must only) be
// called for initialization purposes.
func fromHex(s string) []byte {
r, err := hex.DecodeString(s)
if err != nil {
panic("invalid hex in source file: " + s)
}
return r
}
var (
// bigOne is 1 represented as a big.Int. It is defined here to avoid
// the overhead of creating it multiple times.
bigOne = big.NewInt(1)
// regressionPowLimit is the highest proof of work value a Bitcoin block
// can have for the regression test network. It is the value 2^255 - 1.
regressionPowLimit = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 255), bigOne)
// regTestGenesisBlock defines the genesis block of the block chain which serves
// as the public transaction ledger for the regression test network.
regTestGenesisBlock = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
ParentHashes: []*daghash.Hash{},
HashMerkleRoot: newHashFromStr("4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"),
Timestamp: time.Unix(0x5b28c636, 0), // 2018-06-19 09:00:38 +0000 UTC
Bits: 0x207fffff, // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000]
Nonce: 1,
},
Transactions: []*wire.MsgTx{{
Version: 1,
TxIn: []*wire.TxIn{{
PreviousOutpoint: wire.Outpoint{
TxID: daghash.TxID{},
Index: 0xffffffff,
},
SignatureScript: fromHex("04ffff001d010445" +
"5468652054696d65732030332f4a616e2f" +
"32303039204368616e63656c6c6f72206f" +
"6e206272696e6b206f66207365636f6e64" +
"206261696c6f757420666f72206261686b73"),
Sequence: math.MaxUint64,
}},
TxOut: []*wire.TxOut{{
Value: 0,
ScriptPubKey: fromHex("4104678afdb0fe5548271967f1" +
"a67130b7105cd6a828e03909a67962e0ea1f" +
"61deb649f6bc3f4cef38c4f35504e51ec138" +
"c4f35504e51ec112de5c384df7ba0b8d578a" +
"4c702b6bf11d5fac"),
}},
LockTime: 0,
SubnetworkID: *subnetworkid.SubnetworkIDNative,
}},
}
)
// regressionNetParams defines the network parameters for the regression test
// network.
//
// NOTE: The test generator intentionally does not use the existing definitions
// in the dagconfig package since the intent is to be able to generate known
// good tests which exercise that code. Using the dagconfig parameters would
// allow them to change out from under the tests potentially invalidating them.
var regressionNetParams = &dagconfig.Params{
Name: "regtest",
Net: wire.RegTest,
DefaultPort: "18444",
// DAG parameters
GenesisBlock: &regTestGenesisBlock,
GenesisHash: newHashFromStr("5bec7567af40504e0994db3b573c186fffcc4edefe096ff2e58d00523bd7e8a6"),
PowMax: regressionPowLimit,
BlockCoinbaseMaturity: 100,
SubsidyReductionInterval: 150,
TargetTimePerBlock: time.Second * 10, // 10 seconds
DifficultyAdjustmentWindowSize: 2640,
TimestampDeviationTolerance: 132,
GenerateSupported: true,
// Checkpoints ordered from oldest to newest.
Checkpoints: nil,
// Mempool parameters
RelayNonStdTxs: true,
// Address encoding magics
PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed)
// BIP32 hierarchical deterministic extended key magics
HDKeyIDPair: hdkeychain.HDKeyPairRegressionNet,
// BIP44 coin type used in the hierarchical deterministic path for
// address generation.
HDCoinType: 1,
}

blockdag/ghostdag.go Normal file

@@ -0,0 +1,176 @@
package blockdag
import (
"github.com/kaspanet/kaspad/dagconfig"
"github.com/pkg/errors"
"sort"
)
// ghostdag runs the GHOSTDAG protocol and updates newNode.blues,
// newNode.selectedParent and newNode.bluesAnticoneSizes accordingly.
// The function updates newNode.blues by iterating over the blocks in
// the anticone of newNode.selectedParent (which is the parent with the
// highest blue score) and adds any block to newNode.blues if by adding
// it to newNode.blues these conditions will not be violated:
//
// 1) |anticone-of-candidate-block ∩ blue-set-of-newNode| ≤ K
//
// 2) For every blue block in blue-set-of-newNode:
// |(anticone-of-blue-block ∩ blue-set-of-newNode) ∪ {candidate-block}| ≤ K.
// We validate this condition by maintaining a map bluesAnticoneSizes for
// each block which holds all the blue anticone sizes that were affected by
// the new added blue blocks.
// So to find out what is |anticone-of-blue ∩ blue-set-of-newNode| we just iterate in
// the selected parent chain of newNode until we find an existing entry in
// bluesAnticoneSizes.
//
// For further details see the article https://eprint.iacr.org/2018/104.pdf
func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blockNode, err error) {
newNode.selectedParent = newNode.parents.bluest()
newNode.bluesAnticoneSizes[newNode.selectedParent] = 0
newNode.blues = []*blockNode{newNode.selectedParent}
selectedParentAnticone, err = dag.selectedParentAnticone(newNode)
if err != nil {
return nil, err
}
sort.Slice(selectedParentAnticone, func(i, j int) bool {
return selectedParentAnticone[i].less(selectedParentAnticone[j])
})
for _, blueCandidate := range selectedParentAnticone {
candidateBluesAnticoneSizes := make(map[*blockNode]dagconfig.KType)
var candidateAnticoneSize dagconfig.KType
possiblyBlue := true
// Iterate over all blocks in the blue set of newNode that are not in the past
// of blueCandidate, and check for each one of them if blueCandidate potentially
// enlarges their blue anticone to be over K, or that they enlarge the blue anticone
// of blueCandidate to be over K.
for chainBlock := newNode; possiblyBlue; chainBlock = chainBlock.selectedParent {
// If blueCandidate is in the future of chainBlock, it means
// that all remaining blues are in the past of chainBlock and thus
// in the past of blueCandidate. In this case we know for sure that
// the anticone of blueCandidate will not exceed K, and we can mark
// it as blue.
//
// newNode is always in the future of blueCandidate, so there's
// no point in checking it.
if chainBlock != newNode {
if isAncestorOfBlueCandidate, err := dag.isAncestorOf(chainBlock, blueCandidate); err != nil {
return nil, err
} else if isAncestorOfBlueCandidate {
break
}
}
for _, block := range chainBlock.blues {
// Skip blocks that exist in the past of blueCandidate.
if isAncestorOfBlueCandidate, err := dag.isAncestorOf(block, blueCandidate); err != nil {
return nil, err
} else if isAncestorOfBlueCandidate {
continue
}
candidateBluesAnticoneSizes[block], err = dag.blueAnticoneSize(block, newNode)
if err != nil {
return nil, err
}
candidateAnticoneSize++
if candidateAnticoneSize > dag.dagParams.K {
// k-cluster violation: The candidate's blue anticone exceeded k
possiblyBlue = false
break
}
if candidateBluesAnticoneSizes[block] == dag.dagParams.K {
// k-cluster violation: A block in candidate's blue anticone already
// has k blue blocks in its own anticone
possiblyBlue = false
break
}
// This is a sanity check that validates that a blue
// block's blue anticone is not already larger than K.
if candidateBluesAnticoneSizes[block] > dag.dagParams.K {
return nil, errors.New("found blue anticone size larger than k")
}
}
}
if possiblyBlue {
// No k-cluster violation found, we can now set the candidate block as blue
newNode.blues = append(newNode.blues, blueCandidate)
newNode.bluesAnticoneSizes[blueCandidate] = candidateAnticoneSize
for blue, blueAnticoneSize := range candidateBluesAnticoneSizes {
newNode.bluesAnticoneSizes[blue] = blueAnticoneSize + 1
}
// The maximum length of node.blues can be K+1 because
// it contains the selected parent.
if dagconfig.KType(len(newNode.blues)) == dag.dagParams.K+1 {
break
}
}
}
newNode.blueScore = newNode.selectedParent.blueScore + uint64(len(newNode.blues))
return selectedParentAnticone, nil
}
// selectedParentAnticone returns the blocks in the anticone of the selected parent of the given node.
// The function works as follows.
// We start by adding all parents of the node (other than the selected parent) to a process queue.
// For each node in the queue:
// we check whether it is in the past of the selected parent.
// If not, we add the node to the resulting anticone-set and queue it for processing.
func (dag *BlockDAG) selectedParentAnticone(node *blockNode) ([]*blockNode, error) {
anticoneSet := newBlockSet()
var anticoneSlice []*blockNode
selectedParentPast := newBlockSet()
var queue []*blockNode
// Queueing all parents (other than the selected parent itself) for processing.
for parent := range node.parents {
if parent == node.selectedParent {
continue
}
anticoneSet.add(parent)
anticoneSlice = append(anticoneSlice, parent)
queue = append(queue, parent)
}
for len(queue) > 0 {
var current *blockNode
current, queue = queue[0], queue[1:]
// For each parent of the current node we check whether it is in the past of the selected parent. If not,
// we add it to the resulting anticone-set and queue it for further processing.
for parent := range current.parents {
if anticoneSet.contains(parent) || selectedParentPast.contains(parent) {
continue
}
isAncestorOfSelectedParent, err := dag.isAncestorOf(parent, node.selectedParent)
if err != nil {
return nil, err
}
if isAncestorOfSelectedParent {
selectedParentPast.add(parent)
continue
}
anticoneSet.add(parent)
anticoneSlice = append(anticoneSlice, parent)
queue = append(queue, parent)
}
}
return anticoneSlice, nil
}
// blueAnticoneSize returns the blue anticone size of 'block' from the worldview of 'context'.
// Expects 'block' to be in the blue set of 'context'
func (dag *BlockDAG) blueAnticoneSize(block, context *blockNode) (dagconfig.KType, error) {
for current := context; current != nil; current = current.selectedParent {
if blueAnticoneSize, ok := current.bluesAnticoneSizes[block]; ok {
return blueAnticoneSize, nil
}
}
return 0, errors.Errorf("block %s is not in blue set of %s", block.hash, context.hash)
}
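To make the two k-cluster conditions described above concrete, here is a minimal, self-contained sketch of the counting logic ghostdag applies to each blue candidate. It is not kaspad code: canBeBlue, the toy sizes map, and the fixed k are hypothetical stand-ins for newNode.bluesAnticoneSizes and dagParams.K.

package main

import "fmt"

const k = 3 // toy stand-in for dagParams.K

// canBeBlue distills the two checks: sizes maps every blue block that would
// gain the candidate in its blue anticone to that block's current blue
// anticone size.
func canBeBlue(sizes map[string]int) bool {
	candidateAnticoneSize := 0
	for name, size := range sizes {
		candidateAnticoneSize++
		// Condition 1: the candidate's own blue anticone must not exceed k.
		if candidateAnticoneSize > k {
			fmt.Println("rejected: candidate anticone exceeds k")
			return false
		}
		// Condition 2: a blue block that already has k blues in its anticone
		// would reach k+1 if the candidate were added.
		if size == k {
			fmt.Printf("rejected: %s already has k blues in its anticone\n", name)
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(canBeBlue(map[string]int{"B": 0, "C": 1}))                 // true
	fmt.Println(canBeBlue(map[string]int{"B": 0, "C": k}))                 // false (condition 2)
	fmt.Println(canBeBlue(map[string]int{"B": 0, "C": 0, "D": 0, "E": 0})) // false (condition 1)
}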

blockdag/ghostdag_test.go Normal file

@@ -0,0 +1,370 @@
package blockdag
import (
"fmt"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"reflect"
"sort"
"strings"
"testing"
)
type testBlockData struct {
parents []string
id string // id is a virtual entity that is used only for tests so we can define relations between blocks without knowing their hash
expectedScore uint64
expectedSelectedParent string
expectedBlues []string
}
// TestGHOSTDAG iterates over several dag simulations, and checks
// that the blue score, blue set and selected parent of each
// block are calculated as expected.
func TestGHOSTDAG(t *testing.T) {
dagParams := dagconfig.SimnetParams
tests := []struct {
k dagconfig.KType
expectedReds []string
dagData []*testBlockData
}{
{
k: 3,
expectedReds: []string{"F", "G", "H", "I", "O", "P"},
dagData: []*testBlockData{
{
parents: []string{"A"},
id: "B",
expectedScore: 1,
expectedSelectedParent: "A",
expectedBlues: []string{"A"},
},
{
parents: []string{"B"},
id: "C",
expectedScore: 2,
expectedSelectedParent: "B",
expectedBlues: []string{"B"},
},
{
parents: []string{"A"},
id: "D",
expectedScore: 1,
expectedSelectedParent: "A",
expectedBlues: []string{"A"},
},
{
parents: []string{"C", "D"},
id: "E",
expectedScore: 4,
expectedSelectedParent: "C",
expectedBlues: []string{"C", "D"},
},
{
parents: []string{"A"},
id: "F",
expectedScore: 1,
expectedSelectedParent: "A",
expectedBlues: []string{"A"},
},
{
parents: []string{"F"},
id: "G",
expectedScore: 2,
expectedSelectedParent: "F",
expectedBlues: []string{"F"},
},
{
parents: []string{"A"},
id: "H",
expectedScore: 1,
expectedSelectedParent: "A",
expectedBlues: []string{"A"},
},
{
parents: []string{"A"},
id: "I",
expectedScore: 1,
expectedSelectedParent: "A",
expectedBlues: []string{"A"},
},
{
parents: []string{"E", "G"},
id: "J",
expectedScore: 5,
expectedSelectedParent: "E",
expectedBlues: []string{"E"},
},
{
parents: []string{"J"},
id: "K",
expectedScore: 6,
expectedSelectedParent: "J",
expectedBlues: []string{"J"},
},
{
parents: []string{"I", "K"},
id: "L",
expectedScore: 7,
expectedSelectedParent: "K",
expectedBlues: []string{"K"},
},
{
parents: []string{"L"},
id: "M",
expectedScore: 8,
expectedSelectedParent: "L",
expectedBlues: []string{"L"},
},
{
parents: []string{"M"},
id: "N",
expectedScore: 9,
expectedSelectedParent: "M",
expectedBlues: []string{"M"},
},
{
parents: []string{"M"},
id: "O",
expectedScore: 9,
expectedSelectedParent: "M",
expectedBlues: []string{"M"},
},
{
parents: []string{"M"},
id: "P",
expectedScore: 9,
expectedSelectedParent: "M",
expectedBlues: []string{"M"},
},
{
parents: []string{"M"},
id: "Q",
expectedScore: 9,
expectedSelectedParent: "M",
expectedBlues: []string{"M"},
},
{
parents: []string{"M"},
id: "R",
expectedScore: 9,
expectedSelectedParent: "M",
expectedBlues: []string{"M"},
},
{
parents: []string{"R"},
id: "S",
expectedScore: 10,
expectedSelectedParent: "R",
expectedBlues: []string{"R"},
},
{
parents: []string{"N", "O", "P", "Q", "S"},
id: "T",
expectedScore: 13,
expectedSelectedParent: "S",
expectedBlues: []string{"S", "N", "Q"},
},
},
},
}
for i, test := range tests {
func() {
resetExtraNonceForTest()
dagParams.K = test.k
dag, teardownFunc, err := DAGSetup(fmt.Sprintf("TestGHOSTDAG%d", i), Config{
DAGParams: &dagParams,
})
if err != nil {
t.Fatalf("Failed to setup dag instance: %v", err)
}
defer teardownFunc()
genesisNode := dag.genesis
blockByIDMap := make(map[string]*blockNode)
idByBlockMap := make(map[*blockNode]string)
blockByIDMap["A"] = genesisNode
idByBlockMap[genesisNode] = "A"
for _, blockData := range test.dagData {
parents := blockSet{}
for _, parentID := range blockData.parents {
parent := blockByIDMap[parentID]
parents.add(parent)
}
block, err := PrepareBlockForTest(dag, parents.hashes(), nil)
if err != nil {
t.Fatalf("TestGHOSTDAG: block %v got unexpected error from PrepareBlockForTest: %v", blockData.id, err)
}
utilBlock := util.NewBlock(block)
isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, BFNoPoWCheck)
if err != nil {
t.Fatalf("TestGHOSTDAG: dag.ProcessBlock got unexpected error for block %v: %v", blockData.id, err)
}
if isDelayed {
t.Fatalf("TestGHOSTDAG: block %s "+
"is too far in the future", blockData.id)
}
if isOrphan {
t.Fatalf("TestGHOSTDAG: block %v was unexpectedly orphan", blockData.id)
}
node := dag.index.LookupNode(utilBlock.Hash())
blockByIDMap[blockData.id] = node
idByBlockMap[node] = blockData.id
bluesIDs := make([]string, 0, len(node.blues))
for _, blue := range node.blues {
bluesIDs = append(bluesIDs, idByBlockMap[blue])
}
selectedParentID := idByBlockMap[node.selectedParent]
fullDataStr := fmt.Sprintf("blues: %v, selectedParent: %v, score: %v",
bluesIDs, selectedParentID, node.blueScore)
if blockData.expectedScore != node.blueScore {
t.Errorf("Test %d: Block %v expected to have score %v but got %v (fulldata: %v)",
i, blockData.id, blockData.expectedScore, node.blueScore, fullDataStr)
}
if blockData.expectedSelectedParent != selectedParentID {
t.Errorf("Test %d: Block %v expected to have selected parent %v but got %v (fulldata: %v)",
i, blockData.id, blockData.expectedSelectedParent, selectedParentID, fullDataStr)
}
if !reflect.DeepEqual(blockData.expectedBlues, bluesIDs) {
t.Errorf("Test %d: Block %v expected to have blues %v but got %v (fulldata: %v)",
i, blockData.id, blockData.expectedBlues, bluesIDs, fullDataStr)
}
}
reds := make(map[string]bool)
for id := range blockByIDMap {
reds[id] = true
}
for tip := &dag.virtual.blockNode; tip.selectedParent != nil; tip = tip.selectedParent {
tipID := idByBlockMap[tip]
delete(reds, tipID)
for _, blue := range tip.blues {
blueID := idByBlockMap[blue]
delete(reds, blueID)
}
}
if !checkReds(test.expectedReds, reds) {
redsIDs := make([]string, 0, len(reds))
for id := range reds {
redsIDs = append(redsIDs, id)
}
sort.Strings(redsIDs)
sort.Strings(test.expectedReds)
t.Errorf("Test %d: Expected reds %v but got %v", i, test.expectedReds, redsIDs)
}
}()
}
}
func checkReds(expectedReds []string, reds map[string]bool) bool {
if len(expectedReds) != len(reds) {
return false
}
for _, redID := range expectedReds {
if !reds[redID] {
return false
}
}
return true
}
func TestBlueAnticoneSizeErrors(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizeErrors", Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
t.Fatalf("TestBlueAnticoneSizeErrors: Failed to setup DAG instance: %s", err)
}
defer teardownFunc()
// Prepare a block chain with size K beginning with the genesis block
currentBlockA := dag.dagParams.GenesisBlock
for i := dagconfig.KType(0); i < dag.dagParams.K; i++ {
newBlock := prepareAndProcessBlock(t, dag, currentBlockA)
currentBlockA = newBlock
}
// Prepare another block chain with size K beginning with the genesis block
currentBlockB := dag.dagParams.GenesisBlock
for i := dagconfig.KType(0); i < dag.dagParams.K; i++ {
newBlock := prepareAndProcessBlock(t, dag, currentBlockB)
currentBlockB = newBlock
}
// Get references to the tips of the two chains
blockNodeA := dag.index.LookupNode(currentBlockA.BlockHash())
blockNodeB := dag.index.LookupNode(currentBlockB.BlockHash())
// Try getting the blueAnticoneSize between them. Since the two
// blocks are not in each other's anticone, this should fail.
_, err = dag.blueAnticoneSize(blockNodeA, blockNodeB)
if err == nil {
t.Fatalf("TestBlueAnticoneSizeErrors: blueAnticoneSize unexpectedly succeeded")
}
expectedErrSubstring := "is not in blue set of"
if !strings.Contains(err.Error(), expectedErrSubstring) {
t.Fatalf("TestBlueAnticoneSizeErrors: blueAnticoneSize returned wrong error. "+
"Want: %s, got: %s", expectedErrSubstring, err)
}
}
func TestGHOSTDAGErrors(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestGHOSTDAGErrors", Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
t.Fatalf("TestGHOSTDAGErrors: Failed to setup DAG instance: %s", err)
}
defer teardownFunc()
// Add two child blocks to the genesis
block1 := prepareAndProcessBlock(t, dag, dag.dagParams.GenesisBlock)
block2 := prepareAndProcessBlock(t, dag, dag.dagParams.GenesisBlock)
// Add a child block to the previous two blocks
block3 := prepareAndProcessBlock(t, dag, block1, block2)
// Clear the reachability store
dag.reachabilityStore.loaded = map[daghash.Hash]*reachabilityData{}
err = dag.db.Update(func(dbTx database.Tx) error {
bucket := dbTx.Metadata().Bucket(reachabilityDataBucketName)
cursor := bucket.Cursor()
for ok := cursor.First(); ok; ok = cursor.Next() {
err := bucket.Delete(cursor.Key())
if err != nil {
return err
}
}
return nil
})
if err != nil {
t.Fatalf("TestGHOSTDAGErrors: db.Update failed: %s", err)
}
// Try to rerun GHOSTDAG on the last block. GHOSTDAG uses
// reachability data, so we expect it to fail.
blockNode3 := dag.index.LookupNode(block3.BlockHash())
_, err = dag.ghostdag(blockNode3)
if err == nil {
t.Fatalf("TestGHOSTDAGErrors: ghostdag unexpectedly succeeded")
}
expectedErrSubstring := "Couldn't find reachability data"
if !strings.Contains(err.Error(), expectedErrSubstring) {
t.Fatalf("TestGHOSTDAGErrors: ghostdag returned wrong error. "+
"Want: %s, got: %s", expectedErrSubstring, err)
}
}


@@ -1,9 +1,8 @@
indexers
========
[![Build Status](https://travis-ci.org/btcsuite/btcd.png?branch=master)](https://travis-ci.org/btcsuite/btcd)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://godoc.org/github.com/daglabs/btcd/blockchain/indexers?status.png)](http://godoc.org/github.com/daglabs/btcd/blockchain/indexers)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
[![GoDoc](https://godoc.org/github.com/kaspanet/kaspad/blockdag/indexers?status.png)](http://godoc.org/github.com/kaspanet/kaspad/blockdag/indexers)
Package indexers implements optional block chain indexes.
@@ -12,21 +11,14 @@ via an RPC interface.
## Supported Indexers
- Transaction-by-hash (txbyhashidx) Index
- Transaction-by-hash (txindex) Index
- Creates a mapping from the hash of each transaction to the block that
contains it along with its offset and length within the serialized block
- Transaction-by-address (txbyaddridx) Index
- Transaction-by-address (addrindex) Index
- Creates a mapping from every address to all transactions which either credit
or debit the address
- Requires the transaction-by-hash index
- AcceptanceData-by-block Index
- Creates a mapping from the hash of each block to the list of transactions this block
accepts from its .Blues
## Installation
```bash
$ go get -u github.com/daglabs/btcd/blockchain/indexers
```
## License
Package indexers is licensed under the [copyfree](http://copyfree.org) ISC
License.


@@ -3,11 +3,11 @@ package indexers
import (
"bytes"
"encoding/gob"
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
)
@@ -64,7 +64,7 @@ func (idx *AcceptanceIndex) Name() string {
}
// Create is invoked when the indexer manager determines the index needs
// to be created for the first time. It creates the bucket for the
// to be created for the first time. It creates the bucket for the
// acceptance index.
//
// This is part of the Indexer interface.


@@ -1,12 +1,12 @@
package indexers
import (
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
"io"
"io/ioutil"
@@ -71,7 +71,7 @@ func TestAcceptanceIndexSerializationAndDeserialization(t *testing.T) {
// comparing dag1's last block acceptance data and dag3's last block acceptance
// data.
func TestAcceptanceIndexRecover(t *testing.T) {
params := &dagconfig.SimNetParams
params := &dagconfig.SimnetParams
params.BlockCoinbaseMaturity = 0
testFiles := []string{
@@ -116,11 +116,11 @@ func TestAcceptanceIndexRecover(t *testing.T) {
}
for i := 1; i < len(blocks)-2; i++ {
isOrphan, delay, err := db1DAG.ProcessBlock(blocks[i], blockdag.BFNone)
isOrphan, isDelayed, err := db1DAG.ProcessBlock(blocks[i], blockdag.BFNone)
if err != nil {
t.Fatalf("ProcessBlock fail on block %v: %v\n", i, err)
}
if delay != 0 {
if isDelayed {
t.Fatalf("ProcessBlock: block %d "+
"is too far in the future", i)
}
@@ -147,11 +147,11 @@ func TestAcceptanceIndexRecover(t *testing.T) {
}
for i := len(blocks) - 2; i < len(blocks); i++ {
isOrphan, delay, err := db1DAG.ProcessBlock(blocks[i], blockdag.BFNone)
isOrphan, isDelayed, err := db1DAG.ProcessBlock(blocks[i], blockdag.BFNone)
if err != nil {
t.Fatalf("ProcessBlock fail on block %v: %v\n", i, err)
}
if delay != 0 {
if isDelayed {
t.Fatalf("ProcessBlock: block %d "+
"is too far in the future", i)
}
@@ -185,11 +185,11 @@ func TestAcceptanceIndexRecover(t *testing.T) {
}
for i := len(blocks) - 2; i < len(blocks); i++ {
isOrphan, delay, err := db2DAG.ProcessBlock(blocks[i], blockdag.BFNone)
isOrphan, isDelayed, err := db2DAG.ProcessBlock(blocks[i], blockdag.BFNone)
if err != nil {
t.Fatalf("ProcessBlock fail on block %v: %v\n", i, err)
}
if delay != 0 {
if isDelayed {
t.Fatalf("ProcessBlock: block %d "+
"is too far in the future", i)
}
@@ -298,16 +298,16 @@ func copyDirectory(scrDir, dest string) error {
// This function is copied and modified from this stackoverflow answer: https://stackoverflow.com/a/56314145/2413761
func copyFile(srcFile, dstFile string) error {
out, err := os.Create(dstFile)
defer out.Close()
if err != nil {
return err
}
defer out.Close()
in, err := os.Open(srcFile)
defer in.Close()
if err != nil {
return err
}
defer in.Close()
_, err = io.Copy(out, in)
if err != nil {


@@ -1,924 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package indexers
import (
"fmt"
"github.com/pkg/errors"
"sync"
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
)
const (
// addrIndexName is the human-readable name for the index.
addrIndexName = "address index"
// level0MaxEntries is the maximum number of transactions that are
// stored in level 0 of an address index entry. Subsequent levels store
// 2^n * level0MaxEntries entries, or in words, double the maximum of
// the previous level.
level0MaxEntries = 8
// addrKeySize is the number of bytes an address key consumes in the
// index. It consists of 1 byte address type + 20 bytes hash160.
addrKeySize = 1 + 20
// levelKeySize is the number of bytes a level key in the address index
// consumes. It consists of the address key + 1 byte for the level.
levelKeySize = addrKeySize + 1
// levelOffset is the offset in the level key which identifies the level.
levelOffset = levelKeySize - 1
// addrKeyTypePubKeyHash is the address type in an address key which
// represents both a pay-to-pubkey-hash and a pay-to-pubkey address.
// This is done because both are identical for the purposes of the
// address index.
addrKeyTypePubKeyHash = 0
// addrKeyTypeScriptHash is the address type in an address key which
// represents a pay-to-script-hash address. This is necessary because
// the hash of a pubkey address might be the same as that of a script
// hash.
addrKeyTypeScriptHash = 1
// Size of a transaction entry. It consists of 8 bytes block id + 4
// bytes offset + 4 bytes length.
txEntrySize = 8 + 4 + 4
)
var (
// addrIndexKey is the key of the address index and the db bucket used
// to house it.
addrIndexKey = []byte("txbyaddridx")
// errUnsupportedAddressType is an error that is used to signal an
// unsupported address type has been used.
errUnsupportedAddressType = errors.New("address type is not supported " +
"by the address index")
)
// -----------------------------------------------------------------------------
// The address index maps addresses referenced in the blockchain to a list of
// all the transactions involving that address. Transactions are stored
// according to their order of appearance in the blockchain. That is to say
// first by block height and then by offset inside the block. It is also
// important to note that this implementation requires the transaction index
// since it is needed in order to catch up old blocks due to the fact the spent
// outputs will already be pruned from the utxo set.
//
// The approach used to store the index is similar to a log-structured merge
// tree (LSM tree) and is thus similar to how leveldb works internally.
//
// Every address consists of one or more entries identified by a level starting
// from 0 where each level holds a maximum number of entries such that each
// subsequent level holds double the maximum of the previous one. In equation
// form, the number of entries each level holds is 2^n * firstLevelMaxSize.
//
// New transactions are appended to level 0 until it becomes full at which point
// the entire level 0 entry is appended to the level 1 entry and level 0 is
// cleared. This process continues until level 1 becomes full at which point it
// will be appended to level 2 and cleared and so on.
//
// The result of this is the lower levels contain newer transactions and the
// transactions within each level are ordered from oldest to newest.
//
// The intent of this approach is to provide a balance between space efficiency
// and indexing cost. Storing one entry per transaction would have the lowest
// indexing cost, but would waste a lot of space because the same address hash
// would be duplicated for every transaction key. On the other hand, storing a
// single entry with all transactions would be the most space efficient, but
// would cause indexing cost to grow quadratically with the number of
// transactions involving the same address. The approach used here provides
// logarithmic insertion and retrieval.
//
// The serialized key format is:
//
// <addr type><addr hash><level>
//
// Field Type Size
// addr type uint8 1 byte
// addr hash hash160 20 bytes
// level uint8 1 byte
// -----
// Total: 22 bytes
//
// The serialized value format is:
//
// [<block id><start offset><tx length>,...]
//
// Field Type Size
// block id uint64 8 bytes
// start offset uint32 4 bytes
// tx length uint32 4 bytes
// -----
// Total: 16 bytes per indexed tx
// -----------------------------------------------------------------------------
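// Worked example (hypothetical values, for illustration only): a
// pay-to-pubkey-hash address whose hash160 is H (20 bytes), stored at
// level 2, is keyed by the 22 bytes 0x00 || H || 0x02. An entry for block
// id 7 whose transaction starts at offset 100 and is 250 bytes long
// serializes to the 16-byte value
// 07 00 00 00 00 00 00 00 || 64 00 00 00 || fa 00 00 00
// (assuming the package's little-endian byteOrder).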
// fetchBlockHashFunc defines a callback function to use in order to convert a
// serialized block ID to an associated block hash.
type fetchBlockHashFunc func(serializedID []byte) (*daghash.Hash, error)
// serializeAddrIndexEntry serializes the provided block id and transaction
// location according to the format described in detail above.
func serializeAddrIndexEntry(blockID uint64, txLoc wire.TxLoc) []byte {
// Serialize the entry.
serialized := make([]byte, 16)
byteOrder.PutUint64(serialized, blockID)
byteOrder.PutUint32(serialized[8:], uint32(txLoc.TxStart))
byteOrder.PutUint32(serialized[12:], uint32(txLoc.TxLen))
return serialized
}
// deserializeAddrIndexEntry decodes the passed serialized byte slice into the
// provided region struct according to the format described in detail above and
// uses the passed block hash fetching function in order to convert the block ID
// to the associated block hash.
func deserializeAddrIndexEntry(serialized []byte, region *database.BlockRegion, fetchBlockHash fetchBlockHashFunc) error {
// Ensure there are enough bytes to decode.
if len(serialized) < txEntrySize {
return errDeserialize("unexpected end of data")
}
hash, err := fetchBlockHash(serialized[0:8])
if err != nil {
return err
}
region.Hash = hash
region.Offset = byteOrder.Uint32(serialized[8:12])
region.Len = byteOrder.Uint32(serialized[12:16])
return nil
}
// keyForLevel returns the key for a specific address and level in the address
// index entry.
func keyForLevel(addrKey [addrKeySize]byte, level uint8) [levelKeySize]byte {
var key [levelKeySize]byte
copy(key[:], addrKey[:])
key[levelOffset] = level
return key
}
// dbPutAddrIndexEntry updates the address index to include the provided entry
// according to the level-based scheme described in detail above.
func dbPutAddrIndexEntry(bucket internalBucket, addrKey [addrKeySize]byte, blockID uint64, txLoc wire.TxLoc) error {
// Start with level 0 and its initial max number of entries.
curLevel := uint8(0)
maxLevelBytes := level0MaxEntries * txEntrySize
// Simply append the new entry to level 0 and return now when it will
// fit. This is the most common path.
newData := serializeAddrIndexEntry(blockID, txLoc)
level0Key := keyForLevel(addrKey, 0)
level0Data := bucket.Get(level0Key[:])
if len(level0Data)+len(newData) <= maxLevelBytes {
mergedData := newData
if len(level0Data) > 0 {
mergedData = make([]byte, len(level0Data)+len(newData))
copy(mergedData, level0Data)
copy(mergedData[len(level0Data):], newData)
}
return bucket.Put(level0Key[:], mergedData)
}
// At this point, level 0 is full, so merge each level into higher
// levels as many times as needed to free up level 0.
prevLevelData := level0Data
for {
// Each new level holds twice as much as the previous one.
curLevel++
maxLevelBytes *= 2
// Move to the next level as long as the current level is full.
curLevelKey := keyForLevel(addrKey, curLevel)
curLevelData := bucket.Get(curLevelKey[:])
if len(curLevelData) == maxLevelBytes {
prevLevelData = curLevelData
continue
}
// The current level has room for the data in the previous one,
// so merge the data from previous level into it.
mergedData := prevLevelData
if len(curLevelData) > 0 {
mergedData = make([]byte, len(curLevelData)+
len(prevLevelData))
copy(mergedData, curLevelData)
copy(mergedData[len(curLevelData):], prevLevelData)
}
err := bucket.Put(curLevelKey[:], mergedData)
if err != nil {
return err
}
// Move all of the levels before the previous one up a level.
for mergeLevel := curLevel - 1; mergeLevel > 0; mergeLevel-- {
mergeLevelKey := keyForLevel(addrKey, mergeLevel)
prevLevelKey := keyForLevel(addrKey, mergeLevel-1)
prevData := bucket.Get(prevLevelKey[:])
err := bucket.Put(mergeLevelKey[:], prevData)
if err != nil {
return err
}
}
break
}
// Finally, insert the new entry into level 0 now that it is empty.
return bucket.Put(level0Key[:], newData)
}
// dbFetchAddrIndexEntries returns block regions for transactions referenced by
// the given address key and the number of entries skipped since it could have
// been less in the case where there are less total entries than the requested
// number of entries to skip.
func dbFetchAddrIndexEntries(bucket internalBucket, addrKey [addrKeySize]byte, numToSkip, numRequested uint32, reverse bool, fetchBlockHash fetchBlockHashFunc) ([]database.BlockRegion, uint32, error) {
// When the reverse flag is not set, all levels need to be fetched
// because numToSkip and numRequested are counted from the oldest
// transactions (highest level) and thus the total count is needed.
// However, when the reverse flag is set, only enough records to satisfy
// the requested amount are needed.
var level uint8
var serialized []byte
for !reverse || len(serialized) < int(numToSkip+numRequested)*txEntrySize {
curLevelKey := keyForLevel(addrKey, level)
levelData := bucket.Get(curLevelKey[:])
if levelData == nil {
// Stop when there are no more levels.
break
}
// Higher levels contain older transactions, so prepend them.
prepended := make([]byte, len(serialized)+len(levelData))
copy(prepended, levelData)
copy(prepended[len(levelData):], serialized)
serialized = prepended
level++
}
// When the requested number of entries to skip is larger than the
// number available, skip them all and return now with the actual number
// skipped.
numEntries := uint32(len(serialized) / txEntrySize)
if numToSkip >= numEntries {
return nil, numEntries, nil
}
// Nothing more to do when there are no requested entries.
if numRequested == 0 {
return nil, numToSkip, nil
}
// Limit the number to load based on the number of available entries,
// the number to skip, and the number requested.
numToLoad := numEntries - numToSkip
if numToLoad > numRequested {
numToLoad = numRequested
}
// Start the offset after all skipped entries and load the calculated
// number.
results := make([]database.BlockRegion, numToLoad)
for i := uint32(0); i < numToLoad; i++ {
// Calculate the read offset according to the reverse flag.
var offset uint32
if reverse {
offset = (numEntries - numToSkip - i - 1) * txEntrySize
} else {
offset = (numToSkip + i) * txEntrySize
}
// Deserialize and populate the result.
err := deserializeAddrIndexEntry(serialized[offset:],
&results[i], fetchBlockHash)
if err != nil {
// Ensure any deserialization errors are returned as
// database corruption errors.
if isDeserializeErr(err) {
err = database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("failed to "+
"deserialized address index "+
"for key %x: %s", addrKey, err),
}
}
return nil, 0, err
}
}
return results, numToSkip, nil
}
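// Illustrative call (bucket, addrKey and fetchBlockHash are assumed to be
// in scope): skip the 5 newest entries and fetch up to the next 10,
// newest first:
//
//	regions, skipped, err := dbFetchAddrIndexEntries(bucket, addrKey, 5, 10, true, fetchBlockHash)
//
// If fewer than 5 entries exist in total, regions is nil and skipped
// reports how many entries were actually skipped.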
// minEntriesToReachLevel returns the minimum number of entries that are
// required to reach the given address index level.
func minEntriesToReachLevel(level uint8) int {
maxEntriesForLevel := level0MaxEntries
minRequired := 1
for l := uint8(1); l <= level; l++ {
minRequired += maxEntriesForLevel
maxEntriesForLevel *= 2
}
return minRequired
}
// maxEntriesForLevel returns the maximum number of entries allowed for the
// given address index level.
func maxEntriesForLevel(level uint8) int {
numEntries := level0MaxEntries
for l := level; l > 0; l-- {
numEntries *= 2
}
return numEntries
}
// dbRemoveAddrIndexEntries removes the specified number of entries from
// the address index for the provided key. An assertion error will be returned
// if the count exceeds the total number of entries in the index.
func dbRemoveAddrIndexEntries(bucket internalBucket, addrKey [addrKeySize]byte, count int) error {
// Nothing to do if no entries are being deleted.
if count <= 0 {
return nil
}
// Make use of a local map to track pending updates and define a closure
// to apply it to the database. This is done in order to reduce the
// number of database reads and because there is more than one exit
// path that needs to apply the updates.
pendingUpdates := make(map[uint8][]byte)
applyPending := func() error {
for level, data := range pendingUpdates {
curLevelKey := keyForLevel(addrKey, level)
if len(data) == 0 {
err := bucket.Delete(curLevelKey[:])
if err != nil {
return err
}
continue
}
err := bucket.Put(curLevelKey[:], data)
if err != nil {
return err
}
}
return nil
}
// Loop forwards through the levels while removing entries until the
// specified number has been removed. This will potentially result in
// entirely empty lower levels which will be backfilled below.
var highestLoadedLevel uint8
numRemaining := count
for level := uint8(0); numRemaining > 0; level++ {
// Load the data for the level from the database.
curLevelKey := keyForLevel(addrKey, level)
curLevelData := bucket.Get(curLevelKey[:])
if len(curLevelData) == 0 && numRemaining > 0 {
return AssertError(fmt.Sprintf("dbRemoveAddrIndexEntries "+
"not enough entries for address key %x to "+
"delete %d entries", addrKey, count))
}
pendingUpdates[level] = curLevelData
highestLoadedLevel = level
// Delete the entire level as needed.
numEntries := len(curLevelData) / txEntrySize
if numRemaining >= numEntries {
pendingUpdates[level] = nil
numRemaining -= numEntries
continue
}
// Remove remaining entries to delete from the level.
offsetEnd := len(curLevelData) - (numRemaining * txEntrySize)
pendingUpdates[level] = curLevelData[:offsetEnd]
break
}
// When not all elements in level 0 were removed, there is nothing left
// to do other than updating the database.
if len(pendingUpdates[0]) != 0 {
return applyPending()
}
// At this point there are one or more empty levels before the current
// level which need to be backfilled and the current level might have
// had some entries deleted from it as well. Since all levels after
// level 0 are required to either be empty, half full, or completely
// full, the current level must be adjusted accordingly by backfilling
// each previous level in a way which satisfies the requirements. Any
// entries that are left are assigned to level 0 after the loop as they
// are guaranteed to fit by the logic in the loop. In other words, this
// effectively squashes all remaining entries in the current level into
// the lowest possible levels while following the level rules.
//
// Note that the level after the current level might also have entries
// and gaps are not allowed, so this also keeps track of the lowest
// empty level so the code below knows how far to backfill in case it is
// required.
lowestEmptyLevel := uint8(255)
curLevelData := pendingUpdates[highestLoadedLevel]
curLevelMaxEntries := maxEntriesForLevel(highestLoadedLevel)
for level := highestLoadedLevel; level > 0; level-- {
// When there are not enough entries left in the current level
// for the number that would be required to reach it, clear the
// current level which effectively moves them all up to the
// previous level on the next iteration. Otherwise, there are
// sufficient entries, so update the current level to
// contain as many entries as possible while still leaving
// enough remaining entries required to reach the level.
numEntries := len(curLevelData) / txEntrySize
prevLevelMaxEntries := curLevelMaxEntries / 2
minPrevRequired := minEntriesToReachLevel(level - 1)
if numEntries < prevLevelMaxEntries+minPrevRequired {
lowestEmptyLevel = level
pendingUpdates[level] = nil
} else {
// This level can only be completely full or half full,
// so choose the appropriate offset to ensure enough
// entries remain to reach the level.
var offset int
if numEntries-curLevelMaxEntries >= minPrevRequired {
offset = curLevelMaxEntries * txEntrySize
} else {
offset = prevLevelMaxEntries * txEntrySize
}
pendingUpdates[level] = curLevelData[:offset]
curLevelData = curLevelData[offset:]
}
curLevelMaxEntries = prevLevelMaxEntries
}
pendingUpdates[0] = curLevelData
if len(curLevelData) == 0 {
lowestEmptyLevel = 0
}
// When the highest loaded level is empty, it's possible the level after
// it still has data and thus that data needs to be backfilled as well.
for len(pendingUpdates[highestLoadedLevel]) == 0 {
// When the next level is empty too, there is no data left to
// continue backfilling, so there is nothing left to do.
// Otherwise, populate the pending updates map with the newly
// loaded data and update the highest loaded level accordingly.
level := highestLoadedLevel + 1
curLevelKey := keyForLevel(addrKey, level)
levelData := bucket.Get(curLevelKey[:])
if len(levelData) == 0 {
break
}
pendingUpdates[level] = levelData
highestLoadedLevel = level
// At this point the highest level is not empty, but it might
// be half full. When that is the case, move it up a level to
// simplify the code below which backfills all lower levels that
// are still empty. This also means the current level will be
// empty, so the loop will perform another iteration to
// potentially backfill this level with data from the next one.
curLevelMaxEntries := maxEntriesForLevel(level)
if len(levelData)/txEntrySize != curLevelMaxEntries {
pendingUpdates[level] = nil
pendingUpdates[level-1] = levelData
level--
curLevelMaxEntries /= 2
}
// Backfill all lower levels that are still empty by iteratively
// halving the data until the lowest empty level is filled.
for level > lowestEmptyLevel {
offset := (curLevelMaxEntries / 2) * txEntrySize
pendingUpdates[level] = levelData[:offset]
levelData = levelData[offset:]
pendingUpdates[level-1] = levelData
level--
curLevelMaxEntries /= 2
}
// The lowest possible empty level is now the highest loaded
// level.
lowestEmptyLevel = highestLoadedLevel
}
// Apply the pending updates.
return applyPending()
}
// addrToKey converts known address types to an addrindex key. An error is
// returned for unsupported types.
func addrToKey(addr util.Address) ([addrKeySize]byte, error) {
switch addr := addr.(type) {
case *util.AddressPubKeyHash:
var result [addrKeySize]byte
result[0] = addrKeyTypePubKeyHash
copy(result[1:], addr.Hash160()[:])
return result, nil
case *util.AddressScriptHash:
var result [addrKeySize]byte
result[0] = addrKeyTypeScriptHash
copy(result[1:], addr.Hash160()[:])
return result, nil
}
return [addrKeySize]byte{}, errUnsupportedAddressType
}
// AddrIndex implements a transaction by address index. That is to say, it
// supports querying all transactions that reference a given address because
// they are either crediting or debiting the address. The returned transactions
// are ordered according to their order of appearance in the blockchain. In
// other words, first by block height and then by offset inside the block.
//
// In addition, support is provided for a memory-only index of unconfirmed
// transactions such as those which are kept in the memory pool before inclusion
// in a block.
type AddrIndex struct {
// The following fields are set when the instance is created and can't
// be changed afterwards, so there is no need to protect them with a
// separate mutex.
db database.DB
dagParams *dagconfig.Params
// The following fields are used to quickly link transactions and
// addresses that have not been included into a block yet when an
// address index is being maintained. They are protected by the
// unconfirmedLock field.
//
// The txnsByAddr field is used to keep an index of all transactions
// which either create an output to a given address or spend from a
// previous output to it keyed by the address.
//
// The addrsByTx field is essentially the reverse and is used to
// keep an index of all addresses which a given transaction involves.
// This allows fairly efficient updates when transactions are removed
// once they are included into a block.
unconfirmedLock sync.RWMutex
txnsByAddr map[[addrKeySize]byte]map[daghash.TxID]*util.Tx
addrsByTx map[daghash.TxID]map[[addrKeySize]byte]struct{}
}
// Ensure the AddrIndex type implements the Indexer interface.
var _ Indexer = (*AddrIndex)(nil)
// Ensure the AddrIndex type implements the NeedsInputser interface.
var _ NeedsInputser = (*AddrIndex)(nil)
// NeedsInputs signals that the index requires the referenced inputs in order
// to properly create the index.
//
// This implements the NeedsInputser interface.
func (idx *AddrIndex) NeedsInputs() bool {
return true
}
// Init is only provided to satisfy the Indexer interface as there is nothing to
// initialize for this index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Init(db database.DB, _ *blockdag.BlockDAG) error {
idx.db = db
return nil
}
// Key returns the database key to use for the index as a byte slice.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Key() []byte {
return addrIndexKey
}
// Name returns the human-readable name of the index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Name() string {
return addrIndexName
}
// Create is invoked when the indexer manager determines the index needs
// to be created for the first time. It creates the bucket for the address
// index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Create(dbTx database.Tx) error {
_, err := dbTx.Metadata().CreateBucket(addrIndexKey)
return err
}
// writeIndexData represents the address index data to be written for one block.
// It consists of the address mapped to an ordered list of the transactions
// that involve the address in the block. It is ordered so the transactions can be
// stored in the order they appear in the block.
type writeIndexData map[[addrKeySize]byte][]int
// indexScriptPubKey extracts all standard addresses from the passed public key
// script and maps each of them to the associated transaction using the passed
// map.
func (idx *AddrIndex) indexScriptPubKey(data writeIndexData, scriptPubKey []byte, txIdx int) {
// Nothing to index if the script is non-standard or otherwise doesn't
// contain any addresses.
_, addr, err := txscript.ExtractScriptPubKeyAddress(scriptPubKey,
idx.dagParams)
if err != nil || addr == nil {
return
}
addrKey, err := addrToKey(addr)
if err != nil {
// Ignore unsupported address types.
return
}
// Avoid inserting the transaction more than once. Since the
// transactions are indexed serially any duplicates will be
// indexed in a row, so checking the most recent entry for the
// address is enough to detect duplicates.
indexedTxns := data[addrKey]
numTxns := len(indexedTxns)
if numTxns > 0 && indexedTxns[numTxns-1] == txIdx {
return
}
indexedTxns = append(indexedTxns, txIdx)
data[addrKey] = indexedTxns
}
// indexBlock extracts all of the standard addresses from all of the transactions
// in the passed block and maps each of them to the associated transaction using
// the passed map.
func (idx *AddrIndex) indexBlock(data writeIndexData, block *util.Block, dag *blockdag.BlockDAG) {
for txIdx, tx := range block.Transactions() {
// Coinbases do not reference any inputs. Since the block is
// required to have already gone through full validation, it has
// already been proven that the first transaction in the block is
// a coinbase.
if txIdx > util.CoinbaseTransactionIndex {
for _, txIn := range tx.MsgTx().TxIn {
// The UTXO set should always have the input since
// the index contract requires it, however, be
// safe and simply ignore any missing entries.
entry, ok := dag.GetUTXOEntry(txIn.PreviousOutpoint)
if !ok {
continue
}
idx.indexScriptPubKey(data, entry.ScriptPubKey(), txIdx)
}
}
for _, txOut := range tx.MsgTx().TxOut {
idx.indexScriptPubKey(data, txOut.ScriptPubKey, txIdx)
}
}
}
// ConnectBlock is invoked by the index manager when a new block has been
// connected to the main chain. This indexer adds a mapping for each address
// the transactions in the block involve.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
_ blockdag.MultiBlockTxsAcceptanceData, _ blockdag.MultiBlockTxsAcceptanceData) error {
// The offset and length of the transactions within the serialized
// block.
txLocs, err := block.TxLoc()
if err != nil {
return err
}
// Build all of the address to transaction mappings in a local map.
addrsToTxns := make(writeIndexData)
idx.indexBlock(addrsToTxns, block, dag)
// Add all of the index entries for each address.
addrIdxBucket := dbTx.Metadata().Bucket(addrIndexKey)
for addrKey, txIdxs := range addrsToTxns {
for _, txIdx := range txIdxs {
err := dbPutAddrIndexEntry(addrIdxBucket, addrKey,
blockID, txLocs[txIdx])
if err != nil {
return err
}
}
}
return nil
}
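
// Editorial note: each call above persists the (blockID, txLocs[txIdx]) pair
// under addrKey, which is exactly what dbFetchAddrIndexEntries later resolves
// back into database.BlockRegion values in TxRegionsForAddress below.
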
// DisconnectBlock is invoked by the index manager when a block has been
// disconnected from the main chain. This indexer removes the address mappings
// that each transaction in the block involves.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) DisconnectBlock(dbTx database.Tx, block *util.Block, dag *blockdag.BlockDAG) error {
// Build all of the address to transaction mappings in a local map.
addrsToTxns := make(writeIndexData)
idx.indexBlock(addrsToTxns, block, dag)
// Remove all of the index entries for each address.
bucket := dbTx.Metadata().Bucket(addrIndexKey)
for addrKey, txIdxs := range addrsToTxns {
err := dbRemoveAddrIndexEntries(bucket, addrKey, len(txIdxs))
if err != nil {
return err
}
}
return nil
}
// TxRegionsForAddress returns a slice of block regions which identify each
// transaction that involves the passed address according to the specified
// number to skip, number requested, and whether or not the results should be
// reversed. It also returns the number actually skipped since it could be less
// in the case where there are not enough entries.
//
// NOTE: These results only include transactions confirmed in blocks. See the
// UnconfirmedTxnsForAddress method for obtaining unconfirmed transactions
// that involve a given address.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) TxRegionsForAddress(dbTx database.Tx, addr util.Address, numToSkip, numRequested uint32, reverse bool) ([]database.BlockRegion, uint32, error) {
addrKey, err := addrToKey(addr)
if err != nil {
return nil, 0, err
}
var regions []database.BlockRegion
var skipped uint32
err = idx.db.View(func(dbTx database.Tx) error {
// Create closure to lookup the block hash given the ID using
// the database transaction.
fetchBlockHash := func(id []byte) (*daghash.Hash, error) {
// Deserialize and populate the result.
return blockdag.DBFetchBlockHashBySerializedID(dbTx, id)
}
var err error
addrIdxBucket := dbTx.Metadata().Bucket(addrIndexKey)
regions, skipped, err = dbFetchAddrIndexEntries(addrIdxBucket,
addrKey, numToSkip, numRequested, reverse,
fetchBlockHash)
return err
})
return regions, skipped, err
}
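
// examplePagedRegions is an editorial sketch of paginated lookup: skip the
// first 10 confirmed matches for addr and fetch up to 5 more, newest first.
// The dbTx parameter appears unused by the implementation above (the method
// opens its own read-only view), so nil is passed here.
func examplePagedRegions(idx *AddrIndex, addr util.Address) ([]database.BlockRegion, uint32, error) {
    return idx.TxRegionsForAddress(nil, addr, 10, 5, true)
}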
// indexUnconfirmedAddresses modifies the unconfirmed (memory-only) address
// index to include mappings for the addresses encoded by the passed public key
// script to the transaction.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) indexUnconfirmedAddresses(scriptPubKey []byte, tx *util.Tx) {
// The error is ignored here since the only reason it can fail is if the
// script fails to parse and it was already validated before being
// admitted to the mempool.
_, addr, _ := txscript.ExtractScriptPubKeyAddress(scriptPubKey,
idx.dagParams)
// Ignore unsupported address types.
addrKey, err := addrToKey(addr)
if err != nil {
return
}
// Add a mapping from the address to the transaction.
idx.unconfirmedLock.Lock()
addrIndexEntry := idx.txnsByAddr[addrKey]
if addrIndexEntry == nil {
addrIndexEntry = make(map[daghash.TxID]*util.Tx)
idx.txnsByAddr[addrKey] = addrIndexEntry
}
addrIndexEntry[*tx.ID()] = tx
// Add a mapping from the transaction to the address.
addrsByTxEntry := idx.addrsByTx[*tx.ID()]
if addrsByTxEntry == nil {
addrsByTxEntry = make(map[[addrKeySize]byte]struct{})
idx.addrsByTx[*tx.ID()] = addrsByTxEntry
}
addrsByTxEntry[addrKey] = struct{}{}
idx.unconfirmedLock.Unlock()
}
// AddUnconfirmedTx adds all addresses related to the transaction to the
// unconfirmed (memory-only) address index.
//
// NOTE: This transaction MUST have already been validated by the memory pool
// before calling this function with it and have all of the inputs available in
// the provided utxo view. Failure to do so could result in some or all
// addresses not being indexed.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) AddUnconfirmedTx(tx *util.Tx, utxoSet blockdag.UTXOSet) {
// Index addresses of all referenced previous transaction outputs.
//
// The existence checks are elided since this is only called after the
// transaction has already been validated and thus all inputs are
// already known to exist.
for _, txIn := range tx.MsgTx().TxIn {
entry, ok := utxoSet.Get(txIn.PreviousOutpoint)
if !ok {
// Ignore missing entries. This should never happen
// in practice since the function comments specifically
// call out all inputs must be available.
continue
}
idx.indexUnconfirmedAddresses(entry.ScriptPubKey(), tx)
}
// Index addresses of all created outputs.
for _, txOut := range tx.MsgTx().TxOut {
idx.indexUnconfirmedAddresses(txOut.ScriptPubKey, tx)
}
}
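
// exampleMempoolLifecycle is an editorial sketch of the expected call order
// around the mempool: index a transaction when it is admitted (after
// validation, with its inputs available in utxoSet) and remove it again once
// it is mined or evicted.
func exampleMempoolLifecycle(idx *AddrIndex, tx *util.Tx, utxoSet blockdag.UTXOSet) {
    idx.AddUnconfirmedTx(tx, utxoSet)
    // ... later, when the transaction leaves the mempool:
    idx.RemoveUnconfirmedTx(tx.ID())
}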
// RemoveUnconfirmedTx removes the passed transaction from the unconfirmed
// (memory-only) address index.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) RemoveUnconfirmedTx(txID *daghash.TxID) {
idx.unconfirmedLock.Lock()
defer idx.unconfirmedLock.Unlock()
// Remove all address references to the transaction from the address
// index and remove the entry for the address altogether if it no longer
// references any transactions.
for addrKey := range idx.addrsByTx[*txID] {
delete(idx.txnsByAddr[addrKey], *txID)
if len(idx.txnsByAddr[addrKey]) == 0 {
delete(idx.txnsByAddr, addrKey)
}
}
// Remove the entry from the transaction to address lookup map as well.
delete(idx.addrsByTx, *txID)
}
// UnconfirmedTxnsForAddress returns all transactions currently in the
// unconfirmed (memory-only) address index that involve the passed address.
// Unsupported address types are ignored and will result in no results.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) UnconfirmedTxnsForAddress(addr util.Address) []*util.Tx {
// Ignore unsupported address types.
addrKey, err := addrToKey(addr)
if err != nil {
return nil
}
// Protect concurrent access.
idx.unconfirmedLock.RLock()
defer idx.unconfirmedLock.RUnlock()
// Return a new slice with the results if there are any. This ensures
// safe concurrency.
if txns, exists := idx.txnsByAddr[addrKey]; exists {
addressTxns := make([]*util.Tx, 0, len(txns))
for _, tx := range txns {
addressTxns = append(addressTxns, tx)
}
return addressTxns
}
return nil
}
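
// exampleUnconfirmedCount is an editorial sketch: since the method above
// copies the matching transactions into a fresh slice, callers may iterate
// the result without holding any index locks.
func exampleUnconfirmedCount(idx *AddrIndex, addr util.Address) int {
    return len(idx.UnconfirmedTxnsForAddress(addr))
}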
// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
return errors.Errorf("addrindex was turned off for %d blocks and can't be recovered."+
" To resume working drop the addrindex with --dropaddrindex", lastKnownBlockID-currentBlockID)
}
// NewAddrIndex returns a new instance of an indexer that is used to create a
// mapping of all addresses in the blockchain to the respective transactions
// that involve them.
//
// It implements the Indexer interface which plugs into the IndexManager that in
// turn is used by the blockchain package. This allows the index to be
// seamlessly maintained along with the chain.
func NewAddrIndex(dagParams *dagconfig.Params) *AddrIndex {
return &AddrIndex{
dagParams: dagParams,
txnsByAddr: make(map[[addrKeySize]byte]map[daghash.TxID]*util.Tx),
addrsByTx: make(map[daghash.TxID]map[[addrKeySize]byte]struct{}),
}
}
// DropAddrIndex drops the address index from the provided database if it
// exists.
func DropAddrIndex(db database.DB, interrupt <-chan struct{}) error {
return dropIndex(db, addrIndexKey, addrIndexName, interrupt)
}
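
// exampleDropAndRecreate is an editorial sketch of the recovery path that the
// Recover error message above points to: drop the stale index data, then
// construct a fresh AddrIndex for the index manager to rebuild (the manager
// wiring itself is outside this file).
func exampleDropAndRecreate(dagParams *dagconfig.Params, db database.DB, interrupt <-chan struct{}) (*AddrIndex, error) {
    if err := DropAddrIndex(db, interrupt); err != nil {
        return nil, err
    }
    return NewAddrIndex(dagParams), nil
}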


@@ -1,277 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package indexers
import (
"bytes"
"fmt"
"github.com/pkg/errors"
"testing"
"github.com/daglabs/btcd/wire"
)
// addrIndexBucket provides a mock address index database bucket by implementing
// the internalBucket interface.
type addrIndexBucket struct {
levels map[[levelKeySize]byte][]byte
}
// Clone returns a deep copy of the mock address index bucket.
func (b *addrIndexBucket) Clone() *addrIndexBucket {
levels := make(map[[levelKeySize]byte][]byte)
for k, v := range b.levels {
vCopy := make([]byte, len(v))
copy(vCopy, v)
levels[k] = vCopy
}
return &addrIndexBucket{levels: levels}
}
// Get returns the value associated with the key from the mock address index
// bucket.
//
// This is part of the internalBucket interface.
func (b *addrIndexBucket) Get(key []byte) []byte {
var levelKey [levelKeySize]byte
copy(levelKey[:], key)
return b.levels[levelKey]
}
// Put stores the provided key/value pair to the mock address index bucket.
//
// This is part of the internalBucket interface.
func (b *addrIndexBucket) Put(key []byte, value []byte) error {
var levelKey [levelKeySize]byte
copy(levelKey[:], key)
b.levels[levelKey] = value
return nil
}
// Delete removes the provided key from the mock address index bucket.
//
// This is part of the internalBucket interface.
func (b *addrIndexBucket) Delete(key []byte) error {
var levelKey [levelKeySize]byte
copy(levelKey[:], key)
delete(b.levels, levelKey)
return nil
}
// printLevels returns a string with a visual representation of the provided
// address key taking into account the max size of each level. It is useful
// when creating and debugging test cases.
func (b *addrIndexBucket) printLevels(addrKey [addrKeySize]byte) string {
highestLevel := uint8(0)
for k := range b.levels {
if !bytes.Equal(k[:levelOffset], addrKey[:]) {
continue
}
level := uint8(k[levelOffset])
if level > highestLevel {
highestLevel = level
}
}
var levelBuf bytes.Buffer
_, _ = levelBuf.WriteString("\n")
maxEntries := level0MaxEntries
for level := uint8(0); level <= highestLevel; level++ {
data := b.levels[keyForLevel(addrKey, level)]
numEntries := len(data) / txEntrySize
for i := 0; i < numEntries; i++ {
start := i * txEntrySize
num := byteOrder.Uint32(data[start:])
_, _ = levelBuf.WriteString(fmt.Sprintf("%02d ", num))
}
for i := numEntries; i < maxEntries; i++ {
_, _ = levelBuf.WriteString("_ ")
}
_, _ = levelBuf.WriteString("\n")
maxEntries *= 2
}
return levelBuf.String()
}
// sanityCheck ensures that all data stored in the bucket for the given address
// adheres to the level-based rules described by the address index
// documentation.
func (b *addrIndexBucket) sanityCheck(addrKey [addrKeySize]byte, expectedTotal int) error {
// Find the highest level for the key.
highestLevel := uint8(0)
for k := range b.levels {
if !bytes.Equal(k[:levelOffset], addrKey[:]) {
continue
}
level := uint8(k[levelOffset])
if level > highestLevel {
highestLevel = level
}
}
// Ensure the expected total number of entries are present and that
// all levels adhere to the rules described in the address index
// documentation.
var totalEntries int
maxEntries := level0MaxEntries
for level := uint8(0); level <= highestLevel; level++ {
// Level 0 can't have more entries than the max allowed if the
// levels after it have data and it can't be empty. All other
// levels must either be half full or full.
data := b.levels[keyForLevel(addrKey, level)]
numEntries := len(data) / txEntrySize
totalEntries += numEntries
if level == 0 {
if (highestLevel != 0 && numEntries == 0) ||
numEntries > maxEntries {
return errors.Errorf("level %d has %d entries",
level, numEntries)
}
} else if numEntries != maxEntries && numEntries != maxEntries/2 {
return errors.Errorf("level %d has %d entries", level,
numEntries)
}
maxEntries *= 2
}
if totalEntries != expectedTotal {
return errors.Errorf("expected %d entries - got %d", expectedTotal,
totalEntries)
}
// Ensure all of the numbers are in order starting from the highest
// level moving to the lowest level.
expectedNum := uint32(0)
for level := highestLevel + 1; level > 0; level-- {
// Note: the loop variable is offset by one to avoid uint8
// underflow, so the levels map is indexed with level-1.
data := b.levels[keyForLevel(addrKey, level-1)]
numEntries := len(data) / txEntrySize
for i := 0; i < numEntries; i++ {
start := i * txEntrySize
num := byteOrder.Uint32(data[start:])
if num != expectedNum {
return errors.Errorf("level %d offset %d does "+
"not contain the expected number of "+
"%d - got %d", level, i, num,
expectedNum)
}
expectedNum++
}
}
return nil
}
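
// maxEntriesForLevel is an editorial helper that makes the capacity rule used
// by sanityCheck explicit: level 0 holds at most level0MaxEntries entries and
// every subsequent level doubles, so level n holds level0MaxEntries * 2^n.
func maxEntriesForLevel(level uint8) int {
    maxEntries := level0MaxEntries
    for i := uint8(0); i < level; i++ {
        maxEntries *= 2
    }
    return maxEntries
}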
// TestAddrIndexLevels ensures that adding and deleting entries to the address
// index creates multiple levels as described by the address index
// documentation.
func TestAddrIndexLevels(t *testing.T) {
t.Parallel()
tests := []struct {
name string
key [addrKeySize]byte
numInsert int
printLevels bool // Set to help debug a specific test.
}{
{
name: "level 0 not full",
numInsert: level0MaxEntries - 1,
},
{
name: "level 1 half",
numInsert: level0MaxEntries + 1,
},
{
name: "level 1 full",
numInsert: level0MaxEntries*2 + 1,
},
{
name: "level 2 half, level 1 half",
numInsert: level0MaxEntries*3 + 1,
},
{
name: "level 2 half, level 1 full",
numInsert: level0MaxEntries*4 + 1,
},
{
name: "level 2 full, level 1 half",
numInsert: level0MaxEntries*5 + 1,
},
{
name: "level 2 full, level 1 full",
numInsert: level0MaxEntries*6 + 1,
},
{
name: "level 3 half, level 2 half, level 1 half",
numInsert: level0MaxEntries*7 + 1,
},
{
name: "level 3 full, level 2 half, level 1 full",
numInsert: level0MaxEntries*12 + 1,
},
}
nextTest:
for testNum, test := range tests {
// Insert entries in order.
populatedBucket := &addrIndexBucket{
levels: make(map[[levelKeySize]byte][]byte),
}
for i := 0; i < test.numInsert; i++ {
txLoc := wire.TxLoc{TxStart: i * 2}
err := dbPutAddrIndexEntry(populatedBucket, test.key,
uint64(i), txLoc)
if err != nil {
t.Errorf("dbPutAddrIndexEntry #%d (%s) - "+
"unexpected error: %v", testNum,
test.name, err)
continue nextTest
}
}
if test.printLevels {
t.Log(populatedBucket.printLevels(test.key))
}
// Delete entries from the populated bucket until all entries
// have been deleted. The bucket is reset to the fully
// populated bucket on each iteration so every combination is
// tested. Notice the upper limit purposely exceeds the number
// of entries to ensure attempting to delete more entries than
// there are works correctly.
for numDelete := 0; numDelete <= test.numInsert+1; numDelete++ {
// Clone populated bucket to run each delete against.
bucket := populatedBucket.Clone()
// Remove the number of entries for this iteration.
err := dbRemoveAddrIndexEntries(bucket, test.key,
numDelete)
if err != nil {
if numDelete <= test.numInsert {
t.Errorf("dbRemoveAddrIndexEntries (%s) "+
" delete %d - unexpected error: "+
"%v", test.name, numDelete, err)
continue nextTest
}
}
if test.printLevels {
t.Log(bucket.printLevels(test.key))
}
// Sanity check the levels to ensure they adhere to all
// rules.
numExpected := test.numInsert
if numDelete <= test.numInsert {
numExpected -= numDelete
}
err = bucket.sanityCheck(test.key, numExpected)
if err != nil {
t.Errorf("sanity check fail (%s) delete %d: %v",
test.name, numDelete, err)
continue nextTest
}
}
}
}
