Compare commits

...

330 Commits

Author SHA1 Message Date
Elichai Turkel
84f8a19b98 fdlkjsjkldsfhjkds 2021-01-19 11:43:13 +02:00
Ori Newman
799eb7515c Test validateAndInsertPruningPoint (#1420)
* Add TestValidateAndInsertPruningPoint

* Check fake UTXO set and validate that the pruning point changed
2021-01-18 18:17:13 +02:00
Mike Zak
0769705b37 Update to version 0.8.6 2021-01-18 15:17:15 +02:00
Svarog
189e3b6be9 Fix missing utxo notifications (#1428)
* fix missing UTXO notifications (#1426)

* Remove redundant semicolon

Co-authored-by: aspect <anton.yemelyanov@gmail.com>
2021-01-18 13:08:16 +02:00
Mike Zak
e8dfbc8367 Merge remote-tracking branch 'origin/master' into v0.8.5-dev 2021-01-18 11:36:25 +02:00
Ori Newman
d70740331a Remove hashesQueueSet (#1424)
Co-authored-by: Svarog <feanorr@gmail.com>
2021-01-18 09:10:26 +02:00
Svarog
9a81b1328a Add the address of the connected node to the send/receiveVersion log (#1423)
* Add the address of the connected node to the send/receiveVersion log

* Don't call functions before LogAndMeasureExecutionTime

Co-authored-by: Ori Newman <orinewman1@gmail.com>
2021-01-17 16:31:48 +02:00
Ori Newman
d4f3a252ff Add TestIsFinalizedTransaction (#1422)
Co-authored-by: Svarog <feanorr@gmail.com>
2021-01-17 15:47:49 +02:00
Ori Newman
f14527de4c Give different limit to the RPC server (#1421) 2021-01-17 13:58:42 +02:00
Ori Newman
dd57e6abe6 Fix checkParentHeadersExist and cover pruning_violation_proof_of_work_and_difficulty.go with tests (#1418)
* Fix checkParentHeadersExist and cover pruning_violation_proof_of_work_and_difficulty.go with tests

* Remove unused variable

* Change consensus violation

* Change condition order

* Get rid of irrelevant error codes in extractRejectCode

* Fix wrong test db names

* Fix checkParentHeadersExist
2021-01-17 11:27:04 +02:00
Ori Newman
67be4d82bf Don't mark bad merkle root as invalid (#1419)
* Don't mark bad merkle root as invalid

* Fix TestBlockStatus

* Move discardAllChanges inside the inner if
2021-01-17 10:40:05 +02:00
Ori Newman
a1381d6768 Add TestCheckParentBlockBodiesExist (#1405)
* Add TestCheckParentBlockBodiesExist

* Use block in pruning point's anticone for the test

* Fix test db name
2021-01-14 13:31:17 +02:00
Ori Newman
10b519a3e2 Add tests to ValidateHeaderInIsolation (#1415)
* Add tests to ValidateHeaderInIsolation

* Fix tests db names

Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
2021-01-14 11:06:08 +02:00
Ori Newman
a35f8269ea Add checkBlockIsNotPruned (#1413)
* Add checkBlockIsNotPruned

* Fix test name and comment

Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
2021-01-14 10:56:22 +02:00
stasatdaglabs
15af6641fc Send the IBD root UTXO set in chunks instead of a massive monolithic message (#1412)
* Extract syncPruningPointUTXOSet to a separate method.

* Implement logic to send pruning point utxo set chunks in a loop.

* Replace IBDRootUTXOSetAndBlockMessage with IbdRootUtxoSetChunkMessage.

* Add a new message: RequestNextIBDRootUTXOSetChunk.

* Add a new message: DoneIBDRootUTXOSetChunks.

* Protect HandleRequestIBDRootUTXOSetAndBlock from rogue messages.

* Reimplement receiveIBDRootUTXOSetAndBlock.

* Add CmdDoneIBDRootUTXOSetChunks to the HandleRelayInvs flow.

* Decrease the max message size to 10mb.

* Fix bad step.

* Fix confusion between outgoing/incoming routes.

* Measure how long it takes to send/receive the UTXO set.

* Use LogAndMeasure in handleRequestIBDRootUTXOSetAndBlockFlow.
2021-01-13 18:03:07 +02:00
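For context, a minimal sketch of the chunking loop #1412 describes, under assumed names and an assumed chunk size (the real flow uses kaspad's own appmessage and router types): send one chunk, wait for the peer's RequestNextIBDRootUTXOSetChunk, repeat, then finish with DoneIBDRootUTXOSetChunks.

```go
package utxosync

// Hypothetical message types standing in for kaspad's appmessage types.
type ibdRootUTXOSetChunkMessage struct{ serializedUTXOs [][]byte }
type doneIBDRootUTXOSetChunksMessage struct{}

const maxUTXOsPerChunk = 1000 // illustrative chunk size, not taken from the PR

// sendUTXOSetInChunks sends the UTXO set piecewise, pausing between chunks
// until the peer asks for the next one.
func sendUTXOSetInChunks(utxos [][]byte, send func(message interface{}) error,
	waitForNextChunkRequest func() error) error {

	for len(utxos) > 0 {
		chunkSize := maxUTXOsPerChunk
		if len(utxos) < chunkSize {
			chunkSize = len(utxos)
		}
		if err := send(ibdRootUTXOSetChunkMessage{serializedUTXOs: utxos[:chunkSize]}); err != nil {
			return err
		}
		utxos = utxos[chunkSize:]
		if len(utxos) > 0 {
			// Wait for the peer's request, so no single message ever
			// exceeds the (now smaller) max message size.
			if err := waitForNextChunkRequest(); err != nil {
				return err
			}
		}
	}
	return send(doneIBDRootUTXOSetChunksMessage{})
}
```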
Svarog
1b97cfb302 Prevent a race condition in findHighestSharedBlockHash where we get headersSelectedTip and then pass it as highHash to GetBlockLocator, without locking consensus (#1410)
* Prevent a race condition in findHighestSharedBlockHash where we get headersSelectedTip and then pass it as highHash to GetBlockLocator, without locking consensus

* Restart findHighestSharedBlockHash if lowHash or highHash are no longer in selectedParentChain

* Test for specifically ErrBlockNotInSelectedParentChain instead of database NotFound error

* Fix TestCreateHeadersSelectedChainBlockLocator

Co-authored-by: Ori Newman <orinewman1@gmail.com>
2021-01-13 17:55:37 +02:00
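A minimal sketch of the race fix in #1410, with assumed names: the tip read and the locator construction now happen under one consensus lock, so the headers-selected-tip cannot be replaced between the two calls.

```go
package ibd

import "sync"

// findHighestSharedBlockHashSketch reads the tip and builds the locator in a
// single critical section; before the fix, the tip could change in between.
func findHighestSharedBlockHashSketch(consensusLock *sync.Mutex,
	headersSelectedTip func() [32]byte,
	blockLocator func(highHash [32]byte) [][32]byte) [][32]byte {

	consensusLock.Lock()
	defer consensusLock.Unlock()

	highHash := headersSelectedTip() // stable for the whole critical section
	return blockLocator(highHash)    // highHash is guaranteed still the tip here
}
```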
Ori Newman
61be80a60c Add TestCheckMergeSizeLimit (#1408)
Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
2021-01-13 16:19:52 +02:00
Elichai Turkel
83134cc2b5 Add a codecov yml, disable patch checks and make status checks always pass (#1414) 2021-01-13 15:57:57 +02:00
Svarog
4988817da1 Reject SubmitBlock if the node is in IBD (#1409)
* Reject SubmitBlock if the node is in IBD

* Add comments

* Don't use iota for RejectReason constants, since in .proto those are hard-coded
2021-01-13 15:04:55 +02:00
Elichai Turkel
68bd8330ac Log the networks hashrate (#1406)
* Log the hashrate of each block

* Add a test for GetHashrateString

* Move difficulty related functions to its own package

* Convert the validated log in validateAndInsertBlock to a log function

* Add tests for max/min int
2021-01-13 12:51:23 +02:00
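A hedged sketch of what a GetHashrateString-style helper does: render a hashes-per-second figure with a human-readable unit. The unit ladder and formatting are assumptions for illustration, not kaspad's exact output.

```go
package difficulty

import "fmt"

// hashrateString scales a raw hashes-per-second value down by factors of
// 1000 until it fits a readable unit.
func hashrateString(hashesPerSecond float64) string {
	units := []string{"H/s", "KH/s", "MH/s", "GH/s", "TH/s", "PH/s", "EH/s"}
	unitIndex := 0
	for hashesPerSecond >= 1000 && unitIndex < len(units)-1 {
		hashesPerSecond /= 1000
		unitIndex++
	}
	return fmt.Sprintf("%.2f %s", hashesPerSecond, units[unitIndex])
}
```

With this, hashrateString(1.5e9) yields "1.50 GH/s".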
Elichai Turkel
192dd2ba8f Add codecov to github (#1358) 2021-01-13 10:35:12 +02:00
Ori Newman
cc49b1826a Reset windowExpectedEndTime after each window (#1407) 2021-01-12 21:34:26 +02:00
Svarog
ce348373c6 Delete existing UTXOSet when committing VirtualUTXOSet. (#1403) 2021-01-12 16:51:56 +02:00
talelbaz
8ad5725421 Add a test for the error cases of the function 'checkBlockStatus()' (#1398)
* Add a test for the error cases of the function checkBlockStatus.

* Address review comments.

* Move test to validateandinsertblock_test.go

Co-authored-by: tal <tal@daglabs.com>
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2021-01-12 15:55:02 +02:00
Ori Newman
23a2fbf401 Remove erroneous finality optimization from LowestChainBlockAboveOrEqualToBlueScore (#1402)
* Remove erroneous finality optimization from LowestChainBlockAboveOrEqualToBlueScore

* Add TestLowestChainBlockAboveOrEqualToBlueScore

* Remove unnecessary fields from dagTraversalManager
2021-01-12 15:27:08 +02:00
Elichai Turkel
c1361e5b3e Change log sizes and add some new features to logger (#1400)
* Increase default log sizes, and increase kaspad log sizes

* Add an option to not print logs to stdout

* Allow logs to be printed in the current working directory

* Add more pruning related logs

* Add comment and increase log rotations to save last 64 logs
2021-01-12 13:26:29 +02:00
Ori Newman
53744ceb45 Compare transaction IDs with Equal (#1401) 2021-01-12 12:53:33 +02:00
Ori Newman
bcf2302460 Add high hash to block locator, and add block locator tests (#1397)
* Include high hash in the block locator

* Add tests for block locator

* Remove redundant function

* Remove redundant assignments
2021-01-12 11:16:25 +02:00
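For context, a sketch of the classic exponentially spaced block locator that #1397's tests exercise, with the selected chain modeled as a slice ordered from low (index 0) to high; the first entry is the high hash (explicitly included, per the PR) and the last is always the low hash.

```go
package blocklocator

// blockLocator builds an exponentially spaced locator over a selected chain.
func blockLocator(chain [][32]byte) [][32]byte {
	if len(chain) == 0 {
		return nil
	}
	locator := make([][32]byte, 0)
	step := 1
	for i := len(chain) - 1; i > 0; i -= step {
		locator = append(locator, chain[i]) // first iteration adds the high hash
		step *= 2                           // doubling the step keeps the locator O(log n)
	}
	locator = append(locator, chain[0]) // always terminate with the low hash
	return locator
}
```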
Ori Newman
6101e6bdb6 Fix UTXO serialization, its test, and the static check that missed it (#1396)
* Fix UTXO serialization, its test, and the static check that missed it

* Remove duplicate case

* Use one line for static check

Co-authored-by: Elichai Turkel <elichai.turkel@gmail.com>
2021-01-11 17:45:17 +02:00
stasatdaglabs
d9b97afb92 Don't swallow errors in HandleNewBlockTransactions. (#1390)
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2021-01-11 17:16:15 +02:00
Ori Newman
b8ca33d91d Add selected chain store and optimize block locator with it (#1394)
* Add selected chain store and optimize block locator with it

* Fix build error

* Fix comments

* Fix IsStaged

* Rename CalculateSelectedParentChainChanges to CalculateChainPath and SelectedParentChainChanges->SelectedChainPath

* Use binary.LittleEndian directly to allow compiler optimizations

* Remove boolean from HeadersSelectedChainStore interface

* Prevent endless loop in block locator
2021-01-11 15:51:45 +02:00
Svarog
c7deda41c6 Fix deserialization of script version in UTXOSet deserialization (#1395)
* Initialize protoUTXOSetIterator with index = -1

* Handle the error when deserializing the script version fails

* Add support for (de)serialization of (u)int16

* Log the error when converting it into ErrMalformedUTXO
2021-01-11 15:23:27 +02:00
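A sketch of the (u)int16 support this PR adds, assuming little-endian byte order to match the binary.LittleEndian usage mentioned elsewhere in this log; names are illustrative. Note that readUint16 surfaces the read error rather than swallowing it, in the spirit of the script-version fix above.

```go
package serialization

import (
	"encoding/binary"
	"io"
)

// writeUint16 writes a uint16 as two little-endian bytes.
func writeUint16(w io.Writer, value uint16) error {
	var buf [2]byte
	binary.LittleEndian.PutUint16(buf[:], value)
	_, err := w.Write(buf[:])
	return err
}

// readUint16 reads exactly two bytes and decodes them, returning any error.
func readUint16(r io.Reader) (uint16, error) {
	var buf [2]byte
	if _, err := io.ReadFull(r, buf[:]); err != nil {
		return 0, err
	}
	return binary.LittleEndian.Uint16(buf[:]), nil
}
```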
talelbaz
434cf45112 Add a new test to validate PoW, and fix the mainnet and testnet genesis block data. (#1389)
* Commit in order to do fetch & merge

* Add a new test to validate PoW, and fix the mainnet and testnet genesis block data.

* Fix the window test for testnet and change the expected pruning point for mainnet and testnet.

* Delete the "solveBlock" function in proof_of_work_test.go and call mining.SolveBlock instead. Also, remove the use of randomness in the "solveBlockWithWrongPOW" function.

* Replace 0xFFFFFFFFFFFFFFFF with math.MaxUint64 in the "solveBlockWithWrongPOW" function and change the comment of the "TestPOW" function.

* Replace 0xFFFFFFFFFFFFFFFF with math.MaxUint64 in the "solveBlockWithWrongPOW" function and change the comment of the "TestPOW" function.

* Change <= to < in the for statement in the "solveBlockWithWrongPOW" function.

* Add an argument to the "NewTestConsensus" call (its function signature has changed).

Co-authored-by: tal <tal@daglabs.com>
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2021-01-11 13:15:26 +02:00
Elichai Turkel
2cc0bf1639 Optimize block locator using finality store (#1386)
* Make sure the block locator doesn't include a hash lower than the lowHash

* Use finalityStore to optimize LowestChainBlockAboveOrEqualToBlueScore
2021-01-10 13:36:02 +02:00
Ori Newman
0f2d0d45b5 Add TargetBlocksPerSecond for kaspaminer (#1385)
Co-authored-by: Svarog <feanorr@gmail.com>
2021-01-10 12:44:24 +02:00
Svarog
09e1a73340 Added some logs to block-relay and IBD flows (#1384)
Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
2021-01-10 12:05:34 +02:00
Svarog
c6d20c1f6f Start IBDBlockLocator from PruningPoint instead of Genesis (#1383) 2021-01-10 11:06:06 +02:00
Svarog
49e0a2a2e7 Add basic support for archival node (#1370)
* Add archival cli flag

* If --archival was activated - don't delete anything

* Fix tests

* Still change block status to StatusHeaderOnly even in archival nodes
2021-01-10 10:25:15 +02:00
Ori Newman
285ae5cd40 Update READMEs and add CONTRIBUTING.md (#1381)
* Update READMEs and add CONTRIBUTING.md

* Update go version

* Update READMEs and CONTRIBUTING.md

* Update README.md

* Update README.md

* Update README.md
2021-01-10 09:13:00 +02:00
stasatdaglabs
541205904e Add RPC documentation (#1379)
* Split messages.proto to p2p and rpc.

* Split messages.proto to p2p and rpc.

* Write a short intro to the RPC docs.

* Start documenting RPC calls.

* Use a custom protoc-gen-doc.

* Continue writing RPC documentation.

* Finish writing RPC documentation.

* Fix a formatting error.

* Fix merge errors.

* Fix formatting into protowire/README.md.

* Rerun go generate ..

Co-authored-by: Ori Newman <orinewman1@gmail.com>
2021-01-07 16:55:47 +02:00
Ori Newman
256b7f25f1 Decrease grpc client dial timeout to one second (#1378) 2021-01-07 16:26:37 +02:00
Elichai Turkel
82d95c59a3 Increase log files sizes (#1374) 2021-01-07 11:17:13 +02:00
Ori Newman
79c5d4595e Remove gencerts (#1371) 2021-01-07 10:00:13 +02:00
stasatdaglabs
b195301a99 Add IsIBDPeer to GetConnectedPeerInfoResponse. (#1367)
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2021-01-06 17:33:51 +02:00
Ori Newman
4ad89056c2 Implement getMempoolEntries (#1369)
* Implement getMempoolEntries

* Fix comment

* Fix comment
2021-01-06 17:26:21 +02:00
Elichai Turkel
64f6cd2178 Merge pull request #1368 from kaspanet/v0.8.4-dev
Upgrade master to 0.8.4rc0
2021-01-06 14:43:39 +02:00
Mike Zak
26368cd674 Update to version 0.8.5 2021-01-06 14:04:46 +02:00
Svarog
9ea4c0fa38 Add a sanity check that makes sure the UTXO set we save fits the pruning point's commitment. (#1366)
Co-authored-by: Elichai Turkel <elichai.turkel@gmail.com>
2021-01-06 13:27:02 +02:00
Elichai Turkel
a04a5462ae Update devnet genesis with older timestamp (#1364)
* Update devnet genesis with older timestamp

* Update BlueWindow test for new genesis
2021-01-06 12:50:27 +02:00
Elichai Turkel
4577023e44 Add script pubkey version to signature hash (#1360)
* Replace 0xffff with math.MaxUint16 on version checks

* Add script version to the signature hash

Co-authored-by: Ori Newman <orinewman1@gmail.com>
2021-01-06 11:51:12 +02:00
Ori Newman
d8293ef635 Revert "Add recoverability for UTXO index (#1342)" (#1353)
This reverts commit 778375c4
2021-01-06 11:41:52 +02:00
Ori Newman
2059d6ba56 Delete sync rate mechanism (#1356)
Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
2021-01-06 10:43:44 +02:00
Ori Newman
3ec1cbe236 Add TestBlockStatus (#1361)
Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
2021-01-06 10:37:10 +02:00
Ori Newman
741e0962be Remove diffs from restoreUTXO logs (#1354)
Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
2021-01-06 10:31:42 +02:00
Elichai Turkel
6279db2bf1 Reverse ghostdag tie break direction (#1359)
* Remove hash reversal in ghostdag

* Update tests after changing ghostdag hash direction

Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
2021-01-05 19:10:59 +02:00
stasatdaglabs
e24bc527f3 Update the max block size and mass constants to reasonable values (#1344)
Co-authored-by: Elichai Turkel <elichai.turkel@gmail.com>
2021-01-05 18:36:08 +02:00
talelbaz
8a309a7d2a Upgradability mechanisms script version (#1313)
* ''

* ''

* ''

* Changes genesis block version to 0.

* a

* a

* All tests are done.

* All tests pass after changing the block version from int32 to uint16

* Adds validation of rejecting blocks with unknown versions.

* Changes txn version from int32 to uint16.

* .

* Adds comments to exported functions.

* Change functions name from ConvertFromRpcScriptPubKeyToRPCScriptPubKey to ConvertFromAppMsgRPCScriptPubKeyToRPCScriptPubKey and from ConvertFromRPCScriptPubKeyToRpcScriptPubKey to ConvertFromRPCScriptPubKeyToAppMsgRPCScriptPubKey

* change comment to "ScriptPublicKey represents a Kaspad ScriptPublicKey"

* Delete the (tx.Version < 0) part of the if statement, since it can no longer be true.

* Revert protobuf version.

* Fix a comment.

* Fix a comment.

* Rename a variable.

* Rename a variable.

* Remove a const.

* Rename a type.

* Rename a field.

* Rename a field.

* Remove commented-out code.

* Remove dangerous nil case in DomainTransactionOutput.Clone().

* Remove a constant.

* Fix a string.

* Fix wrong totalScriptPubKeySize in transactionMassStandalonePart.

* Remove a constant.

* Remove an unused error.

* Fix a serialization error.

* Specify version types to be uint16 explicitly.

* Use constants.ScriptPublicKeyVersion.

* Fix a bad test.

* Remove some whitespace.

* Add a case to utxoEntry.Equal().

* Rename scriptPubKey to scriptPublicKey.

* Remove a TODO.

* Rename constants.

* Rename a variable.

* Add version to parseShortForm.

Co-authored-by: tal <tal@daglabs.com>
Co-authored-by: stasatdaglabs <stas@daglabs.com>
2021-01-05 17:50:09 +02:00
Elichai Turkel
70d515a5a9 PruningManager: Delete tips that are in pruningPoint.Anticone from the tips list (#1351) 2021-01-05 14:14:40 +02:00
Elichai Turkel
72a7ca53e6 Save and expose the database in TestConsensus (#1349) 2021-01-05 14:13:33 +02:00
Svarog
119e7374e1 Move common log message to trace (#1348) 2021-01-05 12:51:48 +02:00
Elichai Turkel
e509cb1597 Small performance improvements to BlueWindow (#1343)
* Convert BlockGHOSTDAGData from an interface to a public struct with getters

* Move hashes.Less to externalapi so it can access the hashes directly without copying

* Reduce calls to ghostdagstore.Get in blueWindow

* Simplify the logic in RequiredDifficulty and reuse big.Int

* Remove bigintpool as it's no longer used

* Use ChooseSelectedParent in RequiredDifficulty instead of looping over the parents

* Remove comment
2021-01-05 12:13:02 +02:00
Ori Newman
0fb97a4f37 Add logs (#1346)
* Add logs

* Fix logInterval to const
2021-01-04 17:04:13 +02:00
Svarog
789a7379bd Require only that inputs not be prefilled (#1345)
* Fix bug in validateAndInsertPruningPoint: if ValidateAndInsertBlock returned a non-RuleError error, the error was ignored

* Convert checkNoPrefilledFields into checkNoPrefilledInputs

* Add log line

* clone pruning point when passing to validateBlockTransactionsAgainstPastUTXO
2021-01-04 15:55:08 +02:00
Ori Newman
778375c4af Add recoverability for UTXO index (#1342)
* Add recoverability for UTXO index

* Add comment

* Rename UTXOOutpointPair->OutpointUTXOPair

* Get rid of the db transaction on resetStore and collect all keys before deleting

* Use VirtualSelectedParent instead of selected tip

* Fix error
2021-01-04 14:15:51 +02:00
stasatdaglabs
acef311fb4 Improve the performance of downloading headers (#1340)
* Add a new message: BlockHeadersMessage.

* Add a new message: BlockHeadersMessage.

* Send a lot of headers as a single message instead of many small messages.

* Keep a short queue of blockHeadersMessages so that there's never a moment when the node is not validating and inserting headers

* Add a missing return statement.

* Remove MsgBlockHeader from payloads.
2021-01-03 17:57:14 +02:00
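A sketch of the queueing idea in #1340, with assumed types: a goroutine keeps reading header batches off the wire into a small buffered channel while the main loop validates and inserts the previous batch, so the node is never idle waiting for the network. The queue depth of 2 is illustrative.

```go
package headersync

type headerBatch struct{ headers [][]byte }

// pipelineHeaderBatches overlaps receiving batches with processing them.
// readBatch returns nil when the sender signals it is done. (Error paths that
// abandon the reader goroutine are simplified away in this sketch.)
func pipelineHeaderBatches(readBatch func() (*headerBatch, error),
	processBatch func(*headerBatch) error) error {

	queue := make(chan *headerBatch, 2) // short queue; depth is illustrative
	readErr := make(chan error, 1)
	go func() {
		defer close(queue)
		for {
			batch, err := readBatch()
			if err != nil {
				readErr <- err
				return
			}
			if batch == nil {
				return
			}
			queue <- batch
		}
	}()
	for batch := range queue {
		if err := processBatch(batch); err != nil {
			return err
		}
	}
	select {
	case err := <-readErr:
		return err
	default:
		return nil
	}
}
```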
stasatdaglabs
e8cad2b2f3 Send headers continuously without needing to run the BlockLocator protocol after every ~maxBlueScoreDifference blocks (#1339)
* Send headers continuously without needing to run the BlockLocator protocol after every ~maxBlueScoreDifference blocks

* Add logging.

* Make logs more descriptive.
2021-01-03 15:50:21 +02:00
Ori Newman
97fddeff4b Don't mine when node is not connected (#1338) 2021-01-03 14:57:09 +02:00
Svarog
d6fe9a3017 Filter headers-only blocks out of parentChildren when selecting virtualSelectedParent (#1337) 2021-01-03 14:35:03 +02:00
Ori Newman
1abffd472c Add lock to mempool.GetTransaction (#1336) 2021-01-03 13:06:46 +02:00
Svarog
8c8da3b01f Convert all log messages in pick_virtual_parents.go to Debugf (#1335) 2021-01-03 10:20:37 +02:00
Elichai Turkel
51625e7967 Remove LevelDBSnapshot from LevelDBTransaction (#1334)
* Remove leveldb snapshot from LevelDBTransaction

* Update transactions_test.go to represent the new Transaction logic
2020-12-31 17:58:01 +02:00
Svarog
6fa3aa1dca Change SyncRateWindow to 15 minutes + update sync times on block headers as well (#1331)
* Change SyncRateWindow to 15 minutes + update sync times on block headers as well

* Rename result to isSyncRateTooLow

* Fix formula for expected blocks
2020-12-31 16:45:56 +02:00
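A sketch of the corrected expected-blocks formula in #1331: over the measurement window, a node that is keeping up should have seen roughly windowDuration/targetTimePerBlock blocks. The 0.85 tolerance factor is an illustrative assumption, not kaspad's actual value.

```go
package syncrate

import "time"

// isSyncRateTooLow reports whether the observed block count falls well below
// the count expected from the target block rate.
func isSyncRateTooLow(blocksInWindow int, windowDuration, targetTimePerBlock time.Duration) bool {
	expectedBlocks := float64(windowDuration) / float64(targetTimePerBlock)
	return float64(blocksInWindow) < expectedBlocks*0.85
}
```

With a 15-minute window and one block per second, expectedBlocks comes out to 900.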
Ori Newman
23304a4977 Add UTXO set cache (#1333)
* Add UTXO set cache

* Add clear method to cache
2020-12-31 16:34:46 +02:00
stasatdaglabs
b473a09111 Fix a crash in the UTXO index (#1332)
* Add a field to TransactionAcceptanceData: TransactionInputUTXOEntries.

* Fix failing tests.

* Add transactionInputUtxoEntries to the database.

* Populate transactionInputUTXOEntries in applyMergeSetBlocks and use them in the UTXO index.

* Remove UTXOEntry.Clone().

* Add an additional equality test.
2020-12-31 14:50:14 +02:00
Ori Newman
dd35669861 Check that there are no prefilled fields when validating a block (#1329)
* Check that there are no prefilled fields when validating a block

* Use cleanBlockPrefilledFields in AddBlock

* Move cleanBlockPrefilledFields to BuildBlockWithParents

* Move cleanBlockPrefilledFields to func (bb *testBlockBuilder) BuildBlockWithParents
2020-12-30 18:31:17 +02:00
stasatdaglabs
9401b77a4f Improve the performance of GetBlock and GetBlockHeader. (#1328) 2020-12-30 18:13:12 +02:00
Elichai Turkel
e87368157d Update go.yml (#1330) 2020-12-30 17:46:17 +02:00
stasatdaglabs
7dd0188838 Move the heavy lifting in BlockLocator from the syncer to the syncee (#1324)
* Add a new message: BlockLocatorHighestHash.

* Add a new message: IBDBlockLocator.

* Implement HandleIBDBlockLocator.

* Reimplement findHighestSharedBlockHash.

* Make HandleIBDBlockLocator only return hashes that are in the selected parent chain of the target hash.

* Increase the cache sizes of blockRelationStore, reachabilityDataStore, and ghostdagDataStore.

* Fix wrong initial highHash in findHighestSharedBlockHash.

* Make go vet happy.

* Protect against receiving wrong messages when expecting MsgIBDBlockLocatorHighestHash.
2020-12-30 15:44:14 +02:00
Ori Newman
6172e48adc Don't ban when capacity has been reached (#1326) 2020-12-30 15:15:41 +02:00
Elichai Turkel
739cffd918 Remove half the ghostdag store calls in LowestChainBlockAboveOrEqualToBlueScore (#1323) 2020-12-30 13:53:46 +02:00
Elichai Turkel
bd89ca2125 Log only the number of tips, and inspect the ReachabilityData error instead of calling HasReachabilityData (#1321)
* Print the number of tips instead of the tips themselves

* Inspect ReachabilityData error instead of calling HasReachabilityData
2020-12-30 13:02:34 +02:00
Svarog
d917a1fc1e Remove the block delay from kaspaminer (#1320) 2020-12-30 12:37:23 +02:00
Svarog
9b12b9c58a Make ReachabilityData a read-only interface with a writable variant, to prevent cloning (#1316)
* Rename reachabilityManager.data to dataForInsertion, and use it only during insertions

* Make reachabilityData an interface

* Fix db serialization of reachability data

* Fix reachabilityDataStore

* Fix all tests

* Cleanup debugging code

* Fix insertToFutureCoveringSet

* Add comments

* Rename to ReachabilityData and MutableReachabilityData
2020-12-30 09:48:38 +02:00
Elichai Turkel
533fa8c00e Change virtual parent selection to allow faster branch merges in the network (#1315)
* If there are more candidates than the max, choose the half with the highest blueWork and the half with the lowest

* Add a test for the GhostDAG sorter

* Add a test for pick virtual parents

* Fix review nits
2020-12-29 21:42:31 +02:00
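A sketch of the trimming rule #1315 describes, assuming the candidates are already sorted by blueWork in descending order: when there are more candidates than the parent limit, keep half from the top and half from the bottom of the ordering, so the virtual keeps merging both heavy and light branches.

```go
package virtualparents

// trimParentCandidates keeps maxParents candidates: half with the highest
// blueWork and half with the lowest. sortedCandidates must be sorted by
// blueWork, descending.
func trimParentCandidates(sortedCandidates [][32]byte, maxParents int) [][32]byte {
	if len(sortedCandidates) <= maxParents {
		return sortedCandidates
	}
	highCount := maxParents / 2
	lowCount := maxParents - highCount
	trimmed := make([][32]byte, 0, maxParents)
	trimmed = append(trimmed, sortedCandidates[:highCount]...)                      // highest blueWork
	trimmed = append(trimmed, sortedCandidates[len(sortedCandidates)-lowCount:]...) // lowest blueWork
	return trimmed
}
```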
Svarog
0f93189c16 Don't print the whole UTXODiff to log, it might be quite huge (#1318) 2020-12-29 17:16:38 +02:00
Elichai Turkel
52427cb953 Reduce the amount of calls to FinalityPoint() (#1317) 2020-12-29 17:08:06 +02:00
talelbaz
d72f70fabe Add new DAGs for GHOSTDAG tests (formatted as JSON files). (#1187)
* Add new DAGs for ghostdag tests.

* Change the error message for the number of tests (6 instead of 3).

Co-authored-by: tal <tal@daglabs.com>
2020-12-29 14:10:38 +02:00
Ori Newman
49b6cc6038 Add mutable and immutable header interfaces (#1305)
* Add mutable and immutable header interfaces

* Fix ShouldMine()

* Remove false comment

* Fix Equal signature

* Fix Equal implementation
2020-12-29 13:55:17 +02:00
Elichai Turkel
c10a087696 Remove virtualDiffParents that aren't in V.Past or in P.Future (#1310) 2020-12-29 12:07:45 +02:00
Ori Newman
02d5fb29cf Fix notifyVirtualSelectedParentBlueScoreChanged to show the selected tip blue score instead of the virtual's (#1309)
* Fix notifyVirtualSelectedParentBlueScoreChanged to show the selected tip blue score instead of the virtual's

* Fix ShouldMine() to fetch selected tip header
2020-12-29 12:07:05 +02:00
stasatdaglabs
48278bd1c0 Slightly improve the performance of antiPastHashesBetween. (#1312) 2020-12-29 10:36:18 +02:00
Ori Newman
d91afbfe3b Change most Tracef to Debugf (#1302)
* Change most Tracef to Debugf

* Remove diff from log
2020-12-29 10:32:28 +02:00
Ori Newman
5f22632836 Use sync rate for getBlockTemplate's isSynced (#1311)
* Use sync rate for getBlockTemplate's isSynced

* Fix a typo

Co-authored-by: Mike Zak <feanorr@gmail.com>
2020-12-29 09:28:02 +02:00
stasatdaglabs
4aafe8a630 Fix a crash caused by orphans whose validation failed. (#1297) 2020-12-29 09:02:28 +02:00
Elichai Turkel
7e379028f3 Log the time it takes to delete blocks and save the utxo set for pruning point (#1307) 2020-12-28 16:22:00 +02:00
Elichai Turkel
af1b8c8490 Move version initialization to init function to prevent race conditions (#1299) 2020-12-28 16:04:00 +02:00
Ori Newman
c7c8b25c09 Set stream max message size and increase the max message size to 1GB (#1300) 2020-12-28 12:53:11 +02:00
stasatdaglabs
b0251fe1a6 Add missing lock to IsValidPruningPoint. (#1296) 2020-12-28 10:08:36 +02:00
Ori Newman
cfe013eca7 Add IsHeaderOnly field to BlockVerboseData (#1295) 2020-12-27 18:23:51 +02:00
stasatdaglabs
50e74bf412 Add BlueScore to BlockVerboseData. (#1294) 2020-12-27 17:57:43 +02:00
stasatdaglabs
12f1c3dfab Fix a crash in GetMissingBlockBodyHashes (#1289)
* Remove the limit on the amount of hashes returned from antiPastHashesBetween.

* Guard against requests with a non-existing block hash.

* Move missing-block-hash guards to consensus.go.

* Ban a peer that doesn't send us all the requested headers during IBD.

* Extract blockHeap.ToSlice.

* Re-request headers in requestHeaders if we didn't receive the highHash.
2020-12-27 17:03:21 +02:00
Ori Newman
a231ec7214 Make only one transaction in validateAndInsertBlock (#1292) 2020-12-27 16:37:09 +02:00
Ori Newman
8aecf961bc Red inclusion (#1275)
* Accept red blocks' transactions

* Add comments to TestTransactionAcceptance

* Fix tests

* Remove fetchUTXOSetIfMissing

* Remove redundant dependency

* Fix comments
2020-12-24 18:12:46 +02:00
stasatdaglabs
0dea766373 Fix the stopOldTemplateSolving channel in the miner getting closed twice (#1286)
* Fix the stopOldTemplateSolving channel in the miner getting closed twice.

* Remove an unused variable.
2020-12-24 17:58:28 +02:00
Ori Newman
830ddf4735 Fill testConsensus's dagParams (#1283) 2020-12-24 17:47:03 +02:00
stasatdaglabs
9d0f513e49 Implement a simple mechanism to stop a miner from mining while kaspad is not synced (#1284)
* Reintroduce isSynced into the GetBlockTemplate response.

* Add a warning for when kaspad is not synced.

* Rephrase a log.
2020-12-24 17:15:44 +02:00
stasatdaglabs
bd97075e07 Remove the limit to the returned headers from buildMsgBlockHeaders (#1281)
* Remove the limit to the returned header hashes from buildMsgBlockHeaders.

* Build msgBlockHeaders in batches rather than all at once.
2020-12-24 16:53:26 +02:00
Svarog
05941a76e7 Make DomainHash and TransactionID read-only structs (#1282)
* Increase size of reachability cache

* Change DomainHash to struct with unexported hashArray

* Fixing compilation errors stemming from new DomainHash structure

* Remove obsolete Read/WriteElement methods in appmessage

* Fix all tests

* Fix all tests

* Add comments

* A few renamings

* go mod tidy
2020-12-24 16:15:23 +02:00
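For context, a sketch of the read-only shape #1282 gives DomainHash: the byte array is unexported, so code outside the package can construct and compare hashes but never mutate one in place. Constructor and accessor names here are assumptions.

```go
package externalapi

// DomainHash wraps an unexported byte array, making the hash immutable from
// outside the package.
type DomainHash struct {
	hashArray [32]byte
}

func NewDomainHashFromByteArray(hashBytes *[32]byte) *DomainHash {
	return &DomainHash{hashArray: *hashBytes}
}

// ByteArray returns a copy, keeping the internal array immutable.
func (hash *DomainHash) ByteArray() *[32]byte {
	arrayClone := hash.hashArray
	return &arrayClone
}

func (hash *DomainHash) Equal(other *DomainHash) bool {
	if hash == nil || other == nil {
		return hash == other // equal only if both are nil
	}
	return hash.hashArray == other.hashArray
}
```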
stasatdaglabs
7cbda3b018 Fix RPC requests with unknown payloads crashing kaspad (#1203)
* [NOD-1596] Return an error on an unknown field.

* [NOD-1596] Don't use unknownFields to check whether a message is invalid.
2020-12-24 15:17:34 +02:00
Ori Newman
a0b93e1230 Fix deletePastBlocks (#1280) 2020-12-24 13:53:16 +02:00
stasatdaglabs
b749b2db0b Fix transaction relay not working (#1279)
* Implement HandleGetMempoolEntry.

* Fix equality bug in handleRelayedTransactionsFlow.
2020-12-24 10:40:56 +02:00
Svarog
717914319a Increase size of reachability and block-relations cache (#1272)
* Increase size of reachability cache

* Increase cache size for BlockRelationStore
2020-12-24 10:27:05 +02:00
stasatdaglabs
6ef8eaf133 Fix AddBlock not returning validation failure errors (#1268)
* Fix AddBlock not returning validation failure errors.

* Elevate a log from Info to Warning.

* Elevate a log from Info to Warning.
2020-12-23 15:08:02 +02:00
Elichai Turkel
273c271771 Remove hash reversal (#1270)
* Remove hash reversing

* Move toBig to pow.go

* Update some tests
2020-12-23 12:42:37 +02:00
Ori Newman
729e3db145 Pruning calculation changes (#1250)
* 1) Calculate pruning point incrementally
2) Add IsValidPruningPoint to pruning manager and consensus
3) Use reachability children for selected child iterator

* Add request IBD root hash flow

* Fix UpdatePruningPointByVirtual and IsValidPruningPoint

* Regenerate messages.pb.go

* Make the pruning point the earliest chain block with finality interval higher than the previous pruning point

* Fix merge errors
2020-12-23 11:37:39 +02:00
Elichai Turkel
43c00f5e7f Remove the sorting requirement from BlueWindow (#1266)
* Remove the requirement for sorting in BlueWindow

* Sort the BlueWindow in window_test
2020-12-23 10:00:14 +02:00
stasatdaglabs
90d4dbcba1 Implement a simple CLI wallet (#1261)
* Copy over the CLI wallet from Kasparov.

* Fix trivial compilation errors.

* Reimplement the balance command.

* Extract isUTXOSpendable to a separate function.

* Reimplement the send command.

* Fix bad transaction ID parsing.

* Add a missing newline in a log.

* Don't use msgTx in send().

* Fix isUTXOSpendable not checking whether a UTXO is of a coinbase transaction.

* Add --devnet, --testnet, etc. to command line flags.

* In `create`, only print the public key of the active network.

* Use coinbase maturity in isUTXOSpendable.

* Add a readme.

* Fix formatting in readme.
2020-12-23 09:41:48 +02:00
Ori Newman
cb9d7e313d Implement Clone and Equal for all model types (#1155)
* [NOD-1575] Implement Clone and Equal for all model types

* [NOD-1575] Add assertion for transaction ID equality

* [NOD-1575] Use DomainTransaction.Equal to compare to expected coinbase transaction

* [NOD-1575] Add TestDomainBlockHeader_Clone

* [NOD-1575] Don't clone nil values

* [NOD-1575] Add type assertions

* [NOD-1575] Don't clone nil values

* [NOD-1575] Add missing Equals

* [NOD-1575] Add length checks

* [NOD-1575] Update comment

* [NOD-1575] Check length for TransactionAcceptanceData

* [NOD-1575] Explicitly clone nils where needed

* [NOD-1575] Clone tx id

* [NOD-1575] Flip condition

* Nod 1576 make coverage tests for equal clone inside model externalapi (#1177)

* [NOD-1576] Make coverage tests for equal and clone inside model and externalapi

* Some formatting and naming fixes

* Made transactionToCompare type exported

* Added some tests and made some changes to the tests code

* No changes made

* Some formatting and naming changes made

* Made better test coverage for externalapi clone and equal functions

* Changed expected result for two cases

* Added equal and clone functions tests for ghostdag and utxodiff

* Added tests

* [NOD-1576] Implement reachabilitydata equal/clone unit tests

* [NOD-1576]  Full coverage of reachabilitydata equal/clone unit tests

* Handle panics in transaction_equal_clone_test.go and fix formatting of utxodiff_equal_clone_test.go

* Added recoverForEqual2 for handling panic to transaction_equal_clone_test.go

* [NOD-1576]  Full coverage of transaction equal unit test

* [NOD-1576] Add expects panic

* [NOD-1576] Allow composites in go vet

* [NOD-1576] Code review fixes (#1223)

* [NOD-1576] Code review fixes

* [NOD-1576] Code review fixes part 2

* [NOD-1576] Fix wrong name

Co-authored-by: karim1king <karimkaspersky@yahoo.com>
Co-authored-by: Ori Newman <orinewman1@gmail.com>
Co-authored-by: Karim <karim1king@users.noreply.github.com>

* Fix merge errors

* Use Equal where possible

* Use Equal where possible

* Use Equal where possible

Co-authored-by: andrey-hash <74914043+andrey-hash@users.noreply.github.com>
Co-authored-by: karim1king <karimkaspersky@yahoo.com>
Co-authored-by: Karim <karim1king@users.noreply.github.com>
2020-12-22 17:38:54 +02:00
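A minimal sketch of the Clone/Equal pattern this PR rolls out across the model types, shown on a hypothetical two-field type; note the explicit nil handling the commit messages call out ("Don't clone nil values").

```go
package model

type DomainOutpoint struct {
	TransactionID [32]byte
	Index         uint32
}

// Clone returns a deep copy of the outpoint.
func (op *DomainOutpoint) Clone() *DomainOutpoint {
	if op == nil {
		return nil // don't clone nil values
	}
	clone := *op // all fields are plain values, so a shallow copy suffices here
	return &clone
}

// Equal reports whether two outpoints are identical, treating nil carefully.
func (op *DomainOutpoint) Equal(other *DomainOutpoint) bool {
	if op == nil || other == nil {
		return op == other // nil equals only nil
	}
	return *op == *other
}
```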
stasatdaglabs
c2cec2f170 Fix the Sequence field in transaction inputs always getting deserialized to MaxUint64. (#1258) 2020-12-22 16:29:38 +02:00
Svarog
e7edfaceb7 Remove redundant rule errors (#1256)
* Remove ErrTimeTooNew and rename ErrBlockIsTooMuchInTheFuture to ErrTimeTooMuchInTheFuture

* Remove ErrBlockMassTooHigh

* Remove ErrHighHash

* Remove ErrInvalidSubnetwork + some cleanup around subnetwork validation

* Remove ErrTxMassTooHigh

* Remove ErrBadTxInput

* Remove ErrOverwriteTx

* Remove ErrTooManySigOps

* Remove ErrParentBlockUnknown

* Remove ErrParentBlockIsNotCurrentTips

* Remove ErrWithDiff

* Remove ErrFinality

* Remove ErrDelayedBlockIsNotAllowed + ErrOrphanBlockIsNotAllowed

* Remove ErrSelectedParentDisqualifiedFromChain

* Remove ErrBuildInTransactionHasGas

* Remove ErrBadFees
2020-12-22 14:05:21 +02:00
FestinaLente666
5632bee49d comments on default constants (#1253)
* comments on default constants

* more comments on default constants

* more comments on default constants

* more comments on default constants

* gofmt

* small typos
2020-12-22 11:44:32 +02:00
stasatdaglabs
21a459c0f4 Implement virtual selected parent chain RPC methods (#1249)
* [NOD-1579] Rename AcceptedTxIDs to AcceptedTransactionIDs.

* [NOD-1579] Add InsertBlockResult to ValidateAndInsertBlock results.

* [NOD-1593] Rename InsertBlockResult to BlockInsertionResult.

* [NOD-1593] Add SelectedParentChainChanges to AddBlockToVirtual's result.

* [NOD-1593] Implement findSelectedParentChainChanges.

* [NOD-1593] Implement TestFindSelectedParentChainChanges.

* [NOD-1593] Fix a string.

* [NOD-1593] Finish implementing TestFindSelectedParentChainChanges.

* [NOD-1593] Fix merge errors.

* [NOD-1597] Begin implementing UTXOIndex.

* [NOD-1597] Connect UTXOIndex to RPC.

* [NOD-1597] Connect Consensus to UTXOIndex.

* [NOD-1597] Add AcceptanceData to BlockInfo.

* [NOD-1597] Implement UTXOIndex.Update().

* [NOD-1597] Implement add(), remove(), and discard() in utxoIndexStore.

* [NOD-1597] Add error cases to add() and remove().

* [NOD-1597] Add special cases to add() and remove().

* [NOD-1597] Implement commit.

* [NOD-1597] Add a mutex around UTXOIndex.Update().

* [NOD-1597] Return changes to the UTXO from Update().

* [NOD-1597] Add NotifyUTXOsChangedRequestMessage and related structs.

* [NOD-1597] Implement HandleNotifyUTXOsChanged.

* [NOD-1597] Begin implementing TestUTXOIndex.

* [NOD-1597] Implement RegisterForUTXOsChangedNotifications.

* [NOD-1597] Fix bad transaction.ID usage.

* [NOD-1597] Implement convertUTXOChangesToUTXOsChangedNotification.

* [NOD-1597] Make UTXOsChangedNotificationMessage.Removed UTXOsByAddressesEntry instead of just RPCOutpoint, so that the client can discern for which address the UTXO was removed.

* [NOD-1597] Collect outpoints in TestUTXOIndex.

* [NOD-1597] Rename RPC stuff.

* [NOD-1597] Add messages for GetUTXOsByAddresses.

* [NOD-1597] Implement HandleGetUTXOsByAddresses.

* [NOD-1597] Implement GetUTXOsByAddresses.

* [NOD-1597] Implement UTXOs().

* [NOD-1597] Implement getUTXOOutpointEntryPairs().

* [NOD-1597] Expand TestUTXOIndex.

* [NOD-1597] Convert SubmitTransaction to use RPCTransaction instead of MsgTx.

* [NOD-1597] Finish implementing TestUTXOIndex.

* [NOD-1597] Add messages for GetVirtualSelectedParentBlueScore.

* [NOD-1597] Implement HandleGetVirtualSelectedParentBlueScore and GetVirtualSelectedParentBlueScore.

* [NOD-1597] Implement TestVirtualSelectedParentBlueScore.

* [NOD-1597] Implement NotifyVirtualSelectedParentBlueScoreChanged.

* [NOD-1597] Expand TestVirtualSelectedParentBlueScore.

* [NOD-1597] Implement notifyVirtualSelectedParentBlueScoreChanged.

* [NOD-1597] Make go lint happy.

* [NOD-1593] Fix merge errors.

* [NOD-1593] Rename findSelectedParentChainChanges to calculateSelectedParentChainChanges.

* [NOD-1593] Expand TestCalculateSelectedParentChainChanges.

* [NOD-1597] Add logs to utxoindex.go.

* [NOD-1597] Add logs to utxoindex/store.go.

* [NOD-1597] Add logs to RPCManager.NotifyXXX functions.

* Implement notifySelectedParentChainChanged.

* Implement TestSelectedParentChain.

* Rename NotifyChainChanged to NotifyVirtualSelectedParentChainChanged.

* Rename GetChainFromBlock to GetVirtualSelectedParentChainFromBlock.

* Remove AcceptanceIndex from the config.

* Implement HandleGetVirtualSelectedParentChainFromBlock.

* Expand TestVirtualSelectedParentChain.

* Fix merge errors.

* Add a comment.

* Move a comment.
2020-12-21 14:43:32 +02:00
Elichai Turkel
45edacfbfa Replace Double-SHA256 with blake2b and implement domain separation (#1245)
* Replace the default hasher (Double-SHA256) with domain-separated blake2b

* Replace all hashes with domain-separated blake2b

* Update the genesis blocks

* Replace OP_HASH256 with OP_BLAKE2B

* Fix the merkle tree by appending zeros instead of duplicating the hash when there is 1 branch left

* Update tests

* Add a payloadHash function

* Update gitignore to ignore binaries

* Fix a bug in the blake2b opcode
2020-12-21 12:51:45 +02:00
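A sketch of domain-separated hashing in the spirit of #1245: each hash kind gets its own keyed blake2b-256 instance, so hashing the same bytes under different domains can never collide. The domain strings below are illustrative assumptions, not kaspad's actual keys.

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/blake2b"
)

// domainHash computes a keyed blake2b-256 digest, using the key as the
// domain separator.
func domainHash(domain string, data []byte) [32]byte {
	hasher, err := blake2b.New256([]byte(domain))
	if err != nil {
		panic(err) // New256 fails only for keys longer than 64 bytes
	}
	hasher.Write(data)
	var digest [32]byte
	copy(digest[:], hasher.Sum(nil))
	return digest
}

func main() {
	payload := []byte("same bytes, different domains")
	fmt.Printf("%x\n", domainHash("BlockHash", payload))
	fmt.Printf("%x\n", domainHash("TransactionHash", payload)) // differs from the above
}
```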
Svarog
9f8f0fd747 Added safeguard against running TestDifficulty with a fresh genesis block (#1251) 2020-12-21 11:30:43 +02:00
stasatdaglabs
053bb351b5 [NOD-1597] Implement a UTXO index (#1221)
* [NOD-1579] Rename AcceptedTxIDs to AcceptedTransactionIDs.

* [NOD-1579] Add InsertBlockResult to ValidateAndInsertBlock results.

* [NOD-1593] Rename InsertBlockResult to BlockInsertionResult.

* [NOD-1593] Add SelectedParentChainChanges to AddBlockToVirtual's result.

* [NOD-1593] Implement findSelectedParentChainChanges.

* [NOD-1593] Implement TestFindSelectedParentChainChanges.

* [NOD-1593] Fix a string.

* [NOD-1593] Finish implementing TestFindSelectedParentChainChanges.

* [NOD-1593] Fix merge errors.

* [NOD-1597] Begin implementing UTXOIndex.

* [NOD-1597] Connect UTXOIndex to RPC.

* [NOD-1597] Connect Consensus to UTXOIndex.

* [NOD-1597] Add AcceptanceData to BlockInfo.

* [NOD-1597] Implement UTXOIndex.Update().

* [NOD-1597] Implement add(), remove(), and discard() in utxoIndexStore.

* [NOD-1597] Add error cases to add() and remove().

* [NOD-1597] Add special cases to add() and remove().

* [NOD-1597] Implement commit.

* [NOD-1597] Add a mutex around UTXOIndex.Update().

* [NOD-1597] Return changes to the UTXO from Update().

* [NOD-1597] Add NotifyUTXOsChangedRequestMessage and related structs.

* [NOD-1597] Implement HandleNotifyUTXOsChanged.

* [NOD-1597] Begin implementing TestUTXOIndex.

* [NOD-1597] Implement RegisterForUTXOsChangedNotifications.

* [NOD-1597] Fix bad transaction.ID usage.

* [NOD-1597] Implement convertUTXOChangesToUTXOsChangedNotification.

* [NOD-1597] Make UTXOsChangedNotificationMessage.Removed UTXOsByAddressesEntry instead of just RPCOutpoint, so that the client can discern for which address the UTXO was removed.

* [NOD-1597] Collect outpoints in TestUTXOIndex.

* [NOD-1597] Rename RPC stuff.

* [NOD-1597] Add messages for GetUTXOsByAddresses.

* [NOD-1597] Implement HandleGetUTXOsByAddresses.

* [NOD-1597] Implement GetUTXOsByAddresses.

* [NOD-1597] Implement UTXOs().

* [NOD-1597] Implement getUTXOOutpointEntryPairs().

* [NOD-1597] Expand TestUTXOIndex.

* [NOD-1597] Convert SubmitTransaction to use RPCTransaction instead of MsgTx.

* [NOD-1597] Finish implementing TestUTXOIndex.

* [NOD-1597] Add messages for GetVirtualSelectedParentBlueScore.

* [NOD-1597] Implement HandleGetVirtualSelectedParentBlueScore and GetVirtualSelectedParentBlueScore.

* [NOD-1597] Implement TestVirtualSelectedParentBlueScore.

* [NOD-1597] Implement NotifyVirtualSelectedParentBlueScoreChanged.

* [NOD-1597] Expand TestVirtualSelectedParentBlueScore.

* [NOD-1597] Implement notifyVirtualSelectedParentBlueScoreChanged.

* [NOD-1597] Make go lint happy.

* [NOD-1593] Fix merge errors.

* [NOD-1593] Rename findSelectedParentChainChanges to calculateSelectedParentChainChanges.

* [NOD-1593] Expand TestCalculateSelectedParentChainChanges.

* [NOD-1597] Add logs to utxoindex.go.

* [NOD-1597] Add logs to utxoindex/store.go.

* [NOD-1597] Add logs to RPCManager.NotifyXXX functions.

* [NOD-1597] Ignore transactions that aren't accepted.

* [NOD-1597] Use GetBlockAcceptanceData instead of GetBlockInfo.

* [NOD-1597] Convert scriptPublicKey to string directly, instead of using hex.

* [NOD-1597] Add a comment.

* [NOD-1597] Guard against calling utxoindex methods when utxoindex is turned off.

* [NOD-1597] Add lock to UTXOs.

* [NOD-1597] Guard against calls to getUTXOOutpointEntryPairs when staging isn't empty.
2020-12-20 17:24:56 +02:00
stasatdaglabs
843edc4ba5 Limit the orphan collection (#1238)
* Limit the orphan collection.

* Fix grammar in a comment.

* Fix a bad log.
2020-12-20 11:20:51 +02:00
Ori Newman
bd5f4e8c6a Pruning fixes (#1243)
* Pruning related fixes

* Rename setBlockStatus->setBlockStatusAfterBlockValidation

* Rename StatusValid->StatusUTXOValid

* Add comment

* Fix typo

* Rename hasValidatedOnlyHeader->hasValidatedHeader

* Rename checkBlockBodiesExist->checkParentBlockBodiesExist

* Add comments and logs

* Adding logs

* Add logs and assert

* Add comment

* Fix typo

* Fix log
2020-12-20 09:38:34 +02:00
Elichai Turkel
6b1e691a57 Add GitHub actions in preparation for deprecating Jenkins (#1164)
* Add a test script

* add gh action for build and test

* added all the tests

* Change github workflow to use the new test script

* Change the docker file to use the new test script

* Add doc comment for ProtocolError.Unwrap()

* Use another github action to increase windows page size

* Run the action after any edit to the PR metadata/base

* Change go version from 1.15 to 1.14

* Rename test.sh to build_and_test.sh

Co-authored-by: Isabella Liu <isabellaliu77@gmail.com>
2020-12-17 15:48:55 +02:00
Ori Newman
bf67c6351e Add TestPruning (#1222)
* Add TestPruning

* Add missing argument to teardown

* Add missing return value to AddBlock
2020-12-16 14:49:55 +02:00
Mike Zak
99a14c5999 Update to version 0.8.4 2020-12-16 14:00:09 +02:00
Ori Newman
b510fc08a7 Change PoW error (#1234)
* Use proper error for invalid PoW

* Add comment
2020-12-16 13:33:10 +02:00
Ori Newman
dc3ae4d3ac Use pointer receivers when needed (#1237) 2020-12-16 12:50:17 +02:00
Svarog
1ebda36b17 Remove IsAwaitingUTXOSet from validateAndInsertBlock log, to prevent long operation (#1235) 2020-12-16 11:33:48 +02:00
Ori Newman
12379bedb6 Fix UTXO serialization errors (#1233) 2020-12-16 11:27:43 +02:00
stasatdaglabs
f90d7d796a [NOD-1593] Return SelectedParentChainChanged from ValidateAndInsertBlock (#1202)
* [NOD-1579] Rename AcceptedTxIDs to AcceptedTransactionIDs.

* [NOD-1579] Add InsertBlockResult to ValidateAndInsertBlock results.

* [NOD-1593] Rename InsertBlockResult to BlockInsertionResult.

* [NOD-1593] Add SelectedParentChainChanges to AddBlockToVirtual's result.

* [NOD-1593] Implement findSelectedParentChainChanges.

* [NOD-1593] Implement TestFindSelectedParentChainChanges.

* [NOD-1593] Fix a string.

* [NOD-1593] Finish implementing TestFindSelectedParentChainChanges.

* [NOD-1593] Fix merge errors.

* [NOD-1593] Fix merge errors.

* [NOD-1593] Rename findSelectedParentChainChanges to calculateSelectedParentChainChanges.

* [NOD-1593] Expand TestCalculateSelectedParentChainChanges.
2020-12-15 11:37:52 +02:00
Ori Newman
fddda46d4f Fix infinite loop on antiPastHashesBetween (#1226)
* Fix infinite loop on antiPastHashesBetween

* Get rid of highBlockBlueScore and lowBlockBlueScore
2020-12-15 10:37:35 +02:00
Svarog
77adb6c99f Make consensus.databaseContext a DBManager and allow keeping data dir in TestConsensus
* Make consensus.databaseContext a DBManager

* Allow keeping data dir in TestConsensus
2020-12-15 10:11:14 +02:00
Ori Newman
48e1a2c396 New headers first flow (#1211)
* Get rid of insertMode

* Rename AddBlockToVirtual->AddBlock

* When F is not in the future of P, enforce finality with P and not with F.

* Don't allow blocks with invalid parents or with missing block body

* Check finality violation before checking block status

* Implement CalculateIndependentPruningPoint

* Move checkBlockStatus to validateBlock

* Add ValidateBlock to block processor interface

* Adjust SetPruningPoint to the new IBD flow

* Add pruning store to CSM's constructor

* Flip wrong condition on AddHeaderTip

* Fix func (hts *headerSelectedTipStore) Has

* Fix block stage order

* Call to ValidateBodyInContext from validatePostProofOfWork

* Enable overrideDAGParams

* Update log

* Rename SetPruningPoint to ValidateAndInsertPruningPoint and move most of its logic inside block processor

* Rename hasValidatedHeader->hasValidatedOnlyHeader

* Fix typo

* Name return values for fetchMissingUTXOSet

* Add comment

* Return ErrMissingParents when block body is missing

* Add logs and comments

* Fix merge error

* Fix pruning point calculation to be by virtual selected parent

* Replace CalculateIndependentPruningPoint to CalculatePruningPointByHeaderSelectedTip

* Fix isAwaitingUTXOSet to check pruning point by headers

* Change isAwaitingUTXOSet indication

* Remove IsBlockInHeaderPruningPointFuture from BlockInfo

* Fix LowestChainBlockAboveOrEqualToBlueScore

* Add validateNewPruningPointTransactions

* Add validateNewPruningAgainstPastUTXO

* Rename set_pruning_utxo_set.go to update_pruning_utxo_set.go

* Check missing block body hashes by missing block instead of status

* Validate pruning point against past UTXO with the pruning point as block hash

* Remove virtualHeaderHash

* Fix comment

* Fix imports
2020-12-14 17:53:08 +02:00
oudeis
6926a7ab81 Update to version 0.8.3 2020-12-14 12:38:03 +00:00
Svarog
a9e0c33e5c Lower minimum difficulty for mainnet and testnet (#1220) 2020-12-14 12:39:54 +02:00
Svarog
2c1688909d Move TestNet to use GRPCSeeds by default (#1217) 2020-12-14 09:09:18 +02:00
Svarog
6714e084e9 Small fix in proof-of-work log (#1205) 2020-12-09 18:31:36 +02:00
Elichai Turkel
3354ac67c8 Parallelize all the tests in ForAllNets (#1199) 2020-12-09 17:43:36 +02:00
Svarog
0d8f7bba40 [NOD-1595] Implement all fields of GetBlockDAGInfo (#1200)
* [NOD-1595] Implement all fields of GetBlockDAGInfo

* [NOD-1595]

* [NOD-1595] Don't swallow errors in GetDifficultyRatio

* [NOD-1595] Change roundingPrecision in GetDifficultyRatio to 2 decimal places
2020-12-09 12:14:15 +02:00
Svarog
e04f76b800 [NOD-1594] Add HeaderCount to GetBlockCount rpc call (#1197) 2020-12-08 19:11:35 +02:00
Elichai Turkel
82fa8e6831 Abstract CheckProofOfWork as a public function and change PoW structure (#1198)
* Expose CheckProofOfWork from model/pow

* Update blockvalidator to call the new CheckProofOfWork

* Update genesis blocks

* Update tools to use the new CheckProofOfWork

* Update tests with new PoW
2020-12-08 17:17:30 +02:00
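A hedged sketch of the shape of a CheckProofOfWork-style function: interpret the block hash as a big integer and require it to be at or below the target decoded from the header's compact difficulty bits. Decoding the compact form is left out here; kaspad keeps an equivalent in its difficulty utilities.

```go
package pow

import "math/big"

// checkProofOfWork treats the hash bytes as a big-endian integer and
// compares it against the difficulty target.
func checkProofOfWork(blockHash [32]byte, target *big.Int) bool {
	hashAsInt := new(big.Int).SetBytes(blockHash[:])
	return hashAsInt.Cmp(target) <= 0 // valid iff hash <= target
}
```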
Svarog
b7ca3f4461 [NOD-1590] Implement optimized finalityPoint calculation mechanism (#1190)
* [NOD-1590] Moved all finality logic to FinalityManager

* [NOD-1590] Add finality store

* [NOD-1590] Implement optimized finalityPoint calculation mechanism

* [NOD-1590] Add comments

* [NOD-1590] Add finalityStore to consensus object, and TestConsensus

* [NOD-1590] Added logs to finalityPoint calculation
2020-12-08 10:26:39 +02:00
Svarog
37bf261da1 [NOD-1581] Enable fsync in database writes (#1168) 2020-12-07 14:18:33 +02:00
Ori Newman
d90e18ec51 Fix v0.8.2-dev build (#1189) 2020-12-07 12:39:40 +02:00
Elichai Turkel
9962527793 Replace BlueWindow implementation to accommodate a better DAA scheme (#1179)
* Change DifficultyAdjustmentWindowSize and TimestampDeviationTolerance from uint64 to int

* refactor block_heap for readability and usage

* Add a new SizedUpHeap

* Refactor BlueWindow with the new DAA

* Update TestBlueBlockWindow with the new DAA window

* Address review-requested changes
2020-12-06 18:42:49 +02:00
talelbaz
78550d3639 Add "checkDelayedBlock", which checks whether the block timestamp is in the future. (#1183)
* [#1056] Add "checkDelayedBlock", which checks whether the block timestamp is in the future.
Add 2 new fields to the blockValidator struct.
Add the new rule error "ErrDelayedBlock".

* [#1056] Replace "ErrDelayedBlock" with "ErrBlockIsTooMuchInTheFuture".

* [#1056] Replace "checkDelayedBlock" with "checkBlockTimeStampInIsolation".

* [#1056] Cosmetic changes: timeStamp -> timestamp.

* Merge remote-tracking branch 'origin/v0.8.2-dev' into droppedDelayedBlock

# Conflicts:
#	domain/consensus/factory.go
#	domain/consensus/processes/blockvalidator/block_header_in_isolation.go
#	domain/consensus/processes/blockvalidator/blockvalidator.go

Co-authored-by: tal <tal@daglabs.com>
2020-12-06 18:41:07 +02:00
stasatdaglabs
7f899b0d09 [NOD-1579] Improve the IBD mechanism (#1174)
* [NOD-1579] Remove selected tip hash messages.

* [NOD-1579] Start moving IBD stuff into blockrelay.

* [NOD-1579] Rename relaytransactions to transactionrelay.

* [NOD-1579] Move IBD files into blockrelay.

* [NOD-1579] Remove flow stuff from ibd.go.

* [NOD-1579] Bring back IsInIBD().

* [NOD-1579] Simplify block relay flow.

* [NOD-1579] Check orphan pool for missing parents to avoid unnecessary processing.

* [NOD-1579] Implement processOrphan.

* [NOD-1579] Implement addToOrphanSetAndRequestMissingParents.

* [NOD-1579] Fix TestIBD.

* [NOD-1579] Implement isBlockInOrphanResolutionRange.

* [NOD-1579] Implement limited block locators.

* [NOD-1579] Add some comments.

* [NOD-1579] Specifically check for StatusHeaderOnly in blockrelay.

* [NOD-1579] Simplify runIBDIfNotRunning.

* [NOD-1579] Don't run IBD if it is already running.

* [NOD-1579] Fix a comment.

* [NOD-1579] Rename mode to syncInfo.

* [NOD-1579] Simplify validateAndInsertBlock.

* [NOD-1579] Fix bad SyncStateSynced condition.

* [NOD-1579] Implement validateAgainstSyncStateAndResolveInsertMode.

* [NOD-1579] Use insertModeHeader.

* [NOD-1579] Add logs to TrySetIBDRunning and UnsetIBDRunning.

* [NOD-1579] Implement and use dequeueIncomingMessageAndSkipInvs.

* [NOD-1579] Fix a log.

* [NOD-1579] Fix a bug in createBlockLocator.

* [NOD-1579] Rename a variable.

* [NOD-1579] Fix a slew of bugs in missingBlockBodyHashes and selectedChildIterator.

* [NOD-1579] Fix bad chunk size in syncMissingBlockBodies.

* [NOD-1579] Remove maxOrphanBlueScoreDiff.

* [NOD-1579] Fix merge errors.

* [NOD-1579] Remove a debug log.

* [NOD-1579] Add logs.

* [NOD-1579] Make various go quality tools happy.

* [NOD-1579] Fix a typo in a variable name.

* [NOD-1579] Fix full blocks over header-only blocks not failing the missing-parents validation.

* [NOD-1579] Add an error log about a condition that should never happen.

* [NOD-1579] Check all antiPast hashes instead of just the lowHash's anticone to filter for header-only blocks.

* [NOD-1579] Remove the nil stuff from GetBlockLocator.

* [NOD-1579] Remove superfluous condition in handleRelayInvsFlow.start().

* [NOD-1579] Return a boolean from requestBlock instead of comparing to nil.

* [NOD-1579] Fix a bad log.Debugf.

* [NOD-1579] Remove a redundant check.

* [NOD-1579] Change an info log to a warning log.

* [NOD-1579] Move OnNewBlock out of relayBlock.

* [NOD-1579] Remove redundant exists check from runIBDIfNotRunning.

* [NOD-1579] Fix bad call to OnNewBlock.

* [NOD-1579] Remove an impossible check.

* [NOD-1579] Added a log.

* [NOD-1579] Rename insertModeBlockWithoutUpdatingVirtual to insertModeBlockBody.

* [NOD-1579] Add a check for duplicate headers.

* [NOD-1579] Added a comment.

* [NOD-1579] Tighten a stop condition.

* [NOD-1579] Simplify a log.

* [NOD-1579] Clarify a log.

* [NOD-1579] Move a log.
2020-12-06 16:23:56 +02:00
Svarog
4886425caf [NOD-1589] Re-enable DisableDifficultyAdjustment (#1182)
* [NOD-1589] Re-enable DisableDifficultyAdjustment

* [NOD-1589] Remove simnet from TestDifficulty

* [NOD-1589] Update comment
2020-12-06 16:02:48 +02:00
Elichai Turkel
c3902ed7a8 Replace blue score with blue work in ghostdag (#1172)
* Replace blueScore with blueWork in ghostDAG SelectedParent selection

* Add blueWork to protobuf ghostdag data

* Auto generate protobuf go code

* Serialize/Deserialize blueWork when converting to protobuf

* pass block header store to ghostdagmanager

* Convert tal's ghostdag2 implementation to blueWork

* Change finality test to check the blueWork instead of blueScore

* Update ghostdag_test to pass blockHeaderStore to ghostdag, and test all networks' genesis headers

* Add sanity blueWork check to ghostdag_test
2020-12-06 14:45:21 +02:00
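A sketch of selected-parent selection by blueWork with a hash tie-break, per #1172 (with the tie-break direction later flipped in #1359): the parent with more accumulated blue work wins, and equal work falls back to a deterministic byte-wise hash comparison. Names are assumptions, and the candidate list is assumed non-empty.

```go
package ghostdag

import (
	"bytes"
	"math/big"
)

type candidate struct {
	hash     [32]byte
	blueWork *big.Int
}

// chooseSelectedParent picks the candidate with the most blue work, breaking
// ties by comparing hash bytes.
func chooseSelectedParent(parents []candidate) candidate {
	selected := parents[0]
	for _, parent := range parents[1:] {
		cmp := parent.blueWork.Cmp(selected.blueWork)
		if cmp > 0 || (cmp == 0 && bytes.Compare(parent.hash[:], selected.hash[:]) > 0) {
			selected = parent
		}
	}
	return selected
}
```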
Svarog
33eaf9edac [NOD-1548] Re-add test difficulty + Make GHOSTDAGData immutable + don't clone in store (#1178)
* [NOD-1548] Readd TestDifficulty

* [NOD-1548] Make GHOSTDAGData immutable + don't clone in store

Co-authored-by: Ori Newman <orinewman1@gmail.com>
2020-12-06 12:35:14 +02:00
Svarog
f97b8f7580 [NOD-1587] Add DAGParams to TestConsensus (#1181) 2020-12-06 10:57:57 +02:00
Ori Newman
05979de705 [NOD-1586] Return routes.Disconnect (#1180) 2020-12-06 10:54:34 +02:00
Ori Newman
32a04d1811 Allow configuring consensus (closes #1067)
* Allow configuring consensus with a JSON file

* Define everywhere maxBlockParents as KType

* Move consensus default to consensus_defaults.go
2020-12-03 18:30:01 +02:00
Svarog
a585f32763 [NOD-1551] Make UTXODiff immutable + skip cloning it in datastore (#1167)
* [NOD-1551] Make UTXO-Diff implemented fully in utils/utxo

* [NOD-1551] Fixes everywhere except database

* [NOD-1551] Fix database

* [NOD-1551] Add comments

* [NOD-1551] Partial commit

* [NOD-1551] Complete making UTXOEntry immutable + don't clone it in UTXOCollectionClone

* [NOD-1551] Rename ToUnmutable -> ToImmutable

* [NOD-1551] Track immutable references generated from mutable UTXODiff, and invalidate them if the mutable one changed

* [NOD-1551] Clone scriptPubKey in NewUTXOEntry

* [NOD-1551] Remove redundant code

* [NOD-1551] Remove redundant call for .CloneMutable and then .ToImmutable

* [NOD-1551] Make utxoEntry pointer-receiver + clone ScriptPubKey in getter
2020-12-03 13:24:24 +02:00
Mike Zak
9866abb75a [NOD-1583] Split consensusserialization into consensushashing and serialization 2020-12-02 13:18:50 +02:00
Mike Zak
ab3c81c552 [NOD-1583] Move all TestXXX interfaces to testapi 2020-12-02 13:18:50 +02:00
Ori Newman
9756d64f28 [NOD-1582] Fix orphan resolution (#1169)
* [NOD-1582] Fix multiple request per missing ancestor

* [NOD-1582] Don't remove peer on routerpkg.ErrRouteClosed from RPC

* [NOD-1582] Use LogAndMeasureExecutionTime where possible
2020-12-02 13:05:33 +02:00
Elichai Turkel
21fc2d4219 [NOD-1433] Write specified unit tests for GHOSTDAG (#1010)
commit 3830df34b2
Merge: 46dc2e977 17e7819c2
Author: Elichai Turkel <elichai.turkel@gmail.com>
Date:   Tue Dec 1 16:29:51 2020 +0200

    Merge pull request #1170 from kaspanet/tal-ghost-fix

    Fix GhostDAG tests and jsons

commit 17e7819c27
Author: Elichai Turkel <elichai.turkel@gmail.com>
Date:   Tue Dec 1 16:24:01 2020 +0200

    Remove non-json ghostdag tests

commit 4bebb1d96a
Author: Elichai Turkel <elichai.turkel@gmail.com>
Date:   Tue Dec 1 13:26:06 2020 +0200

    Add a comment above tal's ghostdag2 impl

commit faf21a042e
Author: Elichai Turkel <elichai.turkel@gmail.com>
Date:   Tue Dec 1 13:20:08 2020 +0200

    fix the interfaces after merge

commit a8b7a25b2e
Merge: af91b69b2 f1c6df48c
Author: Elichai Turkel <elichai.turkel@gmail.com>
Date:   Tue Dec 1 13:19:08 2020 +0200

    Merge branch 'v0.8.2-dev' into tal-ghost-fix

commit af91b69b20
Author: Elichai Turkel <elichai.turkel@gmail.com>
Date:   Tue Dec 1 13:18:41 2020 +0200

    Fix the non-json tests

commit c56f34b73b
Author: Elichai Turkel <elichai.turkel@gmail.com>
Date:   Tue Dec 1 13:18:17 2020 +0200

    Fix the jsons

commit 46dc2e9773
Author: tal <tal@daglabs.com>
Date:   Mon Nov 30 17:15:20 2020 +0200

    [NOD-1143] Cosmetic changes.

commit b28e5ce816
Author: tal <tal@daglabs.com>
Date:   Mon Nov 30 15:48:08 2020 +0200

    [#1126] Place selectedParent first in blueMergeSet.

commit 4b56ed2da9
Author: tal <tal@daglabs.com>
Date:   Mon Nov 30 14:51:50 2020 +0200

    [#1126] Change placement between blockRight and blockLeft.

commit b09f31be93
Merge: e17a98b7b 0db39833f
Author: talelbaz <63008512+talelbaz@users.noreply.github.com>
Date:   Mon Nov 30 14:30:22 2020 +0200

    Merge pull request #1162 from kaspanet/new-jsons

    Update the dag json tests

commit e17a98b7ba
Author: tal <tal@daglabs.com>
Date:   Mon Nov 30 14:08:25 2020 +0200

    [#1126] Use WALK function in tests & cosmetic changes.

commit 0db39833f3
Author: Elichai Turkel <elichai.turkel@gmail.com>
Date:   Mon Nov 30 12:20:13 2020 +0200

    Update the dag json tests

commit 5a3da43dd4
Author: tal <tal@daglabs.com>
Date:   Sun Nov 29 12:03:37 2020 +0200

    [NOD-1433] Remove unnecessary code.

commit a6cde558ac
Author: tal <tal@daglabs.com>
Date:   Mon Nov 23 17:05:56 2020 +0200

    [NOD-1433] Change "Stage" sig function according to the new interface - added error as a return type.

commit 07859b6218
Author: tal <tal@daglabs.com>
Date:   Mon Nov 23 17:03:26 2020 +0200

    [NOD-1433] Print formats changed & cosmetic code changes.

commit e1a851664e
Author: tal <tal@daglabs.com>
Date:   Sun Nov 15 17:34:59 2020 +0200

    [NOD-1433] Traverse the tests dir and run each test.

commit 4c7474edc1
Author: tal <tal@daglabs.com>
Date:   Mon Nov 9 12:44:53 2020 +0200

    [NOD-1433] Traverse the tests dir and run each test.

commit 89dd1e61d3
Author: tal <tal@daglabs.com>
Date:   Mon Nov 9 11:48:36 2020 +0200

    [NOD-1433] Change implementation to adjust genesis's score to 0.
    Also, keep changing the test file to fit the new implementation.

commit 6acdcd17de
Author: tal <tal@daglabs.com>
Date:   Sun Nov 8 17:07:22 2020 +0200

    [NOD-1433] New test was added (Test 6).

commit bf23889317
Author: tal <tal@daglabs.com>
Date:   Sun Nov 8 14:59:36 2020 +0200

    Fix golint errors

commit 79ff990b5f
Author: tal <tal@daglabs.com>
Date:   Sun Nov 8 14:47:12 2020 +0200

    added "Optimize imports".

commit 73d0128f63
Author: tal <tal@daglabs.com>
Date:   Sun Nov 8 13:03:22 2020 +0200

    Added an implementation factory.

commit 61ca8b2e7e
Author: tal <tal@daglabs.com>
Date:   Thu Nov 5 16:03:18 2020 +0200

    1. Impl - choose the highest hash.
    2. Test - changed the test accordingly.

commit ef0943ca29
Author: tal <tal@daglabs.com>
Date:   Thu Oct 29 18:00:45 2020 +0200

    Update Tests

commit 6e5936abff
Author: tal <tal@daglabs.com>
Date:   Tue Oct 27 10:22:45 2020 +0200

    Change to the new API

commit 5a70dc48b3
Author: tal <tal@daglabs.com>
Date:   Mon Oct 26 18:35:31 2020 +0200

    Added tests for Ori

commit 2b9f78353f
Author: tal <tal@daglabs.com>
Date:   Mon Oct 26 13:04:37 2020 +0200

    1. Added structure "isolatedTest" {k, test}
    2. Added for loop on the tests.
    3. New test - Test 5.

commit c026d7b7a2
Author: tal <tal@daglabs.com>
Date:   Thu Oct 22 17:35:56 2020 +0300

    Fix bugs in the GHOSTDAG: counters, contains and isAncestorOf.
    Added more tests.

commit 74493b27d2
Author: tal <tal@daglabs.com>
Date:   Thu Oct 22 16:49:27 2020 +0300

    Added comparison between hashes

commit f689253463
Author: tal <tal@daglabs.com>
Date:   Thu Oct 22 11:49:01 2020 +0300

    Added comparison between hashes

commit 66be07f616
Author: tal <tal@daglabs.com>
Date:   Mon Oct 19 18:42:40 2020 +0300

    First test - pass.

commit 327f34f2dc
Author: tal <tal@daglabs.com>
Date:   Mon Oct 19 15:20:27 2020 +0300

    Add alternative implementation for ghostdag.
    Change all function signatures (add error type)

commit fd2ea3d84a
Author: tal <tal@daglabs.com>
Date:   Mon Oct 19 11:57:05 2020 +0300

    Add alternative implementation for ghostdag
2020-12-01 16:54:13 +02:00
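The embedded log above circles around GHOSTDAG's tie-breaking rule ("choose the highest hash", "added comparison between hashes"). As an illustration only, here is a minimal Go sketch of the deterministic hash comparison such a rule needs; DomainHash and the big-endian byte order are assumptions here, not kaspad's actual definitions.

```go
package main

import (
	"bytes"
	"fmt"
)

// DomainHash stands in for kaspad's 32-byte block hash (assumed layout).
type DomainHash [32]byte

// less orders two hashes by comparing their bytes lexicographically,
// which for equal-length arrays equals big-endian integer comparison.
// A deterministic total order like this lets GHOSTDAG break ties between
// otherwise-equal candidates ("choose the highest hash").
func less(a, b *DomainHash) bool {
	return bytes.Compare(a[:], b[:]) < 0
}

func main() {
	a := DomainHash{0: 0x01}
	b := DomainHash{0: 0x02}
	fmt.Println(less(&a, &b)) // true: a sorts below b
}
```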
Elichai Turkel
f1c6df48c9 [#1028] Replace oldschnorr with the BIP340 schnorr variant (#1165)
* Update go-secp256k1 to v0.0.3

* Update the txscript engine to support only 32 bytes pubkeys

* Update the txscript engine tests

* Update txscript/sign.go to use the new Schnorr KeyPair API

* Update txscript sign_test to use the new schnorr

* Update sigcache tests to use new schnorr pubkey

* Update integration tests to use the new txscript and new schnorr pubkey
2020-12-01 08:48:23 +02:00
Svarog
80c445c78b [NOD-1551] Optimize binary writes + remove redundant VarInt code (#1163)
* [NOD-1551] Optimize binary writes + remove redundant VarInt code

* [NOD-1551] Remove varInt tests

* [NOD-1551] Fix TestBech32 for Go1.15
2020-11-30 13:45:06 +02:00
Svarog
3b6eb73e53 [NOD-1551] Remove lock in SigCache (#1161) 2020-11-30 11:34:19 +02:00
talelbaz
f407c44a8d [Issue-1126] Checking pruning point violation - pruning point in the past. (#1160)
* [NOD-1126]
1. Change function name in BlockValidator interface from: "ValidateProofOfWorkAndDifficulty" to "ValidatePruningPointViolationAndProofOfWorkAndDifficulty".
2. Add the pruningManager to the blockValidator struct (also added to the function "New" accordingly).
3. Add new function "checkPruningPointViolation" of blockValidator type.
4. Add new internal check - "checkPruningPointViolation" - to the function "ValidateProofOfWorkAndDifficulty" (the third check).
5. Add new error rule - "ErrPruningPointViolation".

* [Issue-1126]
1. Remove the function "PruningPoint" from PruningManager interface.
2. Changes in blockValidator struct - remove pruningManager and add pruningStore.
3. Read "pruningPoint" from pruningStore instead of pruningManager (because of note 1 above) in the following functions:
   * "checkPruningPointViolation" of type blockValidator.
   * "FindNextPruningPoint" of type pruningManager.

* [Issue-1126]
1. Add missing error handling.

* [Issue-1126] Changes in function "checkPruningPointViolation": If header = genesis, stop checking and return nil.

* [Issue-1126] In function "checkPruningPointViolation" - change from a for loop to the "IsAncestorOfAny" function.

* [#1126] "FindNextPruningPoint" - save the pruning point in case the point is the genesis and change code internal order.

* [#1126] "FindNextPruningPoint" - cosmetics change.

* [#1126] "FindNextPruningPoint" - remove "return nil" when there is no pruning point on the if expression.

Co-authored-by: tal <tal@daglabs.com>
2020-11-30 09:57:15 +02:00
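The PR above describes the shape of the new check: the genesis header short-circuits, and a single IsAncestorOfAny call replaces the original per-parent loop. A minimal, self-contained sketch of that flow, with hypothetical Hash/topology stand-ins rather than kaspad's real types:

```go
package main

import (
	"errors"
	"fmt"
)

// Hash stands in for kaspad's externalapi.DomainHash.
type Hash string

// topology abstracts the single DAG query the check needs.
type topology interface {
	// IsAncestorOfAny reports whether hash is an ancestor of any target.
	IsAncestorOfAny(hash Hash, targets []Hash) (bool, error)
}

var errPruningPointViolation = errors.New("pruning point violation")

// checkPruningPointViolation mirrors the flow described above: genesis
// (no parents) passes trivially, then one IsAncestorOfAny call replaces
// the original for loop over the header's parents.
func checkPruningPointViolation(topo topology, pruningPoint Hash, parents []Hash) error {
	if len(parents) == 0 { // genesis
		return nil
	}
	ok, err := topo.IsAncestorOfAny(pruningPoint, parents)
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("%w: pruning point is not in the past of any parent", errPruningPointViolation)
	}
	return nil
}

// fakeTopology treats ancestry as set membership, enough to run the sketch.
type fakeTopology map[Hash]bool

func (t fakeTopology) IsAncestorOfAny(hash Hash, targets []Hash) (bool, error) {
	for _, target := range targets {
		if t[target] {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	topo := fakeTopology{"parent1": true}
	fmt.Println(checkPruningPointViolation(topo, "pruningPoint", []Hash{"parent1", "parent2"})) // <nil>
}
```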
stasatdaglabs
a1af992d15 [NOD-1578] Fix areHeaderTipsSyncedMaxTimeDifference (#1157)
* [NOD-1578] Fix areHeaderTipsSyncedMaxTimeDifference.

* [NOD-1578] Return errors that occur in the new logClosure.
2020-11-29 10:44:50 +02:00
Svarog
048caebda3 [NOD-1551] Add SigCache to TransactionValidator + Option to manipulate it in TestConsensus (#1159)
* [NOD-1551] Add SigCache

* [NOD-1551] Add option to edit SigCache in TestConsensus

* [NOD-1551] Fix comments and make SetSigCache pointer-receiver
2020-11-29 10:18:00 +02:00
oudeis
baa4311a34 Update to version 0.8.2 2020-11-29 05:12:30 +00:00
alexandratran
f6dfce8180 Update README.md 2020-11-26 21:46:57 -08:00
Ori Newman
0e91b44fc6 [NOD-1577] Change cache size to 200 (#1156) 2020-11-26 17:11:49 +02:00
Svarog
f7fa823f17 [NOD-1551] Requirements for performance tests (#1154)
* [NOD-1551] Add NewTestConsensusWithDataDir to factory

* [NOD-1551] Cache transaction ID

* [NOD-1551] Should return err if err != nil

* [NOD-1551] BuildBlockWithParents returns the block's pastUTXOData

* [NOD-1551] Set BlockCoinbaseMaturity to 0 in TestDoubleSpends

* [NOD-1551] Fix comments

* --amend

Co-authored-by: Ori Newman <orinewman1@gmail.com>
2020-11-26 12:12:01 +02:00
Svarog
546ea83123 [NOD-1570] Fix the way UTXO iterators work (#1153)
* [NOD-1570] Implement utxo.IteratorWithDiff

* [NOD-1570] Utilize utxo.IteratorWithDiff in RestorePastUTXOSetIterator and VirtualUTXOSetIterator

* [NOD-1570] Fix comment
2020-11-25 18:28:42 +02:00
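utxo.IteratorWithDiff, as the PR above describes, walks a base UTXO set with a diff applied on the fly. A simplified sketch of that idea under assumed stand-in types (Outpoint, UTXOEntry, Diff are illustrative, not kaspad's API):

```go
package main

import "fmt"

// Outpoint and UTXOEntry stand in for kaspad's domain types.
type Outpoint struct {
	TxID  string
	Index uint32
}
type UTXOEntry struct{ Amount uint64 }

// Diff is a simplified UTXO diff: entries to add and outpoints to remove.
type Diff struct {
	ToAdd    map[Outpoint]UTXOEntry
	ToRemove map[Outpoint]struct{}
}

// IterateWithDiff visits a base UTXO set with a diff applied on the fly:
// base entries named in ToRemove are skipped, ToAdd entries are emitted
// afterwards, so the diff never has to be materialized into a new set.
func IterateWithDiff(base map[Outpoint]UTXOEntry, diff *Diff, visit func(Outpoint, UTXOEntry)) {
	for op, entry := range base {
		if _, removed := diff.ToRemove[op]; removed {
			continue
		}
		visit(op, entry)
	}
	for op, entry := range diff.ToAdd {
		visit(op, entry)
	}
}

func main() {
	base := map[Outpoint]UTXOEntry{{TxID: "a", Index: 0}: {Amount: 100}}
	diff := &Diff{
		ToAdd:    map[Outpoint]UTXOEntry{{TxID: "b", Index: 0}: {Amount: 50}},
		ToRemove: map[Outpoint]struct{}{{TxID: "a", Index: 0}: {}},
	}
	IterateWithDiff(base, diff, func(op Outpoint, e UTXOEntry) {
		fmt.Printf("%s:%d -> %d\n", op.TxID, op.Index, e.Amount)
	})
}
```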
Elichai Turkel
f9c2137344 [RES-65] Add a test for BoundedMergeDepth - new (#1131)
* Test bounded merge depth

* Fix a bug in GetBlockInfo, which tried to use reachability on an invalid block

* Add a test to reproduce and test the GetBlockInfo bug
2020-11-25 13:42:55 +02:00
stasatdaglabs
0fa13357c3 [NOD-1566] Add caching to all stores (#1152)
* [NOD-1566] Add a dependency to golang-lru.

* [NOD-1566] Add caching to blockstore.go.

* [NOD-1566] Add LRUCache to all store objects and initialize them.

* [NOD-1566] Add caching to acceptanceDataStore.

* [NOD-1566] Add caching to blockHeaderStore.

* [NOD-1566] Implement a simpler LRU cache.

* [NOD-1566] Use the simpler cache implementation everywhere.

* [NOD-1566] Remove dependency in golang-lru.

* [NOD-1566] Fix object reuse issues in store Get functions.

* [NOD-1566] Add caching to blockRelationStore.

* [NOD-1566] Add caching to blockStatusStore.

* [NOD-1566] Add caching to ghostdagDataStore.

* [NOD-1566] Add caching to multisetStore.

* [NOD-1566] Add caching to reachabilityDataStore.

* [NOD-1566] Add caching to utxoDiffStore.

* [NOD-1566] Add caching to reachabilityReindexRoot.

* [NOD-1566] Add caching to pruningStore.

* [NOD-1566] Add caching to headerTipsStore.

* [NOD-1566] Add caching to consensusStateStore.

* [NOD-1566] Add comments explaining why we don't discard staging at the normal location in consensusStateStore.

* [NOD-1566] Make go vet happy.

* [NOD-1566] Fix merge errors.

* [NOD-1566] Add a missing break statement.

* [NOD-1566] Run go mod tidy.

* [NOD-1566] Remove serializedUTXOSetCache.
2020-11-25 13:41:13 +02:00
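The PR above swaps golang-lru for "a simpler LRU cache". One way such a simplified cache can look — this sketch evicts an arbitrary entry when full rather than tracking strict recency, which is an assumption for brevity, not necessarily what kaspad does:

```go
package main

import "fmt"

// lruCache is a minimal fixed-capacity cache in the spirit of the
// "simpler LRU cache" mentioned above. For brevity it evicts an
// arbitrary entry when full (Go map iteration order), a common
// simplification when strict LRU ordering is not required.
type lruCache struct {
	capacity int
	entries  map[string]interface{}
}

func newLRUCache(capacity int) *lruCache {
	return &lruCache{capacity: capacity, entries: make(map[string]interface{}, capacity)}
}

// Add inserts a key/value pair, evicting one entry if the cache is full.
func (c *lruCache) Add(key string, value interface{}) {
	if len(c.entries) >= c.capacity {
		for k := range c.entries { // evict one arbitrary entry
			delete(c.entries, k)
			break
		}
	}
	c.entries[key] = value
}

// Get returns the cached value and whether it was present.
func (c *lruCache) Get(key string) (interface{}, bool) {
	v, ok := c.entries[key]
	return v, ok
}

func main() {
	cache := newLRUCache(2)
	cache.Add("a", 1)
	cache.Add("b", 2)
	cache.Add("c", 3) // evicts one of "a"/"b"
	fmt.Println(cache.Get("c"))
}
```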
Ori Newman
5b2fae0457 [NOD-1568] Add staticcheck checks (#1150) 2020-11-25 11:43:51 +02:00
Ori Newman
3bad9ec1eb [NOD-1569] Stop using ReceiveFromChanWhenDone (#1151) 2020-11-25 11:30:07 +02:00
Ori Newman
45d9b63572 [NOD-1567] Add clone methods to data stores types (#1149)
* [NOD-1567] Add clone methods to data stores types

* [NOD-1567] Fix comments

* [NOD-1567] Fix test
2020-11-24 17:56:18 +02:00
Elichai Turkel
afc634d871 Add TestCheckBlockSanity back (#1137) 2020-11-24 16:57:40 +02:00
Ori Newman
2334f8b4eb [NOD-1564] Add TestChainedTransactions (#1145)
* [NOD-1564] Add TestChainedTransactions

* [NOD-1564] Fix errors
2020-11-24 11:42:53 +02:00
stasatdaglabs
d65f382c80 [NOD-1565] Reorder getSyncInfo in a way that won't unnecessarily call HeaderTipsPruningPoint. (#1146) 2020-11-24 11:34:02 +02:00
Ori Newman
2096a28d1c [NOD-1563] Add TestMaxHeaders (#1144) 2020-11-23 18:33:45 +02:00
Ori Newman
96d9e5800f [NOD-1561] Add TestCheckParentsIncest and fix validation order (#1143) 2020-11-23 18:27:44 +02:00
Ori Newman
8264369c81 [NOD-1561] Add TestValidateMedianTime (#1141)
* [NOD-1561] Add TestValidateMedianTime

* [NOD-1561] Remove redundant variable
2020-11-23 17:18:30 +02:00
Ori Newman
bb2d7f72ac [NOD-1560] Add TestValidateTransactionInIsolation (#1140)
* [NOD-1560] Add TestValidateTransactionInIsolation

* [NOD-1560] Make ForAllNets copy the params before mutating them

* [NOD-1560] Remove redundant continue

* [NOD-1560] Don't change finality duration
2020-11-23 16:28:59 +02:00
Ori Newman
c1505b4748 [NOD-1555] Use stageDiff to update virtualDiffParents (#1139)
* [NOD-1555] Filter ancestors in updateVirtualDiffParents

* [NOD-1555] Use stageDiff to update virtualDiffParents

* [NOD-1555] Don't add existing blocks in addToVirtualDiffParents

* [NOD-1555] Remove redundant check

* [NOD-1555] Fix log and rename removeAncestorsFromVirtualDiffParents->removeAncestorsFromVirtualDiffParentsAndAssignDiffChild

* [NOD-1555] Add logs

* [NOD-1555] Fix comment

* [NOD-1555] Fix logs
2020-11-23 15:09:39 +02:00
stasatdaglabs
dec9ef5f75 [NOD-1555] Implement TestResolveBlockStatusSanity (#1138)
* [NOD-1555] Implement TestResolveBlockStatusSanity.

* [NOD-1555] Fix the test name string.
2020-11-23 14:29:41 +02:00
Mike Zak
5211727206 [NOD-1557] Cover the consensusStateManager package in trace logs (#1135)
* [NOD-1557] Add trace logs in add_block_to_virtual.go.

* [NOD-1557] Add trace logs in resolve_block_status.go.

* [NOD-1557] Add trace logs in calculate_past_utxo.go.

* [NOD-1557] Add trace logs in finality.go.

* [NOD-1557] Add trace logs in multisets.go.

* [NOD-1557] Fix compilation errors.

* [NOD-1557] Add trace logs to verify_and_build_utxo.go.

* [NOD-1557] Add trace logs to update_virtual.go.

* [NOD-1557] Add trace logs to set_pruning_utxo_set.go.

* [NOD-1557] Add trace logs to populate_tx_with_utxo_entries.go.

* [NOD-1557] Add trace logs to pick_virtual_parents.go.

* [NOD-1557] Make go vet happy.

* [NOD-1557] Clarify that some logic in AddBlockToVirtual is there for the sake of logging alone.

* [NOD-1557] Call blockStatusStore directly in AddBlockToVirtual when refetching the block status.
2020-11-23 13:08:10 +02:00
Elichai Turkel
fafe1d534f Add TestSequenceLocksActive back (#1133) 2020-11-22 17:17:39 +02:00
Elichai Turkel
c56a5336f3 Re-add TestPruningDepth (#1132) 2020-11-22 17:04:13 +02:00
Elichai Turkel
b3a3121725 Add TestFinality back (#1129)
* Add VirtualFinalityPoint to TestConsensusStateManager

* Add TestFinality back
2020-11-22 12:30:27 +02:00
Svarog
950dd0cc8d [NOD-1556] Add some logs (#1110)
* [NOD-1556] Add logs regarding block status and virtual blue score

* [NOD-1556] UTXODiffAlgebra: add the offending outpoint to the text of errors

* [NOD-1556] Make checkIntersectionWithRule return ok as well
2020-11-19 11:17:05 +02:00
stasatdaglabs
bb244706ea [NOD-1543] Optimize the performance of Count() in BlockHeaderStore and BlockStore (#1109)
* [NOD-1543] Optimize Count() in BlockHeaderStore.

* [NOD-1543] Optimize Count() in BlockStore.

* [NOD-1543] Fix commitCount.

* [NOD-1543] Explicitly initialize count to 0.
2020-11-18 16:35:32 +02:00
stasatdaglabs
ed386bbc8f [NOD-1550] Don't request blocks for invs that are known to be orphans (#1108)
* [NOD-1550] Implement IsOrphan().

* [NOD-1550] Don't request blocks for invs that are known to be orphans.
2020-11-18 13:37:14 +02:00
stasatdaglabs
75d21d39cc [NOD-1549] Properly handle errors in unorphanBlock (#1107)
* [NOD-1549] In AddBlock, simply log RuleErrors.

* [NOD-1549] Properly handle errors in unorphanBlock.
2020-11-18 12:19:30 +02:00
Elichai Turkel
3f92ddd827 Add blueScore to RPC GetBlock, and add more INFO logs (#1103)
* Add BlueScore to RPC command GetBlock

* Add info logs when getting new blocks from the p2p
2020-11-18 11:19:12 +02:00
Ori Newman
8500acd86b [NOD-1548] Remove PoW check from tests (#1105)
* [NOD-1548] Add TestDifficulty and remove PoW check from tests

* [NOD-1548] Add TestSkipProofOfWork

* [NOD-1548] Remove TestDifficulty
2020-11-18 10:27:29 +02:00
Elichai Turkel
5b037950d8 Fix double printing the mainnet has not launched yet message (#1101) 2020-11-18 09:16:58 +02:00
stasatdaglabs
184911f76e [NOD-1547] Make the SendAddresses flow not one-time for the sake of DNSSeeder (#1104)
* [NOD-1547] Make the SendAddresses flow not one-time for the sake of DNSSeeder.

* [NOD-1547] Add all special commands to chooseRouteForCommand.
2020-11-17 18:05:14 +02:00
stasatdaglabs
7479f5f5e8 [NOD-1545] Fix incorrect block difficulty calculation in buildHeader. (#1102) 2020-11-17 16:40:55 +02:00
Ori Newman
891095563e Fix blocks order (#1099) 2020-11-17 16:00:16 +02:00
Ori Newman
60c24d8dea Fix TestBlueBlockWindow (#1098)
* Fix TestBlueBlockWindow

* Add comments
2020-11-17 16:00:16 +02:00
Svarog
d4993c1d06 [NOD-1542] Don't try to return more addresses than we have (#1097)
* [NOD-1542] Don't try to return more addresses than we have

* [NOD-1542] Allocate according to updated count
2020-11-17 16:00:16 +02:00
Elichai Turkel
c785ca0e52 Add more compactBits tests (#1096) 2020-11-17 16:00:16 +02:00
Svarog
9eb5c4a0ed [NOD-1532] Make all nets equal in mining difficulty (#1095)
* [NOD-1532] Make all nets equal in mining difficulty

* [NOD-1532] Fix comments
2020-11-17 16:00:16 +02:00
stasatdaglabs
9d5d1b02dc [NOD-1538] Implement a simple orphan pool (#1093)
* [NOD-1538] Implement a simple orphan pool.

* [NOD-1538] Connect the orphan pool to the appropriate flows.

* [NOD-1538] Make UnorphanBlocks actually unorphan blocks.

* [NOD-1538] Fix logs.

* [NOD-1538] Make unorphaned blocks call LogBlock.

* [NOD-1538] Fix a log and some bad names.

* [NOD-1538] Don't return an error from LogBlock.

* [NOD-1538] Pass a pointer to hash in findChildOrphansOfBlock.

* [NOD-1538] Extract addChildOrphansToProcessQueue to a separate function.
2020-11-17 16:00:16 +02:00
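A hedged sketch of the "simple orphan pool" idea the PR above describes: orphans indexed by hash, with a lookup that releases children once their parents are known. All names here are illustrative stand-ins, not kaspad's actual API:

```go
package main

import "fmt"

type Hash string

// Block stands in for a domain block: its own hash plus parent hashes.
type Block struct {
	Hash    Hash
	Parents []Hash
}

// orphanPool holds blocks whose parents have not all arrived yet.
type orphanPool struct {
	orphans map[Hash]*Block
}

func newOrphanPool() *orphanPool {
	return &orphanPool{orphans: make(map[Hash]*Block)}
}

// AddOrphan stores a block until its missing parents arrive.
func (p *orphanPool) AddOrphan(b *Block) { p.orphans[b.Hash] = b }

// UnorphanChildren removes and returns every orphan that lists parentHash
// as a parent and whose remaining parents are all already known.
func (p *orphanPool) UnorphanChildren(parentHash Hash, isKnown func(Hash) bool) []*Block {
	var unorphaned []*Block
	for hash, orphan := range p.orphans {
		ready := false
		for _, parent := range orphan.Parents {
			if parent == parentHash {
				ready = true
			} else if !isKnown(parent) {
				ready = false
				break
			}
		}
		if ready {
			delete(p.orphans, hash)
			unorphaned = append(unorphaned, orphan)
		}
	}
	return unorphaned
}

func main() {
	p := newOrphanPool()
	p.AddOrphan(&Block{Hash: "child", Parents: []Hash{"parent"}})
	ready := p.UnorphanChildren("parent", func(Hash) bool { return false })
	fmt.Println(len(ready)) // 1
}
```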
stasatdaglabs
213be67c47 [NOD-1538] Fix isBlockInHeaderPruningPointFuture. (#1094) 2020-11-17 16:00:16 +02:00
Ori Newman
14d7ab5fc6 Add TestBigToCompact and TestCompactToBig (#1092)
* Add TestBigToCompact and TestCompactToBig

* Add tests
2020-11-17 16:00:16 +02:00
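TestBigToCompact and TestCompactToBig exercise the btcd-style compact difficulty encoding (8-bit exponent, sign bit, 23-bit mantissa) that kaspad inherited. A sketch of the compact-to-big direction, following the well-known btcd algorithm; treat it as a reference illustration rather than kaspad's exact code:

```go
package main

import (
	"fmt"
	"math/big"
)

// compactToBig converts a compact target representation to a big.Int:
// the low 23 bits are the mantissa, bit 23 is the sign, and the high
// byte is a base-256 exponent.
func compactToBig(compact uint32) *big.Int {
	mantissa := compact & 0x007fffff
	isNegative := compact&0x00800000 != 0
	exponent := uint(compact >> 24)

	var bn *big.Int
	if exponent <= 3 {
		// Small exponents fit entirely inside the mantissa.
		mantissa >>= 8 * (3 - exponent)
		bn = big.NewInt(int64(mantissa))
	} else {
		bn = big.NewInt(int64(mantissa))
		bn.Lsh(bn, 8*(exponent-3))
	}
	if isNegative {
		bn = bn.Neg(bn)
	}
	return bn
}

func main() {
	// 0x1d00ffff is the classic genesis difficulty target.
	fmt.Printf("%x\n", compactToBig(0x1d00ffff))
}
```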
Mike Zak
7224d58940 [NOD-1532] Add comments 2020-11-17 16:00:16 +02:00
Mike Zak
b2188f5993 [NOD-1532] AlwaysCallResolveBlockStatus in BuildBlock + Fixes to make it work if there's nothing to resolve 2020-11-17 16:00:16 +02:00
Mike Zak
dbd15aecf5 [NOD-1532] Invert condition in isViolatingFinality 2020-11-17 16:00:16 +02:00
Mike Zak
66f5a5bd7d [NOD-1532] Genesis is not violating finality by definition 2020-11-17 16:00:16 +02:00
Mike Zak
c994200878 [NOD-1532] ResolveBlockStatus should return the blockStatus 2020-11-17 16:00:16 +02:00
Ori Newman
7050ebeac9 [NOD-1541] Add TestPastMedianTime (#1091) 2020-11-17 16:00:16 +02:00
Svarog
f2df48139f [NOD-1532] Remove virtual from ParentChildren when checking for virtualSelectedParent candidates (#1089)
* [NOD-1532] Remove virtual from ParentChildren when checking for virtualSelectedParent candidates

* [NOD-1532] Fix minor issues
2020-11-17 16:00:16 +02:00
stasatdaglabs
48d8137604 [NOD-1538] Implement GetBlockCount. 2020-11-17 16:00:16 +02:00
stasatdaglabs
310cf0bb9b [NOD-1538] Remove bad check in selectPeerForIBD. 2020-11-17 16:00:16 +02:00
Ori Newman
b6c47fdd21 [NOD-1535] fix reachability tests (#1087)
* [NOD-1535] Don't use pointer to outpoint when serializing

* [NOD-1535] Fix reachability tests
2020-11-17 16:00:16 +02:00
Ori Newman
e6a2b7366f [NOD-1535] Don't use pointer to outpoint when serializing (#1086) 2020-11-17 16:00:16 +02:00
Mike Zak
d8f72e2b27 [NOD-1532] Update VirtualUTXODiffParents diffs even if list didn't change 2020-11-17 16:00:16 +02:00
stasatdaglabs
08749deaeb [NOD-1538] Fix mempool not wrapping consensus errors and bad invalid message handling (#1082)
* [NOD-1538] Correct messages.proto.

* [NOD-1538] Fix invalid message handling.

* [NOD-1538] Fix mempool not wrapping consensus errors.

* [NOD-1538] Extract wrapping logic to a separate function.

* [NOD-1538] Extract wrapping logic to an even better separate function.
2020-11-17 16:00:16 +02:00
Mike Zak
83a88d9989 [NOD-1532] newUTXOSetIterator should start with -1 index 2020-11-17 16:00:16 +02:00
Mike Zak
151910c27a [NOD-1532] Check UTXOCommitment for all blocks 2020-11-17 16:00:16 +02:00
Mike Zak
fca8ed57bd [NOD-1532] Add another block in TestUTXOCommitment 2020-11-17 16:00:16 +02:00
Ori Newman
56679818be [NOD-1535] Add non coinbase transactions to diff (#1084) 2020-11-17 16:00:16 +02:00
Mike Zak
f07f2edad2 [NOD-1532] Properly deal with selectedParentStatuses in buildBlockWithParents 2020-11-17 16:00:16 +02:00
Mike Zak
2dcfe90850 [NOD-1532] Shouldn't update parent diff if the parent is not UTXO-verified 2020-11-17 16:00:16 +02:00
Mike Zak
dc80a39c54 [NOD-1532] OpTrueScript should also return the redeem script 2020-11-17 16:00:16 +02:00
Mike Zak
34be898491 [NOD-1532] utxoSetIterator should be a pointer receiver 2020-11-17 16:00:16 +02:00
Mike Zak
f4a2fbf64f [NOD-1532] Fixes in updateVirtualDiffParents 2020-11-17 16:00:16 +02:00
Ori Newman
a0c6076ccc [NOD-1535] Add new block to virtual diff parents only if it's valid (#1077) 2020-11-17 16:00:16 +02:00
Mike Zak
fddce00d08 [NOD-1532] Fixes in updateVirtualDiffParent 2020-11-17 16:00:16 +02:00
Mike Zak
ae682d59f7 [NOD-1532] Fixes in updateVirtualDiffParent 2020-11-17 16:00:16 +02:00
Ori Newman
347f3de15c [NOD-1535] fix reachability test (#1075)
* [NOD-1535] Don't compare pointers

* [NOD-1535] Fix condition on updateVirtualDiffParents
2020-11-17 16:00:16 +02:00
stasatdaglabs
a34091991a [NOD-1538] Fix MinimalNetAdapter and don't insert BlockRelations before making sure the block's parents exist (#1074)
* [NOD-1538] Fix minimal net adapter.

* [NOD-1538] Don't insert block relation until we've validated that the block's parents exist.

* [NOD-1538] Don't hold addressManager in MinimalNetAdapter.

* [NOD-1538] Fix a comment in messages.proto.
2020-11-17 16:00:16 +02:00
Mike Zak
efe1986a56 [NOD-1532] Don't validate coinbase transaction in normal flow 2020-11-17 16:00:16 +02:00
Mike Zak
3ab507b66f [NOD-1532] Use correct coinbase transaction in buildBlockWithParents 2020-11-17 16:00:16 +02:00
Ori Newman
afbad73c0b [NOD-1535] Don't compare pointers (#1072) 2020-11-17 16:00:16 +02:00
Mike Zak
a1fa17d872 [NOD-1532] Add DiscardAllStores to TestConsensus 2020-11-17 16:00:16 +02:00
Ori Newman
b50421beee [NOD-1535] Don't reuse pointers on loop (#1069)
* [NOD-1535] Don't reuse pointers on loop

* [NOD-1535] Don't reuse pointers on loop
2020-11-17 16:00:16 +02:00
Mike Zak
aeded07815 [NOD-1532] Make BuildBlockWithParents resolve the status of the new block's selectedParent 2020-11-17 16:00:16 +02:00
Mike Zak
7d14f24b84 [NOD-1532] Fix some error messages 2020-11-17 16:00:16 +02:00
Mike Zak
c52b8100c6 [NOD-1532] Make dagtopologymanager test external 2020-11-17 16:00:16 +02:00
Mike Zak
f52cddc25c [NOD-1532] Remove consensus rule that requires blocks to be sorted by hash 2020-11-17 16:00:16 +02:00
Ori Newman
fc5e39f6cc [NOD-1535] fix reachability test (#1061)
* Revert "[NOD-1500] Delete integration tests"

This reverts commit fcb57a2066.

* [NOD-1518] hashserialization -> consensusserialization

* [NOD-1518] Fix add genesis to virtual

* [NOD-1518] Fix a bug in SerializeCoinbasePayload.

* [NOD-1518] Fix a loop error and make pastMedianTime behave correctly everywhere on genesis.

* [NOD-1518] Fix another bug and an infinite loop.

* [NOD-1518] Fix uninitialized slice.

* [NOD-1518] Fix bad should-commit checks and another infinite loop.

* [NOD-1518] Fix nil serialization.

* [NOD-1518] Rename blockHash to currentBlockHash.

* [NOD-1518] Move the check whether stagedVirtualUTXOSet != nil to the top of commitVirtualUTXODiff.

* [NOD-1518] Simplify utxoDiffStore.Commit.

* [NOD-1518] Unextract resolveBlockStatusAndCheckFinality.

* [NOD-1518] Move no-transactions logic into CalculateIDMerkleRoot.

* [NOD-1518] Remove redundant is-staged check.

* [NOD-1518] Fix merge errors.

* [NOD-1518] Don't write anything if utxoDiffChild is nil.

* [NOD-1518] Stage virtualAcceptanceData and virtualMultiset.

* [NOD-1518] Fix bugs in getBlockTemplate and submitBlock.

* [NOD-1518] Fix bad validation order in validateHeaderInContext.

* [NOD-1518] Fix bug in Next().

* [NOD-1518] Fix nil dereference of subnetworks in AddressCache.

* [NOD-1518] Fix multisetStore.Get returning a pointer to a multiset that is changed in place.

* [NOD-1518] Break on genesis in countSubtrees.

* [NOD-1518] Fix createBlockLocator.

* [NOD-1518] Fix MsgTxToDomainTransaction.

* [NOD-1518] Set MaxTxVersion to 1.

* [NOD-1518] Fix missing error handling, bug in MsgTxToDomainTransaction, and bad subnetwork equality check.

* [NOD-1518] Fix bug in hasUTXOByOutpointFromStagedVirtualUTXODiff.

* [NOD-1518] Remove irrelevant comments.

* [NOD-1518] Generate transactions with sufficient fee in tx_relay_test.

* [NOD-1518] Fix broken RPC handlers.

* [NOD-1518] Fix merge errors.

* [NOD-1518] Fix bad exists check in restorePastUTXO and missing genesis check in CalculatePastUTXOAndAcceptanceData.

* [NOD-1518] Add a comment.

* [NOD-1518] Use a regular mutex instead of a read-write mutex in consensus to avoid dealing with sneaky not-actually-read functions.

* [NOD-1518] Fix a deadlock in GetVirtualSelectedParent.

* [NOD-1518] Fix missing handler registration for CmdHeader.

* [NOD-1518] Fix processHeader calling OnNewBlock and LogBlock. Also fix conversion errors in IBDRootUTXOSetAndBlock.

* [NOD-1518] Fix bad Command() in MsgIBDRootUTXOSetAndBlock.

* [NOD-1518] Fix bad SyncStateMissingUTXOSet logic in resolveSyncState.

* [NOD-1518] Rename mode to syncState.

* [NOD-1518] Fix headers-only blocks coming in after the consensus thinks it's synced.

* [NOD-1518] Fix selectedChildIterator.Next not ignoring virtual, infinite loop in HashSet.Length().

* [NOD-1518] Fix not-properly wrapped IBD blocks.

* [NOD-1518] Fix bad conversion in RequestIBDBlocks.

* [NOD-1518] Fix bad string for CmdRequestHeaders.

* [NOD-1518] Fix bad string for CmdDoneHeaders.

* [NOD-1518] Fix bad Command() for MsgIBDRootNotFound.

* [NOD-1518] Fix bad areHeaderTipsSyncedMaxTimeDifference value.

* [NOD-1518] Add missing string for CmdRequestIBDBlocks.

* [NOD-1518] Fix bad check for SyncStateMissingBlockBodies.

* [NOD-1518] Fix bad timeout durations in tests.

* [NOD-1518] Fix IBD blocks not calling OnNewBlock.

* [NOD-1518] Change when IBD finishes.

* [NOD-1518] Properly clone utxoDiffChild.

* [NOD-1535] Fix reachability tests

* [NOD-1518] Fix merge errors.

* [NOD-1518] Move call to LogBlock into OnNewBlock.

* [NOD-1518] Return "not implemented" in unimplemented RPC handlers.

* [NOD-1518] Extract cloning of hashes to a method over DomainHash.

* [NOD-1518] Use isHeaderOnlyBlock.

* [NOD-1518] Use constants.TransactionVersion.

* [NOD-1518] Break immediately if we reached the virtual in SelectedChildIterator.

* [NOD-1518] Don't stage nil utxoDiffChild.

* [NOD-1518] Properly check the genesis hash in CalculatePastUTXOAndAcceptanceData.

* [NOD-1518] Explain why we break on current == nil in countSubtrees.

* [NOD-1518] Add a comment explaining why we check against StatusValid in resolveSyncState.

* [NOD-1535] Add external reachability tests

* [NOD-1535] Fix reachability tests and fix related bugs

* [NOD-1535] Add setters for reindex slack and window

* [NOD-1535] Remove redundant line

* [NOD-1535] Add comment

* [NOD-1535] Fix comments

* [NOD-1535] Rename DBReader->DatabaseContext

* [NOD-1535] Check that reindex root is changed

* [NOD-1535] Fix calculateNewTips

Co-authored-by: Mike Zak <feanorr@gmail.com>
Co-authored-by: stasatdaglabs <stas@daglabs.com>
2020-11-17 16:00:16 +02:00
Svarog
8ccf381fc7 [NOD-1532] csm unit tests (#1059)
* Revert "[NOD-1500] Delete integration tests"

This reverts commit fcb57a2066.

* [NOD-1518] hashserialization -> consensusserialization

* [NOD-1518] Fix add genesis to virtual

* [NOD-1518] Fix a bug in SerializeCoinbasePayload.

* [NOD-1518] Fix a loop error and make pastMedianTime behave correctly everywhere on genesis.

* [NOD-1518] Fix another bug and an infinite loop.

* [NOD-1518] Fix uninitialized slice.

* [NOD-1518] Fix bad should-commit checks and another infinite loop.

* [NOD-1518] Fix nil serialization.

* [NOD-1518] Rename blockHash to currentBlockHash.

* [NOD-1518] Move the check whether stagedVirtualUTXOSet != nil to the top of commitVirtualUTXODiff.

* [NOD-1518] Simplify utxoDiffStore.Commit.

* [NOD-1518] Unextract resolveBlockStatusAndCheckFinality.

* [NOD-1518] Move no-transactions logic into CalculateIDMerkleRoot.

* [NOD-1518] Remove redundant is-staged check.

* [NOD-1518] Fix merge errors.

* [NOD-1518] Don't write anything if utxoDiffChild is nil.

* [NOD-1518] Stage virtualAcceptanceData and virtualMultiset.

* [NOD-1518] Fix bugs in getBlockTemplate and submitBlock.

* [NOD-1518] Fix bad validation order in validateHeaderInContext.

* [NOD-1518] Fix bug in Next().

* [NOD-1518] Fix nil dereference of subnetworks in AddressCache.

* [NOD-1518] Fix multisetStore.Get returning a pointer to a multiset that is changed in place.

* [NOD-1518] Break on genesis in countSubtrees.

* [NOD-1518] Fix createBlockLocator.

* [NOD-1518] Fix MsgTxToDomainTransaction.

* [NOD-1518] Set MaxTxVersion to 1.

* [NOD-1518] Fix missing error handling, bug in MsgTxToDomainTransaction, and bad subnetwork equality check.

* [NOD-1518] Fix bug in hasUTXOByOutpointFromStagedVirtualUTXODiff.

* [NOD-1518] Remove irrelevant comments.

* [NOD-1518] Generate transactions with sufficient fee in tx_relay_test.

* [NOD-1518] Fix broken RPC handlers.

* [NOD-1518] Fix merge errors.

* [NOD-1518] Fix bad exists check in restorePastUTXO and missing genesis check in CalculatePastUTXOAndAcceptanceData.

* [NOD-1518] Add a comment.

* [NOD-1518] Use a regular mutex instead of a read-write mutex in consensus to avoid dealing with sneaky not-actually-read functions.

* [NOD-1518] Fix a deadlock in GetVirtualSelectedParent.

* [NOD-1518] Fix missing handler registration for CmdHeader.

* [NOD-1518] Fix processHeader calling OnNewBlock and LogBlock. Also fix conversion errors in IBDRootUTXOSetAndBlock.

* [NOD-1518] Fix bad Command() in MsgIBDRootUTXOSetAndBlock.

* [NOD-1518] Fix bad SyncStateMissingUTXOSet logic in resolveSyncState.

* [NOD-1518] Rename mode to syncState.

* [NOD-1518] Fix headers-only blocks coming in after the consensus thinks it's synced.

* [NOD-1518] Fix selectedChildIterator.Next not ignoring virtual, infinite loop in HashSet.Length().

* [NOD-1518] Fix not-properly wrapped IBD blocks.

* [NOD-1532] Add TestMultiset

* [NOD-1518] Fix bad conversion in RequestIBDBlocks.

* [NOD-1518] Fix bad string for CmdRequestHeaders.

* [NOD-1518] Fix bad string for CmdDoneHeaders.

* [NOD-1518] Fix bad Command() for MsgIBDRootNotFound.

* [NOD-1532] Add TestPastUTXOMultiset

* [NOD-1518] Fix bad areHeaderTipsSyncedMaxTimeDifference value.

* [NOD-1532] Added TestDoubleSpends

* [NOD-1518] Add missing string for CmdRequestIBDBlocks.

* [NOD-1518] Fix bad check for SyncStateMissingBlockBodies.

* [NOD-1518] Fix bad timeout durations in tests.

* [NOD-1518] Fix IBD blocks not calling OnNewBlock.

* [NOD-1518] Change when IBD finishes.

* [NOD-1518] Properly clone utxoDiffChild.

* [NOD-1532] Update hashes of blocks

* [NOD-1532] Fix genesis blocks and a few more bugs

* [NOD-1532] Bugfix: incorrect key passed to dbTx.Put

* [NOD-1532] Make sure there's no nil payloads

* [NOD-1532] Fix AddBlockToVirtual

* [NOD-1532] Update tips and virtualDiffParents properly

* [NOD-1532] Allow nil payload

* [NOD-1532] Check for actual error and not just some RuleError

* [NOD-1532] Get rid of SimpleCoinbaseData and make OpTrueScript P2SH

* [NOD-1532] If coinbaseData is nil - fill in with generic coinbaseData

Co-authored-by: Ori Newman <orinewman1@gmail.com>
Co-authored-by: stasatdaglabs <stas@daglabs.com>
2020-11-17 16:00:16 +02:00
stasatdaglabs
f320887bff [NOD-1538] Fix bad allocation in notBannedAddressesWithException. 2020-11-17 16:00:16 +02:00
stasatdaglabs
eef5e3768c [NOD-1518] Fix genesis block insertion and integration tests (#1013)
* Revert "[NOD-1500] Delete integration tests"

This reverts commit fcb57a2066.

* [NOD-1518] hashserialization -> consensusserialization

* [NOD-1518] Fix add genesis to virtual

* [NOD-1518] Fix a bug in SerializeCoinbasePayload.

* [NOD-1518] Fix a loop error and make pastMedianTime behave correctly everywhere on genesis.

* [NOD-1518] Fix another bug and an infinite loop.

* [NOD-1518] Fix uninitialized slice.

* [NOD-1518] Fix bad should-commit checks and another infinite loop.

* [NOD-1518] Fix nil serialization.

* [NOD-1518] Rename blockHash to currentBlockHash.

* [NOD-1518] Move the check whether stagedVirtualUTXOSet != nil to the top of commitVirtualUTXODiff.

* [NOD-1518] Simplify utxoDiffStore.Commit.

* [NOD-1518] Unextract resolveBlockStatusAndCheckFinality.

* [NOD-1518] Move no-transactions logic into CalculateIDMerkleRoot.

* [NOD-1518] Remove redundant is-staged check.

* [NOD-1518] Fix merge errors.

* [NOD-1518] Don't write anything if utxoDiffChild is nil.

* [NOD-1518] Stage virtualAcceptanceData and virtualMultiset.

* [NOD-1518] Fix bugs in getBlockTemplate and submitBlock.

* [NOD-1518] Fix bad validation order in validateHeaderInContext.

* [NOD-1518] Fix bug in Next().

* [NOD-1518] Fix nil dereference of subnetworks in AddressCache.

* [NOD-1518] Fix multisetStore.Get returning a pointer to a multiset that is changed in place.

* [NOD-1518] Break on genesis in countSubtrees.

* [NOD-1518] Fix createBlockLocator.

* [NOD-1518] Fix MsgTxToDomainTransaction.

* [NOD-1518] Set MaxTxVersion to 1.

* [NOD-1518] Fix missing error handling, bug in MsgTxToDomainTransaction, and bad subnetwork equality check.

* [NOD-1518] Fix bug in hasUTXOByOutpointFromStagedVirtualUTXODiff.

* [NOD-1518] Remove irrelevant comments.

* [NOD-1518] Generate transactions with sufficient fee in tx_relay_test.

* [NOD-1518] Fix broken RPC handlers.

* [NOD-1518] Fix merge errors.

* [NOD-1518] Fix bad exists check in restorePastUTXO and missing genesis check in CalculatePastUTXOAndAcceptanceData.

* [NOD-1518] Add a comment.

* [NOD-1518] Use a regular mutex instead of a read-write mutex in consensus to avoid dealing with sneaky not-actually-read functions.

* [NOD-1518] Fix a deadlock in GetVirtualSelectedParent.

* [NOD-1518] Fix missing handler registration for CmdHeader.

* [NOD-1518] Fix processHeader calling OnNewBlock and LogBlock. Also fix conversion errors in IBDRootUTXOSetAndBlock.

* [NOD-1518] Fix bad Command() in MsgIBDRootUTXOSetAndBlock.

* [NOD-1518] Fix bad SyncStateMissingUTXOSet logic in resolveSyncState.

* [NOD-1518] Rename mode to syncState.

* [NOD-1518] Fix headers-only blocks coming in after the consensus thinks it's synced.

* [NOD-1518] Fix selectedChildIterator.Next not ignoring virtual, infinite loop in HashSet.Length().

* [NOD-1518] Fix not-properly wrapped IBD blocks.

* [NOD-1518] Fix bad conversion in RequestIBDBlocks.

* [NOD-1518] Fix bad string for CmdRequestHeaders.

* [NOD-1518] Fix bad string for CmdDoneHeaders.

* [NOD-1518] Fix bad Command() for MsgIBDRootNotFound.

* [NOD-1518] Fix bad areHeaderTipsSyncedMaxTimeDifference value.

* [NOD-1518] Add missing string for CmdRequestIBDBlocks.

* [NOD-1518] Fix bad check for SyncStateMissingBlockBodies.

* [NOD-1518] Fix bad timeout durations in tests.

* [NOD-1518] Fix IBD blocks not calling OnNewBlock.

* [NOD-1518] Change when IBD finishes.

* [NOD-1518] Properly clone utxoDiffChild.

* [NOD-1518] Fix merge errors.

* [NOD-1518] Move call to LogBlock into OnNewBlock.

* [NOD-1518] Return "not implemented" in unimplemented RPC handlers.

* [NOD-1518] Extract cloning of hashes to a method over DomainHash.

* [NOD-1518] Use isHeaderOnlyBlock.

* [NOD-1518] Use constants.TransactionVersion.

* [NOD-1518] Break immediately if we reached the virtual in SelectedChildIterator.

* [NOD-1518] Don't stage nil utxoDiffChild.

* [NOD-1518] Properly check the genesis hash in CalculatePastUTXOAndAcceptanceData.

* [NOD-1518] Explain why we break on current == nil in countSubtrees.

* [NOD-1518] Add a comment explaining why we check against StatusValid in resolveSyncState.

Co-authored-by: Mike Zak <feanorr@gmail.com>
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2020-11-12 15:19:39 +02:00
Ori Newman
7a7821e1c8 [NOD-1313] Refactor AddressManager (#918) (#1049)
* [NOD-1313] Refactor AddressManager (#918)

* [NOD-1313] Refactor AddressManager.

* [NOD-1313] Remove old tests. Minor improvements, fixes.

* [NOD-1313] After merge fixes. Fix import cycle.

* [NOD-1313] Integration tests fixes.

* [NOD-1313] Allocate new slice for the returned key.

* [NOD-1313] AddressManager improvements and fixes.

* Move local and banned addresses to separate lists.
* Move AddressManager config to the separate file.
* Add LocalAddressManager.
* Remove redundant KnownAddress structure.
* Restore local addresses functionality.
* Call initListeners from the LocalAddressManager.
* AddressManager minor improvements and fixes.

* [NOD-1313] Minor fixes.

* [NOD-1313] Implement HandleGetPeerAddresses. Refactoring.

* [NOD-1313] After-merge fixes.

* [NOD-1313] Minor improvements.

* AddressManager: added BannedAddresses() method.
* AddressManager: HandleGetPeerAddresses() add banned addresses
  separately.
* AddressManager: remove addressEntry redundant struct.
* ConnectionManager: checkOutgoingConnections() minor improvements and
  fixes.
* Minor refactoring.
* Minor fixes.

* [NOD-1313] GetPeerAddresses RPC message update

* GetPeerAddresses RPC: add BannedAddresses in a separate field.
* Update protobuf.

* [NOD-1534] Update messages.pb.go

Co-authored-by: Kirill <gammerxpower@gmail.com>
2020-11-12 14:40:41 +02:00
Svarog
37fbdcb453 [NOD-1526] Restore txscript tests (#1019)
* [NOD-1526] Fix compilation errors

* [NOD-1526] Make MsgTx.PayloadHash non-pointer

* [NOD-1526] Fixed many tests

* [NOD-1526] Fix reference_test.go

* [NOD-1526] Removed last instances of appmessage in consensus

* [NOD-1526] No need to check for subnetwork
2020-11-12 10:22:17 +02:00
Svarog
135ffbd4f2 [NOD-1529] Add getters + AddBlock to TestConsensus (#1025)
* [NOD-1529] Add all stores and processes to consensus, and add access to TestConsensus

* [NOD-1529] Move the getters of TestConsensus to separate file

* [NOD-1529] Add AddBlock to TestConsensus

* [NOD-1529] Update NewTestConsensus to be more all-encompassing

* [NOD-1529] Remove test directory in teardown

* [NOD-1529] Add ForAllNets function

* [NOD-1529] Add comment
2020-11-11 12:31:13 +02:00
Ori Newman
4736213ba4 [NOD-1528] Make data stores copy data on stage (#1020)
* [NOD-1528] Make data stores copy data on stage

* [NOD-1528] Add proto objects to serialize consensus state objects

* [NOD-1528] Fix receiver names

* [NOD-1528] Add copy to block store and utxo diff staging

* [NOD-1528] Return errors where needed
2020-11-10 18:32:42 +02:00
oudeis
8290fadd3a [NOD-1521] use staticcheck (#1015)
* [NOD-1521] Use static check as part of jenkins to check for swallowed errors

* [NOD-1521] added staticcheck installation

* [NOD-1521] Fix static check errors

Co-authored-by: Ori Newman <orinewman1@gmail.com>
2020-11-10 17:59:35 +02:00
Ori Newman
23c1ea6c31 [NOD-1525] Implement headers first ibd (#1017)
* [NOD-1525] Implement headers first IBD

* [NOD-1525] Fix proto translators

* [NOD-1525] Register missing flows

* [NOD-1525] Rename SyncStateNormal->SyncStateRelay, simplify IBD peer selection code and get rid of panic in FinishIBD

* [NOD-1525] Remove redundant methods from interface
2020-11-10 16:14:51 +02:00
Ori Newman
31c5264430 [NOD-1527] Allow processing headers while in missing UTXO set sync state (#1018)
* [NOD-1527] Allow processing headers while in missing UTXO set sync state

* [NOD-1527] Add isHeaderOnlyBlock function
2020-11-10 14:43:18 +02:00
Ori Newman
32da4440ba [NOD-1495] Disallow non-native transactions (#988)
* [NOD-1495] Disallow non-native transactions

* [NOD-1495] Use deserializeUTXOSetBytes

* [NOD-1495] Delete checkNoNonNativeTransactions

* [NOD-1495] Invert condition in checkTransactionPayload

Co-authored-by: Mike Zak <feanorr@gmail.com>
2020-11-09 17:15:16 +02:00
Svarog
e7a61c7edf [NOD-1524] Add lock to consensus (#1014) 2020-11-09 16:21:09 +02:00
Svarog
6db337c8c5 [NOD-1519] Add TestAPI for BuildBlockWithParents (#1011)
* [NOD-1519] Separate BlockBuilder and BlockProcessor

* [NOD-1519] Wire blockBuilder properly + implement buildBlockWithParents

* [NOD-1519] Added testapi package, TestConsensus interface, and TestConsensus factory

* [NOD-1519] Add comments

* [NOD-1519] Separate TestBlockBuilder out of BlockBuilder

* [NOD-1519] TestBlockBuilder should also implement BlockBuilder

* [NOD-1519] Add NewTestConsensus to factory interface
2020-11-09 12:02:42 +02:00
Svarog
2282e36196 [NOD-1522] Add IsEqual to SubnetworkID (#1012)
* [NOD-1519] Add IsEqual to SubnetworkID

* [NOD-1522] Added comment
2020-11-09 10:52:57 +02:00
oudeis
72b5832f30 Update to version 0.8.0 2020-11-09 07:07:30 +00:00
Ori Newman
9a344152aa [NOD-1517] Properly initialize consensus with Genesis block (#1009)
* [NOD-1517] Properly initialize consensus with Genesis block

* [NOD-1517] Remove redundant AddHeaderTip

* [NOD-1517] Don't return nil from dbHash<->DomainHash converters

* [NOD-1517] Use pointer receivers

* [NOD-1517] Use domain block in dagParams

* [NOD-1517] Remove boolean from SelectedTip

* [NOD-1517] Rename hasHeader to isHeadersOnlyBlock

* [NOD-1517] Add comment

* [NOD-1517] Change genesis version

* [NOD-1517] Rename TestNewFactory->TestNewConsensus
2020-11-08 15:17:20 +02:00
Svarog
281944762d [NOD-1500] Glue between domain and application (#1007)
* [NOD-1500] Added Domain type and Constructor

* [NOD-1500] Replaced dag+txpool with domain in flowContext

* [NOD-1500] Replaced dag+txpool with domain in flowContext

* [NOD-1500] Converters: domain objects from/to appmessage

* [NOD-1500] Convert hashes to DomainHashes in appmessages

* [NOD-1500] Remove references to daghash in dagconfig

* [NOD-1500] Fixed all appmessage usages of hashes

* [NOD-1500] Update all RPC to use domain

* [NOD-1500] Big chunk of protocol flows re-wired to domain

* [NOD-1500] Finished re-wiring all protocol flows to new Domain

* [NOD-1500] Fix some mempool and kaspaminer compilation errors

* [NOD-1500] Deleted util/{block,tx,daghash} and dbaccess

* [NOD-1500] util.CoinbaseTransactionIndex -> transactionhelper.CoinbaseTransactionIndex

* [NOD-1500] Fix txsigner

* [NOD-1500] Removed all references to util/subnetworkid

* [NOD-1500] Update RpcGetBlock related messages

* [NOD-1500] Many more compilation fixes

* [NOD-1500] Return full list of missing blocks for orphan resolution

* [NOD-1500] Fixed handshake

* [NOD-1500] Fixed flowcontext compilation

* [NOD-1500] Update users of StartIBDIfRequired to handle error

* [NOD-1500] Removed some more fields from RPC

* [NOD-1500] Fix the getBlockTemplate flow

* [NOD-1500] Fix HandleGetCurrentNetwork

* [NOD-1500] Remove redundant code

* [NOD-1500] Remove obsolete notifications

* [NOD-1500] Split MiningManager and Consensus to separate fields in Domain

* [NOD-1500] Update two wrong references to location of txscript

* [NOD-1500] Added comments

* [NOD-1500] Fix some tests

* [NOD-1500] Removed serialization logic from appmessage

* [NOD-1500] Rename database/serialization/messages.proto to dbobjects.proto

* [NOD-1500] Delete integration tests

* [NOD-1500] Remove txsort

* [NOD-1500] Fix tiny bug

* [NOD-1500] Remove rogue dependency on bchd

* [NOD-1500] Some stylistic fixes
2020-11-08 11:55:54 +02:00
Ori Newman
e3be67c3c7 [NOD-1516] Implement isBlockInHeaderPruningPointFuture (#1006) 2020-11-05 14:18:50 +02:00
Ori Newman
215ab512cd [NOD-1515] Allow processing the pruning point while in SyncStateMissingUTXOSet (#1005) 2020-11-05 12:39:39 +02:00
stasatdaglabs
d0fc728c23 [NOD-1514] Allow full block validation during IBD. (#1004) 2020-11-05 12:18:16 +02:00
stasatdaglabs
1c710daf98 [NOD-1501] Finish implementing GetSyncInfo (#1002)
* [NOD-1501] Rename IsBlockInHeaderPruningPointFutureAndVirtualPast to IsBlockInHeaderPruningPointFuture.

* [NOD-1501] Create syncinfo.go.

* [NOD-1501] Implement resolveSyncState.

* [NOD-1501] Fix ChooseSelectedParent.

* [NOD-1501] Fix merge errors.

* [NOD-1501] Finish implementing getSyncState.

* [NOD-1501] Fix bad equality check.

* [NOD-1501] Fix merge errors.

* [NOD-1501] Pass targetTimePerBlock as int64 milliseconds.
2020-11-05 11:50:59 +02:00
Ori Newman
5566aaf95a [NOD-1512] Implement utxo deserialization (#1003)
* [NOD-1512] Implement UTXO set deserialization

* [NOD-1512] Remove redundant file

* [NOD-1512] Don't use big endian for serialization

* [NOD-1512] Use Read/Write element

* [NOD-1512] Unexport ReadElement

* [NOD-1512] Fix StageVirtualUTXOSet

* [NOD-1512] Get rid of dagParams in consensusStateManager

* [NOD-1512] Get rid of dagParams in consensusStateManager
2020-11-05 10:59:49 +02:00
Ori Newman
52c73d3a08 [NOD-1511] Implement missingBlockBodyHashes (#1000)
* [NOD-1511] Implement missingBlockBodyHashes

* [NOD-1511] Rename selectedparentiterator.go to blockiterator.go

* [NOD-1511] Fix condition

* [NOD-1511] Simplify missingBlocks logic
2020-11-05 10:11:42 +02:00
Ori Newman
baf8d25656 [NOD-1510] Update reachability reindex root based on header tips selected tip (#1001) 2020-11-04 16:55:59 +02:00
Ori Newman
2eb0d946e5 [NOD-1509] Implement StageVirtualUTXOSet (#999)
* [NOD-1509] Implement StageVirtualUTXODiff

* [NOD-1509] Fix HasUTXOByOutpoint, get rid of database.ErrNotFound, and fix errors
2020-11-04 16:30:08 +02:00
Elichai Turkel
ce95c6dc9d [NOD-1464] difficulty refactoring (#986)
* Refactor the Difficulty adjastment to the new design

* Add the necessary things to the factory for the DAA constructor

* Add missing dagParams to difficultymanager constructor

* Use DAGTraversal for blueBlockWindow, and don't store PowMax compactBits
2020-11-04 11:35:29 +02:00
Elichai Turkel
6a46cb2be6 [NOD-1503] Pruning Manager (#994)
* Update pruningmanager interface

* Add a ProtoUTXOSet to hashserialization

* Update miningmanager with all the necessary stores and managers

* Implement mining manager

* Prune P.AC not in V.Past

* PruningManager fix all review comments
2020-11-04 10:29:45 +02:00
Ori Newman
f06dc7ea90 [NOD-1508] Implement VirtualUTXOSetIterator (#998) 2020-11-03 18:38:57 +02:00
Ori Newman
7f2ef708a6 [NOD-1506] Implement SetPruningPointUTXOSet (#996)
* [NOD-1506] Implement SetPruningPointUTXOSet

* [NOD-1506] Rename ErrHeaderlessBlockInIBD->ErrMissingBlockHeaderInIBD

* [NOD-1506] Change virtualHeaderHash
2020-11-03 18:24:45 +02:00
Ori Newman
4ab2e0d498 [NOD-1507] Implement model.DBTransaction (#997) 2020-11-03 18:08:02 +02:00
stasatdaglabs
ca9161024f [NOD-1501] Add SyncManager-related interfaces (#995)
* [NOD-1501] Add logAndMeasureExecutionTime to SyncManager methods.

* [NOD-1501] Implement antiPastHashesBetween.

* [NOD-1501] Implement createBlockLocator.

* [NOD-1501] Implement findNextBlockLocatorBoundaries.

* [NOD-1501] Rename IsBlockHeaderInPruningPointFutureAndVirtualPast to IsBlockInHeaderPruningPointFutureAndVirtualPast.

* [NOD-1501] Add GetSyncInfo.

* [NOD-1501] Make go vet happy.

* [NOD-1501] Rename sync states.

* [NOD-1501] Move maxHashesInGetHashesBetween to antipast.go.

* [NOD-1501] Rename maxHashesInAntiPastHashesBetween.

* [NOD-1501] Implement LowestChainBlockAboveOrEqualToBlueScore.

* [NOD-1501] Fix bad variable name.

* [NOD-1501] Fix LowestChainBlockAboveOrEqualToBlueScore.

* [NOD-1501] Clarify LowestChainBlockAboveOrEqualToBlueScore.
2020-11-03 17:08:52 +02:00
Svarog
8dc246a2a7 [NOD-1498] Consensus State Store (#992)
* [NOD-1420] Start working on ConsensusStateManager. Might be redundant due to recent changes

* [NOD-1420] Convert model to externalapi in utxo_algerbra helpers

* [NOD-1420] Add UTXO-diff algebra

* [NOD-1420] Prepare skeleton of calculateAcceptanceDataAndMultiset

* [NOD-1420] Added skeleton for AddBlockToVirtual

* [NOD-1420] Implement PopulateTransactionWithUTXOEntries

* [NOD-1420] Implement restorePastUTXO

* [NOD-1420] Implement finality check

* [NOD-1420] Move handling of tips to consensusStateManager

* [NOD-1420] Implement calculateAcceptanceDataAndMultiset

* [NOD-1420] Start implementing resolveBlockStatus

* [NOD-1420] Implement resolveBlockStatus

* [NOD-1420] Update related fields in end of resolveSingleBlockStatus

* [NOD-1420] Start working on selectVirtualParents

* [NOD-1420] Implemented BlockHeap

* [NOD-1420] Implement selectVirtualParents

* [NOD-1420] Implement updateVirtual

* [NOD-1420] Added comments where they were missing

* [NOD-1420] Place all consensusStateManager functions in correct files

* [NOD-1420] Return the missing outpoints from populateTransactionWithUTXOEntriesFromVirtualOrDiff

* [NOD-1420] Outpoint.ID -> TransactionID

* [NOD-1420] Fix Stringer tests

* [NOD-1420] Copy hash.FromString into utils

* [NOD-1420] SetParents should return an error

* [NOD-1420] Remove all reachabilityManager references from consensusStateManager

* [NOD-1420] Remove VirtualData. Get the info from the stores where needed

* [NOD-1420] Invert parameters to IsAncestorOf

* [NOD-1420] Use model.AcceptanceData

* [NOD-1420] Don't return accumulatedMassBefore in error cases

* [NOD-1420] Don't expect store functions to return nil when the requested data was found - instead add HasXXX functions

* [NOD-1420] addTransactionToMultiset sets isCoinbase properly

* [NOD-1420] expected hash string length is externalapi.DomainHashSize * 2

* [NOD-1420] Rename reachabilityTree -> reachabilityManager + updateReindexRoot if isNextVirtualSelectedParent

* [NOD-1420] ValidateCoinbaseTransaction in csm.verifyAndBuildUTXO

* [NOD-1420] Re-write HasUTXODiffChild

* [NOD-1420] delete past_utxo.go.bak

* [NOD-1420] Implement validateCoinbaseTransaction in CSM

* [NOD-1420] Implemented missing functionality in ValidateTransactionAndPopulateWithConsensusData

* [NOD-1420] Moved merge depth logic to MergeDepthManager

* [NOD-1420] Add logs

* [NOD-1498] Implement tips-related methods of consensusStateStore

* [NOD-1498] Implement consensusStateStore virtualDiffParents functionality

* [NOD-1498] Implement ConsensusStateStore UTXO-Set part

* [NOD-1498] Implement rest of consensusStateStore methods

* [NOD-1498] Use io.ReadFull instead of r.Read

* [NOD-1498] Added comments

* [NOD-1498] Move utxo serialization to protobufs

* [NOD-1498] Add comments

* [NOD-1498] Minor fixes in ConsensusStateStore

* [NOD-1498] Use empty bucket key + simplify serializeUTXOEntry
2020-11-03 14:42:26 +02:00
Ori Newman
c7f2de73df [NOD-1502] Implement RestorePastUTXOSetIterator (#993)
* [NOD-1502] Implement RestorePastUTXOSetIterator

* [NOD-1502] Rename newUtxoSetIterator->newUTXOSetIterator
2020-11-02 18:36:55 +02:00
Ori Newman
3f979399b1 [NOD-1478] Implement coinbase manager (#990)
* [NOD-1478] Implement coinbase manager

* [NOD-1478] Add arguments to factory

* [NOD-1478] Remove validation functions from CoinbaseManager

* [NOD-1478] Remove ValidateCoinbaseTransactionInContext

* [NOD-1478] Add consts to constants package

* [NOD-1478] Move scriptPublicKeyMaxLength to constants.go
2020-11-02 16:51:47 +02:00
Ori Newman
2abd4a274b [NOD-1496] Implement headers only verification (#987)
* [NOD-1496] Implement headers only verification

* [NOD-1496] Add checkParentsExist

* [NOD-1496] Stage block statuses in block processor

* [NOD-1496] Rename AddBlock->AddHeaderTip

* [NOD-1496] Return early from validateAndInsertBlock on header only and put ValidateProofOfWorkAndDifficulty inside validateBlock
2020-11-02 16:30:59 +02:00
Svarog
c5707f64dc [NOD-1420] Implement consensusStateManager (#985)
* [NOD-1420] Start working on ConsensusStateManager. Might be redundant due to recent changes

* [NOD-1420] Convert model to externalapi in utxo_algerbra helpers

* [NOD-1420] Add UTXO-diff algebra

* [NOD-1420] Prepare skeleton of calculateAcceptanceDataAndMultiset

* [NOD-1420] Added skeleton for AddBlockToVirtual

* [NOD-1420] Implement PopulateTransactionWithUTXOEntries

* [NOD-1420] Implement restorePastUTXO

* [NOD-1420] Implement finality check

* [NOD-1420] Move handling of tips to consensusStateManager

* [NOD-1420] Implement calculateAcceptanceDataAndMultiset

* [NOD-1420] Start implementing resolveBlockStatus

* [NOD-1420] Implement resolveBlockStatus

* [NOD-1420] Update related fields in end of resolveSingleBlockStatus

* [NOD-1420] Start working on selectVirtualParents

* [NOD-1420] Implemented BlockHeap

* [NOD-1420] Implement selectVirtualParents

* [NOD-1420] Implement updateVirtual

* [NOD-1420] Added comments where they were missing

* [NOD-1420] Place all consensusStateManager functions in correct files

* [NOD-1420] Return the missing outpoints from populateTransactionWithUTXOEntriesFromVirtualOrDiff

* [NOD-1420] Outpoint.ID -> TransactionID

* [NOD-1420] Fix Stringer tests

* [NOD-1420] Copy hash.FromString into utils

* [NOD-1420] SetParents should return an error

* [NOD-1420] Remove all reachabilityManager references from consensusStateManager

* [NOD-1420] Remove VirtualData. Get the info from the stores where needed

* [NOD-1420] Invert parameters to IsAncestorOf

* [NOD-1420] Use model.AcceptanceData

* [NOD-1420] Don't return accumulatedMassBefore in error cases

* [NOD-1420] Don't expect store functions to return nil when the requested data was found - instead add HasXXX functions

* [NOD-1420] addTransactionToMultiset sets isCoinbase properly

* [NOD-1420] expected hash string length is externalapi.DomainHashSize * 2

* [NOD-1420] Rename reachabilityTree -> reachabilityManager + updateReindexRoot if isNextVirtualSelectedParent

* [NOD-1420] ValidateCoinbaseTransaction in csm.verifyAndBuildUTXO

* [NOD-1420] Re-write HasUTXODiffChild

* [NOD-1420] delete past_utxo.go.bak

* [NOD-1420] Implement validateCoinbaseTransaction in CSM

* [NOD-1420] Implemented missing functionality in ValidateTransactionAndPopulateWithConsensusData

* [NOD-1420] Moved merge depth logic to MergeDepthManager

* [NOD-1420] Add logs
2020-11-02 16:18:53 +02:00
stasatdaglabs
62bb841e89 [NOD-1497] Add additional methods to consensus' external API (#991)
* [NOD-1497] Add missing APIs.

* [NOD-1497] Rename some new APIs.

* [NOD-1497] Implement getBlock and getBlockHeader.

* [NOD-1497] Implement getPruningPointUTXOSet.

* [NOD-1497] Implement getSelectedParent.

* [NOD-1497] Implement getBlockInfo.

* [NOD-1497] Fix merge errors.

* [NOD-1497] Implement syncManager-related functions in consensus.

* [NOD-1497] Implement SetPruningPointUTXOSet in consensus.

* [NOD-1497] Add dependency from syncManager to dagTraversalManager.

* [NOD-1497] Move IsBlockHeaderInPruningPointFutureAndVirtualPast to syncManager.

* [NOD-1497] Rename lowHigh to lowHash.
2020-11-02 13:24:03 +02:00
stasatdaglabs
23cccb6396 [NOD-1497] Add additional methods to consensus external API (#989)
* [NOD-1497] Add missing APIs.

* [NOD-1497] Rename some new APIs.

* [NOD-1497] Add fields to BlockInfo.

* [NOD-1497] Add comments over BlockInfo and BlockLocator.

* [NOD-1497] Rename GetSelectedParent to GetVirtualSelectedParent.

* [NOD-1497] Add SetPruningPointUTXOSet.

* [NOD-1497] Rename GetHashesAbovePruningPoint to GetMissingBlockBodyHashes.

* [NOD-1497] Fix rename error.
2020-11-02 12:05:33 +02:00
Elichai Turkel
87ad9dfc59 [NOD-1423] Refactor the miner and mempool (#981)
* Make TransactionOutputEstimatedSerializedSize public

* Update the mempool interface

* Refactor the mempool to the new design

* Refactor txselection and blocktemplatebuilder to the new design

* Update the mining manager

* Update the MiningManager factory

* Mempool: fix requested changes
2020-11-01 18:27:49 +02:00
Ori Newman
c59adaa4db [NOD-1494] Remove transactions from data store delete (#984)
* [NOD-1494] Remove transactions from data store delete

* [NOD-1494] Remove redundant underscores
2020-10-29 18:15:11 +02:00
Ori Newman
14fbe50636 [NOD-1493] Implement serialization in data stores (#983)
* [NOD-1493] Implement serialization in data stores

* [NOD-1493] Remove redundant functions

* [NOD-1493] Use bluesAnticoneSizesToDBBluesAnticoneSizes inside BlockGHOSTDAGDataToDBBlockGHOSTDAGData
2020-10-29 17:39:35 +02:00
Elichai Turkel
d3ede3a46f Add new ErrMissingTxOut and ErrInvalidTransactionsInNewBlock errors (#972)
* Add new ErrMissingTxOut error

* Add tests for ruleError wrapping

* Update consensus to use new ErrMissingTxOut type where appropriate

* Add new ErrInvalidTransactionsInNewBlock error

* Add wrapping tests for ErrInvalidTransactionsInNewBlock

* Fix review suggestions

* Fix broken serialization (add pointer indirection)
2020-10-29 16:59:00 +02:00
stasatdaglabs
01c7c67aed [NOD-1493] Implement serialization in AcceptanceDataStore, BlockRelationStore, BlockStatusStore, and BlockStore (#982)
* [NOD-1493] Add DbHashToDomainHash and DomainHashToDbHash.

* [NOD-1493] Use DbHashToDomainHash and DomainHashToDbHash.

* [NOD-1493] Begin implementing serializeAcceptanceData.

* [NOD-1493] Extract serialization blockHeader logic to serialization.

* [NOD-1493] Extract serialization acceptance data logic to serialization.

* [NOD-1493] Implement acceptance data serialization/deserialization.

* [NOD-1493] Implement transaction serialization/deserialization.

* [NOD-1493] Implement outpoint serialization/deserialization.

* [NOD-1493] Implement transaction ID serialization/deserialization.

* [NOD-1493] Implement subnetwork ID serialization/deserialization.

* [NOD-1493] Implement block relation serialization/deserialization.

* [NOD-1493] Implement block status serialization/deserialization.

* [NOD-1493] Implement block serialization/deserialization.

* [NOD-1493] Implement serialization/deserialization in BlockRelationStore.

* [NOD-1493] Implement serialization/deserialization in BlockStatusStore.

* [NOD-1493] Implement serialization/deserialization in BlockStore.

* [NOD-1493] Make go vet happy.

* [NOD-1493] Use DomainHashesToDbHashes.
2020-10-29 16:49:05 +02:00
Elichai Turkel
971d50b684 [NOD-1418] Implement DAG Traversal (#953)
* Implement DAG Traversal

* Update the DAGTraversalManager interface
2020-10-29 16:48:41 +02:00
stasatdaglabs
9cf1557c37 [NOD-1493] Implement types for serialization (#980)
* [NOD-1493] Add DbAcceptanceData.

* [NOD-1493] Add DbBlockRelations.

* [NOD-1493] Add DbBlockStatus.

* [NOD-1493] Add DbBlockGhostdagData.

* [NOD-1493] Add DbMultiset.

* [NOD-1493] Add DbPruningPoint.

* [NOD-1493] Add DbUtxoSet.

* [NOD-1493] Add DbReachabilityData.

* [NOD-1493] Add DbReachabilityReindexRoot.

* [NOD-1493] Add DbUtxoDiff.

* [NOD-1493] Add DbUtxoDiffChild.

* [NOD-1493] Make sure everything is lowercase.

* [NOD-1493] Add DbHash.

* [NOD-1493] Fix BlockHeaderStore.
2020-10-29 12:18:18 +02:00
stasatdaglabs
126e2e49bb [NOD-1493] Implement serialization/deserialization inside BlockHeaderStore (#979)
* [NOD-1492] Rename dbmanager to database.

* [NOD-1492] Write messages.proto for DbBlock and DbTransaction.

* [NOD-1492] Implement serializeHeader.

* [NOD-1492] Implement deserializeHeader.
2020-10-29 11:15:14 +02:00
stasatdaglabs
c88266afed [NOD-1492] Implement GHOSTDAGDataStore, MultisetStore, PruningStore, ReachabilityDataStore, and UTXODiffStore (#977)
* [NOD-1492] Implement GHOSTDAGDataStore.

* [NOD-1492] Implement MultisetStore.

* [NOD-1492] Implement PruningStore.

* [NOD-1492] Implement ReachabilityDataStore.

* [NOD-1492] Implement UTXODiffStore.

* [NOD-1492] Pluralize the multiset bucket name.

* [NOD-1492] In PruningPoint and PruningPointSerializedUTXOSet, don't use IsStaged.

* [NOD-1492] Leave pruning point serialization/deserialization for future implementation.

* [NOD-1492] Leave reachability reindex root serialization/deserialization for future implementation.

* [NOD-1492] Leave utxo diff child serialization/deserialization for future implementation.

* [NOD-1492] Add Serialize() to Multiset.

* [NOD-1492] Also check serializedUTXOSetStaging in IsStaged.

* [NOD-1492] Also check utxoDiffChildStaging in IsStaged.

* [NOD-1492] Fix UTXODiffStore.Delete.
2020-10-28 17:13:14 +02:00
Ori Newman
7402f3fb0e [NOD-1492] Implement some data stores (#978)
* [NOD-1492] Implement some data stores

* [NOD-1492] Remove pointers to acceptance data

* [NOD-1492] Fix receiver names

* [NOD-1492] Implement delete for acceptanceDataStore

* [NOD-1492] In blockRelationStore rename IsAnythingStaged to IsStaged

* [NOD-1492] Rename bucket name
2020-10-28 16:30:55 +02:00
Ori Newman
eae8bce941 [NOD-1491] Implement block headers store (#976)
* [NOD-1491] Implement block headers store

* [NOD-1491] Don't commit transaction and delete from staging too
2020-10-28 14:34:00 +02:00
Ori Newman
8c0275421a [NOD-1490] Implement database manager (#975) 2020-10-28 12:59:34 +02:00
Ori Newman
a436b30ebf [NOD-1417] Implement reachability (#964)
* [NOD-1417] Implement reachability

* [NOD-1417] Rename package name

* [NOD-1417] Add UpdateReindexRoot to interface api

* [NOD-1417] Remove redundant type

* [NOD-1417] Rename reachabilityTreeManager/reachabilityTree to reachabilityManager

* [NOD-1417] Fix typo

* [NOD-1417] Remove redundant copyright message

* [NOD-1417] Fix comment
2020-10-28 12:19:50 +02:00
Ori Newman
a132f55302 [NOD-1477] Add selected parent to merge set (#967)
* [NOD-1477] Add selected parent to merge set

* [NOD-1469] Init BluesAnticoneSizes

* [NOD-1477] Undo changes in hash comparison
2020-10-28 11:44:08 +02:00
Ori Newman
be56fb7e8b [NOD-1488] Get rid of dbaccess (#973)
* [NOD-1488] Get rid of dbaccess

* [NOD-1488] Rename dbwrapper to dbmanager

* [NOD-1488] Create DBWriter interface

* [NOD-1488] Fix block header store

* [NOD-1488] Rename dbwrapper.go to dbmanager.go
2020-10-28 11:34:06 +02:00
stasatdaglabs
4fbe130592 [NOD-1489] Add BlockHeaderStore (#974)
* [NOD-1489] Add BlockHeaderStore.

* [NOD-1489] Use BlockHeaderStore.
2020-10-28 10:35:18 +02:00
Ori Newman
ed6d8243ef [NOD-1487] Implement dagtopology's IsAncestorOfAny and IsInSelectedParentChainOf (#971)
* [NOD-1487] Implement dagtopology's IsAncestorOfAny and IsInSelectedParentChainOf

* [NOD-1487] Fix IsInSelectedParentChainOf to use reachabilityTree
2020-10-27 17:46:30 +02:00
Ori Newman
03790ad8a2 [NOD-1469] Implement past median time (#968)
* [NOD-1469] Implement past median time

* [NOD-1469] Move BlueWindow to DAGTraversalManager
2020-10-27 17:45:47 +02:00
stasatdaglabs
97b5b0b875 [NOD-1416] Implement BlockProcessor. (#969)
* [NOD-1416] Add entry/exit logs to all the functions.

* [NOD-1416] Build some scaffolding inside BlockProcessor.

* [NOD-1416] Implement selectParentsForNewBlock.

* [NOD-1416] Implement validateBlock.

* [NOD-1476] Fix merge errors.

* [NOD-1416] Move buildBlock and validateAndInsertBlock to separate files.

* [NOD-1416] Begin implementing buildBlock.

* [NOD-1416] Implement newBlockDifficulty.

* [NOD-1416] Add skeletons for the rest of the buildBlock functions.

* [NOD-1416] Implement newBlockUTXOCommitment.

* [NOD-1416] Implement newBlockAcceptedIDMerkleRoot.

* [NOD-1416] Implement newBlockHashMerkleRoot.

* [NOD-1416] Fix bad function call.

* [NOD-1416] Implement validateHeaderAndProofOfWork and validateBody.

* [NOD-1416] Use ValidateProofOfWorkAndDifficulty.

* [NOD-1416] Finish validateAndInsertBlock.

* [NOD-1416] Implement newBlockHashMerkleRoot.

* [NOD-1416] Implement newBlockAcceptedIDMerkleRoot.

* [NOD-1416] Fix a comment.

* [NOD-1416] Implement newBlockCoinbaseTransaction.

* [NOD-1416] Add VirtualBlockHash.

* [NOD-1416] Add ParentHashes and SelectedParent to VirtualData().

* [NOD-1416] Make go vet happy.

* [NOD-1416] Implement discardAllChanges.

* [NOD-1416] Implement commitAllChanges.

* [NOD-1416] Fix factory.

* [NOD-1416] Make go vet happy.

* [NOD-1416] Format factory.

* [NOD-1416] Pass transactionsWithCoinbase to buildHeader.

* [NOD-1416] Call VirtualData() from buildHeader.

* [NOD-1416] Fix a typo.

* [NOD-1416] Fix in-out-of-context/header-body confusion.

* [NOD-1416] Extract LogAndMeasureExecutionTime.

* [NOD-1416] Add a comment about LogAndMeasureExecutionTime.

* [NOD-1416] Simplify discardAllChanges and commitAllChanges.

* [NOD-1416] If in-context validations fail, discard all changes and store the block with StatusInvalid.

* [NOD-1416] Add a comment above Store.

* [NOD-1416] Use errors.As instead of errors.Is.
2020-10-27 17:24:15 +02:00
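
The final bullet ("Use errors.As instead of errors.Is") deserves a short illustration: errors.As matches an error by type anywhere in a wrapped chain and extracts it, whereas errors.Is only compares against a specific sentinel value. A minimal standalone sketch, with RuleError as a hypothetical stand-in for the consensus rule-error type:

```go
// Minimal sketch of the errors.As pattern referenced above. RuleError is a
// hypothetical stand-in; the point is that errors.As matches by type and
// extracts the value, while errors.Is matches a sentinel by identity.
package main

import (
	"errors"
	"fmt"
)

type RuleError struct{ message string }

func (e RuleError) Error() string { return e.message }

func validate() error {
	// Wrap the rule error so callers must unwrap the chain to reach it.
	return fmt.Errorf("in-context validation: %w", RuleError{"bad merkle root"})
}

func main() {
	err := validate()
	var ruleErr RuleError
	if errors.As(err, &ruleErr) {
		// A rule violation: store the block with StatusInvalid rather
		// than treating it as an unexpected internal failure.
		fmt.Println("rule violation:", ruleErr.message)
	}
}
```
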
Ori Newman
f62183473c [NOD-1486] Make coinbase mass and size 0 (#970) 2020-10-27 17:14:02 +02:00
Ori Newman
aeb4b96560 [NOD-1451] Implement Validators (#966)
* [NOD-1451] Implement block validator

* [NOD-1451] Implement block validator

* [NOD-1451] Fix merge errors

* [NOD-1451] Implement block validator

* [NOD-1451] Implement checkTransactionInIsolation

* [NOD-1451] Copy txscript to validator

* [NOD-1451] Change txscript to new design

* [NOD-1451] Add checkTransactionInContext

* [NOD-1451] Add checkBlockSize

* [NOD-1451] Add error handling

* [NOD-1451] Implement checkTransactionInContext

* [NOD-1451] Add checkTransactionMass placeholder

* [NOD-1451] Finish validators

* [NOD-1451] Add comments and stringers

* [NOD-1451] Return model.TransactionValidator interface

* [NOD-1451] Premake rule errors for each "code"

* [NOD-1451] Populate transaction mass

* [NOD-1451] Rename functions

* [NOD-1451] Always use skipPow=false

* [NOD-1451] Renames

* [NOD-1451] Remove redundant types from WriteElement

* [NOD-1451] Fix error message

* [NOD-1451] Add checkTransactionPayload

* [NOD-1451] Add ValidateProofOfWorkAndDifficulty to block validator interface

* [NOD-1451] Move stringers to model

* [NOD-1451] Fix error message
2020-10-26 17:33:39 +02:00
stasatdaglabs
b413760136 [NOD-1476] Make further design changes (#965)
* [NOD-1476] Add dependency to BlockRelationStore in BlockProcessor.

* [NOD-1476] Add dependency to BlockStatusStore in BlockValidator.

* [NOD-1476] Add dependency to GHOSTDAGManager in BlockValidator.

* [NOD-1476] Rename CalculateConsensusStateChanges to AddBlockToVirtual.

* [NOD-1476] Remove RestoreDiffFromVirtual.

* [NOD-1476] Remove RestorePastUTXOSet.

* [NOD-1476] Add dependency to GHOSTDAGDataStore in ConsensusStateManager.

* [NOD-1476] Rename CalculateAcceptanceDataAndUTXOMultiset to just CalculateAcceptanceData.

* [NOD-1476] Remove UTXODiffManager and add dependencies to AcceptanceManager.

* [NOD-1476] Rename CalculateAcceptanceData to CalculateAcceptanceDataAndMultiset.

* [NOD-1476] Add dependency to DAGTopologyManager from ConsensusStateManager.

* [NOD-1476] Add dependency to BlockStore from ConsensusStateManager.

* [NOD-1476] Add dependency to PruningManager from ConsensusStateManager.

* [NOD-1476] Remove unnecessary stuff from ConsensusStateChanges.

* [NOD-1476] Add dependency to UTXODiffStore from ConsensusStateManager.

* [NOD-1476] Add tips to BlockRelationsStore.

* [NOD-1476] Add dependency to BlockRelationsStore from ConsensusStateManager.

* [NOD-1476] Remove Tips() from ConsensusStateStore.

* [NOD-1476] Remove acceptanceManager.

* [NOD-1476] Remove irrelevant functions out of ConsensusStateManager.
2020-10-25 15:19:20 +02:00
stasatdaglabs
45882343e6 [NOD-1475] Implement stage/discard/commit functionality for data structures (#962)
* [NOD-1475] Add Stage, Discard, and Commit methods to all stores.

* [NOD-1475] Simplify interfaces for processes.

* [NOD-1475] Fix GHOSTDAGManager.

* [NOD-1475] Simplify ChooseSelectedParent.

* [NOD-1475] Remove errors from Stage functions.

* [NOD-1475] Add IsStaged to all data structures.

* [NOD-1475] Remove isDisqualified from CalculateConsensusStateChanges.

* [NOD-1475] Add dependency from ConsensusStateManager to BlockStatusStore.

* [NOD-1475] Fix a comment.

* [NOD-1475] Add ReachabilityReindexRoot to reachabilityDataStore.

* [NOD-1475] Fix a comment.

* [NOD-1475] Rename IsStaged to IsAnythingStaged.
2020-10-21 12:37:22 +03:00
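
Taken together, these bullets describe a staged-store pattern: writes accumulate in memory and are flushed atomically on commit. A minimal sketch under that reading; every type and method name below is illustrative, not code from this changeset:

```go
// Illustrative sketch of the Stage/Discard/Commit store pattern. Writes are
// buffered in a staging map and flushed through a database transaction.
package blockstatusstore

type BlockHash [32]byte
type BlockStatus byte

// DBTransaction is an assumed minimal write interface.
type DBTransaction interface {
	Put(key, value []byte) error
}

type blockStatusStore struct {
	staging map[BlockHash]BlockStatus
}

// Stage records a change in memory only. It cannot fail, which is why the
// commit above removes errors from all Stage functions.
func (s *blockStatusStore) Stage(hash *BlockHash, status BlockStatus) {
	s.staging[*hash] = status
}

// IsStaged reports whether any uncommitted changes exist.
func (s *blockStatusStore) IsStaged() bool {
	return len(s.staging) != 0
}

// Discard throws away all staged changes.
func (s *blockStatusStore) Discard() {
	s.staging = make(map[BlockHash]BlockStatus)
}

// Commit writes every staged change through dbTx and clears the staging area.
func (s *blockStatusStore) Commit(dbTx DBTransaction) error {
	for hash, status := range s.staging {
		hash := hash // copy, so hash[:] doesn't alias the loop variable
		if err := dbTx.Put(hash[:], []byte{byte(status)}); err != nil {
			return err
		}
	}
	s.Discard()
	return nil
}
```
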
Svarog
4c1f24da82 [NOD-1466] Move UTXODiffStore from ConsensusStateManager to UTXODiffManager (#961) 2020-10-21 10:19:41 +03:00
stasatdaglabs
8c63835971 [NOD-1461] Make further design changes (#959)
* [NOD-1461] Split blockValidator and TransactionValidator.

* [NOD-1461] Remove feeDataStore.

* [NOD-1461] Move tips out of ConsensusStateManager and into DAGTopologyManager.

* [NOD-1461] Add UTXODiffManager.

* [NOD-1461] Add RestoreDiffFromVirtual.

* [NOD-1461] Add AcceptanceManager.

* [NOD-1461] Replace SetTips with AddTip.

* [NOD-1461] Fix merge errors.

* [NOD-1461] Rename CoinbaseData to DomainCoinbaseData.
2020-10-20 09:35:58 +03:00
stasatdaglabs
a96a5fd2ef [NOD-1462] Simplify consensus external API (#958)
* [NOD-1461] Change the external api interface to not having anything besides DomainTransactions and DomainBlocks.

* [NOD-1462] Move external api types to a separate package.

* [NOD-1462] Clarify which model we're using in miningmanager.

* [NOD-1462] Extract coinbase data to its own struct.

* [NOD-1462] Add a comment above CoinbaseData.

* [NOD-1462] Fix the comment above CoinbaseData.
2020-10-19 17:59:04 +03:00
stasatdaglabs
9a62fae012 [NOD-1458] Rename blockRelationStore.Insert to Update. 2020-10-18 17:34:49 +03:00
stasatdaglabs
81a10e9f89 [NOD-1458] Make further design changes (#956)
* [NOD-1458] Rename RestoreUTXOSet to RestorePastUTXOSet.

* [NOD-1458] Make CalculateAcceptanceDataAndMultiset take BlockGHOSTDAGData and nothing else.

* [NOD-1458] Make ConsensusStateStore's Update take ConsensusStateChanges instead of just UTXODiff.

* [NOD-1458] Add Tips() to ConsensusStateStore.

* [NOD-1458] Make all implementation structs private.

* [NOD-1458] Remove BlockAtDepth and add highHash to ChainBlockAtBlueScore.

* [NOD-1458] Rename CalculateAcceptanceDataAndMultiset to CalculateAcceptanceDataAndUTXOMultiset.

* [NOD-1458] Add a dependency to GHOSTDAGManager from ConsensusStateManager.

* [NOD-1458] Add ChooseSelectedParent to GHOSTDAGManager.

* [NOD-1458] Add DifficultyManager.

* [NOD-1458] Add PastMedianTimeManager.

* [NOD-1458] Add Hash() to Multiset.

* [NOD-1458] Add a dependency to ghostdagManager from blockProcessor.

* [NOD-1458] Add errors to all interfaces that need them.

* [NOD-1458] Uppercasify types in comments.

* [NOD-1458] Fix a bad comment.

* [NOD-1458] Fix a comment.

* [NOD-1458] Rename ChainBlockAtBlueScore to HighestChainBlockBelowBlueScore.

* [NOD-1458] Replace BlockAndTransactionValidator with an anonymous interface.
2020-10-18 12:34:00 +03:00
stasatdaglabs
db475bd511 [NOD-1460] Make the miningmanager package structure similar to consensus package's (#957)
* [NOD-1460] Move the miningmanager interfaces into its model package.

* [NOD-1460] Decouple miningmanager model from appmessage.

* [NOD-1460] Decouple miningmanager model from util.

* [NOD-1460] Make miningmanager implementation structs unexported.
2020-10-18 10:52:41 +03:00
Ori Newman
eef5f27a87 [NOD-1422] Implement GHOSTDAG (#950)
* [NOD-1422] Implement GHOSTDAG

* [NOD-1422] Rename bluest->findSelectedParent

* [NOD-1422] Remove preallocations from MergeSetBlues and add preallocation in candidateBluesAnticoneSizes

* [NOD-1422] Rename blockghostdagdata.go to ghostdag.go
2020-10-14 16:47:04 +03:00
Svarog
790dc74581 [NOD-1457] Pass DomainDBContext to all constructors, instead of passing a general dbContext (#955)
* [NOD-1457] Pass DomainDBContext to all constructors, instead of passing a general dbContext

* [NOD-1457] Add NewTx to DomainDBContext

* [NOD-1457] Added comment
2020-10-14 09:59:27 +03:00
stasatdaglabs
4f36accd81 [NOD-1413] Make some additional interface changes (#954)
* [NOD-1413] Remove /cmd/addblock

* [NOD-1413] Define and implement TransactionValidator.

* [NOD-1413] Make changes to ConsensusStateManager's interface.

* [NOD-1413] Make changes to PruningManager's interface.

* [NOD-1413] Make changes to DAGTraversalManager's interface.

* [NOD-1413] Make changes to MultisetStore's interface.

* [NOD-1413] Make changes to UTXODiffStore's interface.

* [NOD-1413] Make changes to UTXODiffStore's interface harder.

* [NOD-1413] Make changes to AcceptanceDataStore's interface harder.

* [NOD-1413] Make changes to PruningStore's interface.

* [NOD-1413] Delete BlockIndex.

* [NOD-1413] Add FeeDataStore.

* [NOD-1413] Update BlockMessageStore's interface.

* [NOD-1413] Fix interface violations.

* [NOD-1413] Add FeeDataStore to BlockProcessor.

* [NOD-1413] Make go vet happy.

* [NOD-1413] Add missing fields to ConsensusStateChanges.

* [NOD-1413] Add another missing field to ConsensusStateChanges.

* [NOD-1413] Add a reference to blockStore in consensusStateManager.

* [NOD-1413] Add missing methods to UTXODiffStore.

* [NOD-1413] Rename pruningPointStore to pruningStore everywhere.

* [NOD-1413] Remove superfluous parameters from CalculateConsensusStateChanges.

* [NOD-1413] Add missing dependencies to PruningManager.

* [NOD-1413] Remove implementation-y functions from TransactionValidator's interface.

* [NOD-1413] Make go vet happy.

* [NOD-1413] Add a couple of methods to DAGTopologyManager.

* [NOD-1413] Fix a typo in a file name.

* [NOD-1413] Remove non-interface functions from Validator.
2020-10-13 17:55:31 +03:00
stasatdaglabs
04ead57731 [NOD-1413] Remove /cmd/addblock (#951) 2020-10-12 13:23:19 +03:00
stasatdaglabs
e9951bc34a [NOD-1413] Decouple the model package from everything (#949)
* [NOD-1416] Move processes/datastructures interfaces into the model package.

* [NOD-1416] Decouple the model from dbaccess.

* [NOD-1413] Implement DomainBlock and DomainTransaction.

* [NOD-1413] Decouple model from appmessage.

* [NOD-1413] Decouple model from util.

* [NOD-1413] Decouple model from subnetworkid.

* [NOD-1413] Remove an unused const.

* [NOD-1413] Add DomainHash and DomainTransactionID.

* [NOD-1413] Decouple model from daghash.

* [NOD-1413] Decouple model from mstime.

* [NOD-1413] Decouple model from go-secp256k1.

* [NOD-1413] Add a proxy over dbaccess.

* [NOD-1413] Add comments over all added types.

* [NOD-1413] Fix a comment.

* [NOD-1413] Get rid of DomainTime.

* [NOD-1413] Simplify BlockGHOSTDAGData.
2020-10-11 14:32:41 +03:00
Ori Newman
74d13e271e [NOD-1419] Implement DAG topology (#948)
* [NOD-1419] Implement DAG topology

* [NOD-1419] Add isHashInSlice
2020-10-08 14:00:25 +03:00
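
isHashInSlice is presumably a plain linear-scan membership helper used when checking parent/child relations; a sketch under that assumption (the DomainHash type and its Equal method are also assumed):

```go
// Assumed shape of the isHashInSlice helper named in the commit above.
package dagtopologymanager

// DomainHash is a stand-in for the consensus hash type.
type DomainHash [32]byte

// Equal reports whether two hashes hold identical bytes.
func (h *DomainHash) Equal(other *DomainHash) bool {
	return *h == *other
}

// isHashInSlice does a linear scan; parent lists are short, so no index or
// map is needed. IsParentOf(a, b) can then be isHashInSlice(a, parents(b)).
func isHashInSlice(hash *DomainHash, hashes []*DomainHash) bool {
	for _, h := range hashes {
		if h.Equal(hash) {
			return true
		}
	}
	return false
}
```
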
stasatdaglabs
9181481fc8 [NOD-1413] Remove Handlers from Consensus (#947)
* [NOD-1413] Remove Handlers from Consensus.

* [NOD-1413] Remove ResolveFinalityConflicts.
2020-10-06 16:35:47 +03:00
Ori Newman
62ddd8fe1c [NOD-1444] Implement getHeaders RPC command (#944)
* [NOD-1444] Implement getHeaders RPC command

* [NOD-1444] Fix tests and comments

* [NOD-1444] Fix error message

* [NOD-1444] Make GetHeaders propagate header serialization errors

* [NOD-1444] RLock the dag on GetHeaders

* [NOD-1444] Change the error field number to 1000
2020-10-06 11:18:31 +03:00
stasatdaglabs
7891f73cb0 [NOD-1414] Write domain interfaces and stub implementations for the new kaspadstate architecture (#941)
* [NOD-1414] Add interfaces for Factory and State.

* [NOD-1414] Create interfaces for algorithms and data stores.

* [NOD-1414] Create empty implementations for algorithms and data stores.

* [NOD-1414] Add new functions for all the implementations.

* [NOD-1414] Begin filling in the interfaces.

* [NOD-1414] Fill in the interfaces for the data structures.

* [NOD-1414] Fill in the interfaces for the algorithms.

* [NOD-1414] Fix a bug in package names.

* [NOD-1414] Connect up the various interfaces.

* [NOD-1414] Add stubs to all the implementations.

* [NOD-1414] Create MiningManager and its Factory.

* [NOD-1414] Add interfaces for mempool and blockTemplateBuilder.

* [NOD-1414] Add implementation structs for miningManager.

* [NOD-1414] Add stub implementations for mempool and blockTemplateBuilder.

* [NOD-1414] Rename state to kaspadState.

* [NOD-1414] Restructure where interfaces sit.

* [NOD-1414] Restructure where interfaces sit in the algorithms package as well.

* [NOD-1414] Move remaining models out of models.go.

* [NOD-1414] Modified some interfaces.

* [NOD-1414] Make go vet happy.

* [NOD-1414] Move SerializedUTXOSet into PruningManager.

* [NOD-1414] Modify FindNextPruningPoint to return found and nextPruningPointUTXOSet.

* [NOD-1414] Add IsDAGAncestorOf.

* [NOD-1414] Add PruningPoint().

* [NOD-1414] Add Entry() to ReadOnlyUTXOSet.

* [NOD-1414] Add MergeSet() to BlockGHOSTDAGData.

* [NOD-1414] Write comments for all the exported types and functions in miningmanager.

* [NOD-1414] Add comments to the upper levels of KaspadState.

* [NOD-1414] Replace AddNode with ReachabilityChangeset.

* [NOD-1414] Add payAddress and extraData to GetBlockTemplate.

* [NOD-1414] Add scriptPublicKey and extraData to BuildBlock.

* [NOD-1414] Rename algorithms to processes.

* [NOD-1414] Rename kaspadState to consensus.

* [NOD-1414] Add ValidateAgainstPastUTXO and ValidateFinality.

* [NOD-1414] Add BlockGHOSTDAGData to ReachabilityChangeset.

* [NOD-1414] Fix the comment over Mempool.

* [NOD-1414] Fix the comment over ValidateTransaction.

* [NOD-1414] Fill up the data structures.

* [NOD-1414] Add comments to remaining uncommented items in miningmanager.

* [NOD-1414] Add comments to structs and constructors.

* [NOD-1414] Rename Set to Insert.

* [NOD-1414] Add comments to everything inside datastructures.

* [NOD-1414] Add comments to everything inside models.

* [NOD-1414] Add comments to the interfaces in processes.

* [NOD-1414] Add comments to everything in processes.

* [NOD-1414] Make go vet happy.

* [NOD-1414] Rename scriptPublicKey to coinbaseScriptPublicKey.

* [NOD-1414] Add handlers to the consensus.

* [NOD-1414] Add highHash to blockAtDepth.

* [NOD-1414] Add resolveFinalityConflict.

* [NOD-1414] Reorg BlockValidator.

* [NOD-1414] In ResolveFinalityConflicts, rename blockHash to newFinalityBlockHash.

* [NOD-1414] Fix a comment.

* [NOD-1414] Make reachability structs public.

* [NOD-1414] Make UTXO structs public.
2020-10-06 10:34:04 +03:00
Yuval Shaul
a361d62945 Merge remote-tracking branch 'origin/v0.6.2-dev' 2020-08-16 13:51:25 +03:00
Yuval Shaul
aac173ed72 Merge remote-tracking branch 'origin/v0.6.1-dev' 2020-08-12 10:48:51 +03:00
stasatdaglabs
5f3fb0bf9f [NOD-1238] Fix acceptance index never being initialized. (#859) 2020-08-11 12:04:54 +03:00
Mike Zak
61f383a713 Merge remote-tracking branch 'origin/v0.6.0-dev' 2020-08-09 09:09:27 +03:00
Mike Zak
c62bdb2fa1 Merge remote-tracking branch 'origin/v0.5.0-dev' 2020-07-01 15:05:02 +03:00
Svarog
c88869778d [NOD-869] Add a print after os.Exit(1) to see if it is ever called (#701) 2020-04-22 11:37:30 +03:00
Ori Newman
3fd647b291 [NOD-858] Don't switch sync peer if the syncing process hasn't yet started with the current sync peer (#700)
* [NOD-858] Don't switch sync peer if the syncing process hasn't yet started with the current sync peer

* [NOD-858] SetShouldSendBlockLocator(false) on OnBlockLocator

* [NOD-858] Rename shouldSendBlockLocator->wasBlockLocatorRequested

* [NOD-858] Move panic to shouldReplaceSyncPeer
2020-04-13 15:49:46 +03:00
Mike Zak
2f255952b7 Updated to version v0.3.1 2020-04-13 15:10:27 +03:00
780 changed files with 82783 additions and 47824 deletions

.codecov.yml Normal file

@@ -0,0 +1,7 @@
coverage:
  status:
    patch: off
    project:
      default:
        informational: true

.github/workflows/SetPageFileSize.ps1 vendored Normal file

@@ -0,0 +1,196 @@
<#
# MIT License (MIT) Copyright (c) 2020 Maxim Lobanov and contributors
# Source: https://github.com/al-cheb/configure-pagefile-action/blob/master/scripts/SetPageFileSize.ps1
.SYNOPSIS
Configure Pagefile on Windows machine
.NOTES
Author: Aleksandr Chebotov
.EXAMPLE
SetPageFileSize.ps1 -MinimumSize 4GB -MaximumSize 8GB -DiskRoot "D:"
#>
param(
[System.UInt64] $MinimumSize = 8gb,
[System.UInt64] $MaximumSize = 8gb,
[System.String] $DiskRoot = "D:"
)
# https://referencesource.microsoft.com/#System.IdentityModel/System/IdentityModel/NativeMethods.cs,619688d876febbe1
# https://www.geoffchappell.com/studies/windows/km/ntoskrnl/api/mm/modwrite/create.htm
# https://referencesource.microsoft.com/#mscorlib/microsoft/win32/safehandles/safefilehandle.cs,9b08210f3be75520
# https://referencesource.microsoft.com/#mscorlib/system/security/principal/tokenaccesslevels.cs,6eda91f498a38586
# https://www.autoitscript.com/forum/topic/117993-api-ntcreatepagingfile/
$source = @'
using System;
using System.ComponentModel;
using System.Diagnostics;
using System.Runtime.InteropServices;
using System.Security.Principal;
using System.Text;
using Microsoft.Win32;
using Microsoft.Win32.SafeHandles;
namespace Util
{
class NativeMethods
{
[StructLayout(LayoutKind.Sequential)]
internal struct LUID
{
internal uint LowPart;
internal uint HighPart;
}
[StructLayout(LayoutKind.Sequential)]
internal struct LUID_AND_ATTRIBUTES
{
internal LUID Luid;
internal uint Attributes;
}
[StructLayout(LayoutKind.Sequential)]
internal struct TOKEN_PRIVILEGE
{
internal uint PrivilegeCount;
internal LUID_AND_ATTRIBUTES Privilege;
internal static readonly uint Size = (uint)Marshal.SizeOf(typeof(TOKEN_PRIVILEGE));
}
[StructLayoutAttribute(LayoutKind.Sequential, CharSet = CharSet.Unicode)]
internal struct UNICODE_STRING
{
internal UInt16 length;
internal UInt16 maximumLength;
internal string buffer;
}
[DllImport("kernel32.dll", SetLastError=true)]
internal static extern IntPtr LocalFree(IntPtr handle);
[DllImport("advapi32.dll", ExactSpelling = true, CharSet = CharSet.Unicode, SetLastError = true, PreserveSig = false)]
internal static extern bool LookupPrivilegeValueW(
[In] string lpSystemName,
[In] string lpName,
[Out] out LUID luid
);
[DllImport("advapi32.dll", SetLastError = true, PreserveSig = false)]
internal static extern bool AdjustTokenPrivileges(
[In] SafeCloseHandle tokenHandle,
[In] bool disableAllPrivileges,
[In] ref TOKEN_PRIVILEGE newState,
[In] uint bufferLength,
[Out] out TOKEN_PRIVILEGE previousState,
[Out] out uint returnLength
);
[DllImport("advapi32.dll", CharSet = CharSet.Auto, SetLastError = true, PreserveSig = false)]
internal static extern bool OpenProcessToken(
[In] IntPtr processToken,
[In] int desiredAccess,
[Out] out SafeCloseHandle tokenHandle
);
[DllImport("ntdll.dll", CharSet = CharSet.Unicode, SetLastError = true, CallingConvention = CallingConvention.StdCall)]
internal static extern Int32 NtCreatePagingFile(
[In] ref UNICODE_STRING pageFileName,
[In] ref Int64 minimumSize,
[In] ref Int64 maximumSize,
[In] UInt32 flags
);
[DllImport("kernel32.dll", CharSet = CharSet.Unicode, SetLastError = true)]
internal static extern uint QueryDosDeviceW(
string lpDeviceName,
StringBuilder lpTargetPath,
int ucchMax
);
}
public sealed class SafeCloseHandle: SafeHandleZeroOrMinusOneIsInvalid
{
[DllImport("kernel32.dll", ExactSpelling = true, SetLastError = true)]
internal extern static bool CloseHandle(IntPtr handle);
private SafeCloseHandle() : base(true)
{
}
public SafeCloseHandle(IntPtr preexistingHandle, bool ownsHandle) : base(ownsHandle)
{
SetHandle(preexistingHandle);
}
override protected bool ReleaseHandle()
{
return CloseHandle(handle);
}
}
public class PageFile
{
public static void SetPageFileSize(long minimumValue, long maximumValue, string lpDeviceName)
{
SetPageFilePrivilege();
StringBuilder lpTargetPath = new StringBuilder(260);
UInt32 resultQueryDosDevice = NativeMethods.QueryDosDeviceW(lpDeviceName, lpTargetPath, lpTargetPath.Capacity);
if (resultQueryDosDevice == 0)
{
throw new Win32Exception(Marshal.GetLastWin32Error());
}
string pageFilePath = lpTargetPath.ToString() + "\\pagefile.sys";
NativeMethods.UNICODE_STRING pageFileName = new NativeMethods.UNICODE_STRING
{
length = (ushort)(pageFilePath.Length * 2),
maximumLength = (ushort)(2 * (pageFilePath.Length + 1)),
buffer = pageFilePath
};
Int32 resultNtCreatePagingFile = NativeMethods.NtCreatePagingFile(ref pageFileName, ref minimumValue, ref maximumValue, 0);
if (resultNtCreatePagingFile != 0)
{
throw new Win32Exception(Marshal.GetLastWin32Error());
}
Console.WriteLine("PageFile: {0} / {1} bytes for {2}", minimumValue, maximumValue, pageFilePath);
}
static void SetPageFilePrivilege()
{
const int SE_PRIVILEGE_ENABLED = 0x00000002;
const int AdjustPrivileges = 0x00000020;
const int Query = 0x00000008;
NativeMethods.LUID luid;
NativeMethods.LookupPrivilegeValueW(null, "SeCreatePagefilePrivilege", out luid);
SafeCloseHandle hToken;
NativeMethods.OpenProcessToken(
Process.GetCurrentProcess().Handle,
AdjustPrivileges | Query,
out hToken
);
NativeMethods.TOKEN_PRIVILEGE previousState;
NativeMethods.TOKEN_PRIVILEGE newState;
uint previousSize = 0;
newState.PrivilegeCount = 1;
newState.Privilege.Luid = luid;
newState.Privilege.Attributes = SE_PRIVILEGE_ENABLED;
NativeMethods.AdjustTokenPrivileges(hToken, false, ref newState, NativeMethods.TOKEN_PRIVILEGE.Size, out previousState, out previousSize);
}
}
}
'@
Add-Type -TypeDefinition $source
# Set SetPageFileSize
[Util.PageFile]::SetPageFileSize($minimumSize, $maximumSize, $diskRoot)

.github/workflows/go.yml vendored Normal file

@@ -0,0 +1,70 @@
name: Go
on:
  push:
  pull_request:
    # edited - "title, body, or the base branch of the PR is modified"
    # synchronize - "commit(s) pushed to the pull request"
    types: [opened, synchronize, edited, reopened]
jobs:
  build:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ ubuntu-16.04, macos-10.15 ]
    name: Testing on ${{ matrix.os }}
    steps:
      - name: Fix windows CRLF
        run: git config --global core.autocrlf false
      - name: Check out code into the Go module directory
        uses: actions/checkout@v2
      # We need to increase the page size because the tests run out of memory on github CI windows.
      # Use the powershell script from this github action: https://github.com/al-cheb/configure-pagefile-action/blob/master/scripts/SetPageFileSize.ps1
      # MIT License (MIT) Copyright (c) 2020 Maxim Lobanov and contributors
      - name: Increase page size on windows
        if: runner.os == 'Windows'
        shell: powershell
        run: powershell -command .\.github\workflows\SetPageFileSize.ps1
      - name: Set up Go 1.x
        uses: actions/setup-go@v2
        with:
          go-version: 1.14
      # Source: https://github.com/actions/cache/blob/main/examples.md#go---modules
      - name: Go Cache
        uses: actions/cache@v2
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-
      - name: Test
        shell: bash
        run: ./build_and_test.sh
  coverage:
    runs-on: ubuntu-20.04
    name: Produce code coverage
    steps:
      - name: Check out code into the Go module directory
        uses: actions/checkout@v2
      - name: Set up Go 1.x
        uses: actions/setup-go@v2
        with:
          go-version: 1.14
      - name: Create coverage file
        # Because of https://github.com/golang/go/issues/27333 this seems to "fail" even though nothing is wrong, so ignore the failure
        run: go test -json -covermode=atomic -coverpkg=./... -coverprofile coverage.txt ./... || true
      - name: Upload coverage file
        run: bash <(curl -s https://codecov.io/bash)

.gitignore vendored

@@ -13,6 +13,21 @@ kaspad.db
*.o
*.a
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Real binaries, built with `go build .`
kaspad
cmd/gencerts/gencerts
cmd/kaspactl/kaspactl
cmd/kaspaminer/kaspaminer
*.exe
*.exe~
# Output of the go coverage tool
*.out
# Folders
_obj
@@ -31,8 +46,7 @@ _cgo_export.*
_testmain.go
*.exe
# IDE
.idea
.vscode

CONTRIBUTING.md Normal file

@@ -0,0 +1,19 @@
# Contributing to Kaspad
Any contribution to Kaspad is very welcome.
## Getting started
If you want to start contributing to Kaspad and don't know where to start, you can pick an issue from
the [list](https://github.com/kaspanet/kaspad/issues).
If you want to make a big change, it's better to discuss it first by opening an issue or talking about it on
[Discord](https://discord.gg/WmGhhzk) to avoid duplicate work.
## Pull Request process
Any pull request should be opened against the development branch of the target version. The development branch format is
as follows: `vx.y.z-dev`, for example: `v0.8.5-dev`.
All pull requests should pass the checks written in `build_and_test.sh`, so it's recommended to run this script before
submitting your PR.


@@ -9,12 +9,16 @@ Warning: This is pre-alpha software. There's no guarantee anything works.
Kaspad is the reference full node Kaspa implementation written in Go (golang).
This project is currently under active development and is in a pre-Alpha state.
Some things still don't work and APIs are far from finalized. The code is provided for reference only.
## What is kaspa
Kaspa is an attempt at a proof-of-work cryptocurrency with instant confirmations and sub-second block times. It is based on [the PHANTOM protocol](https://eprint.iacr.org/2018/104.pdf), a generalization of Nakamoto consensus.
## Requirements
Latest version of [Go](http://golang.org) (currently 1.13).
Go 1.14 or later.
## Installation
@@ -27,23 +31,17 @@ Latest version of [Go](http://golang.org) (currently 1.13).
```bash
$ go version
$ go env GOROOT GOPATH
```
NOTE: The `GOROOT` and `GOPATH` above must not be the same path. It is
recommended that `GOPATH` is set to a directory in your home directory such as
`~/dev/go` to avoid write permission issues. It is also recommended to add
`$GOPATH/bin` to your `PATH` at this point.
- Run the following commands to obtain and install kaspad including all dependencies:
```bash
$ git clone https://github.com/kaspanet/kaspad $GOPATH/src/github.com/kaspanet/kaspad
$ cd $GOPATH/src/github.com/kaspanet/kaspad
$ git clone https://github.com/kaspanet/kaspad
$ cd kaspad
$ go install . ./cmd/...
```
- Kaspad (and utilities) should now be installed in `$GOPATH/bin`. If you did
- Kaspad (and utilities) should now be installed in `$(go env GOPATH)/bin`. If you did
not already add the bin directory to your system path during Go installation,
you are encouraged to do so now.
@@ -53,10 +51,8 @@ $ go install . ./cmd/...
Kaspad has several configuration options available to tweak how it runs, but all
of the basic operations work with zero configuration.
#### Linux/BSD/POSIX/Source
```bash
$ ./kaspad
$ kaspad
```
## Discord
@@ -69,9 +65,8 @@ is used for this project.
## Documentation
The documentation is a work-in-progress. It is located in the [docs](https://github.com/kaspanet/kaspad/tree/master/docs) folder.
The documentation is a work-in-progress.
## License
Kaspad is licensed under the copyfree [ISC License](https://choosealicense.com/licenses/isc/).


@@ -7,9 +7,9 @@ import (
"runtime"
"time"
"github.com/kaspanet/kaspad/infrastructure/db/dbaccess"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/infrastructure/db/database/ldb"
"github.com/kaspanet/kaspad/domain/blockdag/indexers"
"github.com/kaspanet/kaspad/infrastructure/os/signal"
"github.com/kaspanet/kaspad/util/profiling"
"github.com/kaspanet/kaspad/version"
@@ -46,7 +46,7 @@ func StartApp() error {
// initializes logging and configures it accordingly.
cfg, err := config.LoadConfig()
if err != nil {
fmt.Fprint(os.Stderr, err)
fmt.Fprintln(os.Stderr, err)
return err
}
defer panics.HandlePanic(log, "MAIN", nil)
@@ -123,16 +123,6 @@ func (app *kaspadApp) main(startedChan chan<- struct{}) error {
return nil
}
// Drop indexes and exit if requested.
if app.cfg.DropAcceptanceIndex {
if err := indexers.DropAcceptanceIndex(databaseContext); err != nil {
log.Errorf("%s", err)
return err
}
return nil
}
// Create componentManager and start it.
componentManager, err := NewComponentManager(app.cfg, databaseContext, interrupt)
if err != nil {
@@ -188,8 +178,8 @@ func removeDatabase(cfg *config.Config) error {
return os.RemoveAll(dbPath)
}
func openDB(cfg *config.Config) (*dbaccess.DatabaseContext, error) {
func openDB(cfg *config.Config) (database.Database, error) {
dbPath := databasePath(cfg)
log.Infof("Loading database from '%s'", dbPath)
return dbaccess.New(dbPath)
return ldb.NewLevelDB(dbPath)
}


@@ -1,431 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"bytes"
"compress/bzip2"
"io/ioutil"
"math"
"os"
"testing"
"github.com/kaspanet/kaspad/util/daghash"
)
// genesisCoinbaseTx is the coinbase transaction for the genesis blocks for
// the main network and test network.
var genesisCoinbaseTxIns = []*TxIn{
{
PreviousOutpoint: Outpoint{
TxID: daghash.TxID{},
Index: 0xffffffff,
},
SignatureScript: []byte{
0x04, 0xff, 0xff, 0x00, 0x1d, 0x01, 0x04, 0x45, /* |.......E| */
0x54, 0x68, 0x65, 0x20, 0x54, 0x69, 0x6d, 0x65, /* |The Time| */
0x73, 0x20, 0x30, 0x33, 0x2f, 0x4a, 0x61, 0x6e, /* |s 03/Jan| */
0x2f, 0x32, 0x30, 0x30, 0x39, 0x20, 0x43, 0x68, /* |/2009 Ch| */
0x61, 0x6e, 0x63, 0x65, 0x6c, 0x6c, 0x6f, 0x72, /* |ancellor| */
0x20, 0x6f, 0x6e, 0x20, 0x62, 0x72, 0x69, 0x6e, /* | on brin| */
0x6b, 0x20, 0x6f, 0x66, 0x20, 0x73, 0x65, 0x63, /* |k of sec|*/
0x6f, 0x6e, 0x64, 0x20, 0x62, 0x61, 0x69, 0x6c, /* |ond bail| */
0x6f, 0x75, 0x74, 0x20, 0x66, 0x6f, 0x72, 0x20, /* |out for |*/
0x62, 0x61, 0x6e, 0x6b, 0x73, /* |banks| */
},
Sequence: math.MaxUint64,
},
}
var genesisCoinbaseTxOuts = []*TxOut{
{
Value: 0x12a05f200,
ScriptPubKey: []byte{
0x41, 0x04, 0x67, 0x8a, 0xfd, 0xb0, 0xfe, 0x55, /* |A.g....U| */
0x48, 0x27, 0x19, 0x67, 0xf1, 0xa6, 0x71, 0x30, /* |H'.g..q0| */
0xb7, 0x10, 0x5c, 0xd6, 0xa8, 0x28, 0xe0, 0x39, /* |..\..(.9| */
0x09, 0xa6, 0x79, 0x62, 0xe0, 0xea, 0x1f, 0x61, /* |..yb...a| */
0xde, 0xb6, 0x49, 0xf6, 0xbc, 0x3f, 0x4c, 0xef, /* |..I..?L.| */
0x38, 0xc4, 0xf3, 0x55, 0x04, 0xe5, 0x1e, 0xc1, /* |8..U....| */
0x12, 0xde, 0x5c, 0x38, 0x4d, 0xf7, 0xba, 0x0b, /* |..\8M...| */
0x8d, 0x57, 0x8a, 0x4c, 0x70, 0x2b, 0x6b, 0xf1, /* |.W.Lp+k.| */
0x1d, 0x5f, 0xac, /* |._.| */
},
},
}
var genesisCoinbaseTx = NewNativeMsgTx(1, genesisCoinbaseTxIns, genesisCoinbaseTxOuts)
// BenchmarkWriteVarInt1 performs a benchmark on how long it takes to write
// a single byte variable length integer.
func BenchmarkWriteVarInt1(b *testing.B) {
for i := 0; i < b.N; i++ {
WriteVarInt(ioutil.Discard, 1)
}
}
// BenchmarkWriteVarInt3 performs a benchmark on how long it takes to write
// a three byte variable length integer.
func BenchmarkWriteVarInt3(b *testing.B) {
for i := 0; i < b.N; i++ {
WriteVarInt(ioutil.Discard, 65535)
}
}
// BenchmarkWriteVarInt5 performs a benchmark on how long it takes to write
// a five byte variable length integer.
func BenchmarkWriteVarInt5(b *testing.B) {
for i := 0; i < b.N; i++ {
WriteVarInt(ioutil.Discard, 4294967295)
}
}
// BenchmarkWriteVarInt9 performs a benchmark on how long it takes to write
// a nine byte variable length integer.
func BenchmarkWriteVarInt9(b *testing.B) {
for i := 0; i < b.N; i++ {
WriteVarInt(ioutil.Discard, 18446744073709551615)
}
}
// BenchmarkReadVarInt1 performs a benchmark on how long it takes to read
// a single byte variable length integer.
func BenchmarkReadVarInt1(b *testing.B) {
buf := []byte{0x01}
r := bytes.NewReader(buf)
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
ReadVarInt(r)
}
}
// BenchmarkReadVarInt3 performs a benchmark on how long it takes to read
// a three byte variable length integer.
func BenchmarkReadVarInt3(b *testing.B) {
buf := []byte{0x0fd, 0xff, 0xff}
r := bytes.NewReader(buf)
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
ReadVarInt(r)
}
}
// BenchmarkReadVarInt5 performs a benchmark on how long it takes to read
// a five byte variable length integer.
func BenchmarkReadVarInt5(b *testing.B) {
buf := []byte{0xfe, 0xff, 0xff, 0xff, 0xff}
r := bytes.NewReader(buf)
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
ReadVarInt(r)
}
}
// BenchmarkReadVarInt9 performs a benchmark on how long it takes to read
// a nine byte variable length integer.
func BenchmarkReadVarInt9(b *testing.B) {
buf := []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
r := bytes.NewReader(buf)
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
ReadVarInt(r)
}
}
// BenchmarkReadVarStr4 performs a benchmark on how long it takes to read a
// four byte variable length string.
func BenchmarkReadVarStr4(b *testing.B) {
buf := []byte{0x04, 't', 'e', 's', 't'}
r := bytes.NewReader(buf)
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
ReadVarString(r, 0)
}
}
// BenchmarkReadVarStr10 performs a benchmark on how long it takes to read a
// ten byte variable length string.
func BenchmarkReadVarStr10(b *testing.B) {
buf := []byte{0x0a, 't', 'e', 's', 't', '0', '1', '2', '3', '4', '5'}
r := bytes.NewReader(buf)
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
ReadVarString(r, 0)
}
}
// BenchmarkWriteVarStr4 performs a benchmark on how long it takes to write a
// four byte variable length string.
func BenchmarkWriteVarStr4(b *testing.B) {
for i := 0; i < b.N; i++ {
WriteVarString(ioutil.Discard, "test")
}
}
// BenchmarkWriteVarStr10 performs a benchmark on how long it takes to write a
// ten byte variable length string.
func BenchmarkWriteVarStr10(b *testing.B) {
for i := 0; i < b.N; i++ {
WriteVarString(ioutil.Discard, "test012345")
}
}
// BenchmarkReadOutpoint performs a benchmark on how long it takes to read a
// transaction outpoint.
func BenchmarkReadOutpoint(b *testing.B) {
buf := []byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash
0xff, 0xff, 0xff, 0xff, // Previous output index
}
r := bytes.NewReader(buf)
var op Outpoint
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
readOutpoint(r, 0, 0, &op)
}
}
// BenchmarkWriteOutpoint performs a benchmark on how long it takes to write a
// transaction outpoint.
func BenchmarkWriteOutpoint(b *testing.B) {
op := &Outpoint{
TxID: daghash.TxID{},
Index: 0,
}
for i := 0; i < b.N; i++ {
writeOutpoint(ioutil.Discard, 0, 0, op)
}
}
// BenchmarkReadTxOut performs a benchmark on how long it takes to read a
// transaction output.
func BenchmarkReadTxOut(b *testing.B) {
buf := []byte{
0x00, 0xf2, 0x05, 0x2a, 0x01, 0x00, 0x00, 0x00, // Transaction amount
0x43, // Varint for length of scriptPubKey
0x41, // OP_DATA_65
0x04, 0x96, 0xb5, 0x38, 0xe8, 0x53, 0x51, 0x9c,
0x72, 0x6a, 0x2c, 0x91, 0xe6, 0x1e, 0xc1, 0x16,
0x00, 0xae, 0x13, 0x90, 0x81, 0x3a, 0x62, 0x7c,
0x66, 0xfb, 0x8b, 0xe7, 0x94, 0x7b, 0xe6, 0x3c,
0x52, 0xda, 0x75, 0x89, 0x37, 0x95, 0x15, 0xd4,
0xe0, 0xa6, 0x04, 0xf8, 0x14, 0x17, 0x81, 0xe6,
0x22, 0x94, 0x72, 0x11, 0x66, 0xbf, 0x62, 0x1e,
0x73, 0xa8, 0x2c, 0xbf, 0x23, 0x42, 0xc8, 0x58,
0xee, // 65-byte signature
0xac, // OP_CHECKSIG
}
r := bytes.NewReader(buf)
var txOut TxOut
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
readTxOut(r, 0, 0, &txOut)
scriptPool.Return(txOut.ScriptPubKey)
}
}
// BenchmarkWriteTxOut performs a benchmark on how long it takes to write
// a transaction output.
func BenchmarkWriteTxOut(b *testing.B) {
txOut := blockOne.Transactions[0].TxOut[0]
for i := 0; i < b.N; i++ {
WriteTxOut(ioutil.Discard, 0, 0, txOut)
}
}
// BenchmarkReadTxIn performs a benchmark on how long it takes to read a
// transaction input.
func BenchmarkReadTxIn(b *testing.B) {
buf := []byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash
0xff, 0xff, 0xff, 0xff, // Previous output index
0x07, // Varint for length of signature script
0x04, 0xff, 0xff, 0x00, 0x1d, 0x01, 0x04, // Signature script
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // Sequence
}
r := bytes.NewReader(buf)
var txIn TxIn
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
readTxIn(r, 0, 0, &txIn)
scriptPool.Return(txIn.SignatureScript)
}
}
// BenchmarkWriteTxIn performs a benchmark on how long it takes to write
// a transaction input.
func BenchmarkWriteTxIn(b *testing.B) {
txIn := blockOne.Transactions[0].TxIn[0]
for i := 0; i < b.N; i++ {
writeTxIn(ioutil.Discard, 0, 0, txIn, txEncodingFull)
}
}
// BenchmarkDeserializeTxSmall performs a benchmark on how long it takes to
// deserialize a small transaction.
func BenchmarkDeserializeTxSmall(b *testing.B) {
buf := []byte{
0x01, 0x00, 0x00, 0x00, // Version
0x01, // Varint for number of input transactions
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash
0xff, 0xff, 0xff, 0xff, // Previous output index
0x07, // Varint for length of signature script
0x04, 0xff, 0xff, 0x00, 0x1d, 0x01, 0x04, // Signature script
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // Sequence
0x01, // Varint for number of output transactions
0x00, 0xf2, 0x05, 0x2a, 0x01, 0x00, 0x00, 0x00, // Transaction amount
0x43, // Varint for length of scriptPubKey
0x41, // OP_DATA_65
0x04, 0x96, 0xb5, 0x38, 0xe8, 0x53, 0x51, 0x9c,
0x72, 0x6a, 0x2c, 0x91, 0xe6, 0x1e, 0xc1, 0x16,
0x00, 0xae, 0x13, 0x90, 0x81, 0x3a, 0x62, 0x7c,
0x66, 0xfb, 0x8b, 0xe7, 0x94, 0x7b, 0xe6, 0x3c,
0x52, 0xda, 0x75, 0x89, 0x37, 0x95, 0x15, 0xd4,
0xe0, 0xa6, 0x04, 0xf8, 0x14, 0x17, 0x81, 0xe6,
0x22, 0x94, 0x72, 0x11, 0x66, 0xbf, 0x62, 0x1e,
0x73, 0xa8, 0x2c, 0xbf, 0x23, 0x42, 0xc8, 0x58,
0xee, // 65-byte signature
0xac, // OP_CHECKSIG
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Lock time
}
r := bytes.NewReader(buf)
var tx MsgTx
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
tx.Deserialize(r)
}
}
// BenchmarkDeserializeTxLarge performs a benchmark on how long it takes to
// deserialize a very large transaction.
func BenchmarkDeserializeTxLarge(b *testing.B) {
fi, err := os.Open("testdata/megatx.bin.bz2")
if err != nil {
b.Fatalf("Failed to read transaction data: %v", err)
}
defer fi.Close()
buf, err := ioutil.ReadAll(bzip2.NewReader(fi))
if err != nil {
b.Fatalf("Failed to read transaction data: %v", err)
}
r := bytes.NewReader(buf)
var tx MsgTx
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
tx.Deserialize(r)
}
}
// BenchmarkSerializeTx performs a benchmark on how long it takes to serialize
// a transaction.
func BenchmarkSerializeTx(b *testing.B) {
tx := blockOne.Transactions[0]
for i := 0; i < b.N; i++ {
tx.Serialize(ioutil.Discard)
}
}
// BenchmarkReadBlockHeader performs a benchmark on how long it takes to
// deserialize a block header.
func BenchmarkReadBlockHeader(b *testing.B) {
buf := []byte{
0x01, 0x00, 0x00, 0x00, // Version 1
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72,
0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, // PrevBlock
0x3b, 0xa3, 0xed, 0xfd, 0x7a, 0x7b, 0x12, 0xb2,
0x7a, 0xc7, 0x2c, 0x3e, 0x67, 0x76, 0x8f, 0x61,
0x7f, 0xc8, 0x1b, 0xc3, 0x88, 0x8a, 0x51, 0x32,
0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, // MerkleRoot
0x29, 0xab, 0x5f, 0x49, 0x00, 0x00, 0x00, 0x00, // Timestamp
0xff, 0xff, 0x00, 0x1d, // Bits
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
0x00, // TxnCount Varint
}
r := bytes.NewReader(buf)
var header BlockHeader
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
readBlockHeader(r, 0, &header)
}
}
// BenchmarkWriteBlockHeader performs a benchmark on how long it takes to
// serialize a block header.
func BenchmarkWriteBlockHeader(b *testing.B) {
header := blockOne.Header
for i := 0; i < b.N; i++ {
writeBlockHeader(ioutil.Discard, 0, &header)
}
}
// BenchmarkTxHash performs a benchmark on how long it takes to hash a
// transaction.
func BenchmarkTxHash(b *testing.B) {
for i := 0; i < b.N; i++ {
genesisCoinbaseTx.TxHash()
}
}
// BenchmarkDoubleHashB performs a benchmark on how long it takes to perform a
// double hash returning a byte slice.
func BenchmarkDoubleHashB(b *testing.B) {
var buf bytes.Buffer
if err := genesisCoinbaseTx.Serialize(&buf); err != nil {
b.Errorf("Serialize: unexpected error: %v", err)
return
}
txBytes := buf.Bytes()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = daghash.DoubleHashB(txBytes)
}
}
// BenchmarkDoubleHashH performs a benchmark on how long it takes to perform
// a double hash returning a daghash.Hash.
func BenchmarkDoubleHashH(b *testing.B) {
var buf bytes.Buffer
if err := genesisCoinbaseTx.Serialize(&buf); err != nil {
b.Errorf("Serialize: unexpected error: %v", err)
return
}
txBytes := buf.Bytes()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = daghash.DoubleHashH(txBytes)
}
}
// BenchmarkDoubleHashWriter performs a benchmark on how long it takes to perform
// a double hash via the writer returning a daghash.Hash.
func BenchmarkDoubleHashWriter(b *testing.B) {
var buf bytes.Buffer
err := genesisCoinbaseTx.Serialize(&buf)
if err != nil {
b.Fatalf("Serialize: unexpected error: %+v", err)
}
txBytes := buf.Bytes()
b.ResetTimer()
for i := 0; i < b.N; i++ {
writer := daghash.NewDoubleHashWriter()
_, _ = writer.Write(txBytes)
writer.Finalize()
}
}


@@ -5,34 +5,12 @@
package appmessage
import (
"encoding/binary"
"fmt"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
"github.com/kaspanet/kaspad/util/binaryserializer"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/pkg/errors"
"io"
"math"
)
// MaxVarIntPayload is the maximum payload size for a variable length integer.
const MaxVarIntPayload = 9
// MaxInvPerMsg is the maximum number of inventory vectors that can be in any type of kaspa inv message.
const MaxInvPerMsg = 1 << 17
var (
// littleEndian is a convenience variable since binary.LittleEndian is
// quite long.
littleEndian = binary.LittleEndian
// bigEndian is a convenience variable since binary.BigEndian is quite
// long.
bigEndian = binary.BigEndian
)
// errNonCanonicalVarInt is the common format string used for non-canonically
// encoded variable length integer errors.
var errNonCanonicalVarInt = "non-canonical varint %x - discriminant %x must " +
@@ -40,473 +18,3 @@ var errNonCanonicalVarInt = "non-canonical varint %x - discriminant %x must " +
// errNoEncodingForType signifies that there's no encoding for the given type.
var errNoEncodingForType = errors.New("there's no encoding for this type")
// int64Time represents a unix timestamp with milliseconds precision encoded with
// an int64. It is used as a way to signal the readElement function how to decode
// a timestamp into a Go mstime.Time since it is otherwise ambiguous.
type int64Time mstime.Time
// ReadElement reads the next sequence of bytes from r using little endian
// depending on the concrete type of element pointed to.
func ReadElement(r io.Reader, element interface{}) error {
// Attempt to read the element based on the concrete type via fast
// type assertions first.
switch e := element.(type) {
case *int32:
rv, err := binaryserializer.Uint32(r, littleEndian)
if err != nil {
return err
}
*e = int32(rv)
return nil
case *uint32:
rv, err := binaryserializer.Uint32(r, littleEndian)
if err != nil {
return err
}
*e = rv
return nil
case *int64:
rv, err := binaryserializer.Uint64(r, littleEndian)
if err != nil {
return err
}
*e = int64(rv)
return nil
case *uint64:
rv, err := binaryserializer.Uint64(r, littleEndian)
if err != nil {
return err
}
*e = rv
return nil
case *uint8:
rv, err := binaryserializer.Uint8(r)
if err != nil {
return err
}
*e = rv
return nil
case *bool:
rv, err := binaryserializer.Uint8(r)
if err != nil {
return err
}
if rv == 0x00 {
*e = false
} else {
*e = true
}
return nil
// Unix timestamp encoded as an int64.
case *int64Time:
rv, err := binaryserializer.Uint64(r, binary.LittleEndian)
if err != nil {
return err
}
*e = int64Time(mstime.UnixMilliseconds(int64(rv)))
return nil
// Message header checksum.
case *[4]byte:
_, err := io.ReadFull(r, e[:])
if err != nil {
return err
}
return nil
// Message header command.
case *MessageCommand:
rv, err := binaryserializer.Uint32(r, littleEndian)
if err != nil {
return err
}
*e = MessageCommand(rv)
return nil
// IP address.
case *[16]byte:
_, err := io.ReadFull(r, e[:])
if err != nil {
return err
}
return nil
case *daghash.Hash:
_, err := io.ReadFull(r, e[:])
if err != nil {
return err
}
return nil
case *id.ID:
return e.Deserialize(r)
case *subnetworkid.SubnetworkID:
_, err := io.ReadFull(r, e[:])
if err != nil {
return err
}
return nil
case *ServiceFlag:
rv, err := binaryserializer.Uint64(r, littleEndian)
if err != nil {
return err
}
*e = ServiceFlag(rv)
return nil
case *KaspaNet:
rv, err := binaryserializer.Uint32(r, littleEndian)
if err != nil {
return err
}
*e = KaspaNet(rv)
return nil
}
return errors.Wrapf(errNoEncodingForType, "couldn't find a way to read type %T", element)
}
// readElements reads multiple items from r. It is equivalent to multiple
// calls to readElement.
func readElements(r io.Reader, elements ...interface{}) error {
for _, element := range elements {
err := ReadElement(r, element)
if err != nil {
return err
}
}
return nil
}
// WriteElement writes the little endian representation of element to w.
func WriteElement(w io.Writer, element interface{}) error {
// Attempt to write the element based on the concrete type via fast
// type assertions first.
switch e := element.(type) {
case int32:
err := binaryserializer.PutUint32(w, littleEndian, uint32(e))
if err != nil {
return err
}
return nil
case uint32:
err := binaryserializer.PutUint32(w, littleEndian, e)
if err != nil {
return err
}
return nil
case int64:
err := binaryserializer.PutUint64(w, littleEndian, uint64(e))
if err != nil {
return err
}
return nil
case uint64:
err := binaryserializer.PutUint64(w, littleEndian, e)
if err != nil {
return err
}
return nil
case uint8:
err := binaryserializer.PutUint8(w, e)
if err != nil {
return err
}
return nil
case bool:
var err error
if e {
err = binaryserializer.PutUint8(w, 0x01)
} else {
err = binaryserializer.PutUint8(w, 0x00)
}
if err != nil {
return err
}
return nil
// Message header checksum.
case [4]byte:
_, err := w.Write(e[:])
if err != nil {
return err
}
return nil
// Message header command.
case MessageCommand:
err := binaryserializer.PutUint32(w, littleEndian, uint32(e))
if err != nil {
return err
}
return nil
// IP address.
case [16]byte:
_, err := w.Write(e[:])
if err != nil {
return err
}
return nil
case *daghash.Hash:
_, err := w.Write(e[:])
if err != nil {
return err
}
return nil
case *id.ID:
return e.Serialize(w)
case *subnetworkid.SubnetworkID:
_, err := w.Write(e[:])
if err != nil {
return err
}
return nil
case ServiceFlag:
err := binaryserializer.PutUint64(w, littleEndian, uint64(e))
if err != nil {
return err
}
return nil
case KaspaNet:
err := binaryserializer.PutUint32(w, littleEndian, uint32(e))
if err != nil {
return err
}
return nil
}
return errors.Wrapf(errNoEncodingForType, "couldn't find a way to write type %T", element)
}
// writeElements writes multiple items to w. It is equivalent to multiple
// calls to writeElement.
func writeElements(w io.Writer, elements ...interface{}) error {
for _, element := range elements {
err := WriteElement(w, element)
if err != nil {
return err
}
}
return nil
}
// ReadVarInt reads a variable length integer from r and returns it as a uint64.
func ReadVarInt(r io.Reader) (uint64, error) {
discriminant, err := binaryserializer.Uint8(r)
if err != nil {
return 0, err
}
var rv uint64
switch discriminant {
case 0xff:
sv, err := binaryserializer.Uint64(r, littleEndian)
if err != nil {
return 0, err
}
rv = sv
// The encoding is not canonical if the value could have been
// encoded using fewer bytes.
min := uint64(0x100000000)
if rv < min {
return 0, messageError("readVarInt", fmt.Sprintf(
errNonCanonicalVarInt, rv, discriminant, min))
}
case 0xfe:
sv, err := binaryserializer.Uint32(r, littleEndian)
if err != nil {
return 0, err
}
rv = uint64(sv)
// The encoding is not canonical if the value could have been
// encoded using fewer bytes.
min := uint64(0x10000)
if rv < min {
return 0, messageError("readVarInt", fmt.Sprintf(
errNonCanonicalVarInt, rv, discriminant, min))
}
case 0xfd:
sv, err := binaryserializer.Uint16(r, littleEndian)
if err != nil {
return 0, err
}
rv = uint64(sv)
// The encoding is not canonical if the value could have been
// encoded using fewer bytes.
min := uint64(0xfd)
if rv < min {
return 0, messageError("readVarInt", fmt.Sprintf(
errNonCanonicalVarInt, rv, discriminant, min))
}
default:
rv = uint64(discriminant)
}
return rv, nil
}
// WriteVarInt serializes val to w using a variable number of bytes depending
// on its value.
func WriteVarInt(w io.Writer, val uint64) error {
if val < 0xfd {
_, err := w.Write([]byte{uint8(val)})
return errors.WithStack(err)
}
if val <= math.MaxUint16 {
var buf [3]byte
buf[0] = 0xfd
littleEndian.PutUint16(buf[1:], uint16(val))
_, err := w.Write(buf[:])
return errors.WithStack(err)
}
if val <= math.MaxUint32 {
var buf [5]byte
buf[0] = 0xfe
littleEndian.PutUint32(buf[1:], uint32(val))
_, err := w.Write(buf[:])
return errors.WithStack(err)
}
var buf [9]byte
buf[0] = 0xff
littleEndian.PutUint64(buf[1:], val)
_, err := w.Write(buf[:])
return errors.WithStack(err)
}
// VarIntSerializeSize returns the number of bytes it would take to serialize
// val as a variable length integer.
func VarIntSerializeSize(val uint64) int {
// The value is small enough to be represented by itself, so it's
// just 1 byte.
if val < 0xfd {
return 1
}
// Discriminant 1 byte plus 2 bytes for the uint16.
if val <= math.MaxUint16 {
return 3
}
// Discriminant 1 byte plus 4 bytes for the uint32.
if val <= math.MaxUint32 {
return 5
}
// Discriminant 1 byte plus 8 bytes for the uint64.
return 9
}
// ReadVarString reads a variable length string from r and returns it as a Go
// string. A variable length string is encoded as a variable length integer
// containing the length of the string followed by the bytes that represent the
// string itself. An error is returned if the length is greater than the
// maximum block payload size since it helps protect against memory exhaustion
// attacks and forced panics through malformed messages.
func ReadVarString(r io.Reader, pver uint32) (string, error) {
count, err := ReadVarInt(r)
if err != nil {
return "", err
}
// Prevent variable length strings that are larger than the maximum
// message size. It would be possible to cause memory exhaustion and
// panics without a sane upper bound on this count.
if count > MaxMessagePayload {
str := fmt.Sprintf("variable length string is too long "+
"[count %d, max %d]", count, MaxMessagePayload)
return "", messageError("ReadVarString", str)
}
buf := make([]byte, count)
_, err = io.ReadFull(r, buf)
if err != nil {
return "", err
}
return string(buf), nil
}
// WriteVarString serializes str to w as a variable length integer containing
// the length of the string followed by the bytes that represent the string
// itself.
func WriteVarString(w io.Writer, str string) error {
err := WriteVarInt(w, uint64(len(str)))
if err != nil {
return err
}
_, err = w.Write([]byte(str))
return err
}
// ReadVarBytes reads a variable length byte array. A byte array is encoded
// as a varInt containing the length of the array followed by the bytes
// themselves. An error is returned if the length is greater than the
// passed maxAllowed parameter which helps protect against memory exhaustion
// attacks and forced panics through malformed messages. The fieldName
// parameter is only used for the error message so it provides more context in
// the error.
func ReadVarBytes(r io.Reader, pver uint32, maxAllowed uint32,
fieldName string) ([]byte, error) {
count, err := ReadVarInt(r)
if err != nil {
return nil, err
}
// Prevent byte array larger than the max message size. It would
// be possible to cause memory exhaustion and panics without a sane
// upper bound on this count.
if count > uint64(maxAllowed) {
str := fmt.Sprintf("%s is larger than the max allowed size "+
"[count %d, max %d]", fieldName, count, maxAllowed)
return nil, messageError("ReadVarBytes", str)
}
b := make([]byte, count)
_, err = io.ReadFull(r, b)
if err != nil {
return nil, err
}
return b, nil
}
// WriteVarBytes serializes a variable length byte array to w as a varInt
// containing the number of bytes, followed by the bytes themselves.
func WriteVarBytes(w io.Writer, pver uint32, bytes []byte) error {
slen := uint64(len(bytes))
err := WriteVarInt(w, slen)
if err != nil {
return err
}
_, err = w.Write(bytes)
return err
}
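
For reference, the ReadVarInt/WriteVarInt helpers deleted above implement the Bitcoin-style variable-length integer: values below 0xfd occupy a single byte, while the discriminants 0xfd, 0xfe, and 0xff prefix 2-, 4-, and 8-byte little-endian payloads (and the reader rejects non-canonical encodings that waste bytes). A condensed, self-contained sketch of the encoding rule:

```go
// Condensed sketch of the varint wire format the deleted WriteVarInt
// implemented: one byte for values < 0xfd, otherwise a discriminant byte
// followed by a 2-, 4-, or 8-byte little-endian payload.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"math"
)

func appendVarInt(buf *bytes.Buffer, val uint64) {
	switch {
	case val < 0xfd:
		buf.WriteByte(byte(val))
	case val <= math.MaxUint16:
		buf.WriteByte(0xfd)
		binary.Write(buf, binary.LittleEndian, uint16(val))
	case val <= math.MaxUint32:
		buf.WriteByte(0xfe)
		binary.Write(buf, binary.LittleEndian, uint32(val))
	default:
		buf.WriteByte(0xff)
		binary.Write(buf, binary.LittleEndian, val)
	}
}

func main() {
	var buf bytes.Buffer
	appendVarInt(&buf, 65535)        // largest value that fits the 0xfd form
	fmt.Printf("% x\n", buf.Bytes()) // prints: fd ff ff
}
```
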


@@ -1,695 +1,44 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"bytes"
"github.com/pkg/errors"
"io"
"reflect"
"strings"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/util/daghash"
)
import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// mainnetGenesisHash is the hash of the first block in the block DAG for the
// main network (genesis block).
var mainnetGenesisHash = &daghash.Hash{
var mainnetGenesisHash = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
0xdc, 0x5f, 0x5b, 0x5b, 0x1d, 0xc2, 0xa7, 0x25,
0x49, 0xd5, 0x1d, 0x4d, 0xee, 0xd7, 0xa4, 0x8b,
0xaf, 0xd3, 0x14, 0x4b, 0x56, 0x78, 0x98, 0xb1,
0x8c, 0xfd, 0x9f, 0x69, 0xdd, 0xcf, 0xbb, 0x63,
}
})
// simnetGenesisHash is the hash of the first block in the block DAG for the
// simulation test network.
var simnetGenesisHash = &daghash.Hash{
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a,
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,
}
var simnetGenesisHash = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
0x9d, 0x89, 0xb0, 0x6e, 0xb3, 0x47, 0xb5, 0x6e,
0xcd, 0x6c, 0x63, 0x99, 0x45, 0x91, 0xd5, 0xce,
0x9b, 0x43, 0x05, 0xc1, 0xa5, 0x5e, 0x2a, 0xda,
0x90, 0x4c, 0xf0, 0x6c, 0x4d, 0x5f, 0xd3, 0x62,
})
// mainnetGenesisMerkleRoot is the hash of the first transaction in the genesis
// block for the main network.
var mainnetGenesisMerkleRoot = &daghash.Hash{
var mainnetGenesisMerkleRoot = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
0x4a, 0x5e, 0x1e, 0x4b, 0xaa, 0xb8, 0x9f, 0x3a,
0x32, 0x51, 0x8a, 0x88, 0xc3, 0x1b, 0xc8, 0x7f,
0x61, 0x8f, 0x76, 0x67, 0x3e, 0x2c, 0xc7, 0x7a,
0xb2, 0x12, 0x7b, 0x7a, 0xfd, 0xed, 0xa3, 0x3b,
}
})
var exampleAcceptedIDMerkleRoot = &daghash.Hash{
var exampleAcceptedIDMerkleRoot = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
0x09, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C,
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
}
})
var exampleUTXOCommitment = &daghash.Hash{
var exampleUTXOCommitment = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
0x10, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C,
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
}
// TestElementEncoding tests appmessage encode and decode for various element types. This
// is mainly to test the "fast" paths in readElement and writeElement which use
// type assertions to avoid reflection when possible.
func TestElementEncoding(t *testing.T) {
tests := []struct {
in interface{} // Value to encode
buf []byte // Encoded value
}{
{int32(1), []byte{0x01, 0x00, 0x00, 0x00}},
{uint32(256), []byte{0x00, 0x01, 0x00, 0x00}},
{
int64(65536),
[]byte{0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00},
},
{
uint64(4294967296),
[]byte{0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00},
},
{
true,
[]byte{0x01},
},
{
false,
[]byte{0x00},
},
{
[4]byte{0x01, 0x02, 0x03, 0x04},
[]byte{0x01, 0x02, 0x03, 0x04},
},
{
MessageCommand(0x10),
[]byte{
0x10, 0x00, 0x00, 0x00,
},
},
{
[16]byte{
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
},
[]byte{
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
},
},
{
(*daghash.Hash)(&[daghash.HashSize]byte{
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
}),
[]byte{
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
},
},
{
ServiceFlag(SFNodeNetwork),
[]byte{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
},
{
KaspaNet(Mainnet),
[]byte{0x1d, 0xf7, 0xdc, 0x3d},
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Write to appmessage format.
var buf bytes.Buffer
err := WriteElement(&buf, test.in)
if err != nil {
t.Errorf("writeElement #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("writeElement #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
// Read from appmessage format.
rbuf := bytes.NewReader(test.buf)
val := test.in
if reflect.ValueOf(test.in).Kind() != reflect.Ptr {
val = reflect.New(reflect.TypeOf(test.in)).Interface()
}
err = ReadElement(rbuf, val)
if err != nil {
t.Errorf("readElement #%d error %v", i, err)
continue
}
ival := val
if reflect.ValueOf(test.in).Kind() != reflect.Ptr {
ival = reflect.Indirect(reflect.ValueOf(val)).Interface()
}
if !reflect.DeepEqual(ival, test.in) {
t.Errorf("readElement #%d\n got: %s want: %s", i,
spew.Sdump(ival), spew.Sdump(test.in))
continue
}
}
}
// TestElementEncodingErrors performs negative tests against appmessage encode and decode
// of various element types to confirm error paths work correctly.
func TestElementEncodingErrors(t *testing.T) {
type writeElementReflect int32
tests := []struct {
in interface{} // Value to encode
max int // Max size of fixed buffer to induce errors
writeErr error // Expected write error
readErr error // Expected read error
}{
{int32(1), 0, io.ErrShortWrite, io.EOF},
{uint32(256), 0, io.ErrShortWrite, io.EOF},
{int64(65536), 0, io.ErrShortWrite, io.EOF},
{true, 0, io.ErrShortWrite, io.EOF},
{[4]byte{0x01, 0x02, 0x03, 0x04}, 0, io.ErrShortWrite, io.EOF},
{
MessageCommand(10),
0, io.ErrShortWrite, io.EOF,
},
{
[16]byte{
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
},
0, io.ErrShortWrite, io.EOF,
},
{
(*daghash.Hash)(&[daghash.HashSize]byte{
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
}),
0, io.ErrShortWrite, io.EOF,
},
{ServiceFlag(SFNodeNetwork), 0, io.ErrShortWrite, io.EOF},
{KaspaNet(Mainnet), 0, io.ErrShortWrite, io.EOF},
// Type with no supported encoding.
{writeElementReflect(0), 0, errNoEncodingForType, errNoEncodingForType},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to appmessage format.
w := newFixedWriter(test.max)
err := WriteElement(w, test.in)
if !errors.Is(err, test.writeErr) {
t.Errorf("writeElement #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Decode from appmessage format.
r := newFixedReader(test.max, nil)
val := test.in
if reflect.ValueOf(test.in).Kind() != reflect.Ptr {
val = reflect.New(reflect.TypeOf(test.in)).Interface()
}
err = ReadElement(r, val)
if !errors.Is(err, test.readErr) {
t.Errorf("readElement #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
}
}
}
// TestVarIntEncoding tests appmessage encode and decode for variable length integers.
func TestVarIntEncoding(t *testing.T) {
tests := []struct {
value uint64 // Value to encode
buf []byte // Encoded value
}{
// Latest protocol version.
// Single byte
{0, []byte{0x00}},
// Max single byte
{0xfc, []byte{0xfc}},
// Min 2-byte
{0xfd, []byte{0xfd, 0xfd, 0x00}},
// Max 2-byte
{0xffff, []byte{0xfd, 0xff, 0xff}},
// Min 4-byte
{0x10000, []byte{0xfe, 0x00, 0x00, 0x01, 0x00}},
// Max 4-byte
{0xffffffff, []byte{0xfe, 0xff, 0xff, 0xff, 0xff}},
// Min 8-byte
{
0x100000000,
[]byte{0xff, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00},
},
// Max 8-byte
{
0xffffffffffffffff,
[]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to appmessage format.
buf := &bytes.Buffer{}
err := WriteVarInt(buf, test.value)
if err != nil {
t.Errorf("WriteVarInt #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("WriteVarInt #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
// Decode from appmessage format.
rbuf := bytes.NewReader(test.buf)
val, err := ReadVarInt(rbuf)
if err != nil {
t.Errorf("ReadVarInt #%d error %v", i, err)
continue
}
if val != test.value {
t.Errorf("ReadVarInt #%d\n got: %x want: %x", i,
val, test.value)
continue
}
}
}
// TestVarIntEncodingErrors performs negative tests against appmessage encode and decode
// of variable length integers to confirm error paths work correctly.
func TestVarIntEncodingErrors(t *testing.T) {
tests := []struct {
in uint64 // Value to encode
buf []byte // Encoded value
max int // Max size of fixed buffer to induce errors
writeErr error // Expected write error
readErr error // Expected read error
}{
// Force errors on discriminant.
{0, []byte{0x00}, 0, io.ErrShortWrite, io.EOF},
// Force errors on 2-byte read/write.
{0xfd, []byte{0xfd}, 0, io.ErrShortWrite, io.EOF}, // error on writing length
{0xfd, []byte{0xfd}, 2, io.ErrShortWrite, io.ErrUnexpectedEOF}, // error on writing actual data
// Force errors on 4-byte read/write.
{0x10000, []byte{0xfe}, 0, io.ErrShortWrite, io.EOF}, // error on writing length
{0x10000, []byte{0xfe}, 2, io.ErrShortWrite, io.ErrUnexpectedEOF}, // error on writing actual data
// Force errors on 8-byte read/write.
{0x100000000, []byte{0xff}, 0, io.ErrShortWrite, io.EOF}, // error on writing length
{0x100000000, []byte{0xff}, 2, io.ErrShortWrite, io.ErrUnexpectedEOF}, // error on writing actual data
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to appmessage format.
w := newFixedWriter(test.max)
err := WriteVarInt(w, test.in)
if !errors.Is(err, test.writeErr) {
t.Errorf("WriteVarInt #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Decode from appmessage format.
r := newFixedReader(test.max, test.buf)
_, err = ReadVarInt(r)
if !errors.Is(err, test.readErr) {
t.Errorf("ReadVarInt #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
}
}
}
// TestVarIntNonCanonical ensures variable length integers that are not encoded
// canonically return the expected error.
func TestVarIntNonCanonical(t *testing.T) {
pver := ProtocolVersion
tests := []struct {
name string // Test name for easier identification
in []byte // Value to decode
pver uint32 // Protocol version for appmessage encoding
}{
{
"0 encoded with 3 bytes", []byte{0xfd, 0x00, 0x00},
pver,
},
{
"max single-byte value encoded with 3 bytes",
[]byte{0xfd, 0xfc, 0x00}, pver,
},
{
"0 encoded with 5 bytes",
[]byte{0xfe, 0x00, 0x00, 0x00, 0x00}, pver,
},
{
"max three-byte value encoded with 5 bytes",
[]byte{0xfe, 0xff, 0xff, 0x00, 0x00}, pver,
},
{
"0 encoded with 9 bytes",
[]byte{0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
pver,
},
{
"max five-byte value encoded with 9 bytes",
[]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00},
pver,
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Decode from appmessage format.
rbuf := bytes.NewReader(test.in)
val, err := ReadVarInt(rbuf)
if msgErr := &(MessageError{}); !errors.As(err, &msgErr) {
t.Errorf("ReadVarInt #%d (%s) unexpected error %v", i,
test.name, err)
continue
}
if val != 0 {
t.Errorf("ReadVarInt #%d (%s)\n got: %d want: 0", i,
test.name, val)
continue
}
}
}
// TestVarIntSerializeSize tests the serialize size for variable length integers.
func TestVarIntSerializeSize(t *testing.T) {
tests := []struct {
val uint64 // Value to get the serialized size for
size int // Expected serialized size
}{
// Single byte
{0, 1},
// Max single byte
{0xfc, 1},
// Min 2-byte
{0xfd, 3},
// Max 2-byte
{0xffff, 3},
// Min 4-byte
{0x10000, 5},
// Max 4-byte
{0xffffffff, 5},
// Min 8-byte
{0x100000000, 9},
// Max 8-byte
{0xffffffffffffffff, 9},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
serializedSize := VarIntSerializeSize(test.val)
if serializedSize != test.size {
t.Errorf("VarIntSerializeSize #%d got: %d, want: %d", i,
serializedSize, test.size)
continue
}
}
}
// TestVarStringEncoding tests appmessage encode and decode for variable length strings.
func TestVarStringEncoding(t *testing.T) {
pver := ProtocolVersion
// str256 is a 256-byte string whose length requires a 3-byte varint (0xfd plus a 2-byte length) to encode.
str256 := strings.Repeat("test", 64)
tests := []struct {
in string // String to encode
out string // Expected decoded string
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
}{
// Latest protocol version.
// Empty string
{"", "", []byte{0x00}, pver},
// Single byte varint + string
{"Test", "Test", append([]byte{0x04}, []byte("Test")...), pver},
// 2-byte varint + string
{str256, str256, append([]byte{0xfd, 0x00, 0x01}, []byte(str256)...), pver},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to appmessage format.
var buf bytes.Buffer
err := WriteVarString(&buf, test.in)
if err != nil {
t.Errorf("WriteVarString #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("WriteVarString #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
// Decode from appmessage format.
rbuf := bytes.NewReader(test.buf)
val, err := ReadVarString(rbuf, test.pver)
if err != nil {
t.Errorf("ReadVarString #%d error %v", i, err)
continue
}
if val != test.out {
t.Errorf("ReadVarString #%d\n got: %s want: %s", i,
val, test.out)
continue
}
}
}
// TestVarStringEncodingErrors performs negative tests against appmessage encode and
// decode of variable length strings to confirm error paths work correctly.
func TestVarStringEncodingErrors(t *testing.T) {
pver := ProtocolVersion
// str256 is a 256-byte string whose length requires a 3-byte varint (0xfd plus a 2-byte length) to encode.
str256 := strings.Repeat("test", 64)
tests := []struct {
in string // Value to encode
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
max int // Max size of fixed buffer to induce errors
writeErr error // Expected write error
readErr error // Expected read error
}{
// Latest protocol version with intentional read/write errors.
// Force errors on empty string.
{"", []byte{0x00}, pver, 0, io.ErrShortWrite, io.EOF},
// Force error on single byte varint + string.
{"Test", []byte{0x04}, pver, 2, io.ErrShortWrite, io.ErrUnexpectedEOF},
// Force errors on 2-byte varint + string.
{str256, []byte{0xfd}, pver, 2, io.ErrShortWrite, io.ErrUnexpectedEOF},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to appmessage format.
w := newFixedWriter(test.max)
err := WriteVarString(w, test.in)
if !errors.Is(err, test.writeErr) {
t.Errorf("WriteVarString #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Decode from appmessage format.
r := newFixedReader(test.max, test.buf)
_, err = ReadVarString(r, test.pver)
if !errors.Is(err, test.readErr) {
t.Errorf("ReadVarString #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
}
}
}
// TestVarStringOverflowErrors performs tests to ensure deserializing variable
// length strings intentionally crafted to use large values for the string
// length are handled properly. This could otherwise potentially be used as an
// attack vector.
func TestVarStringOverflowErrors(t *testing.T) {
pver := ProtocolVersion
tests := []struct {
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
err error // Expected error
}{
{[]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
pver, &MessageError{}},
{[]byte{0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
pver, &MessageError{}},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Decode from appmessage format.
rbuf := bytes.NewReader(test.buf)
_, err := ReadVarString(rbuf, test.pver)
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
t.Errorf("ReadVarString #%d wrong error got: %v, "+
"want: %v", i, err, reflect.TypeOf(test.err))
continue
}
}
}
// TestVarBytesEncoding tests appmessage encode and decode for variable length byte array.
func TestVarBytesEncoding(t *testing.T) {
pver := ProtocolVersion
// bytes256 is a 256-byte array whose length requires a 3-byte varint (0xfd plus a 2-byte length) to encode.
bytes256 := bytes.Repeat([]byte{0x01}, 256)
tests := []struct {
in []byte // Byte Array to write
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
}{
// Latest protocol version.
// Empty byte array
{[]byte{}, []byte{0x00}, pver},
// Single byte varint + byte array
{[]byte{0x01}, []byte{0x01, 0x01}, pver},
// 2-byte varint + byte array
{bytes256, append([]byte{0xfd, 0x00, 0x01}, bytes256...), pver},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to appmessage format.
var buf bytes.Buffer
err := WriteVarBytes(&buf, test.pver, test.in)
if err != nil {
t.Errorf("WriteVarBytes #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("WriteVarBytes #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
// Decode from appmessage format.
rbuf := bytes.NewReader(test.buf)
val, err := ReadVarBytes(rbuf, test.pver, MaxMessagePayload,
"test payload")
if err != nil {
t.Errorf("ReadVarBytes #%d error %v", i, err)
continue
}
if !bytes.Equal(val, test.in) {
t.Errorf("ReadVarBytes #%d\n got: %s want: %s", i,
val, test.in)
continue
}
}
}
// TestVarBytesEncodingErrors performs negative tests against appmessage encode and
// decode of variable length byte arrays to confirm error paths work correctly.
func TestVarBytesEncodingErrors(t *testing.T) {
pver := ProtocolVersion
// bytes256 is a 256-byte array whose length requires a 3-byte varint (0xfd plus a 2-byte length) to encode.
bytes256 := bytes.Repeat([]byte{0x01}, 256)
tests := []struct {
in []byte // Byte Array to write
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
max int // Max size of fixed buffer to induce errors
writeErr error // Expected write error
readErr error // Expected read error
}{
// Latest protocol version with intentional read/write errors.
// Force errors on empty byte array.
{[]byte{}, []byte{0x00}, pver, 0, io.ErrShortWrite, io.EOF},
// Force error on single byte varint + byte array.
{[]byte{0x01, 0x02, 0x03}, []byte{0x04}, pver, 2, io.ErrShortWrite, io.ErrUnexpectedEOF},
// Force errors on 2-byte varint + byte array.
{bytes256, []byte{0xfd}, pver, 2, io.ErrShortWrite, io.ErrUnexpectedEOF},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to appmessage format.
w := newFixedWriter(test.max)
err := WriteVarBytes(w, test.pver, test.in)
if !errors.Is(err, test.writeErr) {
t.Errorf("WriteVarBytes #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Decode from appmessage format.
r := newFixedReader(test.max, test.buf)
_, err = ReadVarBytes(r, test.pver, MaxMessagePayload,
"test payload")
if !errors.Is(err, test.readErr) {
t.Errorf("ReadVarBytes #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
}
}
}
// TestVarBytesOverflowErrors performs tests to ensure deserializing variable
// length byte arrays intentionally crafted to use large values for the array
// length are handled properly. This could otherwise potentially be used as an
// attack vector.
func TestVarBytesOverflowErrors(t *testing.T) {
pver := ProtocolVersion
tests := []struct {
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
err error // Expected error
}{
{[]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
pver, &MessageError{}},
{[]byte{0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
pver, &MessageError{}},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Decode from appmessage format.
rbuf := bytes.NewReader(test.buf)
_, err := ReadVarBytes(rbuf, test.pver, MaxMessagePayload,
"test payload")
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
t.Errorf("ReadVarBytes #%d wrong error got: %v, "+
"want: %v", i, err, reflect.TypeOf(test.err))
continue
}
}
}
})
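
The varint tests above pin down the CompactSize-style format: values up to 0xfc occupy a single byte, and the discriminants 0xfd, 0xfe and 0xff introduce 2-, 4- and 8-byte little-endian payloads, with shortest-form encoding enforced on decode. A self-contained sketch of that scheme, independent of the package under test:

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// encodeVarInt produces the canonical (shortest-form) encoding of v.
func encodeVarInt(v uint64) []byte {
	switch {
	case v <= 0xfc:
		return []byte{byte(v)}
	case v <= 0xffff:
		buf := make([]byte, 3)
		buf[0] = 0xfd
		binary.LittleEndian.PutUint16(buf[1:], uint16(v))
		return buf
	case v <= 0xffffffff:
		buf := make([]byte, 5)
		buf[0] = 0xfe
		binary.LittleEndian.PutUint32(buf[1:], uint32(v))
		return buf
	default:
		buf := make([]byte, 9)
		buf[0] = 0xff
		binary.LittleEndian.PutUint64(buf[1:], v)
		return buf
	}
}

// decodeVarInt decodes buf, rejecting values that could have used a shorter form.
func decodeVarInt(buf []byte) (uint64, error) {
	if len(buf) == 0 {
		return 0, errors.New("empty input")
	}
	disc := buf[0]
	if disc < 0xfd {
		return uint64(disc), nil
	}
	payloadLen := map[byte]int{0xfd: 2, 0xfe: 4, 0xff: 8}[disc]
	minValue := map[byte]uint64{0xfd: 0xfd, 0xfe: 0x10000, 0xff: 0x100000000}[disc]
	if len(buf) < 1+payloadLen {
		return 0, errors.New("unexpected end of input")
	}
	var v uint64
	for i := 0; i < payloadLen; i++ { // little-endian payload
		v |= uint64(buf[1+i]) << (8 * i)
	}
	if v < minValue {
		return 0, fmt.Errorf("non-canonical varint: %d encoded with discriminant 0x%x", v, disc)
	}
	return v, nil
}

func main() {
	fmt.Printf("% x\n", encodeVarInt(0xfd)) // fd fd 00
	_, err := decodeVarInt([]byte{0xfd, 0x00, 0x00})
	fmt.Println(err) // non-canonical: 0 must be encoded as a single byte
}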


@@ -0,0 +1,270 @@
package appmessage
import (
"encoding/hex"
"github.com/kaspanet/kaspad/domain/consensus/utils/blockheader"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/subnetworks"
"github.com/kaspanet/kaspad/domain/consensus/utils/transactionid"
"github.com/kaspanet/kaspad/util/mstime"
)
// DomainBlockToMsgBlock converts an externalapi.DomainBlock to MsgBlock
func DomainBlockToMsgBlock(domainBlock *externalapi.DomainBlock) *MsgBlock {
msgTxs := make([]*MsgTx, 0, len(domainBlock.Transactions))
for _, domainTransaction := range domainBlock.Transactions {
msgTxs = append(msgTxs, DomainTransactionToMsgTx(domainTransaction))
}
return &MsgBlock{
Header: *DomainBlockHeaderToBlockHeader(domainBlock.Header),
Transactions: msgTxs,
}
}
// DomainBlockHeaderToBlockHeader converts an externalapi.BlockHeader to MsgBlockHeader
func DomainBlockHeaderToBlockHeader(domainBlockHeader externalapi.BlockHeader) *MsgBlockHeader {
return &MsgBlockHeader{
Version: domainBlockHeader.Version(),
ParentHashes: domainBlockHeader.ParentHashes(),
HashMerkleRoot: domainBlockHeader.HashMerkleRoot(),
AcceptedIDMerkleRoot: domainBlockHeader.AcceptedIDMerkleRoot(),
UTXOCommitment: domainBlockHeader.UTXOCommitment(),
Timestamp: mstime.UnixMilliseconds(domainBlockHeader.TimeInMilliseconds()),
Bits: domainBlockHeader.Bits(),
Nonce: domainBlockHeader.Nonce(),
}
}
// MsgBlockToDomainBlock converts a MsgBlock to externalapi.DomainBlock
func MsgBlockToDomainBlock(msgBlock *MsgBlock) *externalapi.DomainBlock {
transactions := make([]*externalapi.DomainTransaction, 0, len(msgBlock.Transactions))
for _, msgTx := range msgBlock.Transactions {
transactions = append(transactions, MsgTxToDomainTransaction(msgTx))
}
return &externalapi.DomainBlock{
Header: BlockHeaderToDomainBlockHeader(&msgBlock.Header),
Transactions: transactions,
}
}
// BlockHeaderToDomainBlockHeader converts a MsgBlockHeader to externalapi.BlockHeader
func BlockHeaderToDomainBlockHeader(blockHeader *MsgBlockHeader) externalapi.BlockHeader {
return blockheader.NewImmutableBlockHeader(
blockHeader.Version,
blockHeader.ParentHashes,
blockHeader.HashMerkleRoot,
blockHeader.AcceptedIDMerkleRoot,
blockHeader.UTXOCommitment,
blockHeader.Timestamp.UnixMilliseconds(),
blockHeader.Bits,
blockHeader.Nonce,
)
}
// DomainTransactionToMsgTx converts an externalapi.DomainTransaction into an MsgTx
func DomainTransactionToMsgTx(domainTransaction *externalapi.DomainTransaction) *MsgTx {
txIns := make([]*TxIn, 0, len(domainTransaction.Inputs))
for _, input := range domainTransaction.Inputs {
txIns = append(txIns, domainTransactionInputToTxIn(input))
}
txOuts := make([]*TxOut, 0, len(domainTransaction.Outputs))
for _, output := range domainTransaction.Outputs {
txOuts = append(txOuts, domainTransactionOutputToTxOut(output))
}
return &MsgTx{
Version: domainTransaction.Version,
TxIn: txIns,
TxOut: txOuts,
LockTime: domainTransaction.LockTime,
SubnetworkID: domainTransaction.SubnetworkID,
Gas: domainTransaction.Gas,
PayloadHash: domainTransaction.PayloadHash,
Payload: domainTransaction.Payload,
}
}
func domainTransactionOutputToTxOut(domainTransactionOutput *externalapi.DomainTransactionOutput) *TxOut {
return &TxOut{
Value: domainTransactionOutput.Value,
ScriptPubKey: domainTransactionOutput.ScriptPublicKey,
}
}
func domainTransactionInputToTxIn(domainTransactionInput *externalapi.DomainTransactionInput) *TxIn {
return &TxIn{
PreviousOutpoint: *domainOutpointToOutpoint(domainTransactionInput.PreviousOutpoint),
SignatureScript: domainTransactionInput.SignatureScript,
Sequence: domainTransactionInput.Sequence,
}
}
func domainOutpointToOutpoint(domainOutpoint externalapi.DomainOutpoint) *Outpoint {
return NewOutpoint(
&domainOutpoint.TransactionID,
domainOutpoint.Index)
}
// MsgTxToDomainTransaction converts an MsgTx into externalapi.DomainTransaction
func MsgTxToDomainTransaction(msgTx *MsgTx) *externalapi.DomainTransaction {
transactionInputs := make([]*externalapi.DomainTransactionInput, 0, len(msgTx.TxIn))
for _, txIn := range msgTx.TxIn {
transactionInputs = append(transactionInputs, txInToDomainTransactionInput(txIn))
}
transactionOutputs := make([]*externalapi.DomainTransactionOutput, 0, len(msgTx.TxOut))
for _, txOut := range msgTx.TxOut {
transactionOutputs = append(transactionOutputs, txOutToDomainTransactionOutput(txOut))
}
payload := make([]byte, 0)
if msgTx.Payload != nil {
payload = msgTx.Payload
}
return &externalapi.DomainTransaction{
Version: msgTx.Version,
Inputs: transactionInputs,
Outputs: transactionOutputs,
LockTime: msgTx.LockTime,
SubnetworkID: msgTx.SubnetworkID,
Gas: msgTx.Gas,
PayloadHash: msgTx.PayloadHash,
Payload: payload,
}
}
func txOutToDomainTransactionOutput(txOut *TxOut) *externalapi.DomainTransactionOutput {
return &externalapi.DomainTransactionOutput{
Value: txOut.Value,
ScriptPublicKey: txOut.ScriptPubKey,
}
}
func txInToDomainTransactionInput(txIn *TxIn) *externalapi.DomainTransactionInput {
return &externalapi.DomainTransactionInput{
PreviousOutpoint: *outpointToDomainOutpoint(&txIn.PreviousOutpoint), //TODO
SignatureScript: txIn.SignatureScript,
Sequence: txIn.Sequence,
}
}
func outpointToDomainOutpoint(outpoint *Outpoint) *externalapi.DomainOutpoint {
return &externalapi.DomainOutpoint{
TransactionID: outpoint.TxID,
Index: outpoint.Index,
}
}
// RPCTransactionToDomainTransaction converts RPCTransactions to DomainTransactions
func RPCTransactionToDomainTransaction(rpcTransaction *RPCTransaction) (*externalapi.DomainTransaction, error) {
inputs := make([]*externalapi.DomainTransactionInput, len(rpcTransaction.Inputs))
for i, input := range rpcTransaction.Inputs {
transactionIDBytes, err := hex.DecodeString(input.PreviousOutpoint.TransactionID)
if err != nil {
return nil, err
}
transactionID, err := transactionid.FromBytes(transactionIDBytes)
if err != nil {
return nil, err
}
previousOutpoint := &externalapi.DomainOutpoint{
TransactionID: *transactionID,
Index: input.PreviousOutpoint.Index,
}
signatureScript, err := hex.DecodeString(input.SignatureScript)
if err != nil {
return nil, err
}
inputs[i] = &externalapi.DomainTransactionInput{
PreviousOutpoint: *previousOutpoint,
SignatureScript: signatureScript,
Sequence: input.Sequence,
}
}
outputs := make([]*externalapi.DomainTransactionOutput, len(rpcTransaction.Outputs))
for i, output := range rpcTransaction.Outputs {
scriptPublicKey, err := hex.DecodeString(output.ScriptPublicKey.Script)
if err != nil {
return nil, err
}
outputs[i] = &externalapi.DomainTransactionOutput{
Value: output.Amount,
ScriptPublicKey: &externalapi.ScriptPublicKey{Script: scriptPublicKey, Version: output.ScriptPublicKey.Version},
}
}
subnetworkIDBytes, err := hex.DecodeString(rpcTransaction.SubnetworkID)
if err != nil {
return nil, err
}
subnetworkID, err := subnetworks.FromBytes(subnetworkIDBytes)
if err != nil {
return nil, err
}
payloadHashBytes, err := hex.DecodeString(rpcTransaction.PayloadHash)
if err != nil {
return nil, err
}
payloadHash, err := externalapi.NewDomainHashFromByteSlice(payloadHashBytes)
if err != nil {
return nil, err
}
payload, err := hex.DecodeString(rpcTransaction.Payload)
if err != nil {
return nil, err
}
return &externalapi.DomainTransaction{
Version: rpcTransaction.Version,
Inputs: inputs,
Outputs: outputs,
LockTime: rpcTransaction.LockTime,
SubnetworkID: *subnetworkID,
Gas: rpcTransaction.Gas,
PayloadHash: *payloadHash,
Payload: payload,
}, nil
}
// DomainTransactionToRPCTransaction converts DomainTransactions to RPCTransactions
func DomainTransactionToRPCTransaction(transaction *externalapi.DomainTransaction) *RPCTransaction {
inputs := make([]*RPCTransactionInput, len(transaction.Inputs))
for i, input := range transaction.Inputs {
transactionID := input.PreviousOutpoint.TransactionID.String()
previousOutpoint := &RPCOutpoint{
TransactionID: transactionID,
Index: input.PreviousOutpoint.Index,
}
signatureScript := hex.EncodeToString(input.SignatureScript)
inputs[i] = &RPCTransactionInput{
PreviousOutpoint: previousOutpoint,
SignatureScript: signatureScript,
Sequence: input.Sequence,
}
}
outputs := make([]*RPCTransactionOutput, len(transaction.Outputs))
for i, output := range transaction.Outputs {
scriptPublicKey := hex.EncodeToString(output.ScriptPublicKey.Script)
outputs[i] = &RPCTransactionOutput{
Amount: output.Value,
ScriptPublicKey: &RPCScriptPublicKey{Script: scriptPublicKey, Version: output.ScriptPublicKey.Version},
}
}
subnetworkID := hex.EncodeToString(transaction.SubnetworkID[:])
payloadHash := transaction.PayloadHash.String()
payload := hex.EncodeToString(transaction.Payload)
return &RPCTransaction{
Version: transaction.Version,
Inputs: inputs,
Outputs: outputs,
LockTime: transaction.LockTime,
SubnetworkID: subnetworkID,
Gas: transaction.Gas,
PayloadHash: payloadHash,
Payload: payload,
}
}
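
Both converters follow the same convention: numeric fields are copied one-to-one, while binary fields (scripts, payloads, IDs) travel as hex strings over RPC, so converting a transaction to its RPC form and back should be the identity. A trimmed-down sketch of that round-trip property, using hypothetical stand-in types rather than the full externalapi structs:

package main

import (
	"encoding/hex"
	"fmt"
)

// Simplified stand-ins for the domain and RPC transaction types; the real
// structs carry many more fields.
type domainTx struct {
	LockTime uint64
	Gas      uint64
	Payload  []byte
}

type rpcTx struct {
	LockTime uint64
	Gas      uint64
	Payload  string // binary fields travel as hex strings over RPC
}

func toRPC(tx *domainTx) *rpcTx {
	return &rpcTx{
		LockTime: tx.LockTime,
		Gas:      tx.Gas, // copying the wrong field here silently corrupts round-trips
		Payload:  hex.EncodeToString(tx.Payload),
	}
}

func fromRPC(tx *rpcTx) (*domainTx, error) {
	payload, err := hex.DecodeString(tx.Payload)
	if err != nil {
		return nil, err
	}
	return &domainTx{LockTime: tx.LockTime, Gas: tx.Gas, Payload: payload}, nil
}

func main() {
	original := &domainTx{LockTime: 10, Gas: 7, Payload: []byte{0xca, 0xfe}}
	roundTripped, err := fromRPC(toRPC(original))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n%+v\n", original, roundTripped) // fields must match
}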


@@ -34,24 +34,34 @@ const (
CmdVerAck
CmdRequestAddresses
CmdAddresses
CmdRequestIBDBlocks
CmdRequestHeaders
CmdBlock
CmdTx
CmdPing
CmdPong
CmdRequestBlockLocator
CmdBlockLocator
CmdSelectedTip
CmdRequestSelectedTip
CmdInvRelayBlock
CmdRequestRelayBlocks
CmdInvTransaction
CmdRequestTransactions
CmdIBDBlock
CmdRequestNextIBDBlocks
CmdDoneIBDBlocks
CmdDoneHeaders
CmdTransactionNotFound
CmdReject
CmdHeader
CmdRequestNextHeaders
CmdRequestIBDRootUTXOSetAndBlock
CmdIBDRootUTXOSetChunk
CmdRequestIBDBlocks
CmdIBDRootNotFound
CmdRequestIBDRootHash
CmdIBDRootHash
CmdIBDBlockLocator
CmdIBDBlockLocatorHighestHash
CmdBlockHeaders
CmdRequestNextIBDRootUTXOSetChunk
CmdDoneIBDRootUTXOSetChunks
// rpc
CmdGetCurrentNetworkRequestMessage
@@ -76,15 +86,15 @@ const (
CmdAddPeerResponseMessage
CmdSubmitTransactionRequestMessage
CmdSubmitTransactionResponseMessage
CmdNotifyChainChangedRequestMessage
CmdNotifyChainChangedResponseMessage
CmdChainChangedNotificationMessage
CmdNotifyVirtualSelectedParentChainChangedRequestMessage
CmdNotifyVirtualSelectedParentChainChangedResponseMessage
CmdVirtualSelectedParentChainChangedNotificationMessage
CmdGetBlockRequestMessage
CmdGetBlockResponseMessage
CmdGetSubnetworkRequestMessage
CmdGetSubnetworkResponseMessage
CmdGetChainFromBlockRequestMessage
CmdGetChainFromBlockResponseMessage
CmdGetVirtualSelectedParentChainFromBlockRequestMessage
CmdGetVirtualSelectedParentChainFromBlockResponseMessage
CmdGetBlocksRequestMessage
CmdGetBlocksResponseMessage
CmdGetBlockCountRequestMessage
@@ -101,81 +111,115 @@ const (
CmdGetMempoolEntriesResponseMessage
CmdShutDownRequestMessage
CmdShutDownResponseMessage
CmdGetHeadersRequestMessage
CmdGetHeadersResponseMessage
CmdNotifyUTXOsChangedRequestMessage
CmdNotifyUTXOsChangedResponseMessage
CmdUTXOsChangedNotificationMessage
CmdGetUTXOsByAddressesRequestMessage
CmdGetUTXOsByAddressesResponseMessage
CmdGetVirtualSelectedParentBlueScoreRequestMessage
CmdGetVirtualSelectedParentBlueScoreResponseMessage
CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage
CmdNotifyVirtualSelectedParentBlueScoreChangedResponseMessage
CmdVirtualSelectedParentBlueScoreChangedNotificationMessage
)
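
Note that all of these commands are iota-derived, so each command's wire value is simply its position in the const block; appending CmdRequestNextIBDRootUTXOSetChunk and CmdDoneIBDRootUTXOSetChunks at the end keeps existing values stable, whereas inserting mid-block renumbers everything after the insertion point. A small illustration with hypothetical names:

package main

import "fmt"

type messageCommand uint32

// Positional assignment via iota: each command's wire value is its index.
const (
	cmdVersion messageCommand = iota // 0
	cmdVerAck                        // 1
	cmdRequestAddresses              // 2
	cmdAddresses                     // 3
	// Appending here preserves the values above; inserting between
	// existing entries would shift every later command's wire value.
	cmdRequestHeaders // 4
)

func main() {
	fmt.Println(cmdVersion, cmdRequestHeaders) // 0 4
}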
// ProtocolMessageCommandToString maps all MessageCommands to their string representation
var ProtocolMessageCommandToString = map[MessageCommand]string{
CmdVersion: "Version",
CmdVerAck: "VerAck",
CmdRequestAddresses: "RequestAddresses",
CmdAddresses: "Addresses",
CmdRequestIBDBlocks: "RequestBlocks",
CmdBlock: "Block",
CmdTx: "Tx",
CmdPing: "Ping",
CmdPong: "Pong",
CmdRequestBlockLocator: "RequestBlockLocator",
CmdBlockLocator: "BlockLocator",
CmdSelectedTip: "SelectedTip",
CmdRequestSelectedTip: "RequestSelectedTip",
CmdInvRelayBlock: "InvRelayBlock",
CmdRequestRelayBlocks: "RequestRelayBlocks",
CmdInvTransaction: "InvTransaction",
CmdRequestTransactions: "RequestTransactions",
CmdIBDBlock: "IBDBlock",
CmdRequestNextIBDBlocks: "RequestNextIBDBlocks",
CmdDoneIBDBlocks: "DoneIBDBlocks",
CmdTransactionNotFound: "TransactionNotFound",
CmdReject: "Reject",
CmdVersion: "Version",
CmdVerAck: "VerAck",
CmdRequestAddresses: "RequestAddresses",
CmdAddresses: "Addresses",
CmdRequestHeaders: "RequestHeaders",
CmdBlock: "Block",
CmdTx: "Tx",
CmdPing: "Ping",
CmdPong: "Pong",
CmdRequestBlockLocator: "RequestBlockLocator",
CmdBlockLocator: "BlockLocator",
CmdInvRelayBlock: "InvRelayBlock",
CmdRequestRelayBlocks: "RequestRelayBlocks",
CmdInvTransaction: "InvTransaction",
CmdRequestTransactions: "RequestTransactions",
CmdIBDBlock: "IBDBlock",
CmdDoneHeaders: "DoneHeaders",
CmdTransactionNotFound: "TransactionNotFound",
CmdReject: "Reject",
CmdHeader: "Header",
CmdRequestNextHeaders: "RequestNextHeaders",
CmdRequestIBDRootUTXOSetAndBlock: "RequestPruningUTXOSetAndBlock",
CmdIBDRootUTXOSetChunk: "IBDRootUTXOSetChunk",
CmdRequestIBDBlocks: "RequestIBDBlocks",
CmdIBDRootNotFound: "IBDRootNotFound",
CmdRequestIBDRootHash: "RequestIBDRootHash",
CmdIBDRootHash: "IBDRootHash",
CmdIBDBlockLocator: "IBDBlockLocator",
CmdIBDBlockLocatorHighestHash: "IBDBlockLocatorHighestHash",
CmdBlockHeaders: "BlockHeaders",
CmdRequestNextIBDRootUTXOSetChunk: "RequestNextIBDRootUTXOSetChunk",
CmdDoneIBDRootUTXOSetChunks: "DoneIBDRootUTXOSetChunks",
}
// RPCMessageCommandToString maps all MessageCommands to their string representation
var RPCMessageCommandToString = map[MessageCommand]string{
CmdGetCurrentNetworkRequestMessage: "GetCurrentNetworkRequest",
CmdGetCurrentNetworkResponseMessage: "GetCurrentNetworkResponse",
CmdSubmitBlockRequestMessage: "SubmitBlockRequest",
CmdSubmitBlockResponseMessage: "SubmitBlockResponse",
CmdGetBlockTemplateRequestMessage: "GetBlockTemplateRequest",
CmdGetBlockTemplateResponseMessage: "GetBlockTemplateResponse",
CmdGetBlockTemplateTransactionMessage: "CmdGetBlockTemplateTransaction",
CmdNotifyBlockAddedRequestMessage: "NotifyBlockAddedRequest",
CmdNotifyBlockAddedResponseMessage: "NotifyBlockAddedResponse",
CmdBlockAddedNotificationMessage: "BlockAddedNotification",
CmdGetPeerAddressesRequestMessage: "GetPeerAddressesRequest",
CmdGetPeerAddressesResponseMessage: "GetPeerAddressesResponse",
CmdGetSelectedTipHashRequestMessage: "GetSelectedTipHashRequest",
CmdGetSelectedTipHashResponseMessage: "GetSelectedTipHashResponse",
CmdGetMempoolEntryRequestMessage: "GetMempoolEntryRequest",
CmdGetMempoolEntryResponseMessage: "GetMempoolEntryResponse",
CmdGetConnectedPeerInfoRequestMessage: "GetConnectedPeerInfoRequest",
CmdGetConnectedPeerInfoResponseMessage: "GetConnectedPeerInfoResponse",
CmdAddPeerRequestMessage: "AddPeerRequest",
CmdAddPeerResponseMessage: "AddPeerResponse",
CmdSubmitTransactionRequestMessage: "SubmitTransactionRequest",
CmdSubmitTransactionResponseMessage: "SubmitTransactionResponse",
CmdNotifyChainChangedRequestMessage: "NotifyChainChangedRequest",
CmdNotifyChainChangedResponseMessage: "NotifyChainChangedResponse",
CmdChainChangedNotificationMessage: "ChainChangedNotification",
CmdGetBlockRequestMessage: "GetBlockRequest",
CmdGetBlockResponseMessage: "GetBlockResponse",
CmdGetSubnetworkRequestMessage: "GetSubnetworkRequest",
CmdGetSubnetworkResponseMessage: "GetSubnetworkResponse",
CmdGetChainFromBlockRequestMessage: "GetChainFromBlockRequest",
CmdGetChainFromBlockResponseMessage: "GetChainFromBlockResponse",
CmdGetBlocksRequestMessage: "GetBlocksRequest",
CmdGetBlocksResponseMessage: "GetBlocksResponse",
CmdGetBlockCountRequestMessage: "GetBlockCountRequest",
CmdGetBlockCountResponseMessage: "GetBlockCountResponse",
CmdGetBlockDAGInfoRequestMessage: "GetBlockDAGInfoRequest",
CmdGetBlockDAGInfoResponseMessage: "GetBlockDAGInfoResponse",
CmdResolveFinalityConflictRequestMessage: "ResolveFinalityConflictRequest",
CmdResolveFinalityConflictResponseMessage: "ResolveFinalityConflictResponse",
CmdNotifyFinalityConflictsRequestMessage: "NotifyFinalityConflictsRequest",
CmdNotifyFinalityConflictsResponseMessage: "NotifyFinalityConflictsResponse",
CmdFinalityConflictNotificationMessage: "FinalityConflictNotification",
CmdFinalityConflictResolvedNotificationMessage: "FinalityConflictResolvedNotification",
CmdGetMempoolEntriesRequestMessage: "GetMempoolEntriesRequestMessage",
CmdGetMempoolEntriesResponseMessage: "GetMempoolEntriesResponseMessage",
CmdGetCurrentNetworkRequestMessage: "GetCurrentNetworkRequest",
CmdGetCurrentNetworkResponseMessage: "GetCurrentNetworkResponse",
CmdSubmitBlockRequestMessage: "SubmitBlockRequest",
CmdSubmitBlockResponseMessage: "SubmitBlockResponse",
CmdGetBlockTemplateRequestMessage: "GetBlockTemplateRequest",
CmdGetBlockTemplateResponseMessage: "GetBlockTemplateResponse",
CmdGetBlockTemplateTransactionMessage: "GetBlockTemplateTransaction",
CmdNotifyBlockAddedRequestMessage: "NotifyBlockAddedRequest",
CmdNotifyBlockAddedResponseMessage: "NotifyBlockAddedResponse",
CmdBlockAddedNotificationMessage: "BlockAddedNotification",
CmdGetPeerAddressesRequestMessage: "GetPeerAddressesRequest",
CmdGetPeerAddressesResponseMessage: "GetPeerAddressesResponse",
CmdGetSelectedTipHashRequestMessage: "GetSelectedTipHashRequest",
CmdGetSelectedTipHashResponseMessage: "GetSelectedTipHashResponse",
CmdGetMempoolEntryRequestMessage: "GetMempoolEntryRequest",
CmdGetMempoolEntryResponseMessage: "GetMempoolEntryResponse",
CmdGetConnectedPeerInfoRequestMessage: "GetConnectedPeerInfoRequest",
CmdGetConnectedPeerInfoResponseMessage: "GetConnectedPeerInfoResponse",
CmdAddPeerRequestMessage: "AddPeerRequest",
CmdAddPeerResponseMessage: "AddPeerResponse",
CmdSubmitTransactionRequestMessage: "SubmitTransactionRequest",
CmdSubmitTransactionResponseMessage: "SubmitTransactionResponse",
CmdNotifyVirtualSelectedParentChainChangedRequestMessage: "NotifyVirtualSelectedParentChainChangedRequest",
CmdNotifyVirtualSelectedParentChainChangedResponseMessage: "NotifyVirtualSelectedParentChainChangedResponse",
CmdVirtualSelectedParentChainChangedNotificationMessage: "VirtualSelectedParentChainChangedNotification",
CmdGetBlockRequestMessage: "GetBlockRequest",
CmdGetBlockResponseMessage: "GetBlockResponse",
CmdGetSubnetworkRequestMessage: "GetSubnetworkRequest",
CmdGetSubnetworkResponseMessage: "GetSubnetworkResponse",
CmdGetVirtualSelectedParentChainFromBlockRequestMessage: "GetVirtualSelectedParentChainFromBlockRequest",
CmdGetVirtualSelectedParentChainFromBlockResponseMessage: "GetVirtualSelectedParentChainFromBlockResponse",
CmdGetBlocksRequestMessage: "GetBlocksRequest",
CmdGetBlocksResponseMessage: "GetBlocksResponse",
CmdGetBlockCountRequestMessage: "GetBlockCountRequest",
CmdGetBlockCountResponseMessage: "GetBlockCountResponse",
CmdGetBlockDAGInfoRequestMessage: "GetBlockDAGInfoRequest",
CmdGetBlockDAGInfoResponseMessage: "GetBlockDAGInfoResponse",
CmdResolveFinalityConflictRequestMessage: "ResolveFinalityConflictRequest",
CmdResolveFinalityConflictResponseMessage: "ResolveFinalityConflictResponse",
CmdNotifyFinalityConflictsRequestMessage: "NotifyFinalityConflictsRequest",
CmdNotifyFinalityConflictsResponseMessage: "NotifyFinalityConflictsResponse",
CmdFinalityConflictNotificationMessage: "FinalityConflictNotification",
CmdFinalityConflictResolvedNotificationMessage: "FinalityConflictResolvedNotification",
CmdGetMempoolEntriesRequestMessage: "GetMempoolEntriesRequest",
CmdGetMempoolEntriesResponseMessage: "GetMempoolEntriesResponse",
CmdGetHeadersRequestMessage: "GetHeadersRequest",
CmdGetHeadersResponseMessage: "GetHeadersResponse",
CmdNotifyUTXOsChangedRequestMessage: "NotifyUTXOsChangedRequest",
CmdNotifyUTXOsChangedResponseMessage: "NotifyUTXOsChangedResponse",
CmdUTXOsChangedNotificationMessage: "UTXOsChangedNotification",
CmdGetUTXOsByAddressesRequestMessage: "GetUTXOsByAddressesRequest",
CmdGetUTXOsByAddressesResponseMessage: "GetUTXOsByAddressesResponse",
CmdGetVirtualSelectedParentBlueScoreRequestMessage: "GetVirtualSelectedParentBlueScoreRequest",
CmdGetVirtualSelectedParentBlueScoreResponseMessage: "GetVirtualSelectedParentBlueScoreResponse",
CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage: "NotifyVirtualSelectedParentBlueScoreChangedRequest",
CmdNotifyVirtualSelectedParentBlueScoreChangedResponseMessage: "NotifyVirtualSelectedParentBlueScoreChangedResponse",
CmdVirtualSelectedParentBlueScoreChangedNotificationMessage: "VirtualSelectedParentBlueScoreChangedNotification",
}
// Message is an interface that describes a kaspa message. A type that


@@ -1,195 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"fmt"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/pkg/errors"
"io"
"math"
)
// BaseBlockHeaderPayload is the base number of bytes a block header can be,
// not including the list of parent block headers.
// Version 4 bytes + Timestamp 8 bytes + Bits 4 bytes + Nonce 8 bytes +
// + NumParentBlocks 1 byte + HashMerkleRoot hash +
// + AcceptedIDMerkleRoot hash + UTXOCommitment hash.
// To get the total size of a block header, add len(ParentHashes) * daghash.HashSize
// to this value.
const BaseBlockHeaderPayload = 25 + 3*(daghash.HashSize)
// MaxNumParentBlocks is the maximum number of parent blocks a block can reference.
// Currently set to 255 as the maximum number NumParentBlocks can be due to it being a byte
const MaxNumParentBlocks = 255
// MaxBlockHeaderPayload is the maximum number of bytes a block header can be.
// BaseBlockHeaderPayload + up to MaxNumParentBlocks hashes of parent blocks
const MaxBlockHeaderPayload = BaseBlockHeaderPayload + (MaxNumParentBlocks * daghash.HashSize)
// BlockHeader defines information about a block and is used in the kaspa
// block (MsgBlock) and headers (MsgHeader) messages.
type BlockHeader struct {
// Version of the block. This is not the same as the protocol version.
Version int32
// Hashes of the parent block headers in the blockDAG.
ParentHashes []*daghash.Hash
// HashMerkleRoot is the merkle tree reference to the hashes of all transactions for the block.
HashMerkleRoot *daghash.Hash
// AcceptedIDMerkleRoot is a merkle tree reference to the hashes of all
// transactions accepted from the block's blues.
AcceptedIDMerkleRoot *daghash.Hash
// UTXOCommitment is an ECMH UTXO commitment to the block UTXO.
UTXOCommitment *daghash.Hash
// Time the block was created.
Timestamp mstime.Time
// Difficulty target for the block.
Bits uint32
// Nonce used to generate the block.
Nonce uint64
}
// NumParentBlocks returns the number of entries in ParentHashes
func (h *BlockHeader) NumParentBlocks() byte {
numParents := len(h.ParentHashes)
if numParents > math.MaxUint8 {
panic(errors.Errorf("number of parents is %d, which is more than one byte can fit", numParents))
}
return byte(numParents)
}
// BlockHash computes the block identifier hash for the given block header.
func (h *BlockHeader) BlockHash() *daghash.Hash {
// Encode the header and double sha256 everything prior to the number of
// transactions.
writer := daghash.NewDoubleHashWriter()
err := writeBlockHeader(writer, 0, h)
if err != nil {
// This could only happen if the writer returned an error,
// and this writer should never return an error (no allocations or possible failures).
// The only non-writer error path here is unknown types in `WriteElement`.
panic(fmt.Sprintf("BlockHash() failed. this should never fail unless BlockHeader was changed. err: %+v", err))
}
res := writer.Finalize()
return &res
}
// IsGenesis returns true iff this block is a genesis block
func (h *BlockHeader) IsGenesis() bool {
return h.NumParentBlocks() == 0
}
// KaspaDecode decodes r using the kaspa protocol encoding into the receiver.
// This is part of the Message interface implementation.
// See Deserialize for decoding block headers stored to disk, such as in a
// database, as opposed to decoding block headers from the appmessage.
func (h *BlockHeader) KaspaDecode(r io.Reader, pver uint32) error {
return readBlockHeader(r, pver, h)
}
// KaspaEncode encodes the receiver to w using the kaspa protocol encoding.
// This is part of the Message interface implementation.
// See Serialize for encoding block headers to be stored to disk, such as in a
// database, as opposed to encoding block headers for the appmessage.
func (h *BlockHeader) KaspaEncode(w io.Writer, pver uint32) error {
return writeBlockHeader(w, pver, h)
}
// Deserialize decodes a block header from r into the receiver using a format
// that is suitable for long-term storage such as a database while respecting
// the Version field.
func (h *BlockHeader) Deserialize(r io.Reader) error {
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of readBlockHeader.
return readBlockHeader(r, 0, h)
}
// Serialize encodes the receiver to w using a format
// that is suitable for long-term storage such as a database while respecting
// the Version field.
func (h *BlockHeader) Serialize(w io.Writer) error {
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of writeBlockHeader.
return writeBlockHeader(w, 0, h)
}
// SerializeSize returns the number of bytes it would take to serialize the
// block header.
func (h *BlockHeader) SerializeSize() int {
return BaseBlockHeaderPayload + int(h.NumParentBlocks())*daghash.HashSize
}
// NewBlockHeader returns a new BlockHeader using the provided version, parent
// hashes, hash merkle root, accepted ID merkle root, UTXO commitment, difficulty
// bits, and nonce, with defaults or calculated values for the remaining fields.
func NewBlockHeader(version int32, parentHashes []*daghash.Hash, hashMerkleRoot *daghash.Hash,
acceptedIDMerkleRoot *daghash.Hash, utxoCommitment *daghash.Hash, bits uint32, nonce uint64) *BlockHeader {
// Limit the timestamp to one millisecond precision since the protocol
// doesn't support better.
return &BlockHeader{
Version: version,
ParentHashes: parentHashes,
HashMerkleRoot: hashMerkleRoot,
AcceptedIDMerkleRoot: acceptedIDMerkleRoot,
UTXOCommitment: utxoCommitment,
Timestamp: mstime.Now(),
Bits: bits,
Nonce: nonce,
}
}
// readBlockHeader reads a kaspa block header from r. See Deserialize for
// decoding block headers stored to disk, such as in a database, as opposed to
// decoding from the appmessage.
func readBlockHeader(r io.Reader, pver uint32, bh *BlockHeader) error {
var numParentBlocks byte
err := readElements(r, &bh.Version, &numParentBlocks)
if err != nil {
return err
}
bh.ParentHashes = make([]*daghash.Hash, numParentBlocks)
for i := byte(0); i < numParentBlocks; i++ {
hash := &daghash.Hash{}
err := ReadElement(r, hash)
if err != nil {
return err
}
bh.ParentHashes[i] = hash
}
bh.HashMerkleRoot = &daghash.Hash{}
bh.AcceptedIDMerkleRoot = &daghash.Hash{}
bh.UTXOCommitment = &daghash.Hash{}
return readElements(r, bh.HashMerkleRoot, bh.AcceptedIDMerkleRoot, bh.UTXOCommitment,
(*int64Time)(&bh.Timestamp), &bh.Bits, &bh.Nonce)
}
// writeBlockHeader writes a kaspa block header to w. See Serialize for
// encoding block headers to be stored to disk, such as in a database, as
// opposed to encoding for the appmessage.
func writeBlockHeader(w io.Writer, pver uint32, bh *BlockHeader) error {
timestamp := bh.Timestamp.UnixMilliseconds()
if err := writeElements(w, bh.Version, bh.NumParentBlocks()); err != nil {
return err
}
for _, hash := range bh.ParentHashes {
if err := WriteElement(w, hash); err != nil {
return err
}
}
return writeElements(w, bh.HashMerkleRoot, bh.AcceptedIDMerkleRoot, bh.UTXOCommitment, timestamp, bh.Bits, bh.Nonce)
}
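
The size arithmetic follows directly from the constants above: 25 fixed bytes plus three 32-byte hashes gives BaseBlockHeaderPayload = 121, and each parent hash adds another 32 bytes, which is exactly what TestBlockHeaderSerializeSize asserts below for the 0-parent (121) and 2-parent (185) cases. A standalone sketch of the same computation:

package main

import "fmt"

const (
	hashSize = 32 // daghash.HashSize
	// Version 4 + Timestamp 8 + Bits 4 + Nonce 8 + NumParentBlocks 1 = 25 bytes,
	// plus the three fixed hashes (HashMerkleRoot, AcceptedIDMerkleRoot, UTXOCommitment).
	baseBlockHeaderPayload = 25 + 3*hashSize
)

// serializeSize mirrors BlockHeader.SerializeSize: one extra hash per parent.
func serializeSize(numParents int) int {
	return baseBlockHeaderPayload + numParents*hashSize
}

func main() {
	fmt.Println(serializeSize(0)) // 121: genesis header, no parents
	fmt.Println(serializeSize(2)) // 185: header with two parents
}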


@@ -1,345 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"bytes"
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/util/random"
"reflect"
"testing"
)
// TestBlockHeader tests the BlockHeader API.
func TestBlockHeader(t *testing.T) {
nonce, err := random.Uint64()
if err != nil {
t.Errorf("random.Uint64: Error generating nonce: %v", err)
}
hashes := []*daghash.Hash{mainnetGenesisHash, simnetGenesisHash}
merkleHash := mainnetGenesisMerkleRoot
acceptedIDMerkleRoot := exampleAcceptedIDMerkleRoot
bits := uint32(0x1d00ffff)
bh := NewBlockHeader(1, hashes, merkleHash, acceptedIDMerkleRoot, exampleUTXOCommitment, bits, nonce)
// Ensure we get the same data back out.
if !reflect.DeepEqual(bh.ParentHashes, hashes) {
t.Errorf("NewBlockHeader: wrong prev hashes - got %v, want %v",
spew.Sprint(bh.ParentHashes), spew.Sprint(hashes))
}
if !bh.HashMerkleRoot.IsEqual(merkleHash) {
t.Errorf("NewBlockHeader: wrong merkle root - got %v, want %v",
spew.Sprint(bh.HashMerkleRoot), spew.Sprint(merkleHash))
}
if bh.Bits != bits {
t.Errorf("NewBlockHeader: wrong bits - got %v, want %v",
bh.Bits, bits)
}
if bh.Nonce != nonce {
t.Errorf("NewBlockHeader: wrong nonce - got %v, want %v",
bh.Nonce, nonce)
}
}
// TestBlockHeaderEncoding tests the BlockHeader appmessage encode and decode for various
// protocol versions.
func TestBlockHeaderEncoding(t *testing.T) {
nonce := uint64(123123) // 0x000000000001e0f3
pver := ProtocolVersion
// baseBlockHdr is used in the various tests as a baseline BlockHeader.
bits := uint32(0x1d00ffff)
baseBlockHdr := &BlockHeader{
Version: 1,
ParentHashes: []*daghash.Hash{mainnetGenesisHash, simnetGenesisHash},
HashMerkleRoot: mainnetGenesisMerkleRoot,
AcceptedIDMerkleRoot: exampleAcceptedIDMerkleRoot,
UTXOCommitment: exampleUTXOCommitment,
Timestamp: mstime.UnixMilliseconds(0x17315ed0f99),
Bits: bits,
Nonce: nonce,
}
// baseBlockHdrEncoded is the appmessage encoded bytes of baseBlockHdr.
baseBlockHdrEncoded := []byte{
0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumParentBlocks
0xdc, 0x5f, 0x5b, 0x5b, 0x1d, 0xc2, 0xa7, 0x25, // mainnetGenesisHash
0x49, 0xd5, 0x1d, 0x4d, 0xee, 0xd7, 0xa4, 0x8b,
0xaf, 0xd3, 0x14, 0x4b, 0x56, 0x78, 0x98, 0xb1,
0x8c, 0xfd, 0x9f, 0x69, 0xdd, 0xcf, 0xbb, 0x63,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simnetGenesisHash
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,
0x4a, 0x5e, 0x1e, 0x4b, 0xaa, 0xb8, 0x9f, 0x3a, // HashMerkleRoot
0x32, 0x51, 0x8a, 0x88, 0xc3, 0x1b, 0xc8, 0x7f,
0x61, 0x8f, 0x76, 0x67, 0x3e, 0x2c, 0xc7, 0x7a,
0xb2, 0x12, 0x7b, 0x7a, 0xfd, 0xed, 0xa3, 0x3b,
0x09, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C, // AcceptedIDMerkleRoot
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x10, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C, // UTXOCommitment
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x99, 0x0f, 0xed, 0x15, 0x73, 0x01, 0x00, 0x00, // Timestamp
0xff, 0xff, 0x00, 0x1d, // Bits
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
}
tests := []struct {
in *BlockHeader // Data to encode
out *BlockHeader // Expected decoded data
buf []byte // Encoded data
pver uint32 // Protocol version for appmessage encoding
}{
// Latest protocol version.
{
baseBlockHdr,
baseBlockHdr,
baseBlockHdrEncoded,
ProtocolVersion,
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to appmessage format.
var buf bytes.Buffer
err := writeBlockHeader(&buf, test.pver, test.in)
if err != nil {
t.Errorf("writeBlockHeader #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("writeBlockHeader #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
buf.Reset()
err = test.in.KaspaEncode(&buf, pver)
if err != nil {
t.Errorf("KaspaEncode #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("KaspaEncode #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
// Decode the block header from appmessage format.
var bh BlockHeader
rbuf := bytes.NewReader(test.buf)
err = readBlockHeader(rbuf, test.pver, &bh)
if err != nil {
t.Errorf("readBlockHeader #%d error %v", i, err)
continue
}
if !reflect.DeepEqual(&bh, test.out) {
t.Errorf("readBlockHeader #%d\n got: %s want: %s", i,
spew.Sdump(&bh), spew.Sdump(test.out))
continue
}
rbuf = bytes.NewReader(test.buf)
err = bh.KaspaDecode(rbuf, pver)
if err != nil {
t.Errorf("KaspaDecode #%d error %v", i, err)
continue
}
if !reflect.DeepEqual(&bh, test.out) {
t.Errorf("KaspaDecode #%d\n got: %s want: %s", i,
spew.Sdump(&bh), spew.Sdump(test.out))
continue
}
}
}
// TestBlockHeaderSerialize tests BlockHeader serialize and deserialize.
func TestBlockHeaderSerialize(t *testing.T) {
nonce := uint64(123123) // 0x01e0f3
// baseBlockHdr is used in the various tests as a baseline BlockHeader.
bits := uint32(0x1d00ffff)
baseBlockHdr := &BlockHeader{
Version: 1,
ParentHashes: []*daghash.Hash{mainnetGenesisHash, simnetGenesisHash},
HashMerkleRoot: mainnetGenesisMerkleRoot,
AcceptedIDMerkleRoot: exampleAcceptedIDMerkleRoot,
UTXOCommitment: exampleUTXOCommitment,
Timestamp: mstime.UnixMilliseconds(0x17315ed0f99),
Bits: bits,
Nonce: nonce,
}
// baseBlockHdrEncoded is the appmessage encoded bytes of baseBlockHdr.
baseBlockHdrEncoded := []byte{
0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumParentBlocks
0xdc, 0x5f, 0x5b, 0x5b, 0x1d, 0xc2, 0xa7, 0x25, // mainnetGenesisHash
0x49, 0xd5, 0x1d, 0x4d, 0xee, 0xd7, 0xa4, 0x8b,
0xaf, 0xd3, 0x14, 0x4b, 0x56, 0x78, 0x98, 0xb1,
0x8c, 0xfd, 0x9f, 0x69, 0xdd, 0xcf, 0xbb, 0x63,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simnetGenesisHash
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,
0x4a, 0x5e, 0x1e, 0x4b, 0xaa, 0xb8, 0x9f, 0x3a, // HashMerkleRoot
0x32, 0x51, 0x8a, 0x88, 0xc3, 0x1b, 0xc8, 0x7f,
0x61, 0x8f, 0x76, 0x67, 0x3e, 0x2c, 0xc7, 0x7a,
0xb2, 0x12, 0x7b, 0x7a, 0xfd, 0xed, 0xa3, 0x3b,
0x09, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C, // AcceptedIDMerkleRoot
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x10, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C, // UTXOCommitment
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x99, 0x0f, 0xed, 0x15, 0x73, 0x01, 0x00, 0x00, // Timestamp
0xff, 0xff, 0x00, 0x1d, // Bits
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
}
tests := []struct {
in *BlockHeader // Data to encode
out *BlockHeader // Expected decoded data
buf []byte // Serialized data
}{
{
baseBlockHdr,
baseBlockHdr,
baseBlockHdrEncoded,
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Serialize the block header.
var buf bytes.Buffer
err := test.in.Serialize(&buf)
if err != nil {
t.Errorf("Serialize #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("Serialize #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
// Deserialize the block header.
var bh BlockHeader
rbuf := bytes.NewReader(test.buf)
err = bh.Deserialize(rbuf)
if err != nil {
t.Errorf("Deserialize #%d error %v", i, err)
continue
}
if !reflect.DeepEqual(&bh, test.out) {
t.Errorf("Deserialize #%d\n got: %s want: %s", i,
spew.Sdump(&bh), spew.Sdump(test.out))
continue
}
}
}
// TestBlockHeaderSerializeSize performs tests to ensure the serialize size for
// various block headers is accurate.
func TestBlockHeaderSerializeSize(t *testing.T) {
nonce := uint64(123123) // 0x1e0f3
bits := uint32(0x1d00ffff)
timestamp := mstime.UnixMilliseconds(0x495fab29000)
baseBlockHdr := &BlockHeader{
Version: 1,
ParentHashes: []*daghash.Hash{mainnetGenesisHash, simnetGenesisHash},
HashMerkleRoot: mainnetGenesisMerkleRoot,
AcceptedIDMerkleRoot: &daghash.ZeroHash,
UTXOCommitment: &daghash.ZeroHash,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
}
genesisBlockHdr := &BlockHeader{
Version: 1,
ParentHashes: []*daghash.Hash{},
HashMerkleRoot: mainnetGenesisMerkleRoot,
AcceptedIDMerkleRoot: &daghash.ZeroHash,
UTXOCommitment: &daghash.ZeroHash,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
}
tests := []struct {
in *BlockHeader // Block header to encode
size int // Expected serialized size
}{
// Genesis-style header with no parent hashes.
{genesisBlockHdr, 121},
// Header with two parent hashes.
{baseBlockHdr, 185},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
serializedSize := test.in.SerializeSize()
if serializedSize != test.size {
t.Errorf("BlockHeader.SerializeSize: #%d got: %d, want: "+
"%d", i, serializedSize, test.size)
continue
}
}
}
func TestIsGenesis(t *testing.T) {
nonce := uint64(123123) // 0x1e0f3
bits := uint32(0x1d00ffff)
timestamp := mstime.UnixMilliseconds(0x495fab29000)
baseBlockHdr := &BlockHeader{
Version: 1,
ParentHashes: []*daghash.Hash{mainnetGenesisHash, simnetGenesisHash},
HashMerkleRoot: mainnetGenesisMerkleRoot,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
}
genesisBlockHdr := &BlockHeader{
Version: 1,
ParentHashes: []*daghash.Hash{},
HashMerkleRoot: mainnetGenesisMerkleRoot,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
}
tests := []struct {
in *BlockHeader // Block header to encode
isGenesis bool // Expected result for call of .IsGenesis
}{
{genesisBlockHdr, true},
{baseBlockHdr, false},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
isGenesis := test.in.IsGenesis()
if isGenesis != test.isGenesis {
t.Errorf("BlockHeader.IsGenesis: #%d got: %t, want: %t",
i, isGenesis, test.isGenesis)
}
}
}


@@ -0,0 +1,19 @@
package appmessage
// BlockHeadersMessage represents a kaspa BlockHeaders message
type BlockHeadersMessage struct {
baseMessage
BlockHeaders []*MsgBlockHeader
}
// Command returns the protocol command string for the message
func (msg *BlockHeadersMessage) Command() MessageCommand {
return CmdBlockHeaders
}
// NewBlockHeadersMessage returns a new kaspa BlockHeaders message
func NewBlockHeadersMessage(blockHeaders []*MsgBlockHeader) *BlockHeadersMessage {
return &BlockHeadersMessage{
BlockHeaders: blockHeaders,
}
}

View File

@@ -0,0 +1,22 @@
package appmessage
// MsgIBDRootNotFound implements the Message interface and represents a kaspa
// IBDRootNotFound message. It is used to notify a peer that the IBD root it
// requested was not found.
//
// This message has no payload.
type MsgIBDRootNotFound struct {
baseMessage
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgIBDRootNotFound) Command() MessageCommand {
return CmdIBDRootNotFound
}
// NewMsgIBDRootNotFound returns a new kaspa IBDRootNotFound message that conforms to the
// Message interface.
func NewMsgIBDRootNotFound() *MsgIBDRootNotFound {
return &MsgIBDRootNotFound{}
}

View File

@@ -4,58 +4,15 @@
package appmessage
import (
"fmt"
"github.com/kaspanet/kaspad/util/subnetworkid"
)
// MaxAddressesPerMsg is the maximum number of addresses that can be in a single
// kaspa Addresses message (MsgAddresses).
const MaxAddressesPerMsg = 1000
// MsgAddresses implements the Message interface and represents a kaspa
// Addresses message. It is used to provide a list of known active peers on the
// network. An active peer is considered one that has transmitted a message
// within the last 3 hours. Nodes which have not transmitted in that time
// frame should be forgotten. Each message is limited to a maximum number of
// addresses, which is currently 1000. As a result, multiple messages must
// be used to relay the full list.
//
// Use the AddAddress function to build up the list of known addresses when
// sending an Addresses message to another peer.
// Addresses message.
type MsgAddresses struct {
baseMessage
IncludeAllSubnetworks bool
SubnetworkID *subnetworkid.SubnetworkID
AddrList []*NetAddress
}
// AddAddress adds a known active peer to the message.
func (msg *MsgAddresses) AddAddress(na *NetAddress) error {
if len(msg.AddrList)+1 > MaxAddressesPerMsg {
str := fmt.Sprintf("too many addresses in message [max %d]",
MaxAddressesPerMsg)
return messageError("MsgAddresses.AddAddress", str)
}
msg.AddrList = append(msg.AddrList, na)
return nil
}
// AddAddresses adds multiple known active peers to the message.
func (msg *MsgAddresses) AddAddresses(netAddrs ...*NetAddress) error {
for _, na := range netAddrs {
err := msg.AddAddress(na)
if err != nil {
return err
}
}
return nil
}
// ClearAddresses removes all addresses from the message.
func (msg *MsgAddresses) ClearAddresses() {
msg.AddrList = []*NetAddress{}
AddressList []*NetAddress
}
// Command returns the protocol command string for the message. This is part
@@ -66,10 +23,8 @@ func (msg *MsgAddresses) Command() MessageCommand {
// NewMsgAddresses returns a new kaspa Addresses message that conforms to the
// Message interface. See MsgAddresses for details.
func NewMsgAddresses(includeAllSubnetworks bool, subnetworkID *subnetworkid.SubnetworkID) *MsgAddresses {
func NewMsgAddresses(addressList []*NetAddress) *MsgAddresses {
return &MsgAddresses{
IncludeAllSubnetworks: includeAllSubnetworks,
SubnetworkID: subnetworkID,
AddrList: make([]*NetAddress, 0, MaxAddressesPerMsg),
AddressList: addressList,
}
}
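With the AddAddress/AddAddresses helpers removed, enforcing MaxAddressesPerMsg becomes the caller's responsibility. A hedged caller-side sketch (buildAddressesMessages is hypothetical, not part of this changeset) that splits a large list into message-sized batches:
// buildAddressesMessages splits addresses into MaxAddressesPerMsg-sized
// batches and wraps each batch in its own MsgAddresses.
func buildAddressesMessages(addresses []*NetAddress) []*MsgAddresses {
	var messages []*MsgAddresses
	for len(addresses) > 0 {
		batchSize := len(addresses)
		if batchSize > MaxAddressesPerMsg {
			batchSize = MaxAddressesPerMsg
		}
		messages = append(messages, NewMsgAddresses(addresses[:batchSize]))
		addresses = addresses[batchSize:]
	}
	return messages
}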

View File

@@ -1,58 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"net"
"testing"
"github.com/davecgh/go-spew/spew"
)
// TestAddresses tests the MsgAddresses API.
func TestAddresses(t *testing.T) {
// Ensure the command is expected value.
wantCmd := MessageCommand(3)
msg := NewMsgAddresses(false, nil)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgAddresses: wrong command - got %v want %v",
cmd, wantCmd)
}
// Ensure NetAddresses are added properly.
tcpAddr := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 16111}
na := NewNetAddress(tcpAddr, SFNodeNetwork)
err := msg.AddAddress(na)
if err != nil {
t.Errorf("AddAddress: %v", err)
}
if msg.AddrList[0] != na {
t.Errorf("AddAddress: wrong address added - got %v, want %v",
spew.Sprint(msg.AddrList[0]), spew.Sprint(na))
}
// Ensure the address list is cleared properly.
msg.ClearAddresses()
if len(msg.AddrList) != 0 {
t.Errorf("ClearAddresses: address list is not empty - "+
"got %v [%v], want %v", len(msg.AddrList),
spew.Sprint(msg.AddrList[0]), 0)
}
// Ensure adding more than the max allowed addresses per message returns
// error.
for i := 0; i < MaxAddressesPerMsg+1; i++ {
err = msg.AddAddress(na)
}
if err == nil {
t.Errorf("AddAddress: expected error on too many addresses " +
"not received")
}
err = msg.AddAddresses(na)
if err == nil {
t.Errorf("AddAddresses: expected error on too many addresses " +
"not received")
}
}

View File

@@ -5,13 +5,7 @@
package appmessage
import (
"bytes"
"fmt"
"io"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// defaultTransactionAlloc is the default size used for the backing array
@@ -46,7 +40,7 @@ type TxLoc struct {
// response to a getdata message (MsgGetData) for a given block hash.
type MsgBlock struct {
baseMessage
Header BlockHeader
Header MsgBlockHeader
Transactions []*MsgTx
}
@@ -60,161 +54,6 @@ func (msg *MsgBlock) ClearTransactions() {
msg.Transactions = make([]*MsgTx, 0, defaultTransactionAlloc)
}
// KaspaDecode decodes r using the kaspa protocol encoding into the receiver.
// This is part of the Message interface implementation.
// See Deserialize for decoding blocks stored to disk, such as in a database, as
// opposed to decoding blocks from the appmessage.
func (msg *MsgBlock) KaspaDecode(r io.Reader, pver uint32) error {
err := readBlockHeader(r, pver, &msg.Header)
if err != nil {
return err
}
txCount, err := ReadVarInt(r)
if err != nil {
return err
}
// Prevent more transactions than could possibly fit into a block.
// It would be possible to cause memory exhaustion and panics without
// a sane upper bound on this count.
if txCount > MaxTxPerBlock {
str := fmt.Sprintf("too many transactions to fit into a block "+
"[count %d, max %d]", txCount, MaxTxPerBlock)
return messageError("MsgBlock.KaspaDecode", str)
}
msg.Transactions = make([]*MsgTx, 0, txCount)
for i := uint64(0); i < txCount; i++ {
tx := MsgTx{}
err := tx.KaspaDecode(r, pver)
if err != nil {
return err
}
msg.Transactions = append(msg.Transactions, &tx)
}
return nil
}
// Deserialize decodes a block from r into the receiver using a format that is
// suitable for long-term storage such as a database while respecting the
// Version field in the block. This function differs from KaspaDecode in that
// KaspaDecode decodes from the kaspa appmessage protocol as it was sent across the
// network. The appmessage encoding can technically differ depending on the protocol
// version and doesn't even really need to match the format of a stored block at
// all. As of the time this comment was written, the encoded block is the same
// in both instances, but there is a distinct difference and separating the two
// allows the API to be flexible enough to deal with changes.
func (msg *MsgBlock) Deserialize(r io.Reader) error {
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of KaspaDecode.
return msg.KaspaDecode(r, 0)
}
// DeserializeTxLoc decodes r in the same manner Deserialize does, but it takes
// a byte buffer instead of a generic reader and returns a slice containing the
// start and length of each transaction within the raw data that is being
// deserialized.
func (msg *MsgBlock) DeserializeTxLoc(r *bytes.Buffer) ([]TxLoc, error) {
fullLen := r.Len()
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of existing appmessage protocol functions.
err := readBlockHeader(r, 0, &msg.Header)
if err != nil {
return nil, err
}
txCount, err := ReadVarInt(r)
if err != nil {
return nil, err
}
// Prevent more transactions than could possibly fit into a block.
// It would be possible to cause memory exhaustion and panics without
// a sane upper bound on this count.
if txCount > MaxTxPerBlock {
str := fmt.Sprintf("too many transactions to fit into a block "+
"[count %d, max %d]", txCount, MaxTxPerBlock)
return nil, messageError("MsgBlock.DeserializeTxLoc", str)
}
// Deserialize each transaction while keeping track of its location
// within the byte stream.
msg.Transactions = make([]*MsgTx, 0, txCount)
txLocs := make([]TxLoc, txCount)
for i := uint64(0); i < txCount; i++ {
txLocs[i].TxStart = fullLen - r.Len()
tx := MsgTx{}
err := tx.Deserialize(r)
if err != nil {
return nil, err
}
msg.Transactions = append(msg.Transactions, &tx)
txLocs[i].TxLen = (fullLen - r.Len()) - txLocs[i].TxStart
}
return txLocs, nil
}
// KaspaEncode encodes the receiver to w using the kaspa protocol encoding.
// This is part of the Message interface implementation.
// See Serialize for encoding blocks to be stored to disk, such as in a
// database, as opposed to encoding blocks for the appmessage.
func (msg *MsgBlock) KaspaEncode(w io.Writer, pver uint32) error {
err := writeBlockHeader(w, pver, &msg.Header)
if err != nil {
return err
}
err = WriteVarInt(w, uint64(len(msg.Transactions)))
if err != nil {
return err
}
for _, tx := range msg.Transactions {
err = tx.KaspaEncode(w, pver)
if err != nil {
return err
}
}
return nil
}
// Serialize encodes the block to w using a format that is suitable for long-term
// storage such as a database while respecting the Version field in the block.
// This function differs from KaspaEncode in that KaspaEncode encodes the block to
// the kaspa appmessage protocol in order to be sent across the network. The appmessage
// encoding can technically differ depending on the protocol version and doesn't
// even really need to match the format of a stored block at all. As of the
// time this comment was written, the encoded block is the same in both
// instances, but there is a distinct difference and separating the two allows
// the API to be flexible enough to deal with changes.
func (msg *MsgBlock) Serialize(w io.Writer) error {
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of KaspaEncode.
return msg.KaspaEncode(w, 0)
}
// SerializeSize returns the number of bytes it would take to serialize the
// block.
func (msg *MsgBlock) SerializeSize() int {
// Block header bytes + Serialized varint size for the number of
// transactions.
n := msg.Header.SerializeSize() + VarIntSerializeSize(uint64(len(msg.Transactions)))
for _, tx := range msg.Transactions {
n += tx.SerializeSize()
}
return n
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgBlock) Command() MessageCommand {
@@ -227,17 +66,12 @@ func (msg *MsgBlock) MaxPayloadLength(pver uint32) uint32 {
return MaxMessagePayload
}
// BlockHash computes the block identifier hash for this block.
func (msg *MsgBlock) BlockHash() *daghash.Hash {
return msg.Header.BlockHash()
}
// ConvertToPartial clears out all the payloads of the subnetworks that are
// incompatible with the given subnetwork ID.
// Note: this operation modifies the block in place.
func (msg *MsgBlock) ConvertToPartial(subnetworkID *subnetworkid.SubnetworkID) {
func (msg *MsgBlock) ConvertToPartial(subnetworkID *externalapi.DomainSubnetworkID) {
for _, tx := range msg.Transactions {
if !tx.SubnetworkID.IsEqual(subnetworkID) {
if !tx.SubnetworkID.Equal(subnetworkID) {
tx.Payload = []byte{}
}
}
@@ -245,7 +79,7 @@ func (msg *MsgBlock) ConvertToPartial(subnetworkID *subnetworkid.SubnetworkID) {
// NewMsgBlock returns a new kaspa block message that conforms to the
// Message interface. See MsgBlock for details.
func NewMsgBlock(blockHeader *BlockHeader) *MsgBlock {
func NewMsgBlock(blockHeader *MsgBlockHeader) *MsgBlock {
return &MsgBlock{
Header: *blockHeader,
Transactions: make([]*MsgTx, 0, defaultTransactionAlloc),

View File

@@ -5,17 +5,15 @@
package appmessage
import (
"bytes"
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/pkg/errors"
"io"
"math"
"reflect"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/domain/consensus/utils/subnetworks"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// TestBlock tests the MsgBlock API.
@@ -71,46 +69,31 @@ func TestBlock(t *testing.T) {
}
}
// TestBlockHash tests the ability to generate the hash of a block accurately.
func TestBlockHash(t *testing.T) {
// Block 1 hash.
hashStr := "55d71bd49a8233bc9f0edbcbd0ad5d3eaebffe1fc6a6443a1c1f310fd02c11a5"
wantHash, err := daghash.NewHashFromStr(hashStr)
if err != nil {
t.Errorf("NewHashFromStr: %v", err)
}
// Ensure the hash produced is expected.
blockHash := blockOne.BlockHash()
if !blockHash.IsEqual(wantHash) {
t.Errorf("BlockHash: wrong hash - got %v, want %v",
spew.Sprint(blockHash), spew.Sprint(wantHash))
}
}
func TestConvertToPartial(t *testing.T) {
localSubnetworkID := &externalapi.DomainSubnetworkID{0x12}
transactions := []struct {
subnetworkID *subnetworkid.SubnetworkID
subnetworkID *externalapi.DomainSubnetworkID
payload []byte
expectedPayloadLength int
}{
{
subnetworkID: subnetworkid.SubnetworkIDNative,
subnetworkID: &subnetworks.SubnetworkIDNative,
payload: []byte{},
expectedPayloadLength: 0,
},
{
subnetworkID: subnetworkid.SubnetworkIDRegistry,
subnetworkID: &subnetworks.SubnetworkIDRegistry,
payload: []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08},
expectedPayloadLength: 0,
},
{
subnetworkID: &subnetworkid.SubnetworkID{123},
subnetworkID: localSubnetworkID,
payload: []byte{0x01},
expectedPayloadLength: 1,
},
{
subnetworkID: &subnetworkid.SubnetworkID{234},
subnetworkID: &externalapi.DomainSubnetworkID{0x34},
payload: []byte{0x02},
expectedPayloadLength: 0,
},
@@ -122,388 +105,33 @@ func TestConvertToPartial(t *testing.T) {
block.Transactions = append(block.Transactions, NewSubnetworkMsgTx(1, nil, nil, transaction.subnetworkID, 0, payload))
}
block.ConvertToPartial(&subnetworkid.SubnetworkID{123})
block.ConvertToPartial(localSubnetworkID)
for _, transaction := range transactions {
for _, testTransaction := range transactions {
var subnetworkTx *MsgTx
for _, tx := range block.Transactions {
if tx.SubnetworkID.IsEqual(transaction.subnetworkID) {
subnetworkTx = tx
for _, blockTransaction := range block.Transactions {
if blockTransaction.SubnetworkID.Equal(testTransaction.subnetworkID) {
subnetworkTx = blockTransaction
}
}
if subnetworkTx == nil {
t.Errorf("ConvertToPartial: subnetworkID '%s' not found in block!", transaction.subnetworkID)
t.Errorf("ConvertToPartial: subnetworkID '%s' not found in block!", testTransaction.subnetworkID)
continue
}
payloadLength := len(subnetworkTx.Payload)
if payloadLength != transaction.expectedPayloadLength {
if payloadLength != testTransaction.expectedPayloadLength {
t.Errorf("ConvertToPartial: unexpected payload length for subnetwork '%s': expected: %d, got: %d",
transaction.subnetworkID, transaction.expectedPayloadLength, payloadLength)
testTransaction.subnetworkID, testTransaction.expectedPayloadLength, payloadLength)
}
}
}
// TestBlockEncoding tests the MsgBlock appmessage encode and decode for various numbers
// of transaction inputs and outputs and protocol versions.
func TestBlockEncoding(t *testing.T) {
tests := []struct {
in *MsgBlock // Message to encode
out *MsgBlock // Expected decoded message
buf []byte // Encoded value
txLocs []TxLoc // Expected transaction locations
pver uint32 // Protocol version for appmessage encoding
}{
// Latest protocol version.
{
&blockOne,
&blockOne,
blockOneBytes,
blockOneTxLocs,
ProtocolVersion,
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode the message to appmessage format.
var buf bytes.Buffer
err := test.in.KaspaEncode(&buf, test.pver)
if err != nil {
t.Errorf("KaspaEncode #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("KaspaEncode #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
// Decode the message from appmessage format.
var msg MsgBlock
rbuf := bytes.NewReader(test.buf)
err = msg.KaspaDecode(rbuf, test.pver)
if err != nil {
t.Errorf("KaspaDecode #%d error %v", i, err)
continue
}
if !reflect.DeepEqual(&msg, test.out) {
t.Errorf("KaspaDecode #%d\n got: %s want: %s", i,
spew.Sdump(&msg), spew.Sdump(test.out))
continue
}
}
}
// TestBlockEncodingErrors performs negative tests against appmessage encode and decode
// of MsgBlock to confirm error paths work correctly.
func TestBlockEncodingErrors(t *testing.T) {
pver := ProtocolVersion
tests := []struct {
in *MsgBlock // Value to encode
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
max int // Max size of fixed buffer to induce errors
writeErr error // Expected write error
readErr error // Expected read error
}{
// Force error in version.
{&blockOne, blockOneBytes, pver, 0, io.ErrShortWrite, io.EOF},
// Force error in num block hashes.
{&blockOne, blockOneBytes, pver, 4, io.ErrShortWrite, io.EOF},
// Force error in prev block hash #1.
{&blockOne, blockOneBytes, pver, 5, io.ErrShortWrite, io.EOF},
// Force error in prev block hash #2.
{&blockOne, blockOneBytes, pver, 37, io.ErrShortWrite, io.EOF},
// Force error in hash merkle root.
{&blockOne, blockOneBytes, pver, 69, io.ErrShortWrite, io.EOF},
// Force error in accepted ID merkle root.
{&blockOne, blockOneBytes, pver, 101, io.ErrShortWrite, io.EOF},
// Force error in utxo commitment.
{&blockOne, blockOneBytes, pver, 133, io.ErrShortWrite, io.EOF},
// Force error in timestamp.
{&blockOne, blockOneBytes, pver, 165, io.ErrShortWrite, io.EOF},
// Force error in difficulty bits.
{&blockOne, blockOneBytes, pver, 173, io.ErrShortWrite, io.EOF},
// Force error in header nonce.
{&blockOne, blockOneBytes, pver, 177, io.ErrShortWrite, io.EOF},
// Force error in transaction count.
{&blockOne, blockOneBytes, pver, 185, io.ErrShortWrite, io.EOF},
// Force error in transactions.
{&blockOne, blockOneBytes, pver, 186, io.ErrShortWrite, io.EOF},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to appmessage format.
w := newFixedWriter(test.max)
err := test.in.KaspaEncode(w, test.pver)
if !errors.Is(err, test.writeErr) {
t.Errorf("KaspaEncode #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Decode from appmessage format.
var msg MsgBlock
r := newFixedReader(test.max, test.buf)
err = msg.KaspaDecode(r, test.pver)
if !errors.Is(err, test.readErr) {
t.Errorf("KaspaDecode #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
}
}
}
// TestBlockSerialize tests MsgBlock serialize and deserialize.
func TestBlockSerialize(t *testing.T) {
tests := []struct {
in *MsgBlock // Message to encode
out *MsgBlock // Expected decoded message
buf []byte // Serialized data
txLocs []TxLoc // Expected transaction locations
}{
{
&blockOne,
&blockOne,
blockOneBytes,
blockOneTxLocs,
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Serialize the block.
var buf bytes.Buffer
err := test.in.Serialize(&buf)
if err != nil {
t.Errorf("Serialize #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("Serialize #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
// Deserialize the block.
var block MsgBlock
rbuf := bytes.NewReader(test.buf)
err = block.Deserialize(rbuf)
if err != nil {
t.Errorf("Deserialize #%d error %v", i, err)
continue
}
if !reflect.DeepEqual(&block, test.out) {
t.Errorf("Deserialize #%d\n got: %s want: %s", i,
spew.Sdump(&block), spew.Sdump(test.out))
continue
}
// Deserialize the block while gathering transaction location
// information.
var txLocBlock MsgBlock
br := bytes.NewBuffer(test.buf)
txLocs, err := txLocBlock.DeserializeTxLoc(br)
if err != nil {
t.Errorf("DeserializeTxLoc #%d error %v", i, err)
continue
}
if !reflect.DeepEqual(&txLocBlock, test.out) {
t.Errorf("DeserializeTxLoc #%d\n got: %s want: %s", i,
spew.Sdump(&txLocBlock), spew.Sdump(test.out))
continue
}
if !reflect.DeepEqual(txLocs, test.txLocs) {
t.Errorf("DeserializeTxLoc #%d\n got: %s want: %s", i,
spew.Sdump(txLocs), spew.Sdump(test.txLocs))
continue
}
}
}
// TestBlockSerializeErrors performs negative tests against appmessage encode and
// decode of MsgBlock to confirm error paths work correctly.
func TestBlockSerializeErrors(t *testing.T) {
tests := []struct {
in *MsgBlock // Value to encode
buf []byte // Serialized data
max int // Max size of fixed buffer to induce errors
writeErr error // Expected write error
readErr error // Expected read error
}{
// Force error in version.
{&blockOne, blockOneBytes, 0, io.ErrShortWrite, io.EOF},
// Force error in numParentBlocks.
{&blockOne, blockOneBytes, 4, io.ErrShortWrite, io.EOF},
// Force error in prev block hash #1.
{&blockOne, blockOneBytes, 5, io.ErrShortWrite, io.EOF},
// Force error in prev block hash #2.
{&blockOne, blockOneBytes, 37, io.ErrShortWrite, io.EOF},
// Force error in hash merkle root.
{&blockOne, blockOneBytes, 69, io.ErrShortWrite, io.EOF},
// Force error in accepted ID merkle root.
{&blockOne, blockOneBytes, 101, io.ErrShortWrite, io.EOF},
// Force error in utxo commitment.
{&blockOne, blockOneBytes, 133, io.ErrShortWrite, io.EOF},
// Force error in timestamp.
{&blockOne, blockOneBytes, 165, io.ErrShortWrite, io.EOF},
// Force error in difficulty bits.
{&blockOne, blockOneBytes, 173, io.ErrShortWrite, io.EOF},
// Force error in header nonce.
{&blockOne, blockOneBytes, 177, io.ErrShortWrite, io.EOF},
// Force error in transaction count.
{&blockOne, blockOneBytes, 185, io.ErrShortWrite, io.EOF},
// Force error in transactions.
{&blockOne, blockOneBytes, 186, io.ErrShortWrite, io.EOF},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Serialize the block.
w := newFixedWriter(test.max)
err := test.in.Serialize(w)
if !errors.Is(err, test.writeErr) {
t.Errorf("Serialize #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Deserialize the block.
var block MsgBlock
r := newFixedReader(test.max, test.buf)
err = block.Deserialize(r)
if !errors.Is(err, test.readErr) {
t.Errorf("Deserialize #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
}
var txLocBlock MsgBlock
br := bytes.NewBuffer(test.buf[0:test.max])
_, err = txLocBlock.DeserializeTxLoc(br)
if !errors.Is(err, test.readErr) {
t.Errorf("DeserializeTxLoc #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
}
}
}
// TestBlockOverflowErrors performs tests to ensure deserializing blocks which
// are intentionally crafted to use large values for the number of transactions
// are handled properly. This could otherwise potentially be used as an attack
// vector.
func TestBlockOverflowErrors(t *testing.T) {
pver := ProtocolVersion
tests := []struct {
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
err error // Expected error
}{
// Block that claims to have ~uint64(0) transactions.
{
[]byte{
0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumParentBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // mainnetGenesisHash
0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simnetGenesisHash
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,
0x3b, 0xa3, 0xed, 0xfd, 0x7a, 0x7b, 0x12, 0xb2, // HashMerkleRoot
0x7a, 0xc7, 0x2c, 0x3e, 0x67, 0x76, 0x8f, 0x61,
0x7f, 0xc8, 0x1b, 0xc3, 0x88, 0x8a, 0x51, 0x32,
0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a,
0x09, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C, // AcceptedIDMerkleRoot
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x10, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C, // UTXOCommitment
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x61, 0xbc, 0x66, 0x49, 0x00, 0x00, 0x00, 0x00, // Timestamp
0xff, 0xff, 0x00, 0x1d, // Bits
0x01, 0xe3, 0x62, 0x99, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, // TxnCount
}, pver, &MessageError{},
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Decode from appmessage format.
var msg MsgBlock
r := bytes.NewReader(test.buf)
err := msg.KaspaDecode(r, test.pver)
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
t.Errorf("KaspaDecode #%d wrong error got: %v, want: %v",
i, err, reflect.TypeOf(test.err))
continue
}
// Deserialize from appmessage format.
r = bytes.NewReader(test.buf)
err = msg.Deserialize(r)
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
t.Errorf("Deserialize #%d wrong error got: %v, want: %v",
i, err, reflect.TypeOf(test.err))
continue
}
// Deserialize with transaction location info from appmessage format.
br := bytes.NewBuffer(test.buf)
_, err = msg.DeserializeTxLoc(br)
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
t.Errorf("DeserializeTxLoc #%d wrong error got: %v, "+
"want: %v", i, err, reflect.TypeOf(test.err))
continue
}
}
}
// TestBlockSerializeSize performs tests to ensure the serialize size for
// various blocks is accurate.
func TestBlockSerializeSize(t *testing.T) {
// Block with no transactions.
noTxBlock := NewMsgBlock(&blockOne.Header)
tests := []struct {
in *MsgBlock // Block to encode
size int // Expected serialized size
}{
// Block with no transactions.
{noTxBlock, 186},
// First block in the mainnet block DAG.
{&blockOne, len(blockOneBytes)},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
serializedSize := test.in.SerializeSize()
if serializedSize != test.size {
t.Errorf("MsgBlock.SerializeSize: #%d got: %d, want: "+
"%d", i, serializedSize, test.size)
continue
}
}
}
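As a sanity check on the 186-byte expectation (under the pre-change header layout), a no-transaction block is just the 185-byte two-parent header plus a single varint byte encoding the zero transaction count:
// Worked arithmetic for the sizes above, reusing the two-parent header
// size established in the header tests:
const noTxBlockSerializedSize = 185 + 1 // header + varint TxnCount == 186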
// blockOne is the first block in the mainnet block DAG.
var blockOne = MsgBlock{
Header: BlockHeader{
Version: 1,
ParentHashes: []*daghash.Hash{mainnetGenesisHash, simnetGenesisHash},
Header: MsgBlockHeader{
Version: 0,
ParentHashes: []*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash},
HashMerkleRoot: mainnetGenesisMerkleRoot,
AcceptedIDMerkleRoot: exampleAcceptedIDMerkleRoot,
UTXOCommitment: exampleUTXOCommitment,
@@ -516,7 +144,7 @@ var blockOne = MsgBlock{
[]*TxIn{
{
PreviousOutpoint: Outpoint{
TxID: daghash.TxID{},
TxID: externalapi.DomainTransactionID{},
Index: 0xffffffff,
},
SignatureScript: []byte{
@@ -528,19 +156,21 @@ var blockOne = MsgBlock{
[]*TxOut{
{
Value: 0x12a05f200,
ScriptPubKey: []byte{
0x41, // OP_DATA_65
0x04, 0x96, 0xb5, 0x38, 0xe8, 0x53, 0x51, 0x9c,
0x72, 0x6a, 0x2c, 0x91, 0xe6, 0x1e, 0xc1, 0x16,
0x00, 0xae, 0x13, 0x90, 0x81, 0x3a, 0x62, 0x7c,
0x66, 0xfb, 0x8b, 0xe7, 0x94, 0x7b, 0xe6, 0x3c,
0x52, 0xda, 0x75, 0x89, 0x37, 0x95, 0x15, 0xd4,
0xe0, 0xa6, 0x04, 0xf8, 0x14, 0x17, 0x81, 0xe6,
0x22, 0x94, 0x72, 0x11, 0x66, 0xbf, 0x62, 0x1e,
0x73, 0xa8, 0x2c, 0xbf, 0x23, 0x42, 0xc8, 0x58,
0xee, // 65-byte signature
0xac, // OP_CHECKSIG
},
ScriptPubKey: &externalapi.ScriptPublicKey{
Script: []byte{
0x41, // OP_DATA_65
0x04, 0x96, 0xb5, 0x38, 0xe8, 0x53, 0x51, 0x9c,
0x72, 0x6a, 0x2c, 0x91, 0xe6, 0x1e, 0xc1, 0x16,
0x00, 0xae, 0x13, 0x90, 0x81, 0x3a, 0x62, 0x7c,
0x66, 0xfb, 0x8b, 0xe7, 0x94, 0x7b, 0xe6, 0x3c,
0x52, 0xda, 0x75, 0x89, 0x37, 0x95, 0x15, 0xd4,
0xe0, 0xa6, 0x04, 0xf8, 0x14, 0x17, 0x81, 0xe6,
0x22, 0x94, 0x72, 0x11, 0x66, 0xbf, 0x62, 0x1e,
0x73, 0xa8, 0x2c, 0xbf, 0x23, 0x42, 0xc8, 0x58,
0xee, // 65-byte signature
0xac, // OP_CHECKSIG
},
Version: 0},
},
}),
},
@@ -548,7 +178,7 @@ var blockOne = MsgBlock{
// Block one serialized bytes.
var blockOneBytes = []byte{
0x01, 0x00, 0x00, 0x00, // Version 1
0x00, 0x00, // Version 0
0x02, // NumParentBlocks
0xdc, 0x5f, 0x5b, 0x5b, 0x1d, 0xc2, 0xa7, 0x25, // mainnetGenesisHash
0x49, 0xd5, 0x1d, 0x4d, 0xee, 0xd7, 0xa4, 0x8b,
@@ -574,7 +204,7 @@ var blockOneBytes = []byte{
0xff, 0xff, 0x00, 0x1d, // Bits
0x01, 0xe3, 0x62, 0x99, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
0x01, // TxnCount
0x01, 0x00, 0x00, 0x00, // Version
0x00, 0x00, 0x00, 0x00, // Version
0x01, // Varint for number of transaction inputs
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,

View File

@@ -0,0 +1,108 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"math"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/pkg/errors"
)
// BaseBlockHeaderPayload is the base number of bytes a block header can be,
// not including the list of parent block headers.
// Version 4 bytes + Timestamp 8 bytes + Bits 4 bytes + Nonce 8 bytes +
// + NumParentBlocks 1 byte + HashMerkleRoot hash +
// + AcceptedIDMerkleRoot hash + UTXOCommitment hash.
// To get the total size of a block header, len(ParentHashes) * externalapi.DomainHashSize
// should be added to this value.
const BaseBlockHeaderPayload = 25 + 3*(externalapi.DomainHashSize)
// MaxNumParentBlocks is the maximum number of parent blocks a block can reference.
// Currently set to 255, the maximum value NumParentBlocks can hold, since it is a single byte.
const MaxNumParentBlocks = 255
// MaxBlockHeaderPayload is the maximum number of bytes a block header can be.
// BaseBlockHeaderPayload + up to MaxNumParentBlocks hashes of parent blocks
const MaxBlockHeaderPayload = BaseBlockHeaderPayload + (MaxNumParentBlocks * externalapi.DomainHashSize)
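Plugging in a 32-byte hash size (an assumption here; externalapi.DomainHashSize is defined elsewhere), the constants above evaluate to concrete bounds:
// Sanity arithmetic, assuming externalapi.DomainHashSize == 32:
const assumedDomainHashSize = 32
const (
	baseBlockHeaderPayloadCheck = 25 + 3*assumedDomainHashSize                            // 121 bytes
	maxBlockHeaderPayloadCheck  = baseBlockHeaderPayloadCheck + 255*assumedDomainHashSize // 8281 bytes
)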
// MsgBlockHeader defines information about a block and is used in the kaspa
// block (MsgBlock) and headers (MsgHeader) messages.
type MsgBlockHeader struct {
baseMessage
// Version of the block. This is not the same as the protocol version.
Version uint16
// Hashes of the parent block headers in the blockDAG.
ParentHashes []*externalapi.DomainHash
// HashMerkleRoot is the merkle tree reference to hash of all transactions for the block.
HashMerkleRoot *externalapi.DomainHash
// AcceptedIDMerkleRoot is a merkle tree reference to the hashes of all transactions
// accepted from the block's blues.
AcceptedIDMerkleRoot *externalapi.DomainHash
// UTXOCommitment is an ECMH UTXO commitment to the block UTXO.
UTXOCommitment *externalapi.DomainHash
// Time the block was created.
Timestamp mstime.Time
// Difficulty target for the block.
Bits uint32
// Nonce used to generate the block.
Nonce uint64
}
// NumParentBlocks returns the number of entries in ParentHashes
func (h *MsgBlockHeader) NumParentBlocks() byte {
numParents := len(h.ParentHashes)
if numParents > math.MaxUint8 {
panic(errors.Errorf("number of parents is %d, which is more than one byte can fit", numParents))
}
return byte(numParents)
}
// BlockHash computes the block identifier hash for the given block header.
func (h *MsgBlockHeader) BlockHash() *externalapi.DomainHash {
return consensushashing.HeaderHash(BlockHeaderToDomainBlockHeader(h))
}
// IsGenesis returns true iff this block is a genesis block
func (h *MsgBlockHeader) IsGenesis() bool {
return h.NumParentBlocks() == 0
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (h *MsgBlockHeader) Command() MessageCommand {
return CmdHeader
}
// NewBlockHeader returns a new MsgBlockHeader using the provided version, parent
// hashes, hash merkle root, accepted ID merkle root, UTXO commitment, difficulty bits, and
// nonce used to generate the block, with defaults or calculated values for the remaining fields.
func NewBlockHeader(version uint16, parentHashes []*externalapi.DomainHash, hashMerkleRoot *externalapi.DomainHash,
acceptedIDMerkleRoot *externalapi.DomainHash, utxoCommitment *externalapi.DomainHash, bits uint32, nonce uint64) *MsgBlockHeader {
// Limit the timestamp to one millisecond precision since the protocol
// doesn't support better.
return &MsgBlockHeader{
Version: version,
ParentHashes: parentHashes,
HashMerkleRoot: hashMerkleRoot,
AcceptedIDMerkleRoot: acceptedIDMerkleRoot,
UTXOCommitment: utxoCommitment,
Timestamp: mstime.Now(),
Bits: bits,
Nonce: nonce,
}
}
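Since NumParentBlocks panics once len(ParentHashes) exceeds a byte, a caller constructing headers from untrusted input may want to validate up front. A hedged sketch (newHeaderChecked is hypothetical) using the package's MaxNumParentBlocks and the errors package already imported in this file:
// newHeaderChecked validates the parent count before delegating to
// NewBlockHeader, keeping the NumParentBlocks panic path unreachable.
func newHeaderChecked(version uint16, parentHashes []*externalapi.DomainHash,
	hashMerkleRoot, acceptedIDMerkleRoot, utxoCommitment *externalapi.DomainHash,
	bits uint32, nonce uint64) (*MsgBlockHeader, error) {
	if len(parentHashes) > MaxNumParentBlocks {
		return nil, errors.Errorf("got %d parent hashes, but the header format supports at most %d",
			len(parentHashes), MaxNumParentBlocks)
	}
	return NewBlockHeader(version, parentHashes, hashMerkleRoot,
		acceptedIDMerkleRoot, utxoCommitment, bits, nonce), nil
}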

View File

@@ -0,0 +1,88 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"reflect"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/util/random"
)
// TestBlockHeader tests the MsgBlockHeader API.
func TestBlockHeader(t *testing.T) {
nonce, err := random.Uint64()
if err != nil {
t.Errorf("random.Uint64: Error generating nonce: %v", err)
}
hashes := []*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash}
merkleHash := mainnetGenesisMerkleRoot
acceptedIDMerkleRoot := exampleAcceptedIDMerkleRoot
bits := uint32(0x1d00ffff)
bh := NewBlockHeader(1, hashes, merkleHash, acceptedIDMerkleRoot, exampleUTXOCommitment, bits, nonce)
// Ensure we get the same data back out.
if !reflect.DeepEqual(bh.ParentHashes, hashes) {
t.Errorf("NewBlockHeader: wrong prev hashes - got %v, want %v",
spew.Sprint(bh.ParentHashes), spew.Sprint(hashes))
}
if bh.HashMerkleRoot != merkleHash {
t.Errorf("NewBlockHeader: wrong merkle root - got %v, want %v",
spew.Sprint(bh.HashMerkleRoot), spew.Sprint(merkleHash))
}
if bh.Bits != bits {
t.Errorf("NewBlockHeader: wrong bits - got %v, want %v",
bh.Bits, bits)
}
if bh.Nonce != nonce {
t.Errorf("NewBlockHeader: wrong nonce - got %v, want %v",
bh.Nonce, nonce)
}
}
func TestIsGenesis(t *testing.T) {
nonce := uint64(123123) // 0x1e0f3
bits := uint32(0x1d00ffff)
timestamp := mstime.UnixMilliseconds(0x495fab29000)
baseBlockHdr := &MsgBlockHeader{
Version: 1,
ParentHashes: []*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash},
HashMerkleRoot: mainnetGenesisMerkleRoot,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
}
genesisBlockHdr := &MsgBlockHeader{
Version: 1,
ParentHashes: []*externalapi.DomainHash{},
HashMerkleRoot: mainnetGenesisMerkleRoot,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
}
tests := []struct {
in *MsgBlockHeader // Block header to encode
isGenesis bool // Expected result for call of .IsGenesis
}{
{genesisBlockHdr, true},
{baseBlockHdr, false},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
isGenesis := test.in.IsGenesis()
if isGenesis != test.isGenesis {
t.Errorf("MsgBlockHeader.IsGenesis: #%d got: %t, want: %t",
i, isGenesis, test.isGenesis)
}
}
}

View File

@@ -1,7 +1,7 @@
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MaxBlockLocatorsPerMsg is the maximum number of block locator hashes allowed
@@ -13,7 +13,7 @@ const MaxBlockLocatorsPerMsg = 500
// syncing with you.
type MsgBlockLocator struct {
baseMessage
BlockLocatorHashes []*daghash.Hash
BlockLocatorHashes []*externalapi.DomainHash
}
// Command returns the protocol command string for the message. This is part
@@ -24,7 +24,7 @@ func (msg *MsgBlockLocator) Command() MessageCommand {
// NewMsgBlockLocator returns a new kaspa locator message that conforms to
// the Message interface. See MsgBlockLocator for details.
func NewMsgBlockLocator(locatorHashes []*daghash.Hash) *MsgBlockLocator {
func NewMsgBlockLocator(locatorHashes []*externalapi.DomainHash) *MsgBlockLocator {
return &MsgBlockLocator{
BlockLocatorHashes: locatorHashes,
}
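For context, block locators are conventionally built densely near the tip and exponentially sparser toward genesis, capped at MaxBlockLocatorsPerMsg. A hedged sketch of that classic construction (buildLocatorHashes is illustrative and not taken from this changeset), given a selected chain ordered from genesis to tip:
// buildLocatorHashes walks back from the tip, doubling the step after
// the first 10 hashes, so a peer can find the fork point cheaply.
func buildLocatorHashes(selectedChain []*externalapi.DomainHash) []*externalapi.DomainHash {
	locator := make([]*externalapi.DomainHash, 0, MaxBlockLocatorsPerMsg)
	step := 1
	for i := len(selectedChain) - 1; i >= 0; i -= step {
		locator = append(locator, selectedChain[i])
		if len(locator) == MaxBlockLocatorsPerMsg {
			break
		}
		if len(locator) > 10 {
			step *= 2
		}
	}
	return locator
}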

View File

@@ -3,19 +3,20 @@ package appmessage
import (
"testing"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/util/daghash"
)
// TestBlockLocator tests the MsgBlockLocator API.
func TestBlockLocator(t *testing.T) {
hashStr := "000000000002e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0"
locatorHash, err := daghash.NewHashFromStr(hashStr)
locatorHash, err := externalapi.NewDomainHashFromString(hashStr)
if err != nil {
t.Errorf("NewHashFromStr: %v", err)
}
msg := NewMsgBlockLocator([]*daghash.Hash{locatorHash})
msg := NewMsgBlockLocator([]*externalapi.DomainHash{locatorHash})
// Ensure the command is expected value.
wantCmd := MessageCommand(10)

View File

@@ -0,0 +1,22 @@
package appmessage
// MsgDoneHeaders implements the Message interface and represents a kaspa
// DoneHeaders message. It is used to notify the IBD syncing peer that the
// syncer sent all the requested headers.
//
// This message has no payload.
type MsgDoneHeaders struct {
baseMessage
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgDoneHeaders) Command() MessageCommand {
return CmdDoneHeaders
}
// NewMsgDoneHeaders returns a new kaspa DoneHeaders message that conforms to the
// Message interface.
func NewMsgDoneHeaders() *MsgDoneHeaders {
return &MsgDoneHeaders{}
}

View File

@@ -1,22 +0,0 @@
package appmessage
// MsgDoneIBDBlocks implements the Message interface and represents a kaspa
// DoneIBDBlocks message. It is used to notify the IBD syncing peer that the
// syncer sent all the requested blocks.
//
// This message has no payload.
type MsgDoneIBDBlocks struct {
baseMessage
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgDoneIBDBlocks) Command() MessageCommand {
return CmdDoneIBDBlocks
}
// NewMsgDoneIBDBlocks returns a new kaspa DoneIBDBlocks message that conforms to the
// Message interface.
func NewMsgDoneIBDBlocks() *MsgDoneIBDBlocks {
return &MsgDoneIBDBlocks{}
}

View File

@@ -0,0 +1,16 @@
package appmessage
// MsgDoneIBDRootUTXOSetChunks represents a kaspa DoneIBDRootUTXOSetChunks message
type MsgDoneIBDRootUTXOSetChunks struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *MsgDoneIBDRootUTXOSetChunks) Command() MessageCommand {
return CmdDoneIBDRootUTXOSetChunks
}
// NewMsgDoneIBDRootUTXOSetChunks returns a new MsgDoneIBDRootUTXOSetChunks.
func NewMsgDoneIBDRootUTXOSetChunks() *MsgDoneIBDRootUTXOSetChunks {
return &MsgDoneIBDRootUTXOSetChunks{}
}

View File

@@ -5,7 +5,6 @@
package appmessage
import (
"bytes"
"reflect"
"testing"
@@ -26,7 +25,7 @@ func TestIBDBlock(t *testing.T) {
bh := NewBlockHeader(1, parentHashes, hashMerkleRoot, acceptedIDMerkleRoot, utxoCommitment, bits, nonce)
// Ensure the command is expected value.
wantCmd := MessageCommand(17)
wantCmd := MessageCommand(15)
msg := NewMsgIBDBlock(NewMsgBlock(bh))
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgIBDBlock: wrong command - got %v want %v",
@@ -64,55 +63,3 @@ func TestIBDBlock(t *testing.T) {
len(msg.Transactions), 0)
}
}
// TestIBDBlockEncoding tests the MsgIBDBlock appmessage encode and decode for various numbers
// of transaction inputs and outputs and protocol versions.
func TestIBDBlockEncoding(t *testing.T) {
tests := []struct {
in *MsgIBDBlock // Message to encode
out *MsgIBDBlock // Expected decoded message
buf []byte // Encoded value
txLocs []TxLoc // Expected transaction locations
pver uint32 // Protocol version for appmessage encoding
}{
// Latest protocol version.
{
&MsgIBDBlock{MsgBlock: &blockOne},
&MsgIBDBlock{MsgBlock: &blockOne},
blockOneBytes,
blockOneTxLocs,
ProtocolVersion,
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode the message to appmessage format.
var buf bytes.Buffer
err := test.in.KaspaEncode(&buf, test.pver)
if err != nil {
t.Errorf("KaspaEncode #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("KaspaEncode #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
// Decode the message from appmessage format.
var msg MsgIBDBlock
msg.MsgBlock = new(MsgBlock)
rbuf := bytes.NewReader(test.buf)
err = msg.KaspaDecode(rbuf, test.pver)
if err != nil {
t.Errorf("KaspaDecode #%d error %v", i, err)
continue
}
if !reflect.DeepEqual(&msg, test.out) {
t.Errorf("KaspaDecode #%d\n got: %s want: %s", i,
spew.Sdump(&msg), spew.Sdump(test.out))
continue
}
}
}

View File

@@ -0,0 +1,27 @@
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgIBDBlockLocator represents a kaspa ibdBlockLocator message
type MsgIBDBlockLocator struct {
baseMessage
TargetHash *externalapi.DomainHash
BlockLocatorHashes []*externalapi.DomainHash
}
// Command returns the protocol command string for the message
func (msg *MsgIBDBlockLocator) Command() MessageCommand {
return CmdIBDBlockLocator
}
// NewMsgIBDBlockLocator returns a new kaspa ibdBlockLocator message
func NewMsgIBDBlockLocator(targetHash *externalapi.DomainHash,
blockLocatorHashes []*externalapi.DomainHash) *MsgIBDBlockLocator {
return &MsgIBDBlockLocator{
TargetHash: targetHash,
BlockLocatorHashes: blockLocatorHashes,
}
}

View File

@@ -0,0 +1,23 @@
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgIBDBlockLocatorHighestHash represents a kaspa BlockLocatorHighestHash message
type MsgIBDBlockLocatorHighestHash struct {
baseMessage
HighestHash *externalapi.DomainHash
}
// Command returns the protocol command string for the message
func (msg *MsgIBDBlockLocatorHighestHash) Command() MessageCommand {
return CmdIBDBlockLocatorHighestHash
}
// NewMsgIBDBlockLocatorHighestHash returns a new BlockLocatorHighestHash message
func NewMsgIBDBlockLocatorHighestHash(highestHash *externalapi.DomainHash) *MsgIBDBlockLocatorHighestHash {
return &MsgIBDBlockLocatorHighestHash{
HighestHash: highestHash,
}
}

View File

@@ -0,0 +1,26 @@
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgIBDRootHashMessage implements the Message interface and represents a kaspa
// IBDRootHash message. It is used as a reply to IBD root hash requests.
type MsgIBDRootHashMessage struct {
baseMessage
Hash *externalapi.DomainHash
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgIBDRootHashMessage) Command() MessageCommand {
return CmdIBDRootHash
}
// NewMsgIBDRootHashMessage returns a new kaspa IBDRootHash message that conforms to
// the Message interface. See MsgIBDRootHashMessage for details.
func NewMsgIBDRootHashMessage(hash *externalapi.DomainHash) *MsgIBDRootHashMessage {
return &MsgIBDRootHashMessage{
Hash: hash,
}
}

View File

@@ -0,0 +1,19 @@
package appmessage
// MsgIBDRootUTXOSetChunk represents a kaspa IBDRootUTXOSetChunk message
type MsgIBDRootUTXOSetChunk struct {
baseMessage
Chunk []byte
}
// Command returns the protocol command string for the message
func (msg *MsgIBDRootUTXOSetChunk) Command() MessageCommand {
return CmdIBDRootUTXOSetChunk
}
// NewMsgIBDRootUTXOSetChunk returns a new MsgIBDRootUTXOSetChunk.
func NewMsgIBDRootUTXOSetChunk(chunk []byte) *MsgIBDRootUTXOSetChunk {
return &MsgIBDRootUTXOSetChunk{
Chunk: chunk,
}
}
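Together with MsgRequestNextIBDRootUTXOSetChunk and MsgDoneIBDRootUTXOSetChunks from this changeset, the chunk message implies a simple request/append loop on the receiving side. A hedged sketch, assuming channel-based plumbing instead of the real protocol router (receiveIBDRootUTXOSet is hypothetical; errors is github.com/pkg/errors):
// receiveIBDRootUTXOSet accumulates chunks, requesting the next one
// after each arrival, until the peer signals completion.
func receiveIBDRootUTXOSet(incoming <-chan Message, outgoing chan<- Message) ([]byte, error) {
	var serializedUTXOSet []byte
	for {
		switch message := (<-incoming).(type) {
		case *MsgIBDRootUTXOSetChunk:
			serializedUTXOSet = append(serializedUTXOSet, message.Chunk...)
			outgoing <- NewMsgRequestNextIBDRootUTXOSetChunk()
		case *MsgDoneIBDRootUTXOSetChunks:
			return serializedUTXOSet, nil
		default:
			return nil, errors.Errorf("received unexpected message command %d", message.Command())
		}
	}
}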

View File

@@ -1,7 +1,7 @@
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgInvRelayBlock implements the Message interface and represents a kaspa
@@ -9,7 +9,7 @@ import (
// by sending their hash, and let the receiving node decide if it needs it.
type MsgInvRelayBlock struct {
baseMessage
Hash *daghash.Hash
Hash *externalapi.DomainHash
}
// Command returns the protocol command string for the message. This is part
@@ -20,7 +20,7 @@ func (msg *MsgInvRelayBlock) Command() MessageCommand {
// NewMsgInvBlock returns a new kaspa invrelblk message that conforms to
// the Message interface. See MsgInvRelayBlock for details.
func NewMsgInvBlock(hash *daghash.Hash) *MsgInvRelayBlock {
func NewMsgInvBlock(hash *externalapi.DomainHash) *MsgInvRelayBlock {
return &MsgInvRelayBlock{
Hash: hash,
}

View File

@@ -1,7 +1,7 @@
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MaxInvPerTxInvMsg is the maximum number of hashes that can
@@ -13,7 +13,7 @@ const MaxInvPerTxInvMsg = MaxInvPerMsg
// by sending their ID, and let the receiving node decide if it needs it.
type MsgInvTransaction struct {
baseMessage
TxIDs []*daghash.TxID
TxIDs []*externalapi.DomainTransactionID
}
// Command returns the protocol command string for the message. This is part
@@ -24,7 +24,7 @@ func (msg *MsgInvTransaction) Command() MessageCommand {
// NewMsgInvTransaction returns a new kaspa TxInv message that conforms to
// the Message interface. See MsgInvTransaction for details.
func NewMsgInvTransaction(ids []*daghash.TxID) *MsgInvTransaction {
func NewMsgInvTransaction(ids []*externalapi.DomainTransactionID) *MsgInvTransaction {
return &MsgInvTransaction{
TxIDs: ids,
}

View File

@@ -5,7 +5,7 @@
package appmessage
import (
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgRequestAddresses implements the Message interface and represents a kaspa
@@ -17,7 +17,7 @@ import (
type MsgRequestAddresses struct {
baseMessage
IncludeAllSubnetworks bool
SubnetworkID *subnetworkid.SubnetworkID
SubnetworkID *externalapi.DomainSubnetworkID
}
// Command returns the protocol command string for the message. This is part
@@ -28,7 +28,7 @@ func (msg *MsgRequestAddresses) Command() MessageCommand {
// NewMsgRequestAddresses returns a new kaspa RequestAddresses message that conforms to the
// Message interface. See MsgRequestAddresses for details.
func NewMsgRequestAddresses(includeAllSubnetworks bool, subnetworkID *subnetworkid.SubnetworkID) *MsgRequestAddresses {
func NewMsgRequestAddresses(includeAllSubnetworks bool, subnetworkID *externalapi.DomainSubnetworkID) *MsgRequestAddresses {
return &MsgRequestAddresses{
IncludeAllSubnetworks: includeAllSubnetworks,
SubnetworkID: subnetworkID,

View File

@@ -1,17 +1,18 @@
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgRequestBlockLocator implements the Message interface and represents a kaspa
// RequestBlockLocator message. It is used to request a block locator between high
// and low hash.
// RequestBlockLocator message. It is used to request a block locator between low
// and high hash.
// The locator is returned via a locator message (MsgBlockLocator).
type MsgRequestBlockLocator struct {
baseMessage
HighHash *daghash.Hash
LowHash *daghash.Hash
LowHash *externalapi.DomainHash
HighHash *externalapi.DomainHash
Limit uint32
}
// Command returns the protocol command string for the message. This is part
@@ -23,9 +24,10 @@ func (msg *MsgRequestBlockLocator) Command() MessageCommand {
// NewMsgRequestBlockLocator returns a new RequestBlockLocator message that conforms to the
// Message interface using the passed parameters and defaults for the remaining
// fields.
func NewMsgRequestBlockLocator(highHash, lowHash *daghash.Hash) *MsgRequestBlockLocator {
func NewMsgRequestBlockLocator(lowHash, highHash *externalapi.DomainHash, limit uint32) *MsgRequestBlockLocator {
return &MsgRequestBlockLocator{
HighHash: highHash,
LowHash: lowHash,
HighHash: highHash,
Limit: limit,
}
}

View File

@@ -3,20 +3,20 @@ package appmessage
import (
"testing"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// TestRequestBlockLocator tests the MsgRequestBlockLocator API.
func TestRequestBlockLocator(t *testing.T) {
hashStr := "000000000002e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0"
highHash, err := daghash.NewHashFromStr(hashStr)
highHash, err := externalapi.NewDomainHashFromString(hashStr)
if err != nil {
t.Errorf("NewHashFromStr: %v", err)
}
// Ensure the command is expected value.
wantCmd := MessageCommand(9)
msg := NewMsgRequestBlockLocator(highHash, &daghash.ZeroHash)
msg := NewMsgRequestBlockLocator(highHash, &externalapi.DomainHash{}, 0)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgRequestBlockLocator: wrong command - got %v want %v",
cmd, wantCmd)

View File

@@ -0,0 +1,34 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgRequestHeaders implements the Message interface and represents a kaspa
// RequestHeaders message. It is used to request a list of blocks starting after the
// low hash and until the high hash.
type MsgRequestHeaders struct {
baseMessage
LowHash *externalapi.DomainHash
HighHash *externalapi.DomainHash
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestHeaders) Command() MessageCommand {
return CmdRequestHeaders
}
// NewMsgRequstHeaders returns a new kaspa RequestHeaders message that conforms to the
// Message interface using the passed parameters and defaults for the remaining
// fields.
func NewMsgRequstHeaders(lowHash, highHash *externalapi.DomainHash) *MsgRequestHeaders {
return &MsgRequestHeaders{
LowHash: lowHash,
HighHash: highHash,
}
}

View File

@@ -7,34 +7,34 @@ package appmessage
import (
"testing"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// TestRequstIBDBlocks tests the MsgRequestIBDBlocks API.
// TestRequstIBDBlocks tests the MsgRequestHeaders API.
func TestRequstIBDBlocks(t *testing.T) {
hashStr := "000000000002e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0"
lowHash, err := daghash.NewHashFromStr(hashStr)
lowHash, err := externalapi.NewDomainHashFromString(hashStr)
if err != nil {
t.Errorf("NewHashFromStr: %v", err)
}
hashStr = "3ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506"
highHash, err := daghash.NewHashFromStr(hashStr)
hashStr = "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506"
highHash, err := externalapi.NewDomainHashFromString(hashStr)
if err != nil {
t.Errorf("NewHashFromStr: %v", err)
}
// Ensure we get the same data back out.
msg := NewMsgRequstIBDBlocks(lowHash, highHash)
if !msg.HighHash.IsEqual(highHash) {
t.Errorf("NewMsgRequstIBDBlocks: wrong high hash - got %v, want %v",
msg := NewMsgRequstHeaders(lowHash, highHash)
if !msg.HighHash.Equal(highHash) {
t.Errorf("NewMsgRequstHeaders: wrong high hash - got %v, want %v",
msg.HighHash, highHash)
}
// Ensure the command is expected value.
wantCmd := MessageCommand(4)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgRequstIBDBlocks: wrong command - got %v want %v",
t.Errorf("NewMsgRequstHeaders: wrong command - got %v want %v",
cmd, wantCmd)
}
}

View File

@@ -1,20 +1,15 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgRequestIBDBlocks implements the Message interface and represents a kaspa
// RequestIBDBlocks message. It is used to request a list of blocks starting after the
// low hash and until the high hash.
// RequestIBDBlocks message. It is used to request blocks as part of the IBD
// protocol.
type MsgRequestIBDBlocks struct {
baseMessage
LowHash *daghash.Hash
HighHash *daghash.Hash
Hashes []*externalapi.DomainHash
}
// Command returns the protocol command string for the message. This is part
@@ -23,12 +18,9 @@ func (msg *MsgRequestIBDBlocks) Command() MessageCommand {
return CmdRequestIBDBlocks
}
// NewMsgRequstIBDBlocks returns a new kaspa RequestIBDBlocks message that conforms to the
// Message interface using the passed parameters and defaults for the remaining
// fields.
func NewMsgRequstIBDBlocks(lowHash, highHash *daghash.Hash) *MsgRequestIBDBlocks {
// NewMsgRequestIBDBlocks returns a new MsgRequestIBDBlocks.
func NewMsgRequestIBDBlocks(hashes []*externalapi.DomainHash) *MsgRequestIBDBlocks {
return &MsgRequestIBDBlocks{
LowHash: lowHash,
HighHash: highHash,
Hashes: hashes,
}
}
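Under the new signature the requester names explicit block hashes (typically derived from headers it has already synced) instead of a low/high range. A hedged caller-side sketch (requestBlocksForHeaders is hypothetical, not part of this changeset):
// requestBlocksForHeaders builds a block request from already-received
// headers, using each header's computed block hash.
func requestBlocksForHeaders(headers []*MsgBlockHeader) *MsgRequestIBDBlocks {
	hashes := make([]*externalapi.DomainHash, 0, len(headers))
	for _, header := range headers {
		hashes = append(hashes, header.BlockHash())
	}
	return NewMsgRequestIBDBlocks(hashes)
}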

View File

@@ -0,0 +1,26 @@
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgRequestIBDRootUTXOSetAndBlock implements the Message interface and represents a kaspa
// RequestIBDRootUTXOSetAndBlock message. It is used to request the UTXO set and block body
// of the IBD root block.
type MsgRequestIBDRootUTXOSetAndBlock struct {
baseMessage
IBDRoot *externalapi.DomainHash
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestIBDRootUTXOSetAndBlock) Command() MessageCommand {
return CmdRequestIBDRootUTXOSetAndBlock
}
// NewMsgRequestIBDRootUTXOSetAndBlock returns a new MsgRequestIBDRootUTXOSetAndBlock.
func NewMsgRequestIBDRootUTXOSetAndBlock(ibdRoot *externalapi.DomainHash) *MsgRequestIBDRootUTXOSetAndBlock {
return &MsgRequestIBDRootUTXOSetAndBlock{
IBDRoot: ibdRoot,
}
}

View File

@@ -0,0 +1,22 @@
package appmessage
// MsgRequestNextHeaders implements the Message interface and represents a kaspa
// RequestNextHeaders message. It is used to notify the IBD syncer peer to send
// more headers.
//
// This message has no payload.
type MsgRequestNextHeaders struct {
baseMessage
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestNextHeaders) Command() MessageCommand {
return CmdRequestNextHeaders
}
// NewMsgRequestNextHeaders returns a new kaspa RequestNextHeaders message that conforms to the
// Message interface.
func NewMsgRequestNextHeaders() *MsgRequestNextHeaders {
return &MsgRequestNextHeaders{}
}

View File

@@ -1,22 +0,0 @@
package appmessage
// MsgRequestNextIBDBlocks implements the Message interface and represents a kaspa
// RequestNextIBDBlocks message. It is used to notify the IBD syncer peer to send
// more blocks.
//
// This message has no payload.
type MsgRequestNextIBDBlocks struct {
baseMessage
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestNextIBDBlocks) Command() MessageCommand {
return CmdRequestNextIBDBlocks
}
// NewMsgRequestNextIBDBlocks returns a new kaspa RequestNextIBDBlocks message that conforms to the
// Message interface.
func NewMsgRequestNextIBDBlocks() *MsgRequestNextIBDBlocks {
return &MsgRequestNextIBDBlocks{}
}

View File

@@ -0,0 +1,16 @@
package appmessage
// MsgRequestNextIBDRootUTXOSetChunk represents a kaspa RequestNextIBDRootUTXOSetChunk message
type MsgRequestNextIBDRootUTXOSetChunk struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *MsgRequestNextIBDRootUTXOSetChunk) Command() MessageCommand {
return CmdRequestNextIBDRootUTXOSetChunk
}
// NewMsgRequestNextIBDRootUTXOSetChunk returns a new MsgRequestNextIBDRootUTXOSetChunk.
func NewMsgRequestNextIBDRootUTXOSetChunk() *MsgRequestNextIBDRootUTXOSetChunk {
return &MsgRequestNextIBDRootUTXOSetChunk{}
}

View File

@@ -1,19 +1,19 @@
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgRequestRelayBlocksHashes is the maximum number of hashes that can
// MaxRequestRelayBlocksHashes is the maximum number of hashes that can
// be in a single RequestRelayBlocks message.
const MsgRequestRelayBlocksHashes = MaxInvPerMsg
const MaxRequestRelayBlocksHashes = MaxInvPerMsg
// MsgRequestRelayBlocks implements the Message interface and represents a kaspa
// RequestRelayBlocks message. It is used to request blocks as part of the block
// relay protocol.
type MsgRequestRelayBlocks struct {
baseMessage
Hashes []*daghash.Hash
Hashes []*externalapi.DomainHash
}
// Command returns the protocol command string for the message. This is part
@@ -24,7 +24,7 @@ func (msg *MsgRequestRelayBlocks) Command() MessageCommand {
// NewMsgRequestRelayBlocks returns a new kaspa RequestRelayBlocks message that conforms to
// the Message interface. See MsgRequestRelayBlocks for details.
func NewMsgRequestRelayBlocks(hashes []*daghash.Hash) *MsgRequestRelayBlocks {
func NewMsgRequestRelayBlocks(hashes []*externalapi.DomainHash) *MsgRequestRelayBlocks {
return &MsgRequestRelayBlocks{
Hashes: hashes,
}

View File

@@ -1,21 +0,0 @@
package appmessage
// MsgRequestSelectedTip implements the Message interface and represents a kaspa
// RequestSelectedTip message. It is used to request the selected tip of another peer.
//
// This message has no payload.
type MsgRequestSelectedTip struct {
baseMessage
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestSelectedTip) Command() MessageCommand {
return CmdRequestSelectedTip
}
// NewMsgRequestSelectedTip returns a new kaspa RequestSelectedTip message that conforms to the
// Message interface.
func NewMsgRequestSelectedTip() *MsgRequestSelectedTip {
return &MsgRequestSelectedTip{}
}

View File

@@ -1,20 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"testing"
)
// TestRequestSelectedTip tests the MsgRequestSelectedTip API.
func TestRequestSelectedTip(t *testing.T) {
// Ensure the command is the expected value.
wantCmd := MessageCommand(12)
msg := NewMsgRequestSelectedTip()
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgRequestSelectedTip: wrong command - got %v want %v",
cmd, wantCmd)
}
}

View File

@@ -1,7 +1,7 @@
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MaxInvPerRequestTransactionsMsg is the maximum number of hashes that can
@@ -13,7 +13,7 @@ const MaxInvPerRequestTransactionsMsg = MaxInvPerMsg
// transactions relay protocol.
type MsgRequestTransactions struct {
baseMessage
IDs []*daghash.TxID
IDs []*externalapi.DomainTransactionID
}
// Command returns the protocol command string for the message. This is part
@@ -24,7 +24,7 @@ func (msg *MsgRequestTransactions) Command() MessageCommand {
// NewMsgRequestTransactions returns a new kaspa RequestTransactions message that conforms to
// the Message interface. See MsgRequestTransactions for details.
func NewMsgRequestTransactions(ids []*daghash.TxID) *MsgRequestTransactions {
func NewMsgRequestTransactions(ids []*externalapi.DomainTransactionID) *MsgRequestTransactions {
return &MsgRequestTransactions{
IDs: ids,
}

View File

@@ -1,28 +0,0 @@
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
)
// MsgSelectedTip implements the Message interface and represents a kaspa
// selectedtip message. It is used to answer getseltip messages and tell
// the asking peer what this peer's selected tip is.
type MsgSelectedTip struct {
baseMessage
// The selected tip hash of the generator of the message.
SelectedTipHash *daghash.Hash
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgSelectedTip) Command() MessageCommand {
return CmdSelectedTip
}
// NewMsgSelectedTip returns a new kaspa selectedtip message that conforms to the
// Message interface.
func NewMsgSelectedTip(selectedTipHash *daghash.Hash) *MsgSelectedTip {
return &MsgSelectedTip{
SelectedTipHash: selectedTipHash,
}
}

View File

@@ -1,18 +0,0 @@
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
"testing"
)
// TestSelectedTip tests the MsgSelectedTip API.
func TestSelectedTip(t *testing.T) {
// Ensure the command is the expected value.
wantCmd := MessageCommand(11)
msg := NewMsgSelectedTip(&daghash.ZeroHash)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgSelectedTip: wrong command - got %v want %v",
cmd, wantCmd)
}
}

View File

@@ -5,14 +5,14 @@
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgTransactionNotFound defines a kaspa TransactionNotFound message which is sent in response to
// a RequestTransactions message if any of the requested data is not available on the peer.
type MsgTransactionNotFound struct {
baseMessage
ID *daghash.TxID
ID *externalapi.DomainTransactionID
}
// Command returns the protocol command string for the message. This is part
@@ -23,7 +23,7 @@ func (msg *MsgTransactionNotFound) Command() MessageCommand {
// NewMsgTransactionNotFound returns a new kaspa transactionsnotfound message that conforms to the
// Message interface. See MsgTransactionNotFound for details.
func NewMsgTransactionNotFound(id *daghash.TxID) *MsgTransactionNotFound {
func NewMsgTransactionNotFound(id *externalapi.DomainTransactionID) *MsgTransactionNotFound {
return &MsgTransactionNotFound{
ID: id,
}

View File

@@ -6,49 +6,21 @@ package appmessage
import (
"encoding/binary"
"fmt"
"io"
"math"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
"strconv"
"github.com/kaspanet/kaspad/util/binaryserializer"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/subnetworks"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
const (
// TxVersion is the current latest supported transaction version.
TxVersion = 1
// MaxTxInSequenceNum is the maximum sequence number the sequence field
// of a transaction input can be.
MaxTxInSequenceNum uint64 = math.MaxUint64
// MaxPrevOutIndex is the maximum index the index field of a previous
// outpoint can be.
MaxPrevOutIndex uint32 = 0xffffffff
// SequenceLockTimeDisabled is a flag that, if set on a transaction
// input's sequence number, prevents the sequence number from being
// interpreted as a relative locktime.
SequenceLockTimeDisabled = 1 << 31
// SequenceLockTimeIsSeconds is a flag that, if set on a transaction
// input's sequence number, means the relative locktime has units of 512
// seconds.
SequenceLockTimeIsSeconds = 1 << 22
// SequenceLockTimeMask is a mask that extracts the relative locktime
// when masked against the transaction input sequence number.
SequenceLockTimeMask = 0x0000ffff
// SequenceLockTimeGranularity is the defined time based granularity
// for milliseconds-based relative time locks. When converting from milliseconds
// to a sequence number, the value is right shifted by this amount,
// therefore the granularity of relative time locks is 524288 or 2^19
// milliseconds. Enforced relative lock times are multiples of 524288 milliseconds.
SequenceLockTimeGranularity = 19
// defaultTxInOutAlloc is the default size used for the backing array for
// transaction inputs and outputs. The array will dynamically grow as needed,
// but this figure is intended to provide enough space for the number of
@@ -59,15 +31,15 @@ const (
// minTxInPayload is the minimum payload size for a transaction input.
// PreviousOutpoint.TxID + PreviousOutpoint.Index 4 bytes + Varint for
// SignatureScript length 1 byte + Sequence 4 bytes.
minTxInPayload = 9 + daghash.HashSize
minTxInPayload = 9 + externalapi.DomainHashSize
// maxTxInPerMessage is the maximum number of transaction inputs that
// a transaction which fits into a message could possibly have.
maxTxInPerMessage = (MaxMessagePayload / minTxInPayload) + 1
// MinTxOutPayload is the minimum payload size for a transaction output.
// Value 8 bytes + Varint for ScriptPubKey length 1 byte.
MinTxOutPayload = 9
// Value 8 bytes + version 2 bytes + Varint for ScriptPublicKey length 1 byte.
MinTxOutPayload = 11
// maxTxOutPerMessage is the maximum number of transaction outputs that
// a transaction which fits into a message could possibly have.
@@ -81,102 +53,18 @@ const (
// number of transaction outputs 1 byte + LockTime 4 bytes + min input
// payload + min output payload.
minTxPayload = 10
// freeListMaxScriptSize is the size of each buffer in the free list
// that is used for deserializing scripts from the appmessage before they are
// concatenated into a single contiguous buffer. This value was chosen
// because it is slightly more than twice the size of the vast majority
// of all "standard" scripts. Larger scripts are still deserialized
// properly as the free list will simply be bypassed for them.
freeListMaxScriptSize = 512
// freeListMaxItems is the number of buffers to keep in the free list
// to use for script deserialization. This value allows up to 100
// scripts per transaction being simultaneously deserialized by 125
// peers. Thus, the peak usage of the free list is 12,500 * 512 =
// 6,400,000 bytes.
freeListMaxItems = 12500
)
// txEncoding is a bitmask defining which transaction fields we
// want to encode and which to ignore.
type txEncoding uint8
const (
txEncodingFull txEncoding = 0
txEncodingExcludePayload txEncoding = 1 << iota
txEncodingExcludeSignatureScript
)
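These (removed) encoding flags follow the standard Go bitmask idiom: a field is excluded from the encoding exactly when its bit is set. A standalone, illustrative sketch of how the masks combine and are tested:

package main

import "fmt"

type txEncoding uint8

const (
	txEncodingFull           txEncoding = 0
	txEncodingExcludePayload txEncoding = 1 << iota // 1 << 1 == 2
	txEncodingExcludeSignatureScript                // 1 << 2 == 4
)

func main() {
	flags := txEncodingExcludePayload | txEncodingExcludeSignatureScript
	// A field is excluded exactly when its bit survives the mask.
	fmt.Println(flags&txEncodingExcludeSignatureScript == txEncodingExcludeSignatureScript) // true
	fmt.Println(flags == txEncodingFull)                                                    // false
}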
// scriptFreeList defines a free list of byte slices (up to the maximum number
// defined by the freeListMaxItems constant) that have a cap according to the
// freeListMaxScriptSize constant. It is used to provide temporary buffers for
// deserializing scripts in order to greatly reduce the number of allocations
// required.
//
// The caller can obtain a buffer from the free list by calling the Borrow
// function and should return it via the Return function when done using it.
type scriptFreeList chan []byte
// Borrow returns a byte slice from the free list with a length according to the
// provided size. A new buffer is allocated if there are no items available.
//
// When the size is larger than the max size allowed for items on the free list
// a new buffer of the appropriate size is allocated and returned. It is safe
// to attempt to return said buffer via the Return function as it will be
// ignored and allowed to go to the garbage collector.
func (c scriptFreeList) Borrow(size uint64) []byte {
if size > freeListMaxScriptSize {
return make([]byte, size)
}
var buf []byte
select {
case buf = <-c:
default:
buf = make([]byte, freeListMaxScriptSize)
}
return buf[:size]
}
// Return puts the provided byte slice back on the free list when it has a cap
// of the expected length. The buffer is expected to have been obtained via
// the Borrow function. Any slices that are not of the appropriate size, such
// as those whose size is greater than the largest allowed free list item size
// are simply ignored so they can go to the garbage collector.
func (c scriptFreeList) Return(buf []byte) {
// Ignore any buffers returned that aren't the expected size for the
// free list.
if cap(buf) != freeListMaxScriptSize {
return
}
// Return the buffer to the free list when it's not full. Otherwise let
// it be garbage collected.
select {
case c <- buf:
default:
// Let it go to the garbage collector.
}
}
// Create the concurrency-safe free list to use for script deserialization. As
// previously described, this free list is maintained to significantly reduce
// the number of allocations.
var scriptPool scriptFreeList = make(chan []byte, freeListMaxItems)
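A sketch of the Borrow/Return discipline this pool expects, mirroring how the readScript helper further down uses it. The function below is illustrative and not part of the source; it assumes it lives in this package next to scriptPool.

// readPooledScript is an illustrative helper showing the expected discipline.
func readPooledScript(r io.Reader, length uint64) ([]byte, error) {
	buf := scriptPool.Borrow(length) // reuses a pooled 512-byte buffer when length <= 512
	if _, err := io.ReadFull(r, buf); err != nil {
		scriptPool.Return(buf) // hand the buffer back on failure
		return nil, err
	}
	// The caller copies the script out and then calls scriptPool.Return(buf).
	return buf, nil
}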
// Outpoint defines a kaspa data type that is used to track previous
// transaction outputs.
type Outpoint struct {
TxID daghash.TxID
TxID externalapi.DomainTransactionID
Index uint32
}
// NewOutpoint returns a new kaspa transaction outpoint with the
// provided hash and index.
func NewOutpoint(txID *daghash.TxID, index uint32) *Outpoint {
func NewOutpoint(txID *externalapi.DomainTransactionID, index uint32) *Outpoint {
return &Outpoint{
TxID: *txID,
Index: index,
@@ -191,9 +79,9 @@ func (o Outpoint) String() string {
// maximum message payload may increase in the future and this
// optimization may go unnoticed, so allocate space for 10 decimal
// digits, which will fit any uint32.
buf := make([]byte, 2*daghash.HashSize+1, 2*daghash.HashSize+1+10)
buf := make([]byte, 2*externalapi.DomainHashSize+1, 2*externalapi.DomainHashSize+1+10)
copy(buf, o.TxID.String())
buf[2*daghash.HashSize] = ':'
buf[2*externalapi.DomainHashSize] = ':'
buf = strconv.AppendUint(buf, uint64(o.Index), 10)
return string(buf)
}
@@ -205,55 +93,26 @@ type TxIn struct {
Sequence uint64
}
// SerializeSize returns the number of bytes it would take to serialize the
// transaction input.
func (t *TxIn) SerializeSize() int {
return t.serializeSize(txEncodingFull)
}
func (t *TxIn) serializeSize(encodingFlags txEncoding) int {
// Outpoint ID 32 bytes + Outpoint Index 4 bytes + Sequence 8 bytes +
// serialized varint size for the length of SignatureScript +
// SignatureScript bytes.
return 44 + serializeSignatureScriptSize(t.SignatureScript, encodingFlags)
}
func serializeSignatureScriptSize(signatureScript []byte, encodingFlags txEncoding) int {
if encodingFlags&txEncodingExcludeSignatureScript != txEncodingExcludeSignatureScript {
return VarIntSerializeSize(uint64(len(signatureScript))) +
len(signatureScript)
}
return VarIntSerializeSize(0)
}
// NewTxIn returns a new kaspa transaction input with the provided
// previous outpoint, signature script and sequence number.
func NewTxIn(prevOut *Outpoint, signatureScript []byte) *TxIn {
func NewTxIn(prevOut *Outpoint, signatureScript []byte, sequence uint64) *TxIn {
return &TxIn{
PreviousOutpoint: *prevOut,
SignatureScript: signatureScript,
Sequence: MaxTxInSequenceNum,
Sequence: sequence,
}
}
// TxOut defines a kaspa transaction output.
type TxOut struct {
Value uint64
ScriptPubKey []byte
}
// SerializeSize returns the number of bytes it would take to serialize the
// transaction output.
func (t *TxOut) SerializeSize() int {
// Value 8 bytes + serialized varint size for the length of ScriptPubKey +
// ScriptPubKey bytes.
return 8 + VarIntSerializeSize(uint64(len(t.ScriptPubKey))) + len(t.ScriptPubKey)
ScriptPubKey *externalapi.ScriptPublicKey
}
// NewTxOut returns a new kaspa transaction output with the provided
// transaction value and public key script.
func NewTxOut(value uint64, scriptPubKey []byte) *TxOut {
func NewTxOut(value uint64, scriptPubKey *externalapi.ScriptPublicKey) *TxOut {
return &TxOut{
Value: value,
ScriptPubKey: scriptPubKey,
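At call sites, the two constructor changes in this file combine as in the following sketch. The import paths and the constants package are assumptions inferred from the test changes further down, and the script bytes are placeholders:

import (
	"github.com/kaspanet/kaspad/app/appmessage" // path assumed
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
)

// buildTxPieces sketches the new constructor signatures (illustrative).
func buildTxPieces(prevOut *appmessage.Outpoint, sigScript []byte) (*appmessage.TxIn, *appmessage.TxOut) {
	// The sequence is now an explicit argument instead of an implied default.
	txIn := appmessage.NewTxIn(prevOut, sigScript, constants.MaxTxInSequenceNum)
	// Outputs now carry a versioned ScriptPublicKey instead of a raw byte slice.
	scriptPublicKey := &externalapi.ScriptPublicKey{Script: []byte{0x51}, Version: 0} // placeholder script
	txOut := appmessage.NewTxOut(100000000, scriptPublicKey)
	return txIn, txOut
}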
@@ -268,13 +127,13 @@ func NewTxOut(value uint64, scriptPubKey []byte) *TxOut {
// inputs and outputs.
type MsgTx struct {
baseMessage
Version int32
Version uint16
TxIn []*TxIn
TxOut []*TxOut
LockTime uint64
SubnetworkID subnetworkid.SubnetworkID
SubnetworkID externalapi.DomainSubnetworkID
Gas uint64
PayloadHash *daghash.Hash
PayloadHash externalapi.DomainHash
Payload []byte
}
@@ -295,41 +154,17 @@ func (msg *MsgTx) AddTxOut(to *TxOut) {
// value and reference the relevant block id, instead of previous transaction id.
func (msg *MsgTx) IsCoinBase() bool {
// A coinbase transaction must have subnetwork id SubnetworkIDCoinbase
return msg.SubnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase)
return msg.SubnetworkID == subnetworks.SubnetworkIDCoinbase
}
// TxHash generates the Hash for the transaction.
func (msg *MsgTx) TxHash() *daghash.Hash {
// Encode the transaction and calculate double sha256 on the result.
writer := daghash.NewDoubleHashWriter()
err := msg.serialize(writer, txEncodingExcludePayload)
if err != nil {
// this writer never returns errors (no allocations or possible failures) so errors can only come from validity checks,
// and we assume we never construct malformed transactions.
panic(fmt.Sprintf("TxHash() failed. this should never fail for structurally-valid transactions. err: %+v", err))
}
hash := writer.Finalize()
return &hash
func (msg *MsgTx) TxHash() *externalapi.DomainHash {
return consensushashing.TransactionHash(MsgTxToDomainTransaction(msg))
}
// TxID generates the Hash for the transaction without the signature script, gas and payload fields.
func (msg *MsgTx) TxID() *daghash.TxID {
// Encode the transaction, replace signature script with zeroes, cut off
// payload and calculate double sha256 on the result.
var encodingFlags txEncoding
if !msg.IsCoinBase() {
encodingFlags = txEncodingExcludeSignatureScript | txEncodingExcludePayload
}
writer := daghash.NewDoubleHashWriter()
err := msg.serialize(writer, encodingFlags)
if err != nil {
// this writer never returns errors (no allocations or possible failures) so errors can only come from validity checks,
// and we assume we never construct malformed transactions.
panic(fmt.Sprintf("TxID() failed. this should never fail for structurally-valid transactions. err: %+v", err))
}
txID := daghash.TxID(writer.Finalize())
return &txID
func (msg *MsgTx) TxID() *externalapi.DomainTransactionID {
return consensushashing.TransactionID(MsgTxToDomainTransaction(msg))
}
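Both accessors now delegate to consensushashing through a domain-transaction conversion. A sketch of the new relationship, assuming it sits in this package; per the updated test below, the hash and the ID use separate hashing domains and differ in general, even with an empty signature script:

// logHashAndID is illustrative (not in the source). TxHash covers the whole
// transaction; TxID masks out the signature scripts, gas and payload.
func logHashAndID(tx *MsgTx) {
	hash := tx.TxHash()
	id := tx.TxID()
	fmt.Println(hash, id) // the two differ in general
}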
// Copy creates a deep copy of a transaction so that the original does not get
@@ -357,7 +192,7 @@ func (msg *MsgTx) Copy() *MsgTx {
// Deep copy the old previous outpoint.
oldOutpoint := oldTxIn.PreviousOutpoint
newOutpoint := Outpoint{}
newOutpoint.TxID.SetBytes(oldOutpoint.TxID[:])
newOutpoint.TxID = oldOutpoint.TxID
newOutpoint.Index = oldOutpoint.Index
// Deep copy the old signature script.
@@ -382,20 +217,20 @@ func (msg *MsgTx) Copy() *MsgTx {
// Deep copy the old TxOut data.
for _, oldTxOut := range msg.TxOut {
// Deep copy the old ScriptPubKey
var newScript []byte
// Deep copy the old ScriptPublicKey
var newScript externalapi.ScriptPublicKey
oldScript := oldTxOut.ScriptPubKey
oldScriptLen := len(oldScript)
oldScriptLen := len(oldScript.Script)
if oldScriptLen > 0 {
newScript = make([]byte, oldScriptLen)
copy(newScript, oldScript[:oldScriptLen])
newScript = externalapi.ScriptPublicKey{Script: make([]byte, oldScriptLen), Version: oldScript.Version}
copy(newScript.Script, oldScript.Script[:oldScriptLen])
}
// Create new txOut with the deep copied data and append it to
// new Tx.
newTxOut := TxOut{
Value: oldTxOut.Value,
ScriptPubKey: newScript,
ScriptPubKey: &newScript,
}
newTx.TxOut = append(newTx.TxOut, &newTxOut)
}
@@ -403,368 +238,6 @@ func (msg *MsgTx) Copy() *MsgTx {
return &newTx
}
// KaspaDecode decodes r using the kaspa protocol encoding into the receiver.
// This is part of the Message interface implementation.
// See Deserialize for decoding transactions stored to disk, such as in a
// database, as opposed to decoding transactions from the appmessage.
func (msg *MsgTx) KaspaDecode(r io.Reader, pver uint32) error {
version, err := binaryserializer.Uint32(r, littleEndian)
if err != nil {
return err
}
msg.Version = int32(version)
count, err := ReadVarInt(r)
if err != nil {
return err
}
// Prevent more transaction inputs than could possibly fit into a
// message. It would be possible to cause memory exhaustion and panics
// without a sane upper bound on this count.
if count > uint64(maxTxInPerMessage) {
str := fmt.Sprintf("too many input transactions to fit into "+
"max message size [count %d, max %d]", count,
maxTxInPerMessage)
return messageError("MsgTx.KaspaDecode", str)
}
// returnScriptBuffers is a closure that returns any script buffers that
// were borrowed from the pool when there are any deserialization
// errors. This is only valid to call before the final step which
// replaces the scripts with the location in a contiguous buffer and
// returns them.
returnScriptBuffers := func() {
for _, txIn := range msg.TxIn {
if txIn == nil || txIn.SignatureScript == nil {
continue
}
scriptPool.Return(txIn.SignatureScript)
}
for _, txOut := range msg.TxOut {
if txOut == nil || txOut.ScriptPubKey == nil {
continue
}
scriptPool.Return(txOut.ScriptPubKey)
}
}
// Deserialize the inputs.
var totalScriptSize uint64
txIns := make([]TxIn, count)
msg.TxIn = make([]*TxIn, count)
for i := uint64(0); i < count; i++ {
// The pointer is set now in case a script buffer is borrowed
// and needs to be returned to the pool on error.
ti := &txIns[i]
msg.TxIn[i] = ti
err = readTxIn(r, pver, msg.Version, ti)
if err != nil {
returnScriptBuffers()
return err
}
totalScriptSize += uint64(len(ti.SignatureScript))
}
count, err = ReadVarInt(r)
if err != nil {
returnScriptBuffers()
return err
}
// Prevent more transaction outputs than could possibly fit into a
// message. It would be possible to cause memory exhaustion and panics
// without a sane upper bound on this count.
if count > uint64(maxTxOutPerMessage) {
returnScriptBuffers()
str := fmt.Sprintf("too many output transactions to fit into "+
"max message size [count %d, max %d]", count,
maxTxOutPerMessage)
return messageError("MsgTx.KaspaDecode", str)
}
// Deserialize the outputs.
txOuts := make([]TxOut, count)
msg.TxOut = make([]*TxOut, count)
for i := uint64(0); i < count; i++ {
// The pointer is set now in case a script buffer is borrowed
// and needs to be returned to the pool on error.
to := &txOuts[i]
msg.TxOut[i] = to
err = readTxOut(r, pver, msg.Version, to)
if err != nil {
returnScriptBuffers()
return err
}
totalScriptSize += uint64(len(to.ScriptPubKey))
}
lockTime, err := binaryserializer.Uint64(r, littleEndian)
msg.LockTime = lockTime
if err != nil {
returnScriptBuffers()
return err
}
_, err = io.ReadFull(r, msg.SubnetworkID[:])
if err != nil {
returnScriptBuffers()
return err
}
if !msg.SubnetworkID.IsEqual(subnetworkid.SubnetworkIDNative) {
msg.Gas, err = binaryserializer.Uint64(r, littleEndian)
if err != nil {
returnScriptBuffers()
return err
}
var payloadHash daghash.Hash
err = ReadElement(r, &payloadHash)
if err != nil {
returnScriptBuffers()
return err
}
msg.PayloadHash = &payloadHash
payloadLength, err := ReadVarInt(r)
if err != nil {
returnScriptBuffers()
return err
}
msg.Payload = make([]byte, payloadLength)
_, err = io.ReadFull(r, msg.Payload)
if err != nil {
returnScriptBuffers()
return err
}
}
// Create a single allocation to house all of the scripts and set each
// input signature script and output public key script to the
// appropriate subslice of the overall contiguous buffer. Then, return
// each individual script buffer back to the pool so they can be reused
// for future deserializations. This is done because it significantly
// reduces the number of allocations the garbage collector needs to
// track, which in turn improves performance and drastically reduces the
// amount of runtime overhead that would otherwise be needed to keep
// track of millions of small allocations.
//
// NOTE: It is no longer valid to call the returnScriptBuffers closure
// after these blocks of code run because it is already done and the
// scripts in the transaction inputs and outputs no longer point to the
// buffers.
var offset uint64
scripts := make([]byte, totalScriptSize)
for i := 0; i < len(msg.TxIn); i++ {
// Copy the signature script into the contiguous buffer at the
// appropriate offset.
signatureScript := msg.TxIn[i].SignatureScript
copy(scripts[offset:], signatureScript)
// Reset the signature script of the transaction input to the
// slice of the contiguous buffer where the script lives.
scriptSize := uint64(len(signatureScript))
end := offset + scriptSize
msg.TxIn[i].SignatureScript = scripts[offset:end:end]
offset += scriptSize
// Return the temporary script buffer to the pool.
scriptPool.Return(signatureScript)
}
for i := 0; i < len(msg.TxOut); i++ {
// Copy the public key script into the contiguous buffer at the
// appropriate offset.
scriptPubKey := msg.TxOut[i].ScriptPubKey
copy(scripts[offset:], scriptPubKey)
// Reset the public key script of the transaction output to the
// slice of the contiguous buffer where the script lives.
scriptSize := uint64(len(scriptPubKey))
end := offset + scriptSize
msg.TxOut[i].ScriptPubKey = scripts[offset:end:end]
offset += scriptSize
// Return the temporary script buffer to the pool.
scriptPool.Return(scriptPubKey)
}
return nil
}
// Deserialize decodes a transaction from r into the receiver using a format
// that is suitable for long-term storage such as a database while respecting
// the Version field in the transaction. This function differs from KaspaDecode
// in that KaspaDecode decodes from the kaspa appmessage protocol as it was sent
// across the network. The appmessage encoding can technically differ depending on
// the protocol version and doesn't even really need to match the format of a
// stored transaction at all. As of the time this comment was written, the
// encoded transaction is the same in both instances, but there is a distinct
// difference and separating the two allows the API to be flexible enough to
// deal with changes.
func (msg *MsgTx) Deserialize(r io.Reader) error {
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of KaspaDecode.
return msg.KaspaDecode(r, 0)
}
// KaspaEncode encodes the receiver to w using the kaspa protocol encoding.
// This is part of the Message interface implementation.
// See Serialize for encoding transactions to be stored to disk, such as in a
// database, as opposed to encoding transactions for the appmessage.
func (msg *MsgTx) KaspaEncode(w io.Writer, pver uint32) error {
return msg.encode(w, pver, txEncodingFull)
}
func (msg *MsgTx) encode(w io.Writer, pver uint32, encodingFlags txEncoding) error {
err := binaryserializer.PutUint32(w, littleEndian, uint32(msg.Version))
if err != nil {
return err
}
count := uint64(len(msg.TxIn))
err = WriteVarInt(w, count)
if err != nil {
return err
}
for _, ti := range msg.TxIn {
err = writeTxIn(w, pver, msg.Version, ti, encodingFlags)
if err != nil {
return err
}
}
count = uint64(len(msg.TxOut))
err = WriteVarInt(w, count)
if err != nil {
return err
}
for _, to := range msg.TxOut {
err = WriteTxOut(w, pver, msg.Version, to)
if err != nil {
return err
}
}
err = binaryserializer.PutUint64(w, littleEndian, msg.LockTime)
if err != nil {
return err
}
_, err = w.Write(msg.SubnetworkID[:])
if err != nil {
return err
}
if !msg.SubnetworkID.IsEqual(subnetworkid.SubnetworkIDNative) {
if msg.SubnetworkID.IsBuiltIn() && msg.Gas != 0 {
str := "Transactions from built-in should have 0 gas"
return messageError("MsgTx.KaspaEncode", str)
}
err = binaryserializer.PutUint64(w, littleEndian, msg.Gas)
if err != nil {
return err
}
err = WriteElement(w, msg.PayloadHash)
if err != nil {
return err
}
if encodingFlags&txEncodingExcludePayload != txEncodingExcludePayload {
err = WriteVarInt(w, uint64(len(msg.Payload)))
w.Write(msg.Payload)
} else {
err = WriteVarInt(w, 0)
}
if err != nil {
return err
}
} else if msg.Payload != nil {
str := "Transactions from native subnetwork should have <nil> payload"
return messageError("MsgTx.KaspaEncode", str)
} else if msg.PayloadHash != nil {
str := "Transactions from native subnetwork should have <nil> payload hash"
return messageError("MsgTx.KaspaEncode", str)
} else if msg.Gas != 0 {
str := "Transactions from native subnetwork should have 0 gas"
return messageError("MsgTx.KaspaEncode", str)
}
return nil
}
// Serialize encodes the transaction to w using a format that is suitable for
// long-term storage such as a database while respecting the Version field in
// the transaction. This function differs from KaspaEncode in that KaspaEncode
// encodes the transaction to the kaspa appmessage protocol in order to be sent
// across the network. The appmessage encoding can technically differ depending on
// the protocol version and doesn't even really need to match the format of a
// stored transaction at all. As of the time this comment was written, the
// encoded transaction is the same in both instances, but there is a distinct
// difference and separating the two allows the API to be flexible enough to
// deal with changes.
func (msg *MsgTx) Serialize(w io.Writer) error {
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of KaspaEncode.
return msg.KaspaEncode(w, 0)
}
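A sketch of the storage-format round trip this pair of (removed) helpers supported, matching what TestTxSerialize below exercises. Illustrative only, written against the pre-refactor API and assuming this file's bytes import:

// roundTrip is illustrative (not in the source).
func roundTrip(tx *MsgTx) (*MsgTx, error) {
	var buf bytes.Buffer
	if err := tx.Serialize(&buf); err != nil { // currently identical to KaspaEncode at pver 0
		return nil, err
	}
	var decoded MsgTx
	if err := decoded.Deserialize(&buf); err != nil {
		return nil, err
	}
	return &decoded, nil
}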
func (msg *MsgTx) serialize(w io.Writer, encodingFlags txEncoding) error {
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of `encode`.
return msg.encode(w, 0, encodingFlags)
}
// SerializeSize returns the number of bytes it would take to serialize
// the transaction.
func (msg *MsgTx) SerializeSize() int {
return msg.serializeSize(txEncodingFull)
}
// serializeSize returns the number of bytes it would take to serialize
// the transaction under the given encoding flags.
func (msg *MsgTx) serializeSize(encodingFlags txEncoding) int {
// Version 4 bytes + LockTime 8 bytes + SubnetworkID 20
// bytes + Serialized varint size for the number of transaction
// inputs and outputs.
n := 32 + VarIntSerializeSize(uint64(len(msg.TxIn))) +
VarIntSerializeSize(uint64(len(msg.TxOut)))
if !msg.SubnetworkID.IsEqual(subnetworkid.SubnetworkIDNative) {
// Gas 8 bytes
n += 8
// PayloadHash
n += daghash.HashSize
// Serialized varint size for the length of the payload
if encodingFlags&txEncodingExcludePayload != txEncodingExcludePayload {
n += VarIntSerializeSize(uint64(len(msg.Payload)))
n += len(msg.Payload)
} else {
n += VarIntSerializeSize(0)
}
}
for _, txIn := range msg.TxIn {
n += txIn.serializeSize(encodingFlags)
}
for _, txOut := range msg.TxOut {
n += txOut.SerializeSize()
}
return n
}
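As a sanity check of this accounting in the pre-refactor API: an empty native transaction costs the 32 fixed bytes plus two one-byte varints, which matches the 34-byte noTxEncoded vector in the tests below. An illustrative sketch:

// checkEmptyTxSize is illustrative (not in the source).
func checkEmptyTxSize() int {
	noTx := NewNativeMsgTx(1, nil, nil)
	// 32 fixed bytes (4 version + 8 lock time + 20 subnetwork ID)
	// + 1-byte varint for zero inputs + 1-byte varint for zero outputs = 34,
	// the exact length of the noTxEncoded vector in the tests below.
	return noTx.SerializeSize() // 34
}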
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgTx) Command() MessageCommand {
@@ -777,52 +250,14 @@ func (msg *MsgTx) MaxPayloadLength(pver uint32) uint32 {
return MaxMessagePayload
}
// ScriptPubKeyLocs returns a slice containing the start of each public key script
// within the raw serialized transaction. The caller can easily obtain the
// length of each script by using len on the script available via the
// appropriate transaction output entry.
func (msg *MsgTx) ScriptPubKeyLocs() []int {
numTxOut := len(msg.TxOut)
if numTxOut == 0 {
return nil
}
// The starting offset in the serialized transaction of the first
// transaction output is:
//
// Version 4 bytes + serialized varint size for the number of
// transaction inputs and outputs + serialized size of each transaction
// input.
n := 4 + VarIntSerializeSize(uint64(len(msg.TxIn))) +
VarIntSerializeSize(uint64(numTxOut))
for _, txIn := range msg.TxIn {
n += txIn.SerializeSize()
}
// Calculate and set the appropriate offset for each public key script.
scriptPubKeyLocs := make([]int, numTxOut)
for i, txOut := range msg.TxOut {
// The offset of the script in the transaction output is:
//
// Value 8 bytes + serialized varint size for the length of
// ScriptPubKey.
n += 8 + VarIntSerializeSize(uint64(len(txOut.ScriptPubKey)))
scriptPubKeyLocs[i] = n
n += len(txOut.ScriptPubKey)
}
return scriptPubKeyLocs
}
// IsSubnetworkCompatible returns true iff subnetworkID is any of the following:
// 1. The SupportsAll subnetwork (full node)
// 2. The native subnetwork
// 3. The transaction's subnetwork
func (msg *MsgTx) IsSubnetworkCompatible(subnetworkID *subnetworkid.SubnetworkID) bool {
func (msg *MsgTx) IsSubnetworkCompatible(subnetworkID *externalapi.DomainSubnetworkID) bool {
return subnetworkID == nil ||
subnetworkID.IsEqual(subnetworkid.SubnetworkIDNative) ||
subnetworkID.IsEqual(&msg.SubnetworkID)
subnetworkID.Equal(&subnetworks.SubnetworkIDNative) ||
subnetworkID.Equal(&msg.SubnetworkID)
}
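A sketch of a typical call site after the signature change. Illustrative only; a nil filter models a full node:

// filterTxs is illustrative (not in the source).
func filterTxs(txs []*MsgTx, filter *externalapi.DomainSubnetworkID) []*MsgTx {
	var relevant []*MsgTx
	for _, tx := range txs {
		// nil matches everything (a full node); the native subnetwork matches
		// every transaction; otherwise the filter must equal tx.SubnetworkID.
		if tx.IsSubnetworkCompatible(filter) {
			relevant = append(relevant, tx)
		}
	}
	return relevant
}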
// newMsgTx returns a new tx message that conforms to the Message interface.
@@ -834,7 +269,7 @@ func (msg *MsgTx) IsSubnetworkCompatible(subnetworkID *subnetworkid.SubnetworkID
// The payload hash is calculated automatically according to the provided payload.
// Also, the lock time is set to zero to indicate the transaction is valid
// immediately as opposed to some time in future.
func newMsgTx(version int32, txIn []*TxIn, txOut []*TxOut, subnetworkID *subnetworkid.SubnetworkID,
func newMsgTx(version uint16, txIn []*TxIn, txOut []*TxOut, subnetworkID *externalapi.DomainSubnetworkID,
gas uint64, payload []byte, lockTime uint64) *MsgTx {
if txIn == nil {
@@ -845,9 +280,9 @@ func newMsgTx(version int32, txIn []*TxIn, txOut []*TxOut, subnetworkID *subnetw
txOut = make([]*TxOut, 0, defaultTxInOutAlloc)
}
var payloadHash *daghash.Hash
if !subnetworkID.IsEqual(subnetworkid.SubnetworkIDNative) {
payloadHash = daghash.DoubleHashP(payload)
var payloadHash externalapi.DomainHash
if *subnetworkID != subnetworks.SubnetworkIDNative {
payloadHash = *hashes.PayloadHash(payload)
}
return &MsgTx{
@@ -863,12 +298,12 @@ func newMsgTx(version int32, txIn []*TxIn, txOut []*TxOut, subnetworkID *subnetw
}
// NewNativeMsgTx returns a new tx message in the native subnetwork
func NewNativeMsgTx(version int32, txIn []*TxIn, txOut []*TxOut) *MsgTx {
return newMsgTx(version, txIn, txOut, subnetworkid.SubnetworkIDNative, 0, nil, 0)
func NewNativeMsgTx(version uint16, txIn []*TxIn, txOut []*TxOut) *MsgTx {
return newMsgTx(version, txIn, txOut, &subnetworks.SubnetworkIDNative, 0, nil, 0)
}
// NewSubnetworkMsgTx returns a new tx message in the specified subnetwork with specified gas and payload
func NewSubnetworkMsgTx(version int32, txIn []*TxIn, txOut []*TxOut, subnetworkID *subnetworkid.SubnetworkID,
func NewSubnetworkMsgTx(version uint16, txIn []*TxIn, txOut []*TxOut, subnetworkID *externalapi.DomainSubnetworkID,
gas uint64, payload []byte) *MsgTx {
return newMsgTx(version, txIn, txOut, subnetworkID, gas, payload, 0)
@@ -877,128 +312,14 @@ func NewSubnetworkMsgTx(version int32, txIn []*TxIn, txOut []*TxOut, subnetworkI
// NewNativeMsgTxWithLocktime returns a new tx message in the native subnetwork with a locktime.
//
// See newMsgTx for further documentation of the parameters
func NewNativeMsgTxWithLocktime(version int32, txIn []*TxIn, txOut []*TxOut, locktime uint64) *MsgTx {
return newMsgTx(version, txIn, txOut, subnetworkid.SubnetworkIDNative, 0, nil, locktime)
func NewNativeMsgTxWithLocktime(version uint16, txIn []*TxIn, txOut []*TxOut, locktime uint64) *MsgTx {
return newMsgTx(version, txIn, txOut, &subnetworks.SubnetworkIDNative, 0, nil, locktime)
}
// NewRegistryMsgTx creates a new MsgTx that registers a new subnetwork
func NewRegistryMsgTx(version int32, txIn []*TxIn, txOut []*TxOut, gasLimit uint64) *MsgTx {
func NewRegistryMsgTx(version uint16, txIn []*TxIn, txOut []*TxOut, gasLimit uint64) *MsgTx {
payload := make([]byte, 8)
binary.LittleEndian.PutUint64(payload, gasLimit)
return NewSubnetworkMsgTx(version, txIn, txOut, subnetworkid.SubnetworkIDRegistry, 0, payload)
}
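The registry payload is simply the gas limit encoded as 8 little-endian bytes. A standalone sketch that reproduces the payload bytes of the registryTxEncoded test vector below, where a gas limit of 16 encodes as 0x10 followed by seven zero bytes:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	payload := make([]byte, 8)
	binary.LittleEndian.PutUint64(payload, 16) // the registry transaction's gas limit
	fmt.Printf("% x\n", payload)               // 10 00 00 00 00 00 00 00
}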
// readOutpoint reads the next sequence of bytes from r as an Outpoint.
func readOutpoint(r io.Reader, pver uint32, version int32, op *Outpoint) error {
_, err := io.ReadFull(r, op.TxID[:])
if err != nil {
return err
}
op.Index, err = binaryserializer.Uint32(r, littleEndian)
return err
}
// writeOutpoint encodes op to the kaspa protocol encoding for an Outpoint
// to w.
func writeOutpoint(w io.Writer, pver uint32, version int32, op *Outpoint) error {
_, err := w.Write(op.TxID[:])
if err != nil {
return err
}
return binaryserializer.PutUint32(w, littleEndian, op.Index)
}
// readScript reads a variable length byte array that represents a transaction
// script. It is encoded as a varInt containing the length of the array
// followed by the bytes themselves. An error is returned if the length is
// greater than the passed maxAllowed parameter which helps protect against
// memory exhaustion attacks and forced panics through malformed messages. The
// fieldName parameter is only used for the error message so it provides more
// context in the error.
func readScript(r io.Reader, pver uint32, maxAllowed uint32, fieldName string) ([]byte, error) {
count, err := ReadVarInt(r)
if err != nil {
return nil, err
}
// Prevent byte array larger than the max message size. It would
// be possible to cause memory exhaustion and panics without a sane
// upper bound on this count.
if count > uint64(maxAllowed) {
str := fmt.Sprintf("%s is larger than the max allowed size "+
"[count %d, max %d]", fieldName, count, maxAllowed)
return nil, messageError("readScript", str)
}
b := scriptPool.Borrow(count)
_, err = io.ReadFull(r, b)
if err != nil {
scriptPool.Return(b)
return nil, err
}
return b, nil
}
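The pattern being removed here, reading a varint length, bounding it, and only then allocating, generalizes beyond scripts. A standalone sketch using Go's generic uvarint in place of the protocol's ReadVarInt (illustrative only):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// readBounded reads a length-prefixed byte array, rejecting the length before
// allocating so a malformed message cannot force a huge allocation.
func readBounded(r *bytes.Reader, maxAllowed uint64) ([]byte, error) {
	count, err := binary.ReadUvarint(r)
	if err != nil {
		return nil, err
	}
	if count > maxAllowed {
		return nil, fmt.Errorf("length %d exceeds max %d", count, maxAllowed)
	}
	buf := make([]byte, count) // safe: count is already bounded
	_, err = io.ReadFull(r, buf)
	return buf, err
}

func main() {
	data := append([]byte{3}, 0xaa, 0xbb, 0xcc) // uvarint length 3, then payload
	script, err := readBounded(bytes.NewReader(data), 520)
	fmt.Println(script, err) // [170 187 204] <nil>
}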
// readTxIn reads the next sequence of bytes from r as a transaction input
// (TxIn).
func readTxIn(r io.Reader, pver uint32, version int32, ti *TxIn) error {
err := readOutpoint(r, pver, version, &ti.PreviousOutpoint)
if err != nil {
return err
}
ti.SignatureScript, err = readScript(r, pver, MaxMessagePayload,
"transaction input signature script")
if err != nil {
return err
}
return ReadElement(r, &ti.Sequence)
}
// writeTxIn encodes ti to the kaspa protocol encoding for a transaction
// input (TxIn) to w.
func writeTxIn(w io.Writer, pver uint32, version int32, ti *TxIn, encodingFlags txEncoding) error {
err := writeOutpoint(w, pver, version, &ti.PreviousOutpoint)
if err != nil {
return err
}
if encodingFlags&txEncodingExcludeSignatureScript != txEncodingExcludeSignatureScript {
err = WriteVarBytes(w, pver, ti.SignatureScript)
} else {
err = WriteVarBytes(w, pver, []byte{})
}
if err != nil {
return err
}
return binaryserializer.PutUint64(w, littleEndian, ti.Sequence)
}
// readTxOut reads the next sequence of bytes from r as a transaction output
// (TxOut).
func readTxOut(r io.Reader, pver uint32, version int32, to *TxOut) error {
err := ReadElement(r, &to.Value)
if err != nil {
return err
}
to.ScriptPubKey, err = readScript(r, pver, MaxMessagePayload,
"transaction output public key script")
return err
}
// WriteTxOut encodes to into the kaspa protocol encoding for a transaction
// output (TxOut) to w.
func WriteTxOut(w io.Writer, pver uint32, version int32, to *TxOut) error {
err := binaryserializer.PutUint64(w, littleEndian, uint64(to.Value))
if err != nil {
return err
}
return WriteVarBytes(w, pver, to.ScriptPubKey)
return NewSubnetworkMsgTx(version, txIn, txOut, &subnetworks.SubnetworkIDRegistry, 0, payload)
}

View File

@@ -7,24 +7,25 @@ package appmessage
import (
"bytes"
"fmt"
"github.com/pkg/errors"
"io"
"math"
"reflect"
"testing"
"unsafe"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"github.com/kaspanet/kaspad/domain/consensus/utils/subnetworks"
"github.com/kaspanet/kaspad/domain/consensus/utils/transactionid"
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// TestTx tests the MsgTx API.
func TestTx(t *testing.T) {
pver := ProtocolVersion
txIDStr := "3ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506"
txID, err := daghash.NewTxIDFromStr(txIDStr)
txIDStr := "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506"
txID, err := transactionid.FromString(txIDStr)
if err != nil {
t.Errorf("NewTxIDFromStr: %v", err)
}
@@ -51,7 +52,7 @@ func TestTx(t *testing.T) {
// testing package functionality.
prevOutIndex := uint32(1)
prevOut := NewOutpoint(txID, prevOutIndex)
if !prevOut.TxID.IsEqual(txID) {
if !prevOut.TxID.Equal(txID) {
t.Errorf("NewOutpoint: wrong ID - got %v, want %v",
spew.Sprint(&prevOut.TxID), spew.Sprint(txID))
}
@@ -67,7 +68,7 @@ func TestTx(t *testing.T) {
// Ensure we get the same transaction input back out.
sigScript := []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62}
txIn := NewTxIn(prevOut, sigScript)
txIn := NewTxIn(prevOut, sigScript, constants.MaxTxInSequenceNum)
if !reflect.DeepEqual(&txIn.PreviousOutpoint, prevOut) {
t.Errorf("NewTxIn: wrong prev outpoint - got %v, want %v",
spew.Sprint(&txIn.PreviousOutpoint),
@@ -81,26 +82,28 @@ func TestTx(t *testing.T) {
// Ensure we get the same transaction output back out.
txValue := uint64(5000000000)
scriptPubKey := []byte{
0x41, // OP_DATA_65
0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1,
0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24,
0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97,
0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78,
0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20,
0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63,
0xa6, // 65-byte signature
0xac, // OP_CHECKSIG
}
scriptPubKey := &externalapi.ScriptPublicKey{
Script: []byte{
0x41, // OP_DATA_65
0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1,
0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24,
0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97,
0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78,
0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20,
0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63,
0xa6, // 65-byte signature
0xac, // OP_CHECKSIG
},
Version: 0}
txOut := NewTxOut(txValue, scriptPubKey)
if txOut.Value != txValue {
t.Errorf("NewTxOut: wrong scriptPubKey - got %v, want %v",
txOut.Value, txValue)
}
if !bytes.Equal(txOut.ScriptPubKey, scriptPubKey) {
if !bytes.Equal(txOut.ScriptPubKey.Script, scriptPubKey.Script) {
t.Errorf("NewTxOut: wrong scriptPubKey - got %v, want %v",
spew.Sdump(txOut.ScriptPubKey),
spew.Sdump(scriptPubKey))
@@ -130,17 +133,21 @@ func TestTx(t *testing.T) {
// TestTxHashAndID tests the ability to generate the hash and ID of a transaction accurately.
func TestTxHashAndID(t *testing.T) {
txID1Str := "edca872f27279674c7a52192b32fd68b8b8be714bfea52d98b2c3c86c30e85c6"
wantTxID1, err := daghash.NewTxIDFromStr(txID1Str)
txHash1Str := "4bee9ee495bd93a755de428376bd582a2bb6ec37c041753b711c0606d5745c13"
txID1Str := "f868bd20e816256b80eac976821be4589d24d21141bd1cec6e8005d0c16c6881"
wantTxID1, err := transactionid.FromString(txID1Str)
if err != nil {
t.Errorf("NewTxIDFromStr: %v", err)
return
t.Fatalf("NewTxIDFromStr: %v", err)
}
wantTxHash1, err := transactionid.FromString(txHash1Str)
if err != nil {
t.Fatalf("NewTxIDFromStr: %v", err)
}
// A coinbase transaction
txIn := &TxIn{
PreviousOutpoint: Outpoint{
TxID: daghash.TxID{},
TxID: externalapi.DomainTransactionID{},
Index: math.MaxUint32,
},
SignatureScript: []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62},
@@ -148,7 +155,7 @@ func TestTxHashAndID(t *testing.T) {
}
txOut := &TxOut{
Value: 5000000000,
ScriptPubKey: []byte{
ScriptPubKey: &externalapi.ScriptPublicKey{Script: []byte{
0x41, // OP_DATA_65
0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
@@ -160,33 +167,33 @@ func TestTxHashAndID(t *testing.T) {
0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63,
0xa6, // 65-byte signature
0xac, // OP_CHECKSIG
},
}, Version: 0},
}
tx1 := NewSubnetworkMsgTx(1, []*TxIn{txIn}, []*TxOut{txOut}, subnetworkid.SubnetworkIDCoinbase, 0, nil)
tx1 := NewSubnetworkMsgTx(0, []*TxIn{txIn}, []*TxOut{txOut}, &subnetworks.SubnetworkIDCoinbase, 0, nil)
// Ensure the produced hash is the expected value.
tx1Hash := tx1.TxHash()
if !tx1Hash.IsEqual((*daghash.Hash)(wantTxID1)) {
if *tx1Hash != (externalapi.DomainHash)(*wantTxHash1) {
t.Errorf("TxHash: wrong hash - got %v, want %v",
spew.Sprint(tx1Hash), spew.Sprint(wantTxID1))
spew.Sprint(tx1Hash), spew.Sprint(wantTxHash1))
}
// Ensure the TxID is the expected value.
tx1ID := tx1.TxID()
if !tx1ID.IsEqual(wantTxID1) {
if !tx1ID.Equal(wantTxID1) {
t.Errorf("TxID: wrong ID - got %v, want %v",
spew.Sprint(tx1ID), spew.Sprint(wantTxID1))
}
hash2Str := "b11924b7eeffea821522222576c53dc5b8ddd97602f81e5e124d2626646d74ca"
wantHash2, err := daghash.NewHashFromStr(hash2Str)
hash2Str := "cb1bdb4a83d4885535fb3cceb5c96597b7df903db83f0ffcd779d703affd8efd"
wantHash2, err := externalapi.NewDomainHashFromString(hash2Str)
if err != nil {
t.Errorf("NewTxIDFromStr: %v", err)
return
}
id2Str := "750499ae9e6d44961ef8bad8af27a44dd4bcbea166b71baf181e8d3997e1ff72"
wantID2, err := daghash.NewTxIDFromStr(id2Str)
id2Str := "ca080073d4ddf5b84443a0964af633f3c70a5b290fd3bc35a7e6f93fd33f9330"
wantID2, err := transactionid.FromString(id2Str)
if err != nil {
t.Errorf("NewTxIDFromStr: %v", err)
return
@@ -195,7 +202,7 @@ func TestTxHashAndID(t *testing.T) {
txIns := []*TxIn{{
PreviousOutpoint: Outpoint{
Index: 0,
TxID: daghash.TxID{1, 2, 3},
TxID: *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{1, 2, 3}),
},
SignatureScript: []byte{
0x49, 0x30, 0x46, 0x02, 0x21, 0x00, 0xDA, 0x0D, 0xC6, 0xAE, 0xCE, 0xFE, 0x1E, 0x06, 0xEF, 0xDF,
@@ -213,730 +220,42 @@ func TestTxHashAndID(t *testing.T) {
txOuts := []*TxOut{
{
Value: 244623243,
ScriptPubKey: []byte{
ScriptPubKey: &externalapi.ScriptPublicKey{Script: []byte{
0x76, 0xA9, 0x14, 0xBA, 0xDE, 0xEC, 0xFD, 0xEF, 0x05, 0x07, 0x24, 0x7F, 0xC8, 0xF7, 0x42, 0x41,
0xD7, 0x3B, 0xC0, 0x39, 0x97, 0x2D, 0x7B, 0x88, 0xAC,
},
}, Version: 0},
},
{
Value: 44602432,
ScriptPubKey: []byte{
ScriptPubKey: &externalapi.ScriptPublicKey{Script: []byte{
0x76, 0xA9, 0x14, 0xC1, 0x09, 0x32, 0x48, 0x3F, 0xEC, 0x93, 0xED, 0x51, 0xF5, 0xFE, 0x95, 0xE7,
0x25, 0x59, 0xF2, 0xCC, 0x70, 0x43, 0xF9, 0x88, 0xAC,
},
}, Version: 0},
},
}
tx2 := NewSubnetworkMsgTx(1, txIns, txOuts, &subnetworkid.SubnetworkID{1, 2, 3}, 0, payload)
tx2 := NewSubnetworkMsgTx(1, txIns, txOuts, &externalapi.DomainSubnetworkID{1, 2, 3}, 0, payload)
// Ensure the produced hash is the expected value.
tx2Hash := tx2.TxHash()
if !tx2Hash.IsEqual(wantHash2) {
if !tx2Hash.Equal(wantHash2) {
t.Errorf("TxHash: wrong hash - got %v, want %v",
spew.Sprint(tx2Hash), spew.Sprint(wantHash2))
}
// Ensure the TxID is the expected value.
tx2ID := tx2.TxID()
if !tx2ID.IsEqual(wantID2) {
if !tx2ID.Equal(wantID2) {
t.Errorf("TxID: wrong ID - got %v, want %v",
spew.Sprint(tx2ID), spew.Sprint(wantID2))
}
if tx2ID.IsEqual((*daghash.TxID)(tx2Hash)) {
if tx2ID.Equal((*externalapi.DomainTransactionID)(tx2Hash)) {
t.Errorf("tx2ID and tx2Hash shouldn't be the same for non-coinbase transaction with signature and/or payload")
}
tx2.TxIn[0].SignatureScript = []byte{}
newTx2Hash := tx2.TxHash()
if !tx2ID.IsEqual((*daghash.TxID)(newTx2Hash)) {
t.Errorf("tx2ID and newTx2Hash should be the same for transaction with an empty signature")
if *tx2ID == (externalapi.DomainTransactionID)(*newTx2Hash) {
t.Errorf("tx2ID and newTx2Hash should not be the same even for transaction with an empty signature")
}
}
// TestTxEncoding tests the MsgTx appmessage encode and decode for various numbers
// of transaction inputs and outputs and protocol versions.
func TestTxEncoding(t *testing.T) {
// Empty tx message.
noTx := NewNativeMsgTx(1, nil, nil)
noTxEncoded := []byte{
0x01, 0x00, 0x00, 0x00, // Version
0x00, // Varint for number of input transactions
0x00, // Varint for number of output transactions
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Lock time
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, // Sub Network ID
}
tests := []struct {
in *MsgTx // Message to encode
out *MsgTx // Expected decoded message
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
}{
// Latest protocol version with no transactions.
{
noTx,
noTx,
noTxEncoded,
ProtocolVersion,
},
// Latest protocol version with multiple transactions.
{
multiTx,
multiTx,
multiTxEncoded,
ProtocolVersion,
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode the message to appmessage format.
var buf bytes.Buffer
err := test.in.KaspaEncode(&buf, test.pver)
if err != nil {
t.Errorf("KaspaEncode #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("KaspaEncode #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
// Decode the message from appmessage format.
var msg MsgTx
rbuf := bytes.NewReader(test.buf)
err = msg.KaspaDecode(rbuf, test.pver)
if err != nil {
t.Errorf("KaspaDecode #%d error %v", i, err)
continue
}
if !reflect.DeepEqual(&msg, test.out) {
t.Errorf("KaspaDecode #%d\n got: %s want: %s", i,
spew.Sdump(&msg), spew.Sdump(test.out))
continue
}
}
}
// TestTxEncodingErrors performs negative tests against appmessage encode and decode
// of MsgTx to confirm error paths work correctly.
func TestTxEncodingErrors(t *testing.T) {
pver := ProtocolVersion
tests := []struct {
in *MsgTx // Value to encode
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
max int // Max size of fixed buffer to induce errors
writeErr error // Expected write error
readErr error // Expected read error
}{
// Force error in version.
{multiTx, multiTxEncoded, pver, 0, io.ErrShortWrite, io.EOF},
// Force error in number of transaction inputs.
{multiTx, multiTxEncoded, pver, 4, io.ErrShortWrite, io.EOF},
// Force error in transaction input previous block hash.
{multiTx, multiTxEncoded, pver, 5, io.ErrShortWrite, io.EOF},
// Force error in transaction input previous block output index.
{multiTx, multiTxEncoded, pver, 37, io.ErrShortWrite, io.EOF},
// Force error in transaction input signature script length.
{multiTx, multiTxEncoded, pver, 41, io.ErrShortWrite, io.EOF},
// Force error in transaction input signature script.
{multiTx, multiTxEncoded, pver, 42, io.ErrShortWrite, io.EOF},
// Force error in transaction input sequence.
{multiTx, multiTxEncoded, pver, 49, io.ErrShortWrite, io.EOF},
// Force error in number of transaction outputs.
{multiTx, multiTxEncoded, pver, 57, io.ErrShortWrite, io.EOF},
// Force error in transaction output value.
{multiTx, multiTxEncoded, pver, 58, io.ErrShortWrite, io.EOF},
// Force error in transaction output scriptPubKey length.
{multiTx, multiTxEncoded, pver, 66, io.ErrShortWrite, io.EOF},
// Force error in transaction output scriptPubKey.
{multiTx, multiTxEncoded, pver, 67, io.ErrShortWrite, io.EOF},
// Force error in transaction output lock time.
{multiTx, multiTxEncoded, pver, 210, io.ErrShortWrite, io.EOF},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to appmessage format.
w := newFixedWriter(test.max)
err := test.in.KaspaEncode(w, test.pver)
if !errors.Is(err, test.writeErr) {
t.Errorf("KaspaEncode #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Decode from appmessage format.
var msg MsgTx
r := newFixedReader(test.max, test.buf)
err = msg.KaspaDecode(r, test.pver)
if !errors.Is(err, test.readErr) {
t.Errorf("KaspaDecode #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
}
}
}
// TestTxSerialize tests MsgTx serialize and deserialize.
func TestTxSerialize(t *testing.T) {
noTx := NewNativeMsgTx(1, nil, nil)
noTxEncoded := []byte{
0x01, 0x00, 0x00, 0x00, // Version
0x00, // Varint for number of input transactions
0x00, // Varint for number of output transactions
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Lock time
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, // Sub Network ID
}
registryTx := NewRegistryMsgTx(1, nil, nil, 16)
registryTxEncoded := []byte{
0x01, 0x00, 0x00, 0x00, // Version
0x00, // Varint for number of input transactions
0x00, // Varint for number of output transactions
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Lock time
0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, // Sub Network ID
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Gas
0x77, 0x56, 0x36, 0xb4, 0x89, 0x32, 0xe9, 0xa8,
0xbb, 0x67, 0xe6, 0x54, 0x84, 0x36, 0x93, 0x8d,
0x9f, 0xc5, 0x62, 0x49, 0x79, 0x5c, 0x0d, 0x0a,
0x86, 0xaf, 0x7c, 0x5d, 0x54, 0x45, 0x4c, 0x4b, // Payload hash
0x08, // Payload length varint
0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Payload / Gas limit
}
subnetworkTx := NewSubnetworkMsgTx(1, nil, nil, &subnetworkid.SubnetworkID{0xff}, 5, []byte{0, 1, 2})
subnetworkTxEncoded := []byte{
0x01, 0x00, 0x00, 0x00, // Version
0x00, // Varint for number of input transactions
0x00, // Varint for number of output transactions
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Lock time
0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, // Sub Network ID
0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Gas
0x35, 0xf9, 0xf2, 0x93, 0x0e, 0xa3, 0x44, 0x61,
0x88, 0x22, 0x79, 0x5e, 0xee, 0xc5, 0x68, 0xae,
0x67, 0xab, 0x29, 0x87, 0xd8, 0xb1, 0x9e, 0x45,
0x91, 0xe1, 0x05, 0x27, 0xba, 0xa1, 0xdf, 0x3d, // Payload hash
0x03, // Payload length varint
0x00, 0x01, 0x02, // Payload
}
tests := []struct {
name string
in *MsgTx // Message to encode
out *MsgTx // Expected decoded message
buf []byte // Serialized data
scriptPubKeyLocs []int // Expected output script locations
}{
// No transactions.
{
"noTx",
noTx,
noTx,
noTxEncoded,
nil,
},
// Registry Transaction.
{
"registryTx",
registryTx,
registryTx,
registryTxEncoded,
nil,
},
// Sub Network Transaction.
{
"subnetworkTx",
subnetworkTx,
subnetworkTx,
subnetworkTxEncoded,
nil,
},
// Multiple transactions.
{
"multiTx",
multiTx,
multiTx,
multiTxEncoded,
multiTxScriptPubKeyLocs,
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Serialize the transaction.
var buf bytes.Buffer
err := test.in.Serialize(&buf)
if err != nil {
t.Errorf("Serialize %s: error %v", test.name, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("Serialize %s:\n got: %s want: %s", test.name,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
// Deserialize the transaction.
var tx MsgTx
rbuf := bytes.NewReader(test.buf)
err = tx.Deserialize(rbuf)
if err != nil {
t.Errorf("Deserialize #%d error %v", i, err)
continue
}
if !reflect.DeepEqual(&tx, test.out) {
t.Errorf("Deserialize #%d\n got: %s want: %s", i,
spew.Sdump(&tx), spew.Sdump(test.out))
continue
}
// Ensure the public key script locations are accurate.
scriptPubKeyLocs := test.in.ScriptPubKeyLocs()
if !reflect.DeepEqual(scriptPubKeyLocs, test.scriptPubKeyLocs) {
t.Errorf("ScriptPubKeyLocs #%d\n got: %s want: %s", i,
spew.Sdump(scriptPubKeyLocs),
spew.Sdump(test.scriptPubKeyLocs))
continue
}
for j, loc := range scriptPubKeyLocs {
wantScriptPubKey := test.in.TxOut[j].ScriptPubKey
gotScriptPubKey := test.buf[loc : loc+len(wantScriptPubKey)]
if !bytes.Equal(gotScriptPubKey, wantScriptPubKey) {
t.Errorf("ScriptPubKeyLocs #%d:%d\n unexpected "+
"script got: %s want: %s", i, j,
spew.Sdump(gotScriptPubKey),
spew.Sdump(wantScriptPubKey))
}
}
}
}
// TestTxSerializeErrors performs negative tests against appmessage encode and decode
// of MsgTx to confirm error paths work correctly.
func TestTxSerializeErrors(t *testing.T) {
tests := []struct {
in *MsgTx // Value to encode
buf []byte // Serialized data
max int // Max size of fixed buffer to induce errors
writeErr error // Expected write error
readErr error // Expected read error
}{
// Force error in version.
{multiTx, multiTxEncoded, 0, io.ErrShortWrite, io.EOF},
// Force error in number of transaction inputs.
{multiTx, multiTxEncoded, 4, io.ErrShortWrite, io.EOF},
// Force error in transaction input previous block hash.
{multiTx, multiTxEncoded, 5, io.ErrShortWrite, io.EOF},
// Force error in transaction input previous block output index.
{multiTx, multiTxEncoded, 37, io.ErrShortWrite, io.EOF},
// Force error in transaction input signature script length.
{multiTx, multiTxEncoded, 41, io.ErrShortWrite, io.EOF},
// Force error in transaction input signature script.
{multiTx, multiTxEncoded, 42, io.ErrShortWrite, io.EOF},
// Force error in transaction input sequence.
{multiTx, multiTxEncoded, 49, io.ErrShortWrite, io.EOF},
// Force error in number of transaction outputs.
{multiTx, multiTxEncoded, 57, io.ErrShortWrite, io.EOF},
// Force error in transaction output value.
{multiTx, multiTxEncoded, 58, io.ErrShortWrite, io.EOF},
// Force error in transaction output scriptPubKey length.
{multiTx, multiTxEncoded, 66, io.ErrShortWrite, io.EOF},
// Force error in transaction output scriptPubKey.
{multiTx, multiTxEncoded, 67, io.ErrShortWrite, io.EOF},
// Force error in transaction output lock time.
{multiTx, multiTxEncoded, 210, io.ErrShortWrite, io.EOF},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Serialize the transaction.
w := newFixedWriter(test.max)
err := test.in.Serialize(w)
if !errors.Is(err, test.writeErr) {
t.Errorf("Serialize #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Deserialize the transaction.
var tx MsgTx
r := newFixedReader(test.max, test.buf)
err = tx.Deserialize(r)
if !errors.Is(err, test.readErr) {
t.Errorf("Deserialize #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
}
}
registryTx := NewSubnetworkMsgTx(1, nil, nil, subnetworkid.SubnetworkIDRegistry, 1, nil)
w := bytes.NewBuffer(make([]byte, 0, registryTx.SerializeSize()))
err := registryTx.Serialize(w)
str := "Transactions from built-in should have 0 gas"
expectedErr := messageError("MsgTx.KaspaEncode", str)
if err == nil || err.Error() != expectedErr.Error() {
t.Errorf("TestTxSerializeErrors: expected error %v but got %v", expectedErr, err)
}
nativeTx := NewSubnetworkMsgTx(1, nil, nil, subnetworkid.SubnetworkIDNative, 1, nil)
w = bytes.NewBuffer(make([]byte, 0, registryTx.SerializeSize()))
err = nativeTx.Serialize(w)
str = "Transactions from native subnetwork should have 0 gas"
expectedErr = messageError("MsgTx.KaspaEncode", str)
if err == nil || err.Error() != expectedErr.Error() {
t.Errorf("TestTxSerializeErrors: expected error %v but got %v", expectedErr, err)
}
nativeTx.Gas = 0
nativeTx.Payload = []byte{1, 2, 3}
nativeTx.PayloadHash = daghash.DoubleHashP(nativeTx.Payload)
w = bytes.NewBuffer(make([]byte, 0, nativeTx.SerializeSize()))
err = nativeTx.Serialize(w)
str = "Transactions from native subnetwork should have <nil> payload"
expectedErr = messageError("MsgTx.KaspaEncode", str)
if err == nil || err.Error() != expectedErr.Error() {
t.Errorf("TestTxSerializeErrors: expected error %v but got %v", expectedErr, err)
}
}
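// newFixedWriter and newFixedReader are test helpers defined elsewhere in
// this package; they are not shown in this file. A minimal sketch of the
// shape such helpers typically take (an illustrative assumption, not this
// package's exact code; assumes "bytes" and "io" are imported):
type fixedWriterSketch struct {
	b   []byte // backing buffer of fixed size
	pos int    // number of bytes written so far
}

func (w *fixedWriterSketch) Write(p []byte) (int, error) {
	n := copy(w.b[w.pos:], p)
	w.pos += n
	if n < len(p) {
		// The buffer filled up mid-write: exactly the io.ErrShortWrite
		// the serialize cases above force at each field boundary.
		return n, io.ErrShortWrite
	}
	return n, nil
}

func newFixedWriterSketch(max int) io.Writer {
	return &fixedWriterSketch{b: make([]byte, max)}
}

// newFixedReaderSketch exposes only the first max bytes of buf, so the
// decoder hits io.EOF at the chosen offset.
func newFixedReaderSketch(max int, buf []byte) io.Reader {
	if max > len(buf) {
		max = len(buf)
	}
	return bytes.NewReader(buf[:max])
}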
// TestTxOverflowErrors performs tests to ensure that transactions which are
// intentionally crafted to claim large values for the variable number of
// inputs and outputs are handled properly during deserialization. This could
// otherwise potentially be used as an attack vector.
func TestTxOverflowErrors(t *testing.T) {
pver := ProtocolVersion
txVer := uint32(1)
tests := []struct {
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
version uint32 // Transaction version
err error // Expected error
}{
// Transaction that claims to have ~uint64(0) inputs.
{
[]byte{
0x00, 0x00, 0x00, 0x01, // Version
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, // Varint for number of input transactions
}, pver, txVer, &MessageError{},
},
// Transaction that claims to have ~uint64(0) outputs.
{
[]byte{
0x00, 0x00, 0x00, 0x01, // Version
0x00, // Varint for number of input transactions
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, // Varint for number of output transactions
}, pver, txVer, &MessageError{},
},
// Transaction that has an input with a signature script that
// claims to have ~uint64(0) length.
{
[]byte{
0x00, 0x00, 0x00, 0x01, // Version
0x01, // Varint for number of input transactions
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash
0xff, 0xff, 0xff, 0xff, // Previous output index
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, // Varint for length of signature script
}, pver, txVer, &MessageError{},
},
// Transaction that has an output with a public key script
// that claims to have ~uint64(0) length.
{
[]byte{
0x00, 0x00, 0x00, 0x01, // Version
0x01, // Varint for number of input transactions
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash
0xff, 0xff, 0xff, 0xff, // Previous output index
0x00, // Varint for length of signature script
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // Sequence
0x01, // Varint for number of output transactions
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Transaction amount
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, // Varint for length of public key script
}, pver, txVer, &MessageError{},
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Decode from appmessage format.
var msg MsgTx
r := bytes.NewReader(test.buf)
err := msg.KaspaDecode(r, test.pver)
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
t.Errorf("KaspaDecode #%d wrong error got: %v, want: %v",
i, err, reflect.TypeOf(test.err))
continue
}
// Deserialize the transaction.
r = bytes.NewReader(test.buf)
err = msg.Deserialize(r)
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
t.Errorf("Deserialize #%d wrong error got: %v, want: %v",
i, err, reflect.TypeOf(test.err))
continue
}
}
}
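// The defense these cases exercise is that the decoder validates a claimed
// count against an upper bound *before* allocating, so a 9-byte message
// cannot force a multi-gigabyte allocation. A hedged sketch of that guard
// (the bound, its value, and the ReadVarInt helper are assumptions here; a
// real bound derives from the maximum message size divided by the minimum
// serialized input size):
const maxTxInPerMessageSketch = 1 << 17

func readTxInCountSketch(r io.Reader) (uint64, error) {
	count, err := ReadVarInt(r)
	if err != nil {
		return 0, err
	}
	if count > maxTxInPerMessageSketch {
		str := fmt.Sprintf("too many input transactions to fit into max message size "+
			"[count %d, max %d]", count, maxTxInPerMessageSketch)
		return 0, messageError("MsgTx.KaspaDecode", str)
	}
	return count, nil
}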
// TestTxSerializeSize performs tests to ensure the serialize size for
// various transactions is accurate.
func TestTxSerializeSize(t *testing.T) {
// Empty tx message.
noTx := NewNativeMsgTx(1, nil, nil)
tests := []struct {
in *MsgTx // Tx to encode
size int // Expected serialized size
}{
// No inputs or outputs.
{noTx, 34},
// Transaction with an input and two outputs.
{multiTx, 238},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
serializedSize := test.in.SerializeSize()
if serializedSize != test.size {
t.Errorf("MsgTx.SerializeSize: #%d got: %d, want: %d", i,
serializedSize, test.size)
continue
}
}
}
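// The expected sizes above decompose against the encoding shown in
// multiTxEncoded below: the 34-byte empty transaction is 4 (version) +
// 1 (input-count varint) + 1 (output-count varint) + 8 (lock time) +
// 20 (subnetwork ID). multiTx adds one input of 32+4 (outpoint) + 1+7
// (signature-script varint and bytes) + 8 (sequence) = 52, and two outputs
// of 8 (value) + 1+67 (scriptPubKey varint and bytes) = 76 each, giving
// 34 + 52 + 2*76 = 238.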
func TestIsSubnetworkCompatible(t *testing.T) {
testTx := NewSubnetworkMsgTx(1, nil, nil, &subnetworkid.SubnetworkID{123}, 0, []byte{})
tests := []struct {
name string
subnetworkID *subnetworkid.SubnetworkID
expectedResult bool
}{
{
name: "Native subnetwork",
subnetworkID: subnetworkid.SubnetworkIDNative,
expectedResult: true,
},
{
name: "same subnetwork as test tx",
subnetworkID: &subnetworkid.SubnetworkID{123},
expectedResult: true,
},
{
name: "other subnetwork",
subnetworkID: &subnetworkid.SubnetworkID{234},
expectedResult: false,
},
}
for _, test := range tests {
result := testTx.IsSubnetworkCompatible(test.subnetworkID)
if result != test.expectedResult {
t.Errorf("IsSubnetworkCompatible got unexpected result in test '%s': "+
"expected: %t, want: %t", test.name, test.expectedResult, result)
}
}
}
func TestScriptFreeList(t *testing.T) {
var list scriptFreeList = make(chan []byte, freeListMaxItems)
expectedCapacity := 512
expectedLengthFirst := 12
expectedLengthSecond := 13
first := list.Borrow(uint64(expectedLengthFirst))
if cap(first) != expectedCapacity {
t.Errorf("MsgTx.TestScriptFreeList: Expected capacity for first %d, but got %d",
expectedCapacity, cap(first))
}
if len(first) != expectedLengthFirst {
t.Errorf("MsgTx.TestScriptFreeList: Expected length for first %d, but got %d",
expectedLengthFirst, len(first))
}
list.Return(first)
// Borrow again, and check that the underlying array is re-used for second
second := list.Borrow(uint64(expectedLengthSecond))
if cap(second) != expectedCapacity {
t.Errorf("MsgTx.TestScriptFreeList: Expected capacity for second %d, but got %d",
expectedCapacity, cap(second))
}
if len(second) != expectedLengthSecond {
t.Errorf("MsgTx.TestScriptFreeList: Expected length for second %d, but got %d",
expectedLengthSecond, len(second))
}
firstArrayAddress := underlyingArrayAddress(first)
secondArrayAddress := underlyingArrayAddress(second)
if firstArrayAddress != secondArrayAddress {
t.Errorf("First underlying array is at address %d and second at address %d, "+
"which means memory was not re-used", firstArrayAddress, secondArrayAddress)
}
list.Return(second)
// test for buffers bigger than freeListMaxScriptSize
expectedCapacityBig := freeListMaxScriptSize + 1
expectedLengthBig := expectedCapacityBig
big := list.Borrow(uint64(expectedCapacityBig))
if cap(big) != expectedCapacityBig {
t.Errorf("MsgTx.TestScriptFreeList: Expected capacity for second %d, but got %d",
expectedCapacityBig, cap(big))
}
if len(big) != expectedLengthBig {
t.Errorf("MsgTx.TestScriptFreeList: Expected length for second %d, but got %d",
expectedLengthBig, len(big))
}
list.Return(big)
// test there's no crash when the channel is full because too many buffers were borrowed
buffers := make([][]byte, freeListMaxItems+1)
for i := 0; i < freeListMaxItems+1; i++ {
buffers[i] = list.Borrow(1)
}
for i := 0; i < freeListMaxItems+1; i++ {
list.Return(buffers[i])
}
}
func underlyingArrayAddress(buf []byte) uint64 {
return uint64((*reflect.SliceHeader)(unsafe.Pointer(&buf)).Data)
}
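// scriptFreeList, freeListMaxItems and freeListMaxScriptSize are defined in
// the package's non-test code. A minimal sketch consistent with the behavior
// the test above checks (assumed shape and values, not the exact
// implementation): a buffered channel serves as the pool, and neither
// Borrow nor Return is ever allowed to block.
const (
	freeListMaxScriptSizeSketch = 512   // capacity of each pooled buffer
	freeListMaxItemsSketch      = 12500 // upper bound on pooled buffers
)

type scriptFreeListSketch chan []byte

func (c scriptFreeListSketch) Borrow(size uint64) []byte {
	if size > freeListMaxScriptSizeSketch {
		return make([]byte, size) // oversized requests bypass the pool
	}
	var buf []byte
	select {
	case buf = <-c: // reuse a pooled buffer when one is available
	default:
		buf = make([]byte, freeListMaxScriptSizeSketch)
	}
	return buf[:size]
}

func (c scriptFreeListSketch) Return(buf []byte) {
	if cap(buf) != freeListMaxScriptSizeSketch {
		return // oversized buffers are left to the garbage collector
	}
	select {
	case c <- buf:
	default: // pool already full: drop the buffer rather than block
	}
}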
// multiTx is a MsgTx with an input and two outputs and is used in various tests.
var multiTxIns = []*TxIn{
{
PreviousOutpoint: Outpoint{
TxID: daghash.TxID{},
Index: 0xffffffff,
},
SignatureScript: []byte{
0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62,
},
Sequence: math.MaxUint64,
},
}
var multiTxOuts = []*TxOut{
{
Value: 0x12a05f200,
ScriptPubKey: []byte{
0x41, // OP_DATA_65
0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1,
0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24,
0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97,
0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78,
0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20,
0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63,
0xa6, // 65-byte signature
0xac, // OP_CHECKSIG
},
},
{
Value: 0x5f5e100,
ScriptPubKey: []byte{
0x41, // OP_DATA_65
0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1,
0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24,
0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97,
0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78,
0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20,
0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63,
0xa6, // 65-byte signature
0xac, // OP_CHECKSIG
},
},
}
var multiTx = NewNativeMsgTx(1, multiTxIns, multiTxOuts)
// multiTxEncoded is the appmessage encoded bytes for multiTx using protocol version
// 60002 and is used in the various tests.
var multiTxEncoded = []byte{
0x01, 0x00, 0x00, 0x00, // Version
0x01, // Varint for number of input transactions
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash
0xff, 0xff, 0xff, 0xff, // Previous output index
0x07, // Varint for length of signature script
0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62, // Signature script
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // Sequence
0x02, // Varint for number of output transactions
0x00, 0xf2, 0x05, 0x2a, 0x01, 0x00, 0x00, 0x00, // Transaction amount
0x43, // Varint for length of scriptPubKey
0x41, // OP_DATA_65
0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1,
0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24,
0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97,
0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78,
0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20,
0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63,
0xa6, // 65-byte signature
0xac, // OP_CHECKSIG
0x00, 0xe1, 0xf5, 0x05, 0x00, 0x00, 0x00, 0x00, // Transaction amount
0x43, // Varint for length of scriptPubKey
0x41, // OP_DATA_65
0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1,
0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24,
0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97,
0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78,
0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20,
0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63,
0xa6, // 65-byte signature
0xac, // OP_CHECKSIG
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Lock time
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, // Sub Network ID
}
// multiTxScriptPubKeyLocs is the location information for the public key scripts
// located in multiTx.
var multiTxScriptPubKeyLocs = []int{67, 143}
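// Both offsets follow from the multiTxEncoded layout above: 4 (version) +
// 1 (input-count varint) + 32+4 (previous outpoint) + 1+7 (signature
// script) + 8 (sequence) + 1 (output-count varint) + 8 (value) +
// 1 (length varint) = 67 for the first script, and 67 + 67 (first script)
// + 8 (second value) + 1 (length varint) = 143 for the second. The max
// offsets TestTxSerializeErrors steps through (4, 5, 37, 41, 42, 49, 57,
// 58, 66, 67, 210) walk the same layout field by field.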

View File

@@ -6,14 +6,13 @@ package appmessage
import (
"fmt"
"github.com/kaspanet/kaspad/version"
"strings"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/version"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/subnetworkid"
)
// MaxUserAgentLen is the maximum allowed length for the user agent field in a
@@ -54,14 +53,11 @@ type MsgVersion struct {
// on the appmessage. This has a max length of MaxUserAgentLen.
UserAgent string
// Don't announce transactions to peer.
DisableRelayTx bool
// The subnetwork of the generator of the version message. Should be nil in full nodes
SubnetworkID *externalapi.DomainSubnetworkID
}
// HasService returns whether the specified service is supported by the peer
@@ -86,7 +82,7 @@ func (msg *MsgVersion) Command() MessageCommand {
// Message interface using the passed parameters and defaults for the remaining
// fields.
func NewMsgVersion(addr *NetAddress, id *id.ID, network string,
subnetworkID *externalapi.DomainSubnetworkID) *MsgVersion {
// Limit the timestamp to one millisecond precision since the protocol
// doesn't support better.
@@ -98,7 +94,6 @@ func NewMsgVersion(addr *NetAddress, id *id.ID, network string,
Address: addr,
ID: id,
UserAgent: DefaultUserAgent,
DisableRelayTx: false,
SubnetworkID: subnetworkID,
}

View File

@@ -5,12 +5,12 @@
package appmessage
import (
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
"github.com/kaspanet/kaspad/util/daghash"
"net"
"reflect"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
)
// TestVersion tests the MsgVersion API.
@@ -18,7 +18,6 @@ func TestVersion(t *testing.T) {
pver := ProtocolVersion
// Create version message data.
selectedTipHash := &daghash.Hash{12, 34}
tcpAddrMe := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 16111}
me := NewNetAddress(tcpAddrMe, SFNodeNetwork)
generatedID, err := id.GenerateID()
@@ -27,7 +26,7 @@ func TestVersion(t *testing.T) {
}
// Ensure we get the correct data back out.
msg := NewMsgVersion(me, generatedID, "mainnet", selectedTipHash, nil)
msg := NewMsgVersion(me, generatedID, "mainnet", nil)
if msg.ProtocolVersion != pver {
t.Errorf("NewMsgVersion: wrong protocol version - got %v, want %v",
msg.ProtocolVersion, pver)
@@ -44,10 +43,6 @@ func TestVersion(t *testing.T) {
t.Errorf("NewMsgVersion: wrong user agent - got %v, want %v",
msg.UserAgent, DefaultUserAgent)
}
if msg.DisableRelayTx {
t.Errorf("NewMsgVersion: disable relay tx is not false by "+
"default - got %v, want %v", msg.DisableRelayTx, false)

View File

@@ -0,0 +1,22 @@
package appmessage
// MsgRequestIBDRootHashMessage implements the Message interface and represents a kaspa
// RequestIBDRootHash message. It is used to request the IBD root hash
// from a peer during IBD.
//
// This message has no payload.
type MsgRequestIBDRootHashMessage struct {
baseMessage
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestIBDRootHashMessage) Command() MessageCommand {
return CmdRequestIBDRootHash
}
// NewMsgRequestIBDRootHashMessage returns a new kaspa RequestIBDRootHash message that conforms to the
// Message interface.
func NewMsgRequestIBDRootHashMessage() *MsgRequestIBDRootHashMessage {
return &MsgRequestIBDRootHashMessage{}
}

View File

@@ -6,8 +6,6 @@ type GetBlockRequestMessage struct {
baseMessage
Hash string
SubnetworkID string
IncludeTransactionVerboseData bool
}
@@ -17,13 +15,10 @@ func (msg *GetBlockRequestMessage) Command() MessageCommand {
}
// NewGetBlockRequestMessage returns an instance of the message
func NewGetBlockRequestMessage(hash string, subnetworkID string, includeTransactionVerboseData bool) *GetBlockRequestMessage {
return &GetBlockRequestMessage{
Hash: hash,
SubnetworkID: subnetworkID,
IncludeTransactionVerboseData: includeTransactionVerboseData,
}
}
@@ -32,7 +27,6 @@ func NewGetBlockRequestMessage(hash string, subnetworkID string, includeBlockHex
// its respective RPC message
type GetBlockResponseMessage struct {
baseMessage
BlockVerboseData *BlockVerboseData
Error *RPCError
@@ -51,11 +45,7 @@ func NewGetBlockResponseMessage() *GetBlockResponseMessage {
// BlockVerboseData holds verbose data about a block
type BlockVerboseData struct {
Hash string
Version uint16
VersionHex string
HashMerkleRoot string
AcceptedIDMerkleRoot string
@@ -68,17 +58,16 @@ type BlockVerboseData struct {
Difficulty float64
ParentHashes []string
SelectedParentHash string
BlueScore uint64
IsHeaderOnly bool
}
// TransactionVerboseData holds verbose data about a transaction
type TransactionVerboseData struct {
TxID string
Hash string
Size uint64
Version uint16
LockTime uint64
SubnetworkID string
Gas uint64
@@ -87,8 +76,6 @@ type TransactionVerboseData struct {
TransactionVerboseInputs []*TransactionVerboseInput
TransactionVerboseOutputs []*TransactionVerboseOutput
BlockHash string
Time uint64
BlockTime uint64
}
@@ -116,7 +103,6 @@ type TransactionVerboseOutput struct {
// ScriptPubKeyResult holds data about a script public key
type ScriptPubKeyResult struct {
Asm string
Type string
Address string

View File

@@ -1,5 +1,7 @@
package appmessage
import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// GetBlockCountRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetBlockCountRequestMessage struct {
@@ -20,7 +22,8 @@ func NewGetBlockCountRequestMessage() *GetBlockCountRequestMessage {
// its respective RPC message
type GetBlockCountResponseMessage struct {
baseMessage
BlockCount uint64
HeaderCount uint64
Error *RPCError
}
@@ -31,8 +34,9 @@ func (msg *GetBlockCountResponseMessage) Command() MessageCommand {
}
// NewGetBlockCountResponseMessage returns an instance of the message
func NewGetBlockCountResponseMessage(syncInfo *externalapi.SyncInfo) *GetBlockCountResponseMessage {
return &GetBlockCountResponseMessage{
BlockCount: syncInfo.BlockCount,
HeaderCount: syncInfo.HeaderCount,
}
}

View File

@@ -22,6 +22,7 @@ type GetBlockDAGInfoResponseMessage struct {
baseMessage
NetworkName string
BlockCount uint64
HeaderCount uint64
TipHashes []string
VirtualParentHashes []string
Difficulty float64

View File

@@ -5,7 +5,6 @@ package appmessage
type GetBlockTemplateRequestMessage struct {
baseMessage
PayAddress string
}
// Command returns the protocol command string for the message
@@ -14,10 +13,9 @@ func (msg *GetBlockTemplateRequestMessage) Command() MessageCommand {
}
// NewGetBlockTemplateRequestMessage returns an instance of the message
func NewGetBlockTemplateRequestMessage(payAddress string) *GetBlockTemplateRequestMessage {
return &GetBlockTemplateRequestMessage{
PayAddress: payAddress,
}
}
@@ -25,23 +23,8 @@ func NewGetBlockTemplateRequestMessage(payAddress string, longPollID string) *Ge
// its respective RPC message
type GetBlockTemplateResponseMessage struct {
baseMessage
MsgBlock *MsgBlock
IsSynced bool
Error *RPCError
}
@@ -52,27 +35,9 @@ func (msg *GetBlockTemplateResponseMessage) Command() MessageCommand {
}
// NewGetBlockTemplateResponseMessage returns an instance of the message
func NewGetBlockTemplateResponseMessage(msgBlock *MsgBlock, isSynced bool) *GetBlockTemplateResponseMessage {
return &GetBlockTemplateResponseMessage{
MsgBlock: msgBlock,
IsSynced: isSynced,
}
}

View File

@@ -1,49 +0,0 @@
package appmessage
// GetChainFromBlockRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetChainFromBlockRequestMessage struct {
baseMessage
StartHash string
IncludeBlockVerboseData bool
}
// Command returns the protocol command string for the message
func (msg *GetChainFromBlockRequestMessage) Command() MessageCommand {
return CmdGetChainFromBlockRequestMessage
}
// NewGetChainFromBlockRequestMessage returns an instance of the message
func NewGetChainFromBlockRequestMessage(startHash string, includeBlockVerboseData bool) *GetChainFromBlockRequestMessage {
return &GetChainFromBlockRequestMessage{
StartHash: startHash,
IncludeBlockVerboseData: includeBlockVerboseData,
}
}
// GetChainFromBlockResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetChainFromBlockResponseMessage struct {
baseMessage
RemovedChainBlockHashes []string
AddedChainBlocks []*ChainBlock
BlockVerboseData []*BlockVerboseData
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *GetChainFromBlockResponseMessage) Command() MessageCommand {
return CmdGetChainFromBlockResponseMessage
}
// NewGetChainFromBlockResponseMessage returns an instance of the message
func NewGetChainFromBlockResponseMessage(removedChainBlockHashes []string,
addedChainBlocks []*ChainBlock, blockVerboseData []*BlockVerboseData) *GetChainFromBlockResponseMessage {
return &GetChainFromBlockResponseMessage{
RemovedChainBlockHashes: removedChainBlockHashes,
AddedChainBlocks: addedChainBlocks,
BlockVerboseData: blockVerboseData,
}
}

View File

@@ -41,11 +41,10 @@ type GetConnectedPeerInfoMessage struct {
ID string
Address string
LastPingDuration int64
IsOutbound bool
TimeOffset int64
UserAgent string
AdvertisedProtocolVersion uint32
TimeConnected int64
IsIBDPeer bool
}

View File

@@ -0,0 +1,45 @@
package appmessage
// GetHeadersRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetHeadersRequestMessage struct {
baseMessage
StartHash string
Limit uint64
IsAscending bool
}
// Command returns the protocol command string for the message
func (msg *GetHeadersRequestMessage) Command() MessageCommand {
return CmdGetHeadersRequestMessage
}
// NewGetHeadersRequestMessage returns an instance of the message
func NewGetHeadersRequestMessage(startHash string, limit uint64, isAscending bool) *GetHeadersRequestMessage {
return &GetHeadersRequestMessage{
StartHash: startHash,
Limit: limit,
IsAscending: isAscending,
}
}
// GetHeadersResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetHeadersResponseMessage struct {
baseMessage
Headers []string
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *GetHeadersResponseMessage) Command() MessageCommand {
return CmdGetHeadersResponseMessage
}
// NewGetHeadersResponseMessage returns an instance of the message
func NewGetHeadersResponseMessage(headers []string) *GetHeadersResponseMessage {
return &GetHeadersResponseMessage{
Headers: headers,
}
}

View File

@@ -20,7 +20,8 @@ func NewGetPeerAddressesRequestMessage() *GetPeerAddressesRequestMessage {
// its respective RPC message
type GetPeerAddressesResponseMessage struct {
baseMessage
Addresses []*GetPeerAddressesKnownAddressMessage
BannedAddresses []*GetPeerAddressesKnownAddressMessage
Error *RPCError
}
@@ -31,9 +32,10 @@ func (msg *GetPeerAddressesResponseMessage) Command() MessageCommand {
}
// NewGetPeerAddressesResponseMessage returns an instance of the message
func NewGetPeerAddressesResponseMessage(addresses []*GetPeerAddressesKnownAddressMessage, bannedAddresses []*GetPeerAddressesKnownAddressMessage) *GetPeerAddressesResponseMessage {
return &GetPeerAddressesResponseMessage{
Addresses: addresses,
BannedAddresses: bannedAddresses,
}
}

View File

@@ -0,0 +1,41 @@
package appmessage
// GetUTXOsByAddressesRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetUTXOsByAddressesRequestMessage struct {
baseMessage
Addresses []string
}
// Command returns the protocol command string for the message
func (msg *GetUTXOsByAddressesRequestMessage) Command() MessageCommand {
return CmdGetUTXOsByAddressesRequestMessage
}
// NewGetUTXOsByAddressesRequestMessage returns an instance of the message
func NewGetUTXOsByAddressesRequestMessage(addresses []string) *GetUTXOsByAddressesRequestMessage {
return &GetUTXOsByAddressesRequestMessage{
Addresses: addresses,
}
}
// GetUTXOsByAddressesResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetUTXOsByAddressesResponseMessage struct {
baseMessage
Entries []*UTXOsByAddressesEntry
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *GetUTXOsByAddressesResponseMessage) Command() MessageCommand {
return CmdGetUTXOsByAddressesResponseMessage
}
// NewGetUTXOsByAddressesResponseMessage returns an instance of the message
func NewGetUTXOsByAddressesResponseMessage(entries []*UTXOsByAddressesEntry) *GetUTXOsByAddressesResponseMessage {
return &GetUTXOsByAddressesResponseMessage{
Entries: entries,
}
}

View File

@@ -0,0 +1,38 @@
package appmessage
// GetVirtualSelectedParentBlueScoreRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetVirtualSelectedParentBlueScoreRequestMessage struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *GetVirtualSelectedParentBlueScoreRequestMessage) Command() MessageCommand {
return CmdGetVirtualSelectedParentBlueScoreRequestMessage
}
// NewGetVirtualSelectedParentBlueScoreRequestMessage returns an instance of the message
func NewGetVirtualSelectedParentBlueScoreRequestMessage() *GetVirtualSelectedParentBlueScoreRequestMessage {
return &GetVirtualSelectedParentBlueScoreRequestMessage{}
}
// GetVirtualSelectedParentBlueScoreResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetVirtualSelectedParentBlueScoreResponseMessage struct {
baseMessage
BlueScore uint64
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *GetVirtualSelectedParentBlueScoreResponseMessage) Command() MessageCommand {
return CmdGetVirtualSelectedParentBlueScoreResponseMessage
}
// NewGetVirtualSelectedParentBlueScoreResponseMessage returns an instance of the message
func NewGetVirtualSelectedParentBlueScoreResponseMessage(blueScore uint64) *GetVirtualSelectedParentBlueScoreResponseMessage {
return &GetVirtualSelectedParentBlueScoreResponseMessage{
BlueScore: blueScore,
}
}

View File

@@ -0,0 +1,45 @@
package appmessage
// GetVirtualSelectedParentChainFromBlockRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetVirtualSelectedParentChainFromBlockRequestMessage struct {
baseMessage
StartHash string
}
// Command returns the protocol command string for the message
func (msg *GetVirtualSelectedParentChainFromBlockRequestMessage) Command() MessageCommand {
return CmdGetVirtualSelectedParentChainFromBlockRequestMessage
}
// NewGetVirtualSelectedParentChainFromBlockRequestMessage returns an instance of the message
func NewGetVirtualSelectedParentChainFromBlockRequestMessage(startHash string) *GetVirtualSelectedParentChainFromBlockRequestMessage {
return &GetVirtualSelectedParentChainFromBlockRequestMessage{
StartHash: startHash,
}
}
// GetVirtualSelectedParentChainFromBlockResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetVirtualSelectedParentChainFromBlockResponseMessage struct {
baseMessage
RemovedChainBlockHashes []string
AddedChainBlocks []*ChainBlock
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *GetVirtualSelectedParentChainFromBlockResponseMessage) Command() MessageCommand {
return CmdGetVirtualSelectedParentChainFromBlockResponseMessage
}
// NewGetVirtualSelectedParentChainFromBlockResponseMessage returns an instance of the message
func NewGetVirtualSelectedParentChainFromBlockResponseMessage(removedChainBlockHashes []string,
addedChainBlocks []*ChainBlock) *GetVirtualSelectedParentChainFromBlockResponseMessage {
return &GetVirtualSelectedParentChainFromBlockResponseMessage{
RemovedChainBlockHashes: removedChainBlockHashes,
AddedChainBlocks: addedChainBlocks,
}
}

View File

@@ -1,69 +0,0 @@
package appmessage
// NotifyChainChangedRequestMessage is an appmessage corresponding to
// its respective RPC message
type NotifyChainChangedRequestMessage struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *NotifyChainChangedRequestMessage) Command() MessageCommand {
return CmdNotifyChainChangedRequestMessage
}
// NewNotifyChainChangedRequestMessage returns an instance of the message
func NewNotifyChainChangedRequestMessage() *NotifyChainChangedRequestMessage {
return &NotifyChainChangedRequestMessage{}
}
// NotifyChainChangedResponseMessage is an appmessage corresponding to
// its respective RPC message
type NotifyChainChangedResponseMessage struct {
baseMessage
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *NotifyChainChangedResponseMessage) Command() MessageCommand {
return CmdNotifyChainChangedResponseMessage
}
// NewNotifyChainChangedResponseMessage returns an instance of the message
func NewNotifyChainChangedResponseMessage() *NotifyChainChangedResponseMessage {
return &NotifyChainChangedResponseMessage{}
}
// ChainChangedNotificationMessage is an appmessage corresponding to
// its respective RPC message
type ChainChangedNotificationMessage struct {
baseMessage
RemovedChainBlockHashes []string
AddedChainBlocks []*ChainBlock
}
// ChainBlock represents a DAG chain-block
type ChainBlock struct {
Hash string
AcceptedBlocks []*AcceptedBlock
}
// AcceptedBlock represents a block accepted into the DAG
type AcceptedBlock struct {
Hash string
AcceptedTxIDs []string
}
// Command returns the protocol command string for the message
func (msg *ChainChangedNotificationMessage) Command() MessageCommand {
return CmdChainChangedNotificationMessage
}
// NewChainChangedNotificationMessage returns an instance of the message
func NewChainChangedNotificationMessage(removedChainBlockHashes []string,
addedChainBlocks []*ChainBlock) *ChainChangedNotificationMessage {
return &ChainChangedNotificationMessage{
RemovedChainBlockHashes: removedChainBlockHashes,
AddedChainBlocks: addedChainBlocks,
}
}

View File

@@ -0,0 +1,62 @@
package appmessage
// NotifyUTXOsChangedRequestMessage is an appmessage corresponding to
// its respective RPC message
type NotifyUTXOsChangedRequestMessage struct {
baseMessage
Addresses []string
}
// Command returns the protocol command string for the message
func (msg *NotifyUTXOsChangedRequestMessage) Command() MessageCommand {
return CmdNotifyUTXOsChangedRequestMessage
}
// NewNotifyUTXOsChangedRequestMessage returns an instance of the message
func NewNotifyUTXOsChangedRequestMessage(addresses []string) *NotifyUTXOsChangedRequestMessage {
return &NotifyUTXOsChangedRequestMessage{
Addresses: addresses,
}
}
// NotifyUTXOsChangedResponseMessage is an appmessage corresponding to
// its respective RPC message
type NotifyUTXOsChangedResponseMessage struct {
baseMessage
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *NotifyUTXOsChangedResponseMessage) Command() MessageCommand {
return CmdNotifyUTXOsChangedResponseMessage
}
// NewNotifyUTXOsChangedResponseMessage returns a instance of the message
func NewNotifyUTXOsChangedResponseMessage() *NotifyUTXOsChangedResponseMessage {
return &NotifyUTXOsChangedResponseMessage{}
}
// UTXOsChangedNotificationMessage is an appmessage corresponding to
// its respective RPC message
type UTXOsChangedNotificationMessage struct {
baseMessage
Added []*UTXOsByAddressesEntry
Removed []*UTXOsByAddressesEntry
}
// UTXOsByAddressesEntry represents a UTXO of some address
type UTXOsByAddressesEntry struct {
Address string
Outpoint *RPCOutpoint
UTXOEntry *RPCUTXOEntry
}
// Command returns the protocol command string for the message
func (msg *UTXOsChangedNotificationMessage) Command() MessageCommand {
return CmdUTXOsChangedNotificationMessage
}
// NewUTXOsChangedNotificationMessage returns an instance of the message
func NewUTXOsChangedNotificationMessage() *UTXOsChangedNotificationMessage {
return &UTXOsChangedNotificationMessage{}
}

View File

@@ -0,0 +1,55 @@
package appmessage
// NotifyVirtualSelectedParentBlueScoreChangedRequestMessage is an appmessage corresponding to
// its respective RPC message
type NotifyVirtualSelectedParentBlueScoreChangedRequestMessage struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *NotifyVirtualSelectedParentBlueScoreChangedRequestMessage) Command() MessageCommand {
return CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage
}
// NewNotifyVirtualSelectedParentBlueScoreChangedRequestMessage returns an instance of the message
func NewNotifyVirtualSelectedParentBlueScoreChangedRequestMessage() *NotifyVirtualSelectedParentBlueScoreChangedRequestMessage {
return &NotifyVirtualSelectedParentBlueScoreChangedRequestMessage{}
}
// NotifyVirtualSelectedParentBlueScoreChangedResponseMessage is an appmessage corresponding to
// its respective RPC message
type NotifyVirtualSelectedParentBlueScoreChangedResponseMessage struct {
baseMessage
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *NotifyVirtualSelectedParentBlueScoreChangedResponseMessage) Command() MessageCommand {
return CmdNotifyVirtualSelectedParentBlueScoreChangedResponseMessage
}
// NewNotifyVirtualSelectedParentBlueScoreChangedResponseMessage returns an instance of the message
func NewNotifyVirtualSelectedParentBlueScoreChangedResponseMessage() *NotifyVirtualSelectedParentBlueScoreChangedResponseMessage {
return &NotifyVirtualSelectedParentBlueScoreChangedResponseMessage{}
}
// VirtualSelectedParentBlueScoreChangedNotificationMessage is an appmessage corresponding to
// its respective RPC message
type VirtualSelectedParentBlueScoreChangedNotificationMessage struct {
baseMessage
VirtualSelectedParentBlueScore uint64
}
// Command returns the protocol command string for the message
func (msg *VirtualSelectedParentBlueScoreChangedNotificationMessage) Command() MessageCommand {
return CmdVirtualSelectedParentBlueScoreChangedNotificationMessage
}
// NewVirtualSelectedParentBlueScoreChangedNotificationMessage returns an instance of the message
func NewVirtualSelectedParentBlueScoreChangedNotificationMessage(
virtualSelectedParentBlueScore uint64) *VirtualSelectedParentBlueScoreChangedNotificationMessage {
return &VirtualSelectedParentBlueScoreChangedNotificationMessage{
VirtualSelectedParentBlueScore: virtualSelectedParentBlueScore,
}
}

View File

@@ -0,0 +1,69 @@
package appmessage
// NotifyVirtualSelectedParentChainChangedRequestMessage is an appmessage corresponding to
// its respective RPC message
type NotifyVirtualSelectedParentChainChangedRequestMessage struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *NotifyVirtualSelectedParentChainChangedRequestMessage) Command() MessageCommand {
return CmdNotifyVirtualSelectedParentChainChangedRequestMessage
}
// NewNotifyVirtualSelectedParentChainChangedRequestMessage returns an instance of the message
func NewNotifyVirtualSelectedParentChainChangedRequestMessage() *NotifyVirtualSelectedParentChainChangedRequestMessage {
return &NotifyVirtualSelectedParentChainChangedRequestMessage{}
}
// NotifyVirtualSelectedParentChainChangedResponseMessage is an appmessage corresponding to
// its respective RPC message
type NotifyVirtualSelectedParentChainChangedResponseMessage struct {
baseMessage
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *NotifyVirtualSelectedParentChainChangedResponseMessage) Command() MessageCommand {
return CmdNotifyVirtualSelectedParentChainChangedResponseMessage
}
// NewNotifyVirtualSelectedParentChainChangedResponseMessage returns an instance of the message
func NewNotifyVirtualSelectedParentChainChangedResponseMessage() *NotifyVirtualSelectedParentChainChangedResponseMessage {
return &NotifyVirtualSelectedParentChainChangedResponseMessage{}
}
// VirtualSelectedParentChainChangedNotificationMessage is an appmessage corresponding to
// its respective RPC message
type VirtualSelectedParentChainChangedNotificationMessage struct {
baseMessage
RemovedChainBlockHashes []string
AddedChainBlocks []*ChainBlock
}
// ChainBlock represents a DAG chain-block
type ChainBlock struct {
Hash string
AcceptedBlocks []*AcceptedBlock
}
// AcceptedBlock represents a block accepted into the DAG
type AcceptedBlock struct {
Hash string
AcceptedTransactionIDs []string
}
// Command returns the protocol command string for the message
func (msg *VirtualSelectedParentChainChangedNotificationMessage) Command() MessageCommand {
return CmdVirtualSelectedParentChainChangedNotificationMessage
}
// NewVirtualSelectedParentChainChangedNotificationMessage returns an instance of the message
func NewVirtualSelectedParentChainChangedNotificationMessage(removedChainBlockHashes []string,
addedChainBlocks []*ChainBlock) *VirtualSelectedParentChainChangedNotificationMessage {
return &VirtualSelectedParentChainChangedNotificationMessage{
RemovedChainBlockHashes: removedChainBlockHashes,
AddedChainBlocks: addedChainBlocks,
}
}

View File

@@ -4,7 +4,7 @@ package appmessage
// its respective RPC message
type SubmitBlockRequestMessage struct {
baseMessage
Block *MsgBlock
}
// Command returns the protocol command string for the message
@@ -13,17 +13,39 @@ func (msg *SubmitBlockRequestMessage) Command() MessageCommand {
}
// NewSubmitBlockRequestMessage returns an instance of the message
func NewSubmitBlockRequestMessage(block *MsgBlock) *SubmitBlockRequestMessage {
return &SubmitBlockRequestMessage{
Block: block,
}
}
// RejectReason describes the reason why a block sent by SubmitBlock was rejected
type RejectReason byte
// RejectReason constants
// Not using iota, since in the .proto file those are hardcoded
const (
RejectReasonNone RejectReason = 0
RejectReasonBlockInvalid RejectReason = 1
RejectReasonIsInIBD RejectReason = 2
)
var rejectReasonToString = map[RejectReason]string{
RejectReasonNone: "None",
RejectReasonBlockInvalid: "Block is invalid",
RejectReasonIsInIBD: "Node is in IBD",
}
func (rr RejectReason) String() string {
return rejectReasonToString[rr]
}
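// Note that the map-based String returns the empty string for any value
// missing from rejectReasonToString. A hedged variant (not in the source;
// assumes "fmt" is imported) that keeps unknown values visible:
func (rr RejectReason) stringWithFallbackSketch() string {
	if s, ok := rejectReasonToString[rr]; ok {
		return s
	}
	return fmt.Sprintf("Unknown RejectReason (%d)", byte(rr))
}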
// SubmitBlockResponseMessage is an appmessage corresponding to
// its respective RPC message
type SubmitBlockResponseMessage struct {
baseMessage
RejectReason RejectReason
Error *RPCError
}
// Command returns the protocol command string for the message

View File

@@ -4,7 +4,7 @@ package appmessage
// its respective RPC message
type SubmitTransactionRequestMessage struct {
baseMessage
Transaction *RPCTransaction
}
// Command returns the protocol command string for the message
@@ -13,9 +13,9 @@ func (msg *SubmitTransactionRequestMessage) Command() MessageCommand {
}
// NewSubmitTransactionRequestMessage returns an instance of the message
func NewSubmitTransactionRequestMessage(transaction *RPCTransaction) *SubmitTransactionRequestMessage {
return &SubmitTransactionRequestMessage{
Transaction: transaction,
}
}
@@ -23,7 +23,7 @@ func NewSubmitTransactionRequestMessage(transactionHex string) *SubmitTransactio
// its respective RPC message
type SubmitTransactionResponseMessage struct {
baseMessage
TransactionID string
Error *RPCError
}
@@ -34,8 +34,58 @@ func (msg *SubmitTransactionResponseMessage) Command() MessageCommand {
}
// NewSubmitTransactionResponseMessage returns an instance of the message
func NewSubmitTransactionResponseMessage(transactionID string) *SubmitTransactionResponseMessage {
return &SubmitTransactionResponseMessage{
TransactionID: transactionID,
}
}
// RPCTransaction is a kaspad transaction representation meant to be
// used over RPC
type RPCTransaction struct {
Version uint16
Inputs []*RPCTransactionInput
Outputs []*RPCTransactionOutput
LockTime uint64
SubnetworkID string
Gas uint64
PayloadHash string
Payload string
}
// RPCTransactionInput is a kaspad transaction input representation
// meant to be used over RPC
type RPCTransactionInput struct {
PreviousOutpoint *RPCOutpoint
SignatureScript string
Sequence uint64
}
// RPCScriptPublicKey is a kaspad ScriptPublicKey representation
type RPCScriptPublicKey struct {
Version uint16
Script string
}
// RPCTransactionOutput is a kaspad transaction output representation
// meant to be used over RPC
type RPCTransactionOutput struct {
Amount uint64
ScriptPublicKey *RPCScriptPublicKey
}
// RPCOutpoint is a kaspad outpoint representation meant to be used
// over RPC
type RPCOutpoint struct {
TransactionID string
Index uint32
}
// RPCUTXOEntry is a kaspad utxo entry representation meant to be used
// over RPC
type RPCUTXOEntry struct {
Amount uint64
ScriptPublicKey *RPCScriptPublicKey
BlockBlueScore uint64
IsCoinbase bool
}
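// As a usage sketch (illustrative placeholder values only, not a valid
// signed transaction), a caller assembles an RPCTransaction and wraps it
// in the request message:
func buildSubmitTransactionRequestSketch() *SubmitTransactionRequestMessage {
	tx := &RPCTransaction{
		Version: 0,
		Inputs: []*RPCTransactionInput{{
			PreviousOutpoint: &RPCOutpoint{
				TransactionID: "<hex transaction ID>", // placeholder
				Index:         0,
			},
			SignatureScript: "<hex signature script>", // placeholder
			Sequence:        0,
		}},
		Outputs: []*RPCTransactionOutput{{
			Amount: 10000,
			ScriptPublicKey: &RPCScriptPublicKey{
				Version: 0,
				Script:  "<hex script public key>", // placeholder
			},
		}},
		LockTime:     0,
		SubnetworkID: "0000000000000000000000000000000000000000", // native subnetwork
	}
	return NewSubmitTransactionRequestMessage(tx)
}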

View File

@@ -4,6 +4,12 @@ import (
"fmt"
"sync/atomic"
"github.com/kaspanet/kaspad/domain/utxoindex"
infrastructuredatabase "github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
@@ -11,13 +17,7 @@ import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol"
"github.com/kaspanet/kaspad/app/rpc"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/domain/blockdag/indexers"
"github.com/kaspanet/kaspad/domain/mempool"
"github.com/kaspanet/kaspad/domain/mining"
"github.com/kaspanet/kaspad/domain/txscript"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/db/dbaccess"
"github.com/kaspanet/kaspad/infrastructure/network/connmanager"
"github.com/kaspanet/kaspad/infrastructure/network/dnsseed"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
@@ -72,46 +72,44 @@ func (a *ComponentManager) Stop() {
log.Errorf("Error stopping the net adapter: %+v", err)
}
err = a.addressManager.Stop()
if err != nil {
log.Errorf("Error stopping address manager: %s", err)
}
return
}
// NewComponentManager returns a new ComponentManager instance.
// Use Start() to begin all services within this ComponentManager
func NewComponentManager(cfg *config.Config, db infrastructuredatabase.Database, interrupt chan<- struct{}) (
*ComponentManager, error) {
domain, err := domain.New(cfg.ActiveNetParams, db, cfg.IsArchivalNode)
if err != nil {
return nil, err
}
netAdapter, err := netadapter.NewNetAdapter(cfg)
if err != nil {
return nil, err
}
addressManager, err := addressmanager.New(addressmanager.NewConfig(cfg))
if err != nil {
return nil, err
}
var utxoIndex *utxoindex.UTXOIndex
if cfg.UTXOIndex {
utxoIndex = utxoindex.New(domain.Consensus(), db)
log.Infof("UTXO index started")
}
connectionManager, err := connmanager.New(cfg, netAdapter, addressManager)
if err != nil {
return nil, err
}
protocolManager, err := protocol.NewManager(cfg, domain, netAdapter, addressManager, connectionManager)
if err != nil {
return nil, err
}
rpcManager := setupRPC(cfg, domain, netAdapter, protocolManager, connectionManager, addressManager, utxoIndex, interrupt)
return &ComponentManager{
cfg: cfg,
@@ -126,60 +124,30 @@ func NewComponentManager(cfg *config.Config, databaseContext *dbaccess.DatabaseC
func setupRPC(
cfg *config.Config,
domain domain.Domain,
netAdapter *netadapter.NetAdapter,
protocolManager *protocol.Manager,
connectionManager *connmanager.ConnectionManager,
addressManager *addressmanager.AddressManager,
utxoIndex *utxoindex.UTXOIndex,
shutDownChan chan<- struct{},
) *rpc.Manager {
rpcManager := rpc.NewManager(
cfg,
domain,
netAdapter,
protocolManager,
connectionManager,
addressManager,
utxoIndex,
shutDownChan,
)
protocolManager.SetOnBlockAddedToDAGHandler(rpcManager.NotifyBlockAddedToDAG)
protocolManager.SetOnTransactionAddedToMempoolHandler(rpcManager.NotifyTransactionAddedToMempool)
return rpcManager
}
func handleBlockDAGNotifications(notification *blockdag.Notification,
acceptanceIndex *indexers.AcceptanceIndex, rpcManager *rpc.Manager) error {
switch notification.Type {
case blockdag.NTChainChanged:
if acceptanceIndex == nil {
return nil
}
chainChangedNotificationData := notification.Data.(*blockdag.ChainChangedNotificationData)
err := rpcManager.NotifyChainChanged(chainChangedNotificationData.RemovedChainBlockHashes,
chainChangedNotificationData.AddedChainBlockHashes)
if err != nil {
return err
}
case blockdag.NTFinalityConflict:
finalityConflictNotificationData := notification.Data.(*blockdag.FinalityConflictNotificationData)
err := rpcManager.NotifyFinalityConflict(finalityConflictNotificationData.ViolatingBlockHash.String())
if err != nil {
return err
}
case blockdag.NTFinalityConflictResolved:
finalityConflictResolvedNotificationData := notification.Data.(*blockdag.FinalityConflictResolvedNotificationData)
err := rpcManager.NotifyFinalityConflictResolved(finalityConflictResolvedNotificationData.FinalityBlockHash.String())
if err != nil {
return err
}
}
return nil
}
func (a *ComponentManager) maybeSeedFromDNS() {
if !a.cfg.DisableDNSSeed {
dnsseed.SeedFromDNS(a.cfg.NetParams(), a.cfg.DNSSeed, appmessage.SFNodeNetwork, false, nil,
@@ -187,68 +155,16 @@ func (a *ComponentManager) maybeSeedFromDNS() {
// Kaspad uses a lookup of the dns seeder here. Since the seeder returns
// IPs of nodes and not its own IP, we cannot know the real IP of the
// source. So we'll take the first returned address as the source.
a.addressManager.AddAddresses(addresses, addresses[0], nil)
a.addressManager.AddAddresses(addresses...)
})
}
if a.cfg.GRPCSeed != "" {
dnsseed.SeedFromGRPC(a.cfg.NetParams(), a.cfg.GRPCSeed, appmessage.SFNodeNetwork, false, nil,
func(addresses []*appmessage.NetAddress) {
a.addressManager.AddAddresses(addresses, addresses[0], nil)
a.addressManager.AddAddresses(addresses...)
})
}
}
func setupDAG(cfg *config.Config, databaseContext *dbaccess.DatabaseContext,
sigCache *txscript.SigCache, indexManager blockdag.IndexManager) (*blockdag.BlockDAG, error) {
dag, err := blockdag.New(&blockdag.Config{
DatabaseContext: databaseContext,
DAGParams: cfg.NetParams(),
TimeSource: blockdag.NewTimeSource(),
SigCache: sigCache,
IndexManager: indexManager,
SubnetworkID: cfg.SubnetworkID,
MaxUTXOCacheSize: cfg.MaxUTXOCacheSize,
})
return dag, err
}
func setupIndexes(cfg *config.Config) (blockdag.IndexManager, *indexers.AcceptanceIndex) {
// Create indexes if needed.
var indexes []indexers.Indexer
var acceptanceIndex *indexers.AcceptanceIndex
if cfg.AcceptanceIndex {
log.Info("acceptance index is enabled")
acceptanceIndex = indexers.NewAcceptanceIndex()
indexes = append(indexes, acceptanceIndex)
}
// Create an index manager if any of the optional indexes are enabled.
if len(indexes) == 0 {
return nil, nil
}
indexManager := indexers.NewManager(indexes)
return indexManager, acceptanceIndex
}
func setupMempool(cfg *config.Config, dag *blockdag.BlockDAG, sigCache *txscript.SigCache) *mempool.TxPool {
mempoolConfig := mempool.Config{
Policy: mempool.Policy{
AcceptNonStd: cfg.RelayNonStd,
MaxOrphanTxs: cfg.MaxOrphanTxs,
MaxOrphanTxSize: config.DefaultMaxOrphanTxSize,
MinRelayTxFee: cfg.MinRelayTxFee,
MaxTxVersion: 1,
},
CalcTxSequenceLockFromReferencedUTXOEntries: dag.CalcTxSequenceLockFromReferencedUTXOEntries,
SigCache: sigCache,
DAG: dag,
}
return mempool.New(&mempoolConfig)
}
// P2PNodeID returns the network ID associated with this ComponentManager
func (a *ComponentManager) P2PNodeID() *id.ID {
return a.netAdapter.ID()

View File

@@ -5,11 +5,11 @@
package blocklogger
import (
"github.com/kaspanet/kaspad/util/mstime"
"sync"
"time"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/util/mstime"
)
var (
@@ -22,17 +22,17 @@ var (
// LogBlock logs a new block as an information message
// to show progress to the user. In order to prevent spam, it limits logging to
// one message every 10 seconds with duration and totals included.
func LogBlock(block *externalapi.DomainBlock) {
mtx.Lock()
defer mtx.Unlock()
receivedLogBlocks++
receivedLogTx += int64(len(block.Transactions))
now := mstime.Now()
duration := now.Sub(lastBlockLogTime)
if duration < time.Second*10 {
return
}
// Truncate the duration to 10s of milliseconds.
@@ -48,17 +48,11 @@ func LogBlock(block *util.Block) error {
txStr = "transaction"
}
log.Infof("Processed %d %s in the last %s (%d %s, blue score %d, %s)",
log.Infof("Processed %d %s in the last %s (%d %s, %s)",
receivedLogBlocks, blockStr, tDuration, receivedLogTx,
txStr, blueScore, block.MsgBlock().Header.Timestamp)
txStr, mstime.UnixMilliseconds(block.Header.TimeInMilliseconds()))
receivedLogBlocks = 0
receivedLogTx = 0
lastBlockLogTime = now
}

View File

@@ -1,49 +1,82 @@
package flowcontext
import (
"sync/atomic"
"github.com/kaspanet/kaspad/app/protocol/blocklogger"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/flows/blockrelay"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
)
// OnNewBlock updates the mempool after a new block arrival, and
// relays newly unorphaned transactions and possibly rebroadcasts
// manually added transactions when not in IBD.
func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock,
blockInsertionResult *externalapi.BlockInsertionResult) error {
hash := consensushashing.BlockHash(block)
log.Debugf("OnNewBlock start for block %s", hash)
defer log.Debugf("OnNewBlock end for block %s", hash)
unorphaningResults, err := f.UnorphanBlocks(block)
if err != nil {
return err
}
log.Debugf("OnNewBlock: block %s unorphaned %d blocks", hash, len(unorphaningResults))
newBlocks := []*externalapi.DomainBlock{block}
newBlockInsertionResults := []*externalapi.BlockInsertionResult{blockInsertionResult}
for _, unorphaningResult := range unorphaningResults {
newBlocks = append(newBlocks, unorphaningResult.block)
newBlockInsertionResults = append(newBlockInsertionResults, unorphaningResult.blockInsertionResult)
}
for i, newBlock := range newBlocks {
blocklogger.LogBlock(newBlock)
log.Debugf("OnNewBlock: passing block %s transactions to mining manager", hash)
_, err = f.Domain().MiningManager().HandleNewBlockTransactions(newBlock.Transactions)
if err != nil {
return err
}
if f.onBlockAddedToDAGHandler != nil {
log.Debugf("OnNewBlock: calling f.onBlockAddedToDAGHandler for block %s", hash)
blockInsertionResult = newBlockInsertionResults[i]
err := f.onBlockAddedToDAGHandler(newBlock, blockInsertionResult)
if err != nil {
return err
}
}
}
return nil
}
func (f *FlowContext) broadcastTransactionsAfterBlockAdded(
block *externalapi.DomainBlock, transactionsAcceptedToMempool []*externalapi.DomainTransaction) error {
f.updateTransactionsToRebroadcast(block)
// Don't relay transactions when in IBD.
if atomic.LoadUint32(&f.isInIBD) != 0 {
if f.IsIBDRunning() {
return nil
}
var txIDsToRebroadcast []*externalapi.DomainTransactionID
if f.shouldRebroadcastTransactions() {
txIDsToRebroadcast = f.txIDsToRebroadcast()
}
txIDsToBroadcast := make([]*externalapi.DomainTransactionID, len(transactionsAcceptedToMempool)+len(txIDsToRebroadcast))
for i, tx := range transactionsAcceptedToMempool {
txIDsToBroadcast[i] = consensushashing.TransactionID(tx)
}
offset := len(transactionsAcceptedToMempool)
for i, txID := range txIDsToRebroadcast {
@@ -67,14 +100,62 @@ func (f *FlowContext) SharedRequestedBlocks() *blockrelay.SharedRequestedBlocks
}
// AddBlock adds the given block to the DAG and propagates it.
func (f *FlowContext) AddBlock(block *externalapi.DomainBlock) error {
blockInsertionResult, err := f.Domain().Consensus().ValidateAndInsertBlock(block)
if err != nil {
if errors.As(err, &ruleerrors.RuleError{}) {
log.Warnf("Validation failed for block %s: %s", consensushashing.BlockHash(block), err)
}
return err
}
err = f.OnNewBlock(block, blockInsertionResult)
if err != nil {
return err
}
return f.Broadcast(appmessage.NewMsgInvBlock(consensushashing.BlockHash(block)))
}
// IsIBDRunning returns true if IBD is currently marked as running
func (f *FlowContext) IsIBDRunning() bool {
f.ibdPeerMutex.RLock()
defer f.ibdPeerMutex.RUnlock()
return f.ibdPeer != nil
}
// TrySetIBDRunning attempts to mark IBD as running with the given peer.
// Returns false if IBD is already running
func (f *FlowContext) TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool {
f.ibdPeerMutex.Lock()
defer f.ibdPeerMutex.Unlock()
if f.ibdPeer != nil {
return false
}
f.ibdPeer = ibdPeer
log.Infof("IBD started")
return true
}
// UnsetIBDRunning marks IBD as no longer running
func (f *FlowContext) UnsetIBDRunning() {
f.ibdPeerMutex.Lock()
defer f.ibdPeerMutex.Unlock()
if f.ibdPeer == nil {
panic("attempted to unset isInIBD when it was not set to begin with")
}
f.ibdPeer = nil
log.Infof("IBD finished")
}
// IBDPeer returns the current IBD peer or nil if the node is not
// in IBD
func (f *FlowContext) IBDPeer() *peerpkg.Peer {
f.ibdPeerMutex.RLock()
defer f.ibdPeerMutex.RUnlock()
return f.ibdPeer
}
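// Usage sketch: TrySetIBDRunning and UnsetIBDRunning are meant to be used
// as a pair, as runIBDIfNotRunning does later in this diff,
//
//	if !flow.TrySetIBDRunning(flow.peer) {
//		return nil
//	}
//	defer flow.UnsetIBDRunning()
//
// so that at most one IBD session can run at any given time.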


@@ -1,8 +0,0 @@
package flowcontext
import "github.com/kaspanet/kaspad/domain/blockdag"
// DAG returns the DAG associated to the flow context.
func (f *FlowContext) DAG() *blockdag.BlockDAG {
return f.dag
}


@@ -0,0 +1,10 @@
package flowcontext
import (
"github.com/kaspanet/kaspad/domain"
)
// Domain returns the Domain object associated to the flow context.
func (f *FlowContext) Domain() domain.Domain {
return f.domain
}


@@ -1,26 +1,27 @@
package flowcontext
import (
"github.com/kaspanet/kaspad/util/mstime"
"sync"
"time"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/app/protocol/flows/blockrelay"
"github.com/kaspanet/kaspad/app/protocol/flows/relaytransactions"
"github.com/kaspanet/kaspad/app/protocol/flows/transactionrelay"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/domain/mempool"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/connmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
)
// OnBlockAddedToDAGHandler is a handler function that's triggered
// when a block is added to the DAG
type OnBlockAddedToDAGHandler func(block *util.Block) error
type OnBlockAddedToDAGHandler func(block *externalapi.DomainBlock, blockInsertionResult *externalapi.BlockInsertionResult) error
// OnTransactionAddedToMempoolHandler is a handler function that's triggered
// when a transaction is added to the mempool
@@ -31,45 +32,48 @@ type OnTransactionAddedToMempoolHandler func()
type FlowContext struct {
cfg *config.Config
netAdapter *netadapter.NetAdapter
txPool *mempool.TxPool
dag *blockdag.BlockDAG
domain domain.Domain
addressManager *addressmanager.AddressManager
connectionManager *connmanager.ConnectionManager
timeStarted int64
onBlockAddedToDAGHandler OnBlockAddedToDAGHandler
onTransactionAddedToMempoolHandler OnTransactionAddedToMempoolHandler
transactionsToRebroadcastLock sync.Mutex
transactionsToRebroadcast map[daghash.TxID]*util.Tx
transactionsToRebroadcast map[externalapi.DomainTransactionID]*externalapi.DomainTransaction
lastRebroadcastTime time.Time
sharedRequestedTransactions *relaytransactions.SharedRequestedTransactions
sharedRequestedTransactions *transactionrelay.SharedRequestedTransactions
sharedRequestedBlocks *blockrelay.SharedRequestedBlocks
isInIBD uint32
startIBDMutex sync.Mutex
ibdPeer *peerpkg.Peer
ibdPeer *peerpkg.Peer
ibdPeerMutex sync.RWMutex
peers map[id.ID]*peerpkg.Peer
peersMutex sync.RWMutex
orphans map[externalapi.DomainHash]*externalapi.DomainBlock
orphansMutex sync.RWMutex
}
// New returns a new instance of FlowContext.
func New(cfg *config.Config, dag *blockdag.BlockDAG, addressManager *addressmanager.AddressManager,
txPool *mempool.TxPool, netAdapter *netadapter.NetAdapter,
connectionManager *connmanager.ConnectionManager) *FlowContext {
func New(cfg *config.Config, domain domain.Domain, addressManager *addressmanager.AddressManager,
netAdapter *netadapter.NetAdapter, connectionManager *connmanager.ConnectionManager) *FlowContext {
return &FlowContext{
cfg: cfg,
netAdapter: netAdapter,
dag: dag,
domain: domain,
addressManager: addressManager,
connectionManager: connectionManager,
txPool: txPool,
sharedRequestedTransactions: relaytransactions.NewSharedRequestedTransactions(),
sharedRequestedTransactions: transactionrelay.NewSharedRequestedTransactions(),
sharedRequestedBlocks: blockrelay.NewSharedRequestedBlocks(),
peers: make(map[id.ID]*peerpkg.Peer),
transactionsToRebroadcast: make(map[daghash.TxID]*util.Tx),
transactionsToRebroadcast: make(map[externalapi.DomainTransactionID]*externalapi.DomainTransaction),
orphans: make(map[externalapi.DomainHash]*externalapi.DomainBlock),
timeStarted: mstime.Now().UnixMilliseconds(),
}
}


@@ -1,89 +0,0 @@
package flowcontext
import (
"sync/atomic"
"time"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain/blockdag"
)
// StartIBDIfRequired selects a peer and starts IBD against it
// if required
func (f *FlowContext) StartIBDIfRequired() {
f.startIBDMutex.Lock()
defer f.startIBDMutex.Unlock()
if f.IsInIBD() {
return
}
peer := f.selectPeerForIBD(f.dag)
if peer == nil {
spawn("StartIBDIfRequired-requestSelectedTipsIfRequired", f.requestSelectedTipsIfRequired)
return
}
atomic.StoreUint32(&f.isInIBD, 1)
f.ibdPeer = peer
spawn("StartIBDIfRequired-peer.StartIBD", peer.StartIBD)
}
// IsInIBD is true if IBD is currently running
func (f *FlowContext) IsInIBD() bool {
return atomic.LoadUint32(&f.isInIBD) != 0
}
// selectPeerForIBD returns the first peer whose selected tip
// hash is not in our DAG
func (f *FlowContext) selectPeerForIBD(dag *blockdag.BlockDAG) *peerpkg.Peer {
f.peersMutex.RLock()
defer f.peersMutex.RUnlock()
for _, peer := range f.peers {
peerSelectedTipHash := peer.SelectedTipHash()
if !dag.IsInDAG(peerSelectedTipHash) {
return peer
}
}
return nil
}
func (f *FlowContext) requestSelectedTipsIfRequired() {
if f.isDAGTimeCurrent() {
return
}
f.requestSelectedTips()
}
func (f *FlowContext) isDAGTimeCurrent() bool {
const minDurationToRequestSelectedTips = time.Minute
return f.dag.Now().Sub(f.dag.SelectedTipHeader().Timestamp) > minDurationToRequestSelectedTips
}
func (f *FlowContext) requestSelectedTips() {
f.peersMutex.RLock()
defer f.peersMutex.RUnlock()
for _, peer := range f.peers {
peer.RequestSelectedTipIfRequired()
}
}
// FinishIBD finishes the current IBD flow and starts a new one if required.
func (f *FlowContext) FinishIBD() {
f.ibdPeer = nil
atomic.StoreUint32(&f.isInIBD, 0)
f.StartIBDIfRequired()
}
// IBDPeer returns the currently active IBD peer.
// Returns nil if we aren't currently in IBD
func (f *FlowContext) IBDPeer() *peerpkg.Peer {
if !f.IsInIBD() {
return nil
}
return f.ibdPeer
}


@@ -0,0 +1,162 @@
package flowcontext
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/pkg/errors"
)
// maxOrphans is the maximum amount of orphans allowed in the
// orphans collection. This number is an approximation of how
// many orphans there can possibly be on average. It is based
// on: 2^orphanResolutionRange * PHANTOM K.
const maxOrphans = 600
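// As a worked example: orphanResolutionRange is 5 (see the block relay flow
// later in this diff), so assuming a PHANTOM K of roughly 18 (an assumed
// value, not stated in this diff), 2^5 * 18 = 576, rounded up to 600.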
// UnorphaningResult is the result of unorphaning a block
type UnorphaningResult struct {
block *externalapi.DomainBlock
blockInsertionResult *externalapi.BlockInsertionResult
}
// AddOrphan adds the block to the orphan set
func (f *FlowContext) AddOrphan(orphanBlock *externalapi.DomainBlock) {
f.orphansMutex.Lock()
defer f.orphansMutex.Unlock()
orphanHash := consensushashing.BlockHash(orphanBlock)
f.orphans[*orphanHash] = orphanBlock
if len(f.orphans) > maxOrphans {
log.Debugf("Orphan collection size exceeded. Evicting a random orphan")
f.evictRandomOrphan()
}
log.Infof("Received a block with missing parents, adding to orphan pool: %s", orphanHash)
}
func (f *FlowContext) evictRandomOrphan() {
var toEvict externalapi.DomainHash
for hash := range f.orphans {
toEvict = hash
break
}
delete(f.orphans, toEvict)
log.Debugf("Evicted %s from the orphan collection", toEvict)
}
// IsOrphan returns whether the given blockHash belongs to an orphan block
func (f *FlowContext) IsOrphan(blockHash *externalapi.DomainHash) bool {
f.orphansMutex.RLock()
defer f.orphansMutex.RUnlock()
_, ok := f.orphans[*blockHash]
return ok
}
// UnorphanBlocks removes the block from the orphan set, and removes all of the blocks that are no longer orphans.
func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*UnorphaningResult, error) {
f.orphansMutex.Lock()
defer f.orphansMutex.Unlock()
// Find all the children of rootBlock among the orphans
// and add them to the process queue
rootBlockHash := consensushashing.BlockHash(rootBlock)
processQueue := f.addChildOrphansToProcessQueue(rootBlockHash, []externalapi.DomainHash{})
var unorphaningResults []*UnorphaningResult
for len(processQueue) > 0 {
var orphanHash externalapi.DomainHash
orphanHash, processQueue = processQueue[0], processQueue[1:]
orphanBlock := f.orphans[orphanHash]
log.Debugf("Considering to unorphan block %s with parents %s",
orphanHash, orphanBlock.Header.ParentHashes())
canBeUnorphaned := true
for _, orphanBlockParentHash := range orphanBlock.Header.ParentHashes() {
orphanBlockParentInfo, err := f.domain.Consensus().GetBlockInfo(orphanBlockParentHash)
if err != nil {
return nil, err
}
if !orphanBlockParentInfo.Exists || orphanBlockParentInfo.BlockStatus == externalapi.StatusHeaderOnly {
log.Debugf("Cannot unorphan block %s. It's missing at "+
"least the following parent: %s", orphanHash, orphanBlockParentHash)
canBeUnorphaned = false
break
}
}
if canBeUnorphaned {
blockInsertionResult, unorphaningSucceeded, err := f.unorphanBlock(orphanHash)
if err != nil {
return nil, err
}
if unorphaningSucceeded {
unorphaningResults = append(unorphaningResults, &UnorphaningResult{
block: orphanBlock,
blockInsertionResult: blockInsertionResult,
})
processQueue = f.addChildOrphansToProcessQueue(&orphanHash, processQueue)
}
}
}
return unorphaningResults, nil
}
// addChildOrphansToProcessQueue finds all child orphans of `blockHash`
// and adds them to the given `processQueue` if they don't already exist
// inside of it
// Note that this method does not modify the given `processQueue`
func (f *FlowContext) addChildOrphansToProcessQueue(blockHash *externalapi.DomainHash,
processQueue []externalapi.DomainHash) []externalapi.DomainHash {
blockChildren := f.findChildOrphansOfBlock(blockHash)
for _, blockChild := range blockChildren {
exists := false
for _, queueOrphan := range processQueue {
if queueOrphan == blockChild {
exists = true
break
}
}
if !exists {
processQueue = append(processQueue, blockChild)
}
}
return processQueue
}
func (f *FlowContext) findChildOrphansOfBlock(blockHash *externalapi.DomainHash) []externalapi.DomainHash {
var childOrphans []externalapi.DomainHash
for orphanHash, orphanBlock := range f.orphans {
for _, orphanBlockParentHash := range orphanBlock.Header.ParentHashes() {
if orphanBlockParentHash.Equal(blockHash) {
childOrphans = append(childOrphans, orphanHash)
break
}
}
}
return childOrphans
}
func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (*externalapi.BlockInsertionResult, bool, error) {
orphanBlock, ok := f.orphans[orphanHash]
if !ok {
return nil, false, errors.Errorf("attempted to unorphan a non-orphan block %s", orphanHash)
}
delete(f.orphans, orphanHash)
blockInsertionResult, err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock)
if err != nil {
if errors.As(err, &ruleerrors.RuleError{}) {
log.Warnf("Validation failed for orphan block %s: %s", orphanHash, err)
return nil, false, nil
}
return nil, false, err
}
log.Infof("Unorphaned block %s", orphanHash)
return blockInsertionResult, true, nil
}
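// A stand-alone sketch (stdlib only, hypothetical block type — not kaspad
// code) of the breadth-first pass UnorphanBlocks performs: pop a candidate,
// and if all of its parents are now known, unorphan it and enqueue its own
// orphaned children.
package main

import "fmt"

type sketchBlock struct {
	id      string
	parents []string
}

func main() {
	known := map[string]bool{"root": true}
	orphans := map[string]sketchBlock{
		"a": {id: "a", parents: []string{"root"}},
		"b": {id: "b", parents: []string{"a"}},
		"c": {id: "c", parents: []string{"missing"}},
	}
	queue := []string{"a"} // the children of "root" among the orphans
	for len(queue) > 0 {
		id := queue[0]
		queue = queue[1:]
		candidate := orphans[id]
		allParentsKnown := true
		for _, parent := range candidate.parents {
			if !known[parent] {
				allParentsKnown = false
				break
			}
		}
		if !allParentsKnown {
			continue
		}
		delete(orphans, id)
		known[id] = true
		fmt.Println("unorphaned", id) // "a", then "b"; "c" stays orphaned
		for childID, child := range orphans {
			for _, parent := range child.parents {
				if parent == id {
					queue = append(queue, childID)
					break
				}
			}
		}
	}
}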


@@ -0,0 +1,43 @@
package flowcontext
import "github.com/kaspanet/kaspad/util/mstime"
const (
maxSelectedParentTimeDiffToAllowMiningInMilliSeconds = 300_000
)
// ShouldMine returns whether it's ok to use block template from this node
// for mining purposes.
func (f *FlowContext) ShouldMine() (bool, error) {
peers := f.Peers()
if len(peers) == 0 {
log.Debugf("The node is not connected, so ShouldMine returns false")
return false, nil
}
if f.IsIBDRunning() {
log.Debugf("IBD is running, so ShouldMine returns false")
return false, nil
}
virtualSelectedParent, err := f.domain.Consensus().GetVirtualSelectedParent()
if err != nil {
return false, err
}
virtualSelectedParentHeader, err := f.domain.Consensus().GetBlockHeader(virtualSelectedParent)
if err != nil {
return false, err
}
now := mstime.Now().UnixMilliseconds()
if now-virtualSelectedParentHeader.TimeInMilliseconds() < maxSelectedParentTimeDiffToAllowMiningInMilliSeconds {
log.Debugf("The selected tip timestamp is recent (%d), so ShouldMine returns true",
virtualSelectedParentHeader.TimeInMilliseconds())
return true, nil
}
log.Debugf("The selected tip timestamp is old (%d), so ShouldMine returns false",
virtualSelectedParentHeader.TimeInMilliseconds())
return false, nil
}
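// In other words: with maxSelectedParentTimeDiffToAllowMiningInMilliSeconds
// set to 300_000 above, ShouldMine permits mining only while the virtual
// selected parent's timestamp is less than five minutes behind the node's
// clock, which should keep a freshly-started or lagging node from mining
// on top of stale data.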


@@ -1,42 +1,38 @@
package flowcontext
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/flows/relaytransactions"
"github.com/kaspanet/kaspad/domain/mempool"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/pkg/errors"
"time"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/flows/transactionrelay"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
)
// AddTransaction adds transaction to the mempool and propagates it.
func (f *FlowContext) AddTransaction(tx *util.Tx) error {
func (f *FlowContext) AddTransaction(tx *externalapi.DomainTransaction) error {
f.transactionsToRebroadcastLock.Lock()
defer f.transactionsToRebroadcastLock.Unlock()
transactionsAcceptedToMempool, err := f.txPool.ProcessTransaction(tx, false)
err := f.Domain().MiningManager().ValidateAndInsertTransaction(tx, false)
if err != nil {
return err
}
if len(transactionsAcceptedToMempool) > 1 {
return errors.New("got more than one accepted transactions when no orphans were allowed")
}
f.transactionsToRebroadcast[*tx.ID()] = tx
inv := appmessage.NewMsgInvTransaction([]*daghash.TxID{tx.ID()})
transactionID := consensushashing.TransactionID(tx)
f.transactionsToRebroadcast[*transactionID] = tx
inv := appmessage.NewMsgInvTransaction([]*externalapi.DomainTransactionID{transactionID})
return f.Broadcast(inv)
}
func (f *FlowContext) updateTransactionsToRebroadcast(block *util.Block) {
func (f *FlowContext) updateTransactionsToRebroadcast(block *externalapi.DomainBlock) {
f.transactionsToRebroadcastLock.Lock()
defer f.transactionsToRebroadcastLock.Unlock()
// Note: if the block is red, its transactions won't be rebroadcasted
// anymore, although they are not included in the UTXO set.
// This is probably ok, since red blocks are quite rare.
for _, tx := range block.Transactions() {
delete(f.transactionsToRebroadcast, *tx.ID())
for _, tx := range block.Transactions {
delete(f.transactionsToRebroadcast, *consensushashing.TransactionID(tx))
}
}
@@ -45,30 +41,25 @@ func (f *FlowContext) shouldRebroadcastTransactions() bool {
return time.Since(f.lastRebroadcastTime) > rebroadcastInterval
}
func (f *FlowContext) txIDsToRebroadcast() []*daghash.TxID {
func (f *FlowContext) txIDsToRebroadcast() []*externalapi.DomainTransactionID {
f.transactionsToRebroadcastLock.Lock()
defer f.transactionsToRebroadcastLock.Unlock()
txIDs := make([]*daghash.TxID, len(f.transactionsToRebroadcast))
txIDs := make([]*externalapi.DomainTransactionID, len(f.transactionsToRebroadcast))
i := 0
for _, tx := range f.transactionsToRebroadcast {
txIDs[i] = tx.ID()
txIDs[i] = consensushashing.TransactionID(tx)
i++
}
return txIDs
}
// SharedRequestedTransactions returns a *relaytransactions.SharedRequestedTransactions for sharing
// SharedRequestedTransactions returns a *transactionrelay.SharedRequestedTransactions for sharing
// data about requested transactions between different peers.
func (f *FlowContext) SharedRequestedTransactions() *relaytransactions.SharedRequestedTransactions {
func (f *FlowContext) SharedRequestedTransactions() *transactionrelay.SharedRequestedTransactions {
return f.sharedRequestedTransactions
}
// TxPool returns the transaction pool associated to the manager.
func (f *FlowContext) TxPool() *mempool.TxPool {
return f.txPool
}
// OnTransactionAddedToMempool notifies the handler function that a transaction
// has been added to the mempool
func (f *FlowContext) OnTransactionAddedToMempool() {


@@ -20,10 +20,6 @@ type ReceiveAddressesContext interface {
func ReceiveAddresses(context ReceiveAddressesContext, incomingRoute *router.Route, outgoingRoute *router.Route,
peer *peerpkg.Peer) error {
if !context.AddressManager().NeedMoreAddresses() {
return nil
}
subnetworkID := peer.SubnetworkID()
msgGetAddresses := appmessage.NewMsgRequestAddresses(false, subnetworkID)
err := outgoingRoute.Enqueue(msgGetAddresses)
@@ -37,21 +33,10 @@ func ReceiveAddresses(context ReceiveAddressesContext, incomingRoute *router.Rou
}
msgAddresses := message.(*appmessage.MsgAddresses)
if len(msgAddresses.AddrList) > addressmanager.GetAddressesMax {
if len(msgAddresses.AddressList) > addressmanager.GetAddressesMax {
return protocolerrors.Errorf(true, "address count exceeded %d", addressmanager.GetAddressesMax)
}
if msgAddresses.IncludeAllSubnetworks {
return protocolerrors.Errorf(true, "got unexpected "+
"IncludeAllSubnetworks=true in [%s] command", msgAddresses.Command())
}
if !msgAddresses.SubnetworkID.IsEqual(context.Config().SubnetworkID) && msgAddresses.SubnetworkID != nil {
return protocolerrors.Errorf(false, "only full nodes and %s subnetwork IDs "+
"are allowed in [%s] command, but got subnetwork ID %s",
context.Config().SubnetworkID, msgAddresses.Command(), msgAddresses.SubnetworkID)
}
sourceAddress := peer.Connection().NetAddress()
context.AddressManager().AddAddresses(msgAddresses.AddrList, sourceAddress, msgAddresses.SubnetworkID)
context.AddressManager().AddAddresses(msgAddresses.AddressList...)
return nil
}


@@ -1,10 +1,12 @@
package addressexchange
import (
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"math/rand"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"math/rand"
)
// SendAddressesContext is the interface for the context needed for the SendAddresses flow.
@@ -14,21 +16,25 @@ type SendAddressesContext interface {
// SendAddresses sends addresses to a peer that requests it.
func SendAddresses(context SendAddressesContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
message, err := incomingRoute.Dequeue()
if err != nil {
return err
}
for {
message, err := incomingRoute.Dequeue()
if err != nil {
return err
}
msgGetAddresses := message.(*appmessage.MsgRequestAddresses)
addresses := context.AddressManager().AddressCache(msgGetAddresses.IncludeAllSubnetworks,
msgGetAddresses.SubnetworkID)
msgAddresses := appmessage.NewMsgAddresses(msgGetAddresses.IncludeAllSubnetworks, msgGetAddresses.SubnetworkID)
err = msgAddresses.AddAddresses(shuffleAddresses(addresses)...)
if err != nil {
return err
}
_, ok := message.(*appmessage.MsgRequestAddresses)
if !ok {
return protocolerrors.Errorf(true, "unexpected message. "+
"Expected: %s, got: %s", appmessage.CmdRequestAddresses, message.Command())
}
addresses := context.AddressManager().Addresses()
msgAddresses := appmessage.NewMsgAddresses(shuffleAddresses(addresses))
return outgoingRoute.Enqueue(msgAddresses)
err = outgoingRoute.Enqueue(msgAddresses)
if err != nil {
return err
}
}
}
// shuffleAddresses randomizes the order of the given addresses, so that if there are more than the maximum allowed in one message, a random subset is sent.
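// The function body falls outside this diff hunk. A stand-alone sketch of
// such a shuffle-and-cap helper (assumed, stdlib only; shuffleAndCap is a
// hypothetical stand-in, not the kaspad implementation):
package main

import (
	"fmt"
	"math/rand"
)

func shuffleAndCap(addresses []string, max int) []string {
	shuffled := make([]string, len(addresses))
	copy(shuffled, addresses)
	rand.Shuffle(len(shuffled), func(i, j int) {
		shuffled[i], shuffled[j] = shuffled[j], shuffled[i]
	})
	if len(shuffled) > max {
		shuffled = shuffled[:max]
	}
	return shuffled
}

func main() {
	addresses := []string{"a:16111", "b:16111", "c:16111"}
	fmt.Println(shuffleAndCap(addresses, 2)) // two entries, random order
}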


@@ -0,0 +1,29 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
func (flow *handleRelayInvsFlow) sendGetBlockLocator(lowHash *externalapi.DomainHash,
highHash *externalapi.DomainHash, limit uint32) error {
msgGetBlockLocator := appmessage.NewMsgRequestBlockLocator(lowHash, highHash, limit)
return flow.outgoingRoute.Enqueue(msgGetBlockLocator)
}
func (flow *handleRelayInvsFlow) receiveBlockLocator() (blockLocatorHashes []*externalapi.DomainHash, err error) {
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return nil, err
}
msgBlockLocator, ok := message.(*appmessage.MsgBlockLocator)
if !ok {
return nil,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdBlockLocator, message.Command())
}
return msgBlockLocator.BlockLocatorHashes, nil
}


@@ -0,0 +1,77 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// HandleIBDBlockLocatorContext is the interface for the context needed for the HandleIBDBlockLocator flow.
type HandleIBDBlockLocatorContext interface {
Domain() domain.Domain
}
// HandleIBDBlockLocator listens to appmessage.MsgIBDBlockLocator messages and sends
// the highest known block that's in the selected parent chain of `targetHash` to the
// requesting peer.
func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peer.Peer) error {
for {
message, err := incomingRoute.Dequeue()
if err != nil {
return err
}
ibdBlockLocatorMessage := message.(*appmessage.MsgIBDBlockLocator)
targetHash := ibdBlockLocatorMessage.TargetHash
log.Debugf("Received IBDBlockLocator from %s with targetHash %s", peer, targetHash)
blockInfo, err := context.Domain().Consensus().GetBlockInfo(targetHash)
if err != nil {
return err
}
if !blockInfo.Exists {
return protocolerrors.Errorf(true, "received IBDBlockLocator "+
"with an unknown targetHash %s", targetHash)
}
foundHighestHashInTheSelectedParentChainOfTargetHash := false
for _, blockLocatorHash := range ibdBlockLocatorMessage.BlockLocatorHashes {
blockInfo, err := context.Domain().Consensus().GetBlockInfo(blockLocatorHash)
if err != nil {
return err
}
if !blockInfo.Exists {
continue
}
isBlockLocatorHashInSelectedParentChainOfHighHash, err :=
context.Domain().Consensus().IsInSelectedParentChainOf(blockLocatorHash, targetHash)
if err != nil {
return err
}
if !isBlockLocatorHashInSelectedParentChainOfHighHash {
continue
}
foundHighestHashInTheSelectedParentChainOfTargetHash = true
log.Debugf("Found a known hash %s amongst peer %s's "+
"blockLocator that's in the selected parent chain of targetHash %s", blockLocatorHash, peer, targetHash)
ibdBlockLocatorHighestHashMessage := appmessage.NewMsgIBDBlockLocatorHighestHash(blockLocatorHash)
err = outgoingRoute.Enqueue(ibdBlockLocatorHighestHashMessage)
if err != nil {
return err
}
break
}
if !foundHighestHashInTheSelectedParentChainOfTargetHash {
return protocolerrors.Errorf(true, "no hash was found in the blockLocator "+
"that was in the selected parent chain of targetHash %s", targetHash)
}
}
}
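// Conceptual sketch (stdlib only, not kaspad code): a block locator is a
// list of hashes that is dense near the tip and exponentially sparse toward
// genesis, letting two nodes find a recent shared block with a logarithmic
// number of entries. Generating locator heights for a chain of height 100:
package main

import "fmt"

func main() {
	height := 100
	step := 1
	var locator []int
	for i := height; i > 0; i -= step {
		locator = append(locator, i)
		if len(locator) > 10 {
			step *= 2 // exponential spacing after the first entries
		}
	}
	locator = append(locator, 0) // always include genesis
	fmt.Println(locator)
	// Output: [100 99 98 97 96 95 94 93 92 91 90 88 84 76 60 28 0]
}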


@@ -0,0 +1,54 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)
// HandleIBDBlockRequestsContext is the interface for the context needed for the HandleIBDBlockRequests flow.
type HandleIBDBlockRequestsContext interface {
Domain() domain.Domain
}
// HandleIBDBlockRequests listens to appmessage.MsgRequestRelayBlocks messages and sends
// their corresponding blocks to the requesting peer.
func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute *router.Route,
outgoingRoute *router.Route) error {
for {
message, err := incomingRoute.Dequeue()
if err != nil {
return err
}
msgRequestIBDBlocks := message.(*appmessage.MsgRequestIBDBlocks)
log.Debugf("Got request for %d ibd blocks", len(msgRequestIBDBlocks.Hashes))
for i, hash := range msgRequestIBDBlocks.Hashes {
// Fetch the block from the database.
blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
if err != nil {
return err
}
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
return protocolerrors.Errorf(true, "block %s not found", hash)
}
block, err := context.Domain().Consensus().GetBlock(hash)
if err != nil {
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
}
// TODO (Partial nodes): Convert block to partial block if needed
blockMessage := appmessage.DomainBlockToMsgBlock(block)
ibdBlockMessage := appmessage.NewMsgIBDBlock(blockMessage)
err = outgoingRoute.Enqueue(ibdBlockMessage)
if err != nil {
return err
}
log.Debugf("sent %d out of %d", i, len(msgRequestIBDBlocks.Hashes))
}
}
}


@@ -0,0 +1,51 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// HandleIBDRootHashRequestsFlowContext is the interface for the context needed for the handleIBDRootHashRequestsFlow flow.
type HandleIBDRootHashRequestsFlowContext interface {
Domain() domain.Domain
}
type handleIBDRootHashRequestsFlow struct {
HandleIBDRootHashRequestsFlowContext
incomingRoute, outgoingRoute *router.Route
}
// HandleIBDRootHashRequests listens to appmessage.MsgRequestIBDRootHashMessage messages and sends
// the IBD root hash as response.
func HandleIBDRootHashRequests(context HandleIBDRootHashRequestsFlowContext, incomingRoute,
outgoingRoute *router.Route) error {
flow := &handleIBDRootHashRequestsFlow{
HandleIBDRootHashRequestsFlowContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *handleIBDRootHashRequestsFlow) start() error {
for {
_, err := flow.incomingRoute.Dequeue()
if err != nil {
return err
}
log.Debugf("Got request for IBD root hash")
pruningPoint, err := flow.Domain().Consensus().PruningPoint()
if err != nil {
return err
}
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgIBDRootHashMessage(pruningPoint))
if err != nil {
return err
}
log.Debugf("Sent IBD root hash %s", pruningPoint)
}
}
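// As the handler above shows, the "IBD root" at this point in the codebase
// is simply the node's pruning point: a request for the IBD root hash is
// answered with Consensus().PruningPoint().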


@@ -4,14 +4,15 @@ import (
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)
// RelayBlockRequestsContext is the interface for the context needed for the HandleRelayBlockRequests flow.
type RelayBlockRequestsContext interface {
DAG() *blockdag.BlockDAG
Domain() domain.Domain
}
// HandleRelayBlockRequests listens to appmessage.MsgRequestRelayBlocks messages and sends
@@ -25,31 +26,28 @@ func HandleRelayBlockRequests(context RelayBlockRequestsContext, incomingRoute *
return err
}
getRelayBlocksMessage := message.(*appmessage.MsgRequestRelayBlocks)
log.Debugf("Got request for relay blocks with hashes %s", getRelayBlocksMessage.Hashes)
for _, hash := range getRelayBlocksMessage.Hashes {
// Fetch the block from the database.
block, err := context.DAG().BlockByHash(hash)
if blockdag.IsNotInDAGErr(err) {
return protocolerrors.Errorf(true, "block %s not found", hash)
} else if err != nil {
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
}
msgBlock := block.MsgBlock()
// If we are a full node and the peer is a partial node, we must convert
// the block to a partial block.
nodeSubnetworkID := context.DAG().SubnetworkID()
peerSubnetworkID := peer.SubnetworkID()
isNodeFull := nodeSubnetworkID == nil
isPeerFull := peerSubnetworkID == nil
if isNodeFull && !isPeerFull {
msgBlock.ConvertToPartial(peerSubnetworkID)
}
err = outgoingRoute.Enqueue(msgBlock)
blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
if err != nil {
return err
}
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
return protocolerrors.Errorf(true, "block %s not found", hash)
}
block, err := context.Domain().Consensus().GetBlock(hash)
if err != nil {
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
}
// TODO (Partial nodes): Convert block to partial block if needed
err = outgoingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(block))
if err != nil {
return err
}
log.Debugf("Relayed block with hash %s", hash)
}
}
}


@@ -2,28 +2,37 @@ package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/blocklogger"
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
mathUtil "github.com/kaspanet/kaspad/util/math"
"github.com/pkg/errors"
)
// orphanResolutionRange is the maximum amount of blockLocator hashes
// to search for known blocks. See isBlockInOrphanResolutionRange for
// further details
var orphanResolutionRange uint32 = 5
// RelayInvsContext is the interface for the context needed for the HandleRelayInvs flow.
type RelayInvsContext interface {
Domain() domain.Domain
Config() *config.Config
NetAdapter() *netadapter.NetAdapter
DAG() *blockdag.BlockDAG
OnNewBlock(block *util.Block) error
OnNewBlock(block *externalapi.DomainBlock, blockInsertionResult *externalapi.BlockInsertionResult) error
SharedRequestedBlocks() *SharedRequestedBlocks
StartIBDIfRequired()
IsInIBD() bool
Broadcast(message appmessage.Message) error
AddOrphan(orphanBlock *externalapi.DomainBlock)
IsOrphan(blockHash *externalapi.DomainHash) bool
IsIBDRunning() bool
TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool
UnsetIBDRunning()
}
type handleRelayInvsFlow struct {
@@ -50,6 +59,7 @@ func HandleRelayInvs(context RelayInvsContext, incomingRoute *router.Route, outg
func (flow *handleRelayInvsFlow) start() error {
for {
log.Debugf("Waiting for inv")
inv, err := flow.readInv()
if err != nil {
return err
@@ -57,34 +67,68 @@ func (flow *handleRelayInvsFlow) start() error {
log.Debugf("Got relay inv for block %s", inv.Hash)
if flow.DAG().IsKnownBlock(inv.Hash) {
if flow.DAG().IsKnownInvalid(inv.Hash) {
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(inv.Hash)
if err != nil {
return err
}
if blockInfo.Exists {
if blockInfo.BlockStatus == externalapi.StatusInvalid {
return protocolerrors.Errorf(true, "sent inv of an invalid block %s",
inv.Hash)
}
log.Debugf("Block %s already exists. continuing...", inv.Hash)
continue
}
flow.StartIBDIfRequired()
if flow.IsInIBD() {
// Block relay is disabled during IBD
if flow.IsOrphan(inv.Hash) {
log.Debugf("Block %s is a known orphan. continuing...", inv.Hash)
continue
}
requestQueue := newHashesQueueSet()
requestQueue.enqueueIfNotExists(inv.Hash)
// Block relay is disabled during IBD
if flow.IsIBDRunning() {
log.Debugf("Got block %s while in IBD. continuing...", inv.Hash)
continue
}
for requestQueue.len() > 0 {
err := flow.requestBlocks(requestQueue)
log.Debugf("Requesting block %s", inv.Hash)
block, exists, err := flow.requestBlock(inv.Hash)
if err != nil {
return err
}
if exists {
log.Debugf("Aborting requesting block %s because it already exists", inv.Hash)
continue
}
log.Debugf("Processing block %s", inv.Hash)
missingParents, blockInsertionResult, err := flow.processBlock(block)
if err != nil {
return err
}
if len(missingParents) > 0 {
log.Debugf("Block %s contains orphans: %s", inv.Hash, missingParents)
err := flow.processOrphan(block, missingParents)
if err != nil {
return err
}
continue
}
log.Debugf("Relaying block %s", inv.Hash)
err = flow.relayBlock(block)
if err != nil {
return err
}
log.Infof("Accepted block %s via relay", inv.Hash)
err = flow.OnNewBlock(block, blockInsertionResult)
if err != nil {
return err
}
}
}
func (flow *handleRelayInvsFlow) readInv() (*appmessage.MsgInvRelayBlock, error) {
if len(flow.invsQueue) > 0 {
var inv *appmessage.MsgInvRelayBlock
inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:]
@@ -104,72 +148,40 @@ func (flow *handleRelayInvsFlow) readInv() (*appmessage.MsgInvRelayBlock, error)
return inv, nil
}
func (flow *handleRelayInvsFlow) requestBlocks(requestQueue *hashesQueueSet) error {
numHashesToRequest := mathUtil.MinInt(appmessage.MsgRequestRelayBlocksHashes, requestQueue.len())
hashesToRequest := requestQueue.dequeue(numHashesToRequest)
pendingBlocks := map[daghash.Hash]struct{}{}
var filteredHashesToRequest []*daghash.Hash
for _, hash := range hashesToRequest {
exists := flow.SharedRequestedBlocks().addIfNotExists(hash)
if exists {
continue
}
// The block can become known from another peer in the process of orphan resolution
if flow.DAG().IsKnownBlock(hash) {
continue
}
pendingBlocks[*hash] = struct{}{}
filteredHashesToRequest = append(filteredHashesToRequest, hash)
func (flow *handleRelayInvsFlow) requestBlock(requestHash *externalapi.DomainHash) (*externalapi.DomainBlock, bool, error) {
exists := flow.SharedRequestedBlocks().addIfNotExists(requestHash)
if exists {
return nil, true, nil
}
// Exit early if we've filtered out all the hashes
if len(filteredHashesToRequest) == 0 {
return nil
}
// In case the function returns earlier than expected, we want to make sure requestedBlocks is
// In case the function returns earlier than expected, we want to make sure flow.SharedRequestedBlocks() is
// clean from any pending blocks.
defer flow.SharedRequestedBlocks().removeSet(pendingBlocks)
defer flow.SharedRequestedBlocks().remove(requestHash)
getRelayBlocksMsg := appmessage.NewMsgRequestRelayBlocks(filteredHashesToRequest)
getRelayBlocksMsg := appmessage.NewMsgRequestRelayBlocks([]*externalapi.DomainHash{requestHash})
err := flow.outgoingRoute.Enqueue(getRelayBlocksMsg)
if err != nil {
return err
return nil, false, err
}
for len(pendingBlocks) > 0 {
msgBlock, err := flow.readMsgBlock()
if err != nil {
return err
}
block := util.NewBlock(msgBlock)
blockHash := block.Hash()
if _, ok := pendingBlocks[*blockHash]; !ok {
return protocolerrors.Errorf(true, "got unrequested block %s", block.Hash())
}
err = flow.processAndRelayBlock(requestQueue, block)
if err != nil {
return err
}
delete(pendingBlocks, *blockHash)
flow.SharedRequestedBlocks().remove(blockHash)
msgBlock, err := flow.readMsgBlock()
if err != nil {
return nil, false, err
}
return nil
block := appmessage.MsgBlockToDomainBlock(msgBlock)
blockHash := consensushashing.BlockHash(block)
if !blockHash.Equal(requestHash) {
return nil, false, protocolerrors.Errorf(true, "got unrequested block %s", blockHash)
}
return block, false, nil
}
// readMsgBlock returns the next msgBlock from the incoming route, and populates invsQueue with any inv messages that arrive in the meantime.
//
// Note: this function assumes the incoming route can contain only appmessage.MsgInvRelayBlock and appmessage.MsgBlock messages.
func (flow *handleRelayInvsFlow) readMsgBlock() (
msgBlock *appmessage.MsgBlock, err error) {
func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock, err error) {
for {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
@@ -187,59 +199,91 @@ func (flow *handleRelayInvsFlow) readMsgBlock() (
}
}
func (flow *handleRelayInvsFlow) processAndRelayBlock(requestQueue *hashesQueueSet, block *util.Block) error {
blockHash := block.Hash()
isOrphan, isDelayed, err := flow.DAG().ProcessBlock(block, blockdag.BFNone)
func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, *externalapi.BlockInsertionResult, error) {
blockHash := consensushashing.BlockHash(block)
blockInsertionResult, err := flow.Domain().Consensus().ValidateAndInsertBlock(block)
if err != nil {
if !errors.As(err, &blockdag.RuleError{}) {
return errors.Wrapf(err, "failed to process block %s", blockHash)
}
log.Infof("Rejected block %s from %s: %s", blockHash, flow.peer, err)
return protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
}
if isDelayed {
return nil
}
if isOrphan {
blueScore, err := block.BlueScore()
if err != nil {
return protocolerrors.Errorf(true, "received an orphan "+
"block %s with malformed blue score", blockHash)
if !errors.As(err, &ruleerrors.RuleError{}) {
return nil, nil, errors.Wrapf(err, "failed to process block %s", blockHash)
}
const maxOrphanBlueScoreDiff = 10000
selectedTipBlueScore := flow.DAG().SelectedTipBlueScore()
if blueScore > selectedTipBlueScore+maxOrphanBlueScoreDiff {
log.Infof("Orphan block %s has blue score %d and the selected tip blue score is "+
"%d. Ignoring orphans with a blue score difference from the selected tip greater than %d",
blockHash, blueScore, selectedTipBlueScore, maxOrphanBlueScoreDiff)
return nil
missingParentsError := &ruleerrors.ErrMissingParents{}
if errors.As(err, missingParentsError) {
return missingParentsError.MissingParentHashes, nil, nil
}
// Request the parents for the orphan block from the peer that sent it.
missingAncestors := flow.DAG().GetOrphanMissingAncestorHashes(blockHash)
for _, missingAncestor := range missingAncestors {
requestQueue.enqueueIfNotExists(missingAncestor)
}
return nil
log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
return nil, nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
}
err = blocklogger.LogBlock(block)
if err != nil {
return err
}
err = flow.Broadcast(appmessage.NewMsgInvBlock(blockHash))
if err != nil {
return err
}
flow.StartIBDIfRequired()
err = flow.OnNewBlock(block)
if err != nil {
return err
}
return nil
return nil, blockInsertionResult, nil
}
func (flow *handleRelayInvsFlow) relayBlock(block *externalapi.DomainBlock) error {
blockHash := consensushashing.BlockHash(block)
return flow.Broadcast(appmessage.NewMsgInvBlock(blockHash))
}
func (flow *handleRelayInvsFlow) processOrphan(block *externalapi.DomainBlock, missingParents []*externalapi.DomainHash) error {
blockHash := consensushashing.BlockHash(block)
// Return if the block has been orphaned from elsewhere already
if flow.IsOrphan(blockHash) {
log.Debugf("Skipping orphan processing for block %s because it is already an orphan", blockHash)
return nil
}
// Add the block to the orphan set if it's within orphan resolution range
isBlockInOrphanResolutionRange, err := flow.isBlockInOrphanResolutionRange(blockHash)
if err != nil {
return err
}
if isBlockInOrphanResolutionRange {
log.Debugf("Block %s is within orphan resolution range. "+
"Adding it to the orphan set and requesting its missing parents", blockHash)
flow.addToOrphanSetAndRequestMissingParents(block, missingParents)
return nil
}
// Start IBD unless we already are in IBD
log.Debugf("Block %s is out of orphan resolution range. "+
"Attempting to start IBD against it.", blockHash)
return flow.runIBDIfNotRunning(blockHash)
}
// isBlockInOrphanResolutionRange finds out whether the given blockHash should be
// retrieved via the unorphaning mechanism or via IBD. This method sends a
// getBlockLocator request to the peer with a limit of orphanResolutionRange.
// In the response, if we know none of the hashes, we should retrieve the given
// blockHash via IBD. Otherwise, via unorphaning.
func (flow *handleRelayInvsFlow) isBlockInOrphanResolutionRange(blockHash *externalapi.DomainHash) (bool, error) {
lowHash := flow.Config().ActiveNetParams.GenesisHash
err := flow.sendGetBlockLocator(lowHash, blockHash, orphanResolutionRange)
if err != nil {
return false, err
}
blockLocatorHashes, err := flow.receiveBlockLocator()
if err != nil {
return false, err
}
for _, blockLocatorHash := range blockLocatorHashes {
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(blockLocatorHash)
if err != nil {
return false, err
}
if blockInfo.Exists && blockInfo.BlockStatus != externalapi.StatusHeaderOnly {
return true, nil
}
}
return false, nil
}
func (flow *handleRelayInvsFlow) addToOrphanSetAndRequestMissingParents(
block *externalapi.DomainBlock, missingParents []*externalapi.DomainHash) {
flow.AddOrphan(block)
invMessages := make([]*appmessage.MsgInvRelayBlock, len(missingParents))
for i, missingParent := range missingParents {
invMessages[i] = appmessage.NewMsgInvBlock(missingParent)
}
flow.invsQueue = append(invMessages, flow.invsQueue...)
}
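// Note that the missing-parent invs are prepended, not appended, to
// invsQueue: the flow requests the parents before handling any other queued
// invs, so the orphan added above can be unorphaned as soon as its parents
// arrive.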


@@ -1,16 +1,16 @@
package ibd
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util/daghash"
)
// RequestBlockLocatorContext is the interface for the context needed for the HandleRequestBlockLocator flow.
type RequestBlockLocatorContext interface {
DAG() *blockdag.BlockDAG
Domain() domain.Domain
}
type handleRequestBlockLocatorFlow struct {
@@ -32,13 +32,18 @@ func HandleRequestBlockLocator(context RequestBlockLocatorContext, incomingRoute
func (flow *handleRequestBlockLocatorFlow) start() error {
for {
lowHash, highHash, err := flow.receiveGetBlockLocator()
lowHash, highHash, limit, err := flow.receiveGetBlockLocator()
if err != nil {
return err
}
log.Debugf("Received getBlockLocator with lowHash: %s, highHash: %s, limit: %d",
lowHash, highHash, limit)
locator, err := flow.DAG().BlockLocatorFromHashes(highHash, lowHash)
locator, err := flow.Domain().Consensus().CreateBlockLocator(lowHash, highHash, limit)
if err != nil || len(locator) == 0 {
if err != nil {
log.Debugf("Received error from CreateBlockLocator: %s", err)
}
return protocolerrors.Errorf(true, "couldn't build a block "+
"locator between blocks %s and %s", lowHash, highHash)
}
@@ -50,19 +55,19 @@ func (flow *handleRequestBlockLocatorFlow) start() error {
}
}
func (flow *handleRequestBlockLocatorFlow) receiveGetBlockLocator() (lowHash *daghash.Hash,
highHash *daghash.Hash, err error) {
func (flow *handleRequestBlockLocatorFlow) receiveGetBlockLocator() (lowHash *externalapi.DomainHash,
highHash *externalapi.DomainHash, limit uint32, err error) {
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return nil, nil, err
return nil, nil, 0, err
}
msgGetBlockLocator := message.(*appmessage.MsgRequestBlockLocator)
return msgGetBlockLocator.LowHash, msgGetBlockLocator.HighHash, nil
return msgGetBlockLocator.LowHash, msgGetBlockLocator.HighHash, msgGetBlockLocator.Limit, nil
}
func (flow *handleRequestBlockLocatorFlow) sendBlockLocator(locator blockdag.BlockLocator) error {
func (flow *handleRequestBlockLocatorFlow) sendBlockLocator(locator externalapi.BlockLocator) error {
msgBlockLocator := appmessage.NewMsgBlockLocator(locator)
err := flow.outgoingRoute.Enqueue(msgBlockLocator)
if err != nil {


@@ -0,0 +1,104 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
const ibdBatchSize = router.DefaultMaxMessages
// RequestIBDBlocksContext is the interface for the context needed for the HandleRequestHeaders flow.
type RequestIBDBlocksContext interface {
Domain() domain.Domain
}
type handleRequestBlocksFlow struct {
RequestIBDBlocksContext
incomingRoute, outgoingRoute *router.Route
peer *peer.Peer
}
// HandleRequestHeaders handles RequestHeaders messages
func HandleRequestHeaders(context RequestIBDBlocksContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peer.Peer) error {
flow := &handleRequestBlocksFlow{
RequestIBDBlocksContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
}
return flow.start()
}
func (flow *handleRequestBlocksFlow) start() error {
for {
lowHash, highHash, err := receiveRequestHeaders(flow.incomingRoute)
if err != nil {
return err
}
log.Debugf("Recieved requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash)
for !lowHash.Equal(highHash) {
log.Debugf("Getting block headers between %s and %s to %s", lowHash, highHash, flow.peer)
// GetHashesBetween is a relatively heavy operation so we limit it
// in order to avoid locking the consensus for too long
const maxBlueScoreDifference = 1 << 10
blockHashes, err := flow.Domain().Consensus().GetHashesBetween(lowHash, highHash, maxBlueScoreDifference)
if err != nil {
return err
}
log.Debugf("Got %d header hashes above lowHash %s", len(blockHashes), lowHash)
blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes))
for i, blockHash := range blockHashes {
blockHeader, err := flow.Domain().Consensus().GetBlockHeader(blockHash)
if err != nil {
return err
}
blockHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(blockHeader)
}
blockHeadersMessage := appmessage.NewBlockHeadersMessage(blockHeaders)
err = flow.outgoingRoute.Enqueue(blockHeadersMessage)
if err != nil {
return err
}
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return err
}
if _, ok := message.(*appmessage.MsgRequestNextHeaders); !ok {
return protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestNextHeaders, message.Command())
}
// The next lowHash is the last element in blockHashes
lowHash = blockHashes[len(blockHashes)-1]
}
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgDoneHeaders())
if err != nil {
return err
}
}
}
func receiveRequestHeaders(incomingRoute *router.Route) (lowHash *externalapi.DomainHash,
highHash *externalapi.DomainHash, err error) {
message, err := incomingRoute.Dequeue()
if err != nil {
return nil, nil, err
}
msgRequestIBDBlocks := message.(*appmessage.MsgRequestHeaders)
return msgRequestIBDBlocks.LowHash, msgRequestIBDBlocks.HighHash, nil
}
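// The message ping-pong above (a BlockHeadersMessage out, a
// MsgRequestNextHeaders back in, MsgDoneHeaders at the end) acts as
// application-level flow control: the serving peer sends the next batch of
// headers only after the syncing peer explicitly asks for it, so neither
// side's route can fill up with unconsumed messages.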


@@ -0,0 +1,117 @@
package blockrelay
import (
"errors"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// HandleRequestIBDRootUTXOSetAndBlockContext is the interface for the context needed for the HandleRequestIBDRootUTXOSetAndBlock flow.
type HandleRequestIBDRootUTXOSetAndBlockContext interface {
Domain() domain.Domain
}
type handleRequestIBDRootUTXOSetAndBlockFlow struct {
HandleRequestIBDRootUTXOSetAndBlockContext
incomingRoute, outgoingRoute *router.Route
}
// HandleRequestIBDRootUTXOSetAndBlock listens to appmessage.MsgRequestIBDRootUTXOSetAndBlock messages and sends
// the IBD root UTXO set and block body.
func HandleRequestIBDRootUTXOSetAndBlock(context HandleRequestIBDRootUTXOSetAndBlockContext, incomingRoute,
outgoingRoute *router.Route) error {
flow := &handleRequestIBDRootUTXOSetAndBlockFlow{
HandleRequestIBDRootUTXOSetAndBlockContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *handleRequestIBDRootUTXOSetAndBlockFlow) start() error {
for {
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return err
}
msgRequestIBDRootUTXOSetAndBlock, ok := message.(*appmessage.MsgRequestIBDRootUTXOSetAndBlock)
if !ok {
return protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestIBDRootUTXOSetAndBlock, message.Command())
}
finishMeasuring := logger.LogAndMeasureExecutionTime(log, "handleRequestIBDRootUTXOSetAndBlockFlow")
log.Debugf("Got request for IBDRoot UTXOSet and Block")
serializedUTXOSet, err := flow.Domain().Consensus().GetPruningPointUTXOSet(msgRequestIBDRootUTXOSetAndBlock.IBDRoot)
if err != nil {
if errors.Is(err, ruleerrors.ErrWrongPruningPointHash) {
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgIBDRootNotFound())
if err != nil {
return err
}
continue
}
return err
}
log.Debugf("Retrieved utxo set for pruning block %s", msgRequestIBDRootUTXOSetAndBlock.IBDRoot)
block, err := flow.Domain().Consensus().GetBlock(msgRequestIBDRootUTXOSetAndBlock.IBDRoot)
if err != nil {
return err
}
log.Debugf("Retrieved pruning block %s", msgRequestIBDRootUTXOSetAndBlock.IBDRoot)
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgIBDBlock(appmessage.DomainBlockToMsgBlock(block)))
if err != nil {
return err
}
// Send the UTXO set in `step`-sized chunks
const step = 1024 * 1024 // 1MB
offset := 0
chunksSent := 0
for offset < len(serializedUTXOSet) {
var chunk []byte
if offset+step < len(serializedUTXOSet) {
chunk = serializedUTXOSet[offset : offset+step]
} else {
chunk = serializedUTXOSet[offset:]
}
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgIBDRootUTXOSetChunk(chunk))
if err != nil {
return err
}
offset += step
chunksSent++
// Wait for the peer to request more chunks every `ibdBatchSize` chunks
if chunksSent%ibdBatchSize == 0 {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return err
}
_, ok := message.(*appmessage.MsgRequestNextIBDRootUTXOSetChunk)
if !ok {
return protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestNextIBDRootUTXOSetChunk, message.Command())
}
}
}
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgDoneIBDRootUTXOSetChunks())
if err != nil {
return err
}
finishMeasuring()
}
}
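// A stand-alone sketch (stdlib only, not kaspad code) of the fixed-size
// chunking used above, where the final chunk simply carries whatever
// remains:
package main

import "fmt"

func chunks(data []byte, step int) [][]byte {
	var out [][]byte
	for offset := 0; offset < len(data); offset += step {
		end := offset + step
		if end > len(data) {
			end = len(data)
		}
		out = append(out, data[offset:end])
	}
	return out
}

func main() {
	serializedUTXOSet := make([]byte, 2500)
	for i, chunk := range chunks(serializedUTXOSet, 1024) {
		fmt.Printf("chunk %d: %d bytes\n", i, len(chunk))
	}
	// chunk 0: 1024 bytes
	// chunk 1: 1024 bytes
	// chunk 2: 452 bytes
}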


@@ -1,35 +0,0 @@
package blockrelay
import "github.com/kaspanet/kaspad/util/daghash"
type hashesQueueSet struct {
queue []*daghash.Hash
set map[daghash.Hash]struct{}
}
func (r *hashesQueueSet) enqueueIfNotExists(hash *daghash.Hash) {
if _, ok := r.set[*hash]; ok {
return
}
r.queue = append(r.queue, hash)
r.set[*hash] = struct{}{}
}
func (r *hashesQueueSet) dequeue(numItems int) []*daghash.Hash {
var hashes []*daghash.Hash
hashes, r.queue = r.queue[:numItems], r.queue[numItems:]
for _, hash := range hashes {
delete(r.set, *hash)
}
return hashes
}
func (r *hashesQueueSet) len() int {
return len(r.queue)
}
func newHashesQueueSet() *hashesQueueSet {
return &hashesQueueSet{
set: make(map[daghash.Hash]struct{}),
}
}


@@ -0,0 +1,496 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"time"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/pkg/errors"
)
func (flow *handleRelayInvsFlow) runIBDIfNotRunning(highHash *externalapi.DomainHash) error {
wasIBDNotRunning := flow.TrySetIBDRunning(flow.peer)
if !wasIBDNotRunning {
log.Debugf("IBD is already running")
return nil
}
defer flow.UnsetIBDRunning()
log.Debugf("IBD started with peer %s and highHash %s", flow.peer, highHash)
log.Debugf("Syncing headers up to %s", highHash)
err := flow.syncHeaders(highHash)
if err != nil {
return err
}
log.Debugf("Finished syncing headers up to %s", highHash)
log.Debugf("Syncing the current pruning point UTXO set")
syncedPruningPointUTXOSetSuccessfully, err := flow.syncPruningPointUTXOSet()
if err != nil {
return err
}
if !syncedPruningPointUTXOSetSuccessfully {
log.Debugf("Aborting IBD because the pruning point UTXO set failed to sync")
return nil
}
log.Debugf("Finished syncing the current pruning point UTXO set")
log.Debugf("Downloading block bodies up to %s", highHash)
err = flow.syncMissingBlockBodies(highHash)
if err != nil {
return err
}
log.Debugf("Finished downloading block bodies up to %s", highHash)
return nil
}
func (flow *handleRelayInvsFlow) syncHeaders(highHash *externalapi.DomainHash) error {
highHashReceived := false
for !highHashReceived {
log.Debugf("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
highestSharedBlockHash, err := flow.findHighestSharedBlockHash(highHash)
if err != nil {
return err
}
log.Debugf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)
err = flow.downloadHeaders(highestSharedBlockHash, highHash)
if err != nil {
return err
}
// We're finished once highHash has been inserted into the DAG
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(highHash)
if err != nil {
return err
}
highHashReceived = blockInfo.Exists
log.Debugf("Headers downloaded from peer %s. Are further headers required: %t", flow.peer, !highHashReceived)
}
return nil
}
func (flow *handleRelayInvsFlow) findHighestSharedBlockHash(targetHash *externalapi.DomainHash) (*externalapi.DomainHash, error) {
log.Debugf("Sending a blockLocator to %s between pruning point and headers selected tip", flow.peer)
blockLocator, err := flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
if err != nil {
return nil, err
}
for {
highestHash, err := flow.fetchHighestHash(targetHash, blockLocator)
if err != nil {
return nil, err
}
highestHashIndex, err := flow.findHighestHashIndex(highestHash, blockLocator)
if err != nil {
return nil, err
}
if highestHashIndex == 0 ||
// If the block locator contains only two adjacent chain blocks, the
// syncer will always find the same highest chain block, so to avoid
// an endless loop, we explicitly stop the loop in such situation.
(len(blockLocator) == 2 && highestHashIndex == 1) {
return highestHash, nil
}
locatorHashAboveHighestHash := highestHash
if highestHashIndex > 0 {
locatorHashAboveHighestHash = blockLocator[highestHashIndex-1]
}
blockLocator, err = flow.nextBlockLocator(highestHash, locatorHashAboveHighestHash)
if err != nil {
return nil, err
}
}
}
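// Taken together, the loop above behaves like a coarse-to-fine search over
// the headers selected chain: each round brackets the highest shared block
// more tightly (between highestHash and the locator entry just above it),
// and the loop terminates once the peer already knows the locator's topmost
// entry (highestHashIndex == 0) or the bracket has shrunk to two adjacent
// chain blocks.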
func (flow *handleRelayInvsFlow) nextBlockLocator(lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
log.Debugf("Sending a blockLocator to %s between %s and %s", flow.peer, lowHash, highHash)
blockLocator, err := flow.Domain().Consensus().CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
if err != nil {
if !errors.Is(err, model.ErrBlockNotInSelectedParentChain) {
return nil, err
}
log.Debugf("Headers selected parent chain moved since findHighestSharedBlockHash - " +
"restarting with full block locator")
blockLocator, err = flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
if err != nil {
return nil, err
}
}
return blockLocator, nil
}
func (flow *handleRelayInvsFlow) findHighestHashIndex(
highestHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (int, error) {
highestHashIndex := 0
highestHashIndexFound := false
for i, blockLocatorHash := range blockLocator {
if highestHash.Equal(blockLocatorHash) {
highestHashIndex = i
highestHashIndexFound = true
break
}
}
if !highestHashIndexFound {
return 0, protocolerrors.Errorf(true, "highest hash %s "+
"returned from peer %s is not in the original blockLocator", highestHash, flow.peer)
}
log.Debugf("The index of the highest hash in the original "+
"blockLocator sent to %s is %d", flow.peer, highestHashIndex)
return highestHashIndex, nil
}
func (flow *handleRelayInvsFlow) fetchHighestHash(
targetHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (*externalapi.DomainHash, error) {
ibdBlockLocatorMessage := appmessage.NewMsgIBDBlockLocator(targetHash, blockLocator)
err := flow.outgoingRoute.Enqueue(ibdBlockLocatorMessage)
if err != nil {
return nil, err
}
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return nil, err
}
ibdBlockLocatorHighestHashMessage, ok := message.(*appmessage.MsgIBDBlockLocatorHighestHash)
if !ok {
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdIBDBlockLocatorHighestHash, message.Command())
}
highestHash := ibdBlockLocatorHighestHashMessage.HighestHash
log.Debugf("The highest hash the peer %s knows is %s", flow.peer, highestHash)
return highestHash, nil
}
func (flow *handleRelayInvsFlow) downloadHeaders(highestSharedBlockHash *externalapi.DomainHash,
highHash *externalapi.DomainHash) error {
err := flow.sendRequestHeaders(highestSharedBlockHash, highHash)
if err != nil {
return err
}
// Keep a short queue of blockHeadersMessages so that there's
// never a moment when the node is not validating and inserting
// headers
blockHeadersMessageChan := make(chan *appmessage.BlockHeadersMessage, 2)
errChan := make(chan error)
doneChan := make(chan interface{})
spawn("handleRelayInvsFlow-downloadHeaders", func() {
for {
blockHeadersMessage, doneIBD, err := flow.receiveHeaders()
if err != nil {
errChan <- err
return
}
if doneIBD {
doneChan <- struct{}{}
return
}
blockHeadersMessageChan <- blockHeadersMessage
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextHeaders())
if err != nil {
errChan <- err
return
}
}
})
for {
select {
case blockHeadersMessage := <-blockHeadersMessageChan:
for _, header := range blockHeadersMessage.BlockHeaders {
err = flow.processHeader(header)
if err != nil {
return err
}
}
case err := <-errChan:
return err
case <-doneChan:
return nil
}
}
}
func (flow *handleRelayInvsFlow) sendRequestHeaders(highestSharedBlockHash *externalapi.DomainHash,
peerSelectedTipHash *externalapi.DomainHash) error {
msgGetBlockInvs := appmessage.NewMsgRequstHeaders(highestSharedBlockHash, peerSelectedTipHash)
return flow.outgoingRoute.Enqueue(msgGetBlockInvs)
}
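// receiveHeaders waits for the next headers message, reporting
// doneIBD=true once the peer signals that no more headers are left.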
func (flow *handleRelayInvsFlow) receiveHeaders() (msgBlockHeaders *appmessage.BlockHeadersMessage, doneIBD bool, err error) {
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return nil, false, err
}
switch message := message.(type) {
case *appmessage.BlockHeadersMessage:
return message, false, nil
case *appmessage.MsgDoneHeaders:
return nil, true, nil
default:
return nil, false,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s or %s, got: %s", appmessage.CmdHeader, appmessage.CmdDoneHeaders, message.Command())
}
}
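// processHeader wraps the received header in a transactionless block
// and inserts it into the DAG, skipping headers that are already known.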
func (flow *handleRelayInvsFlow) processHeader(msgBlockHeader *appmessage.MsgBlockHeader) error {
header := appmessage.BlockHeaderToDomainBlockHeader(msgBlockHeader)
block := &externalapi.DomainBlock{
Header: header,
Transactions: nil,
}
blockHash := consensushashing.BlockHash(block)
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(blockHash)
if err != nil {
return err
}
if blockInfo.Exists {
log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
return nil
}
_, err = flow.Domain().Consensus().ValidateAndInsertBlock(block)
if err != nil {
if !errors.As(err, &ruleerrors.RuleError{}) {
return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
}
log.Infof("Rejected block header %s from %s during IBD: %s", blockHash, flow.peer, err)
return protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash)
}
return nil
}
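// syncPruningPointUTXOSet requests the peer's pruning point (IBD root)
// and, if its block data is missing and it is a valid pruning point for
// this DAG, downloads its UTXO set. It returns false if IBD should be
// stopped with this peer.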
func (flow *handleRelayInvsFlow) syncPruningPointUTXOSet() (bool, error) {
log.Debugf("Checking if a new pruning point is available")
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestIBDRootHashMessage())
if err != nil {
return false, err
}
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return false, err
}
msgIBDRootHash, ok := message.(*appmessage.MsgIBDRootHashMessage)
if !ok {
return false, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdIBDRootHash, message.Command())
}
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(msgIBDRootHash.Hash)
if err != nil {
return false, err
}
if blockInfo.BlockStatus != externalapi.StatusHeaderOnly {
log.Debugf("Already has the block data of the new suggested pruning point %s", msgIBDRootHash.Hash)
return true, nil
}
log.Infof("Checking if the suggested pruning point %s is compatible to the node DAG", msgIBDRootHash.Hash)
isValid, err := flow.Domain().Consensus().IsValidPruningPoint(msgIBDRootHash.Hash)
if err != nil {
return false, err
}
if !isValid {
log.Infof("The suggested pruning point %s is incompatible to this node DAG, so stopping IBD with this"+
" peer", msgIBDRootHash.Hash)
return false, nil
}
log.Info("Fetching the pruning point UTXO set")
succeed, err := flow.fetchMissingUTXOSet(msgIBDRootHash.Hash)
if err != nil {
return false, err
}
if !succeed {
log.Infof("Couldn't successfully fetch the pruning point UTXO set. Stopping IBD.")
return false, nil
}
log.Info("Fetched the new pruning point UTXO set")
return true, nil
}
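// fetchMissingUTXOSet downloads the IBD root's block and serialized UTXO
// set and hands both to the consensus for validation and insertion as
// the new pruning point.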
func (flow *handleRelayInvsFlow) fetchMissingUTXOSet(ibdRootHash *externalapi.DomainHash) (succeed bool, err error) {
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestIBDRootUTXOSetAndBlock(ibdRootHash))
if err != nil {
return false, err
}
utxoSet, block, found, err := flow.receiveIBDRootUTXOSetAndBlock()
if err != nil {
return false, err
}
if !found {
return false, nil
}
err = flow.Domain().Consensus().ValidateAndInsertPruningPoint(block, utxoSet)
if err != nil {
// TODO: Find a better way to deal with finality conflicts.
if errors.Is(err, ruleerrors.ErrSuggestedPruningViolatesFinality) {
return false, nil
}
return false, protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "error with IBD root UTXO set")
}
return true, nil
}
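// receiveIBDRootUTXOSetAndBlock receives the IBD root block followed by
// its serialized UTXO set, chunk by chunk. It returns found=false if the
// peer no longer has the requested IBD root.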
func (flow *handleRelayInvsFlow) receiveIBDRootUTXOSetAndBlock() ([]byte, *externalapi.DomainBlock, bool, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "receiveIBDRootUTXOSetAndBlock")
defer onEnd()
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return nil, nil, false, err
}
var block *externalapi.DomainBlock
switch message := message.(type) {
case *appmessage.MsgIBDBlock:
block = appmessage.MsgBlockToDomainBlock(message.MsgBlock)
case *appmessage.MsgIBDRootNotFound:
return nil, nil, false, nil
default:
return nil, nil, false,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s or %s, got: %s",
appmessage.CmdIBDBlock, appmessage.CmdIBDRootNotFound, message.Command(),
)
}
log.Debugf("Received IBD root block %s", consensushashing.BlockHash(block))
serializedUTXOSet := []byte{}
receivedAllChunks := false
receivedChunkCount := 0
for !receivedAllChunks {
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return nil, nil, false, err
}
switch message := message.(type) {
case *appmessage.MsgIBDRootUTXOSetChunk:
serializedUTXOSet = append(serializedUTXOSet, message.Chunk...)
case *appmessage.MsgDoneIBDRootUTXOSetChunks:
receivedAllChunks = true
default:
return nil, nil, false,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s or %s, got: %s",
appmessage.CmdIBDRootUTXOSetChunk, appmessage.CmdDoneIBDRootUTXOSetChunks, message.Command(),
)
}
receivedChunkCount++
if !receivedAllChunks && receivedChunkCount%ibdBatchSize == 0 {
log.Debugf("Received %d UTXO set chunks so far, totaling in %d bytes",
receivedChunkCount, len(serializedUTXOSet))
requestNextIBDRootUTXOSetChunkMessage := appmessage.NewMsgRequestNextIBDRootUTXOSetChunk()
err := flow.outgoingRoute.Enqueue(requestNextIBDRootUTXOSetChunkMessage)
if err != nil {
return nil, nil, false, err
}
}
}
log.Debugf("Finished receiving the UTXO set. Total bytes: %d", len(serializedUTXOSet))
return serializedUTXOSet, block, true, nil
}
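// syncMissingBlockBodies requests the bodies of all header-only blocks
// in highHash's past in batches of ibdBatchSize, validating and
// inserting each block as it arrives.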
func (flow *handleRelayInvsFlow) syncMissingBlockBodies(highHash *externalapi.DomainHash) error {
hashes, err := flow.Domain().Consensus().GetMissingBlockBodyHashes(highHash)
if err != nil {
return err
}
for offset := 0; offset < len(hashes); offset += ibdBatchSize {
var hashesToRequest []*externalapi.DomainHash
if offset+ibdBatchSize < len(hashes) {
hashesToRequest = hashes[offset : offset+ibdBatchSize]
} else {
hashesToRequest = hashes[offset:]
}
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestIBDBlocks(hashesToRequest))
if err != nil {
return err
}
for _, expectedHash := range hashesToRequest {
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return err
}
msgIBDBlock, ok := message.(*appmessage.MsgIBDBlock)
if !ok {
return protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdIBDBlock, message.Command())
}
block := appmessage.MsgBlockToDomainBlock(msgIBDBlock.MsgBlock)
blockHash := consensushashing.BlockHash(block)
if !expectedHash.Equal(blockHash) {
return protocolerrors.Errorf(true, "expected block %s but got %s", expectedHash, blockHash)
}
blockInsertionResult, err := flow.Domain().Consensus().ValidateAndInsertBlock(block)
if err != nil {
return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash)
}
err = flow.OnNewBlock(block, blockInsertionResult)
if err != nil {
return err
}
}
}
return nil
}
// dequeueIncomingMessageAndSkipInvs is a convenience method to be used during
// IBD. Inv messages are expected to arrive at any given moment, but should be
// ignored while we're in IBD
func (flow *handleRelayInvsFlow) dequeueIncomingMessageAndSkipInvs(timeout time.Duration) (appmessage.Message, error) {
for {
message, err := flow.incomingRoute.DequeueWithTimeout(timeout)
if err != nil {
return nil, err
}
if _, ok := message.(*appmessage.MsgInvRelayBlock); !ok {
return message, nil
}
}
}

View File

@@ -3,23 +3,23 @@ package blockrelay
 import (
 "sync"
-"github.com/kaspanet/kaspad/util/daghash"
+"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 )
 // SharedRequestedBlocks is a data structure that is shared between peers that
 // holds the hashes of all the requested blocks to prevent redundant requests.
 type SharedRequestedBlocks struct {
-blocks map[daghash.Hash]struct{}
+blocks map[externalapi.DomainHash]struct{}
 sync.Mutex
 }
-func (s *SharedRequestedBlocks) remove(hash *daghash.Hash) {
+func (s *SharedRequestedBlocks) remove(hash *externalapi.DomainHash) {
 s.Lock()
 defer s.Unlock()
 delete(s.blocks, *hash)
 }
-func (s *SharedRequestedBlocks) removeSet(blockHashes map[daghash.Hash]struct{}) {
+func (s *SharedRequestedBlocks) removeSet(blockHashes map[externalapi.DomainHash]struct{}) {
 s.Lock()
 defer s.Unlock()
 for hash := range blockHashes {
@@ -27,7 +27,7 @@ func (s *SharedRequestedBlocks) removeSet(blockHashes map[daghash.Hash]struct{})
 }
 }
-func (s *SharedRequestedBlocks) addIfNotExists(hash *daghash.Hash) (exists bool) {
+func (s *SharedRequestedBlocks) addIfNotExists(hash *externalapi.DomainHash) (exists bool) {
 s.Lock()
 defer s.Unlock()
 _, ok := s.blocks[*hash]
@@ -41,6 +41,6 @@ func (s *SharedRequestedBlocks) addIfNotExists(hash *daghash.Hash) (exists bool)
 // NewSharedRequestedBlocks returns a new instance of SharedRequestedBlocks.
 func NewSharedRequestedBlocks() *SharedRequestedBlocks {
 return &SharedRequestedBlocks{
-blocks: make(map[daghash.Hash]struct{}),
+blocks: make(map[externalapi.DomainHash]struct{}),
 }
 }

View File

@@ -1,21 +1,20 @@
 package handshake
 import (
 "sync"
+"sync/atomic"
+"github.com/kaspanet/kaspad/domain"
 "github.com/kaspanet/kaspad/app/protocol/common"
 "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
 "github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
-"github.com/kaspanet/kaspad/domain/blockdag"
 "github.com/kaspanet/kaspad/infrastructure/config"
 "github.com/kaspanet/kaspad/infrastructure/network/netadapter"
 "github.com/kaspanet/kaspad/app/appmessage"
 peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
 routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
-"github.com/kaspanet/kaspad/util/locks"
 "github.com/pkg/errors"
 )
@@ -23,9 +22,8 @@ import (
 type HandleHandshakeContext interface {
 Config() *config.Config
 NetAdapter() *netadapter.NetAdapter
-DAG() *blockdag.BlockDAG
+Domain() domain.Domain
 AddressManager() *addressmanager.AddressManager
-StartIBDIfRequired()
 AddToPeers(peer *peerpkg.Peer) error
 HandleError(err error, flowName string, isStopping *uint32, errChan chan<- error)
 }
@@ -37,10 +35,12 @@ func HandleHandshake(context HandleHandshakeContext, netConnection *netadapter.N
 ) (*peerpkg.Peer, error) {
 // For HandleHandshake to finish, we need to get from the other node
-// a version and verack messages, so we increase the wait group by 2
-// and block HandleHandshake with wg.Wait().
-wg := sync.WaitGroup{}
-wg.Add(2)
+// a version and verack messages, so we set doneCount to 2, decrease it
+// when sending and receiving the version, and close doneChan when it
+// reaches 0. Then we select on doneChan and errChan, waiting for either
+// completion or an error.
+doneCount := int32(2)
+doneChan := make(chan struct{})
 isStopping := uint32(0)
 errChan := make(chan error)
@@ -55,7 +55,9 @@ func HandleHandshake(context HandleHandshakeContext, netConnection *netadapter.N
 return
 }
 peerAddress = address
-wg.Done()
+if atomic.AddInt32(&doneCount, -1) == 0 {
+close(doneChan)
+}
 })
 spawn("HandleHandshake-SendVersion", func() {
@@ -64,7 +66,9 @@ func HandleHandshake(context HandleHandshakeContext, netConnection *netadapter.N
 handleError(err, "SendVersion", &isStopping, errChan)
 return
 }
-wg.Done()
+if atomic.AddInt32(&doneCount, -1) == 0 {
+close(doneChan)
+}
 })
 select {
@@ -73,7 +77,7 @@ func HandleHandshake(context HandleHandshakeContext, netConnection *netadapter.N
 return nil, err
 }
 return nil, nil
-case <-locks.ReceiveFromChanWhenDone(func() { wg.Wait() }):
+case <-doneChan:
 }
 err := context.AddToPeers(peer)
@@ -85,13 +89,8 @@ func HandleHandshake(context HandleHandshakeContext, netConnection *netadapter.N
 }
 if peerAddress != nil {
-subnetworkID := peer.SubnetworkID()
-context.AddressManager().AddAddress(peerAddress, peerAddress, subnetworkID)
-context.AddressManager().Good(peerAddress, subnetworkID)
+context.AddressManager().AddAddresses(peerAddress)
 }
-context.StartIBDIfRequired()
 return peer, nil
 }

View File

@@ -5,6 +5,7 @@ import (
 "github.com/kaspanet/kaspad/app/protocol/common"
 peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
 "github.com/kaspanet/kaspad/app/protocol/protocolerrors"
+"github.com/kaspanet/kaspad/infrastructure/logger"
 "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
 )
@@ -41,11 +42,18 @@ func ReceiveVersion(context HandleHandshakeContext, incomingRoute *router.Route,
 }
 func (flow *receiveVersionFlow) start() (*appmessage.NetAddress, error) {
+onEnd := logger.LogAndMeasureExecutionTime(log, "receiveVersionFlow.start")
+defer onEnd()
+log.Debugf("Starting receiveVersionFlow with %s", flow.peer.Address())
 message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
 if err != nil {
 return nil, err
 }
+log.Debugf("Got version message")
 msgVersion, ok := message.(*appmessage.MsgVersion)
 if !ok {
 return nil, protocolerrors.New(true, "a version message must precede all others")
@@ -72,7 +80,7 @@ func (flow *receiveVersionFlow) start() (*appmessage.NetAddress, error) {
 }
 // Disconnect from partial nodes in networks that don't allow them
-if !flow.DAG().Params.EnableNonNativeSubnetworks && msgVersion.SubnetworkID != nil {
+if !flow.Config().ActiveNetParams.EnableNonNativeSubnetworks && msgVersion.SubnetworkID != nil {
 return nil, protocolerrors.New(true, "partial nodes are not allowed")
 }
@@ -84,7 +92,7 @@ func (flow *receiveVersionFlow) start() (*appmessage.NetAddress, error) {
 isRemoteNodeFull := msgVersion.SubnetworkID == nil
 isOutbound := flow.peer.Connection().IsOutbound()
 if (isLocalNodeFull && !isRemoteNodeFull && isOutbound) ||
-(!isLocalNodeFull && !isRemoteNodeFull && !msgVersion.SubnetworkID.IsEqual(localSubnetworkID)) {
+(!isLocalNodeFull && !isRemoteNodeFull && !msgVersion.SubnetworkID.Equal(localSubnetworkID)) {
 return nil, protocolerrors.New(false, "incompatible subnetworks")
 }

View File

@@ -4,6 +4,7 @@ import (
 "github.com/kaspanet/kaspad/app/appmessage"
 "github.com/kaspanet/kaspad/app/protocol/common"
 peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
+"github.com/kaspanet/kaspad/infrastructure/logger"
 "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
 "github.com/kaspanet/kaspad/version"
 )
@@ -28,6 +29,7 @@ var (
 type sendVersionFlow struct {
 HandleHandshakeContext
 incomingRoute, outgoingRoute *router.Route
+peer *peerpkg.Peer
 }
@@ -46,13 +48,16 @@ func SendVersion(context HandleHandshakeContext, incomingRoute *router.Route,
 }
 func (flow *sendVersionFlow) start() error {
-selectedTipHash := flow.DAG().SelectedTipHash()
-subnetworkID := flow.Config().SubnetworkID
+onEnd := logger.LogAndMeasureExecutionTime(log, "sendVersionFlow.start")
+defer onEnd()
+log.Debugf("Starting sendVersionFlow with %s", flow.peer.Address())
 // Version message.
-localAddress := flow.AddressManager().GetBestLocalAddress(flow.peer.Connection().NetAddress())
+localAddress := flow.AddressManager().BestLocalAddress(flow.peer.Connection().NetAddress())
+subnetworkID := flow.Config().SubnetworkID
 msg := appmessage.NewMsgVersion(localAddress, flow.NetAdapter().ID(),
-flow.Config().ActiveNetParams.Name, selectedTipHash, subnetworkID)
+flow.Config().ActiveNetParams.Name, subnetworkID)
 msg.AddUserAgent(userAgentName, userAgentVersion, flow.Config().UserAgentComments...)
 // Advertise the services flag
@@ -70,9 +75,11 @@ func (flow *sendVersionFlow) start() error {
 }
 // Wait for verack
+log.Debugf("Waiting for verack")
 _, err = flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
 if err != nil {
 return err
 }
+log.Debugf("Got verack")
 return nil
 }

Some files were not shown because too many files have changed in this diff.