Compare commits

...

89 Commits

Author SHA1 Message Date
stasatdaglabs
d5f2495522 Use SerializeUTXODiff and DeserializeUTXODiff in utxoDiffStore and implement TestUTXODiffSerializationAndDeserialization. 2021-01-28 14:19:06 +02:00
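
The three serialization commits above pair the new SerializeUTXODiff/DeserializeUTXODiff helpers with a round-trip test. A self-contained sketch of that round-trip pattern, using a toy entry type and encoding/binary rather than kaspad's actual UTXODiff and helpers:

```go
package utxoexample

import (
	"bytes"
	"encoding/binary"
	"testing"
)

// toyEntry stands in for a UTXO entry; kaspad's real UTXODiff is richer.
type toyEntry struct {
	Amount         uint64
	BlockBlueScore uint64
	IsCoinbase     bool
}

// serialize writes the entry's fixed-size fields in little-endian order.
func serialize(entry *toyEntry) ([]byte, error) {
	var buf bytes.Buffer
	if err := binary.Write(&buf, binary.LittleEndian, entry); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// deserialize reads the fields back in the same order.
func deserialize(data []byte) (*toyEntry, error) {
	entry := &toyEntry{}
	err := binary.Read(bytes.NewReader(data), binary.LittleEndian, entry)
	return entry, err
}

// TestRoundTrip checks the property such tests verify:
// deserialize(serialize(x)) must equal x.
func TestRoundTrip(t *testing.T) {
	in := &toyEntry{Amount: 50_000, BlockBlueScore: 123, IsCoinbase: true}
	raw, err := serialize(in)
	if err != nil {
		t.Fatal(err)
	}
	out, err := deserialize(raw)
	if err != nil {
		t.Fatal(err)
	}
	if *in != *out {
		t.Fatalf("round-trip mismatch: %+v != %+v", in, out)
	}
}
```
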
stasatdaglabs
8588774837 Finish implementing SerializeUTXODiff and DeserializeUTXODiff. 2021-01-28 14:01:02 +02:00
stasatdaglabs
94c3f4b80c Begin implementing SerializeUTXODiff and DeserializeUTXODiff. 2021-01-28 13:48:17 +02:00
stasatdaglabs
a5a84e9215 Take buildTestUTXODiff out of the benchmark loops. 2021-01-28 12:58:47 +02:00
stasatdaglabs
1cec4c91cf Use a random number for utxoEntryScriptPublicKeyVersion as well. 2021-01-28 12:51:13 +02:00
stasatdaglabs
3c0b74208a Rename a couple of variables. 2021-01-28 12:49:45 +02:00
stasatdaglabs
08d983b84a Implement BenchmarkUTXODiffSerializationAndDeserialization. 2021-01-28 12:49:04 +02:00
Michael Sutton
c9b591f2d3 Final reindex algorithm (#1430)
* Mine JSON

* [Reindex tests] add test_params and validate_mining flag to test_consensus

* Rename file and extend tests

* Ignore local test datasets

* Use spaces over tabs

* Reindex algorithm - full algorithm, initial commit, some tests fail

* Reindex algorithm - a few critical fixes

* Reindex algorithm - move reindex struct and all related operations to new file

* Reindex algorithm - added a validateIntervals method and modified tests to use it (instead of exact comparisons)

* Reindex algorithm - modified reindexIntervals to receive the new child as argument and fixed an important related bug

* Reindex attack tests - move logic to helper function and add stretch test

* Reindex algorithm - variable names and some comments

* Reindex algorithm - minor changes

* Reindex algorithm - minor changes 2

* Reindex algorithm - extended stretch test

* Reindex algorithm - small fix to validate function

* Reindex tests - move tests and add DAG files

* go format fixes

* TestParams doc comment

* Reindex tests - exact comparisons are not needed

* Update to version 0.8.6

* Remove TestParams and use AddUTXOInvalidHeader instead

* Use gzipped test files

* This unintended change somehow slipped in through branch merges

* Rename test

* Move interval increase/decrease methods to reachability interval file

* Addressing a bunch of minor review comments

* Addressed a few more minor review comments

* Make code of offsetSiblingsBefore and offsetSiblingsAfter symmetric

* Optimize reindex logic in cases where reorg occurs + reorg test

* Do not change reindex root too fast (on reorg)

* Some comments

* A few more comments

* Addressing review comments

* Remove TestNoAttackAlternateReorg and assert chain attack

* Minor

Co-authored-by: Elichai Turkel <elichai.turkel@gmail.com>
Co-authored-by: Mike Zak <feanorr@gmail.com>
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2021-01-27 17:09:20 +02:00
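
The validateIntervals bullet in the reindex PR refers to the invariants of reachability intervals: a node is an ancestor of another exactly when its interval contains the other's. A sketch of such a validation pass, over hypothetical interval and node types rather than kaspad's actual reachability structures:

```go
package reindexsketch

import "fmt"

// interval is a hypothetical stand-in for a reachability tree interval:
// v is an ancestor of u exactly when u's interval nests inside v's.
type interval struct{ start, end uint64 }

type node struct {
	interval interval
	children []*node // ordered left to right
}

func (i interval) contains(other interval) bool {
	return i.start <= other.start && other.end <= i.end
}

// validateIntervals checks the invariants a reindex must preserve:
// each child's interval nests inside its parent's, and sibling
// intervals are disjoint and ordered.
func validateIntervals(root *node) error {
	for i, child := range root.children {
		if !root.interval.contains(child.interval) {
			return fmt.Errorf("child %d escapes its parent's interval", i)
		}
		if i > 0 && root.children[i-1].interval.end >= child.interval.start {
			return fmt.Errorf("siblings %d and %d overlap", i-1, i)
		}
		if err := validateIntervals(child); err != nil {
			return err
		}
	}
	return nil
}
```
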
Ori Newman
8d6e71d490 Add IBD test cases and check for MsgUnexpectedPruningPoint on receivePruningPointBlock as well (#1459)
* Check for MsgUnexpectedPruningPoint on receivePruningPointBlock as well

* Add IBD test cases

* Revert "Check for MsgUnexpectedPruningPoint on receivePruningPointBlock as well"

This reverts commit 6a6d1ea180.

* Change log level for two logs

* Remove "testing a situation where the pruning point moved during IBD (before sending the pruning point block)"
2021-01-27 16:42:42 +02:00
Elichai Turkel
2823461fe2 Change AddressKey from string to port+ipv6 address (#1458) 2021-01-27 16:09:32 +02:00
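
A sketch of the idea behind #1458, with an assumed field layout: a fixed-size struct of port plus 16 IP bytes is comparable, so it can key a map directly with no string formatting or parsing:

```go
package addrsketch

import "net"

// addressKey is a sketch of the new comparable key; the exact layout
// in kaspad is an assumption here.
type addressKey struct {
	port    uint16
	address [16]byte // IPv6 bytes; IPv4 is stored in mapped form
}

// newAddressKey builds a key from an IP and port. To16 maps IPv4
// addresses into the IPv6 space (and returns nil for invalid IPs,
// so callers should validate first).
func newAddressKey(ip net.IP, port uint16) addressKey {
	key := addressKey{port: port}
	copy(key.address[:], ip.To16())
	return key
}
```
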
Elichai Turkel
2075c585da Fix race condition in kaspaminer (#1455)
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2021-01-27 13:08:35 +02:00
Elichai Turkel
01aee62cb0 Add log and measure to pruning points (#1457) 2021-01-27 11:40:51 +02:00
Ori Newman
a6ee871f7e Increase maxSelectedParentTimeDiffToAllowMiningInMilliSeconds to one hour (#1456) 2021-01-27 11:04:58 +02:00
Mike Zak
6393a8186a Update to version 0.9.0 2021-01-27 09:22:56 +02:00
stasatdaglabs
3916534a7e In kaspactl, prettify responses before printing them (#1453)
* In kaspactl, prettify responses before printing them.

* Indent with four spaces instead of a tab.

* Unwrap the response.

* Simplify unwrapping the response.

* Don't unwrap responses.

* Use protojson.MarshalOptions for prettification.
2021-01-27 09:13:48 +02:00
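
A minimal sketch of the approach the last bullet describes, using protojson.MarshalOptions from google.golang.org/protobuf; the printPretty wrapper itself is hypothetical:

```go
package prettysketch

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/proto"
)

// printPretty prettifies a protobuf RPC response directly via protojson,
// instead of marshaling and re-indenting JSON by hand.
func printPretty(response proto.Message) error {
	marshalOptions := protojson.MarshalOptions{
		Multiline: true,
		Indent:    "    ", // four spaces, as the commit specifies
	}
	prettyJSON, err := marshalOptions.Marshal(response)
	if err != nil {
		return err
	}
	fmt.Println(string(prettyJSON))
	return nil
}
```
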
Elichai Turkel
0561347ff1 Remove default dns/grpc seeders (#1454) 2021-01-26 17:40:54 +02:00
Svarog
fb11981da1 Make kaspactl usable by human beings (#1452)
* Add request_types and their help

* Added command parser

* Updated main to use command if it's specified

* Some progress in making everything work

* Fix command parser for pointers to structs

* Cleanup code

* Enhance usage text

* Fixed typo

* Some minor style fixing, and remove temporary code

* Correctly fallthrough in stringToValue unsupported types

Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
2021-01-26 14:22:35 +02:00
Ori Newman
1742e76af7 Add TestHandleRelayInvsErrors (#1450) 2021-01-26 13:31:28 +02:00
Svarog
ddfe376388 Don't ban peers that sent a requested duplicate block (#1440)
* Ignore ErrDuplicateBlock for any blocks that were requested

* Don't check for DuplicateBlockErr in processHeaders + improve logs

* Return the check for ErrDuplicateBlock in processHeader

* Fix log message
2021-01-26 08:42:04 +02:00
Elichai Turkel
52da65077a codecov: Ignore protobuf autogenerated files (#1449) 2021-01-25 16:58:11 +02:00
Elichai Turkel
ed85f09742 Add P2P/RPC name to the gRPC logs (#1448)
* Add P2P/RPC name to the gRPC logs

* Add p2p/rpc name in more logs
2021-01-25 16:41:43 +02:00
stasatdaglabs
819ec9f2a7 Pass fromOutpoint into GetPruningPointUTXOs instead of offset, so it could Seek over the cursor (#1447)
* Implement TestGetPruningPointUTXOs.

* Fix a bad error message.

* Fix TestGetPruningPointUTXOs for testnet.

* Make sure all the UTXOs are returned in TestGetPruningPointUTXOs.

* Implement BenchmarkGetPruningPointUTXOs.

* Pass fromOutpoint into GetPruningPointUTXOs instead of offset, so it could Seek over the cursor.

* Fix weird benchmark timer calls.

* Remove unnecessary collection of outpointAndUTXOEntryPairs from BenchmarkGetPruningPointUTXOs.

* Fix a comment.
2021-01-25 13:38:59 +02:00
Ori Newman
7ea8a72a9e Add TestReceiveAddressesErrors (#1446)
* Add TestReceiveAddressesErrors

* Change errors to be more descriptive

* Fix checkFlowError
2021-01-24 17:35:20 +02:00
stasatdaglabs
ca04c049ab When the pruning point moves, update its UTXO set outside of a database transaction (#1444)
* Remove pruningPointUTXOSetStaging and implement UpdatePruningPointUTXOSet.

* Implement StageStartSavingNewPruningPointUTXOSet, HadStartedSavingNewPruningPointUTXOSet, and FinishSavingNewPruningPointUTXOSet.

* Fix a bad return.

* Implement UpdatePruningPointUTXOSetIfRequired.

* Call UpdatePruningPointUTXOSetIfRequired on consensus creation.

* Rename savingNewPruningPointUTXOSetKey to updatingPruningPointUTXOSet.

* Add a log.

* Add calls to runtime.GC() at its start and end.

* Rename a variable.

* Replace calls to runtime.GC to calls to LogMemoryStats.

* Wrap the contents of LogMemoryStats in a log closure.
2021-01-24 14:48:11 +02:00
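
The Stage/HadStarted/Finish trio above is a crash-recovery pattern: because the update runs outside a database transaction, a persistent mark brackets it so an interrupted run can be detected and redone on startup. A sketch over a hypothetical store interface:

```go
package recoverysketch

// flagStore is a hypothetical key-value interface; kaspad's stores differ.
type flagStore interface {
	Set(key, value []byte) error
	Has(key []byte) (bool, error)
	Delete(key []byte) error
}

var updatingPruningPointUTXOSetKey = []byte("updating-pruning-point-utxo-set")

// updatePruningPointUTXOSet marks that a long, non-transactional update
// has started, runs it, and clears the mark only on success.
func updatePruningPointUTXOSet(store flagStore, doUpdate func() error) error {
	if err := store.Set(updatingPruningPointUTXOSetKey, []byte{1}); err != nil {
		return err
	}
	if err := doUpdate(); err != nil {
		return err
	}
	return store.Delete(updatingPruningPointUTXOSetKey)
}

// updateIfRequired runs on startup: a mark that is still set means a
// previous run crashed mid-update, so the update is redone.
func updateIfRequired(store flagStore, doUpdate func() error) error {
	started, err := store.Has(updatingPruningPointUTXOSetKey)
	if err != nil || !started {
		return err
	}
	return updatePruningPointUTXOSet(store, doUpdate)
}
```
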
Ori Newman
9a17198e7d Remove redundant type check (#1445) 2021-01-24 14:14:03 +02:00
stasatdaglabs
756f40c59a Sync pruning point UTXO sets incrementally instead of all at once (#1431)
* Replaced the content of MsgIBDRootUTXOSetChunk with pairs of outpoint-utxo entry pairs.

* Rename utxoIter to utxoIterator.

* Add a big stinky TODO on an assert.

* Replace pruningStore staging with a UTXO set iterator.

* Reimplement receiveAndInsertIBDRootUTXOSet.

* Extract OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs into domainconverters.go.

* Pass the outpoint and utxo entry pairs to the pruning store.

* Implement InsertCandidatePruningPointUTXOs.

* Implement ClearCandidatePruningPointUTXOs.

* Implement UpdateCandidatePruningPointMultiset.

* Use the candidate pruning point multiset in updatePruningPoint.

* Implement CandidatePruningPointUTXOIterator.

* Use the pruning point utxo set iterator for StageVirtualUTXOSet.

* Defer ClearCandidatePruningPointUTXOs.

* Implement OverwriteVirtualUTXOSet.

* Implement CommitCandidatePruningPointUTXOSet.

* Implement BeginOverwritingVirtualUTXOSet and FinishOverwritingVirtualUTXOSet.

* Implement overwriteVirtualUTXOSetAndCommitPruningPointUTXOSet.

* Rename ClearCandidatePruningPointUTXOs to ClearCandidatePruningPointData.

* Add missing methods to dbManager.

* Implement PruningPointUTXOs.

* Implement RecoverUTXOIfRequired.

* Delete the utxoserialization package.

* Fix compilation errors in TestValidateAndInsertPruningPoint.

* Switch order of operations in the if statements in PruningPointUTXOs so that Next() wouldn't be unnecessarily called.

* Fix missing pruning point utxo set staging and bad slice length.

* Fix no default multiset in InsertCandidatePruningPointUTXOs.

* Make go vet happy.

* Rename candidateXXX to importedXXX.

* Do some more renaming.

* Rename some more.

* Fix bad MsgIBDRootNotFound logic.

* Fix an error message.

* Simplify receiveIBDRootBlock.

* Fix error message in receiveAndInsertIBDRootUTXOSet.

* Do some more renaming.

* Fix merge errors.

* Fix a bug caused by calling iterator.First() unnecessarily.

* Remove databaseContext from stores and don't use a transaction in ClearXXX functions.

* Simplify receiveAndInsertIBDRootUTXOSet.

* Fix offset count in PruningPointUTXOs().

* Fix readOnlyUTXOIteratorWithDiff.First().

* Split handleRequestIBDRootUTXOSetAndBlockFlow into smaller methods.

* Rename IbdRootNotFound to UnexpectedPruningPoint.

* Rename requestIBDRootHash to requestPruningPointHash.

* Rename IBDRootHash to PruningPointHash.

* Rename RequestIBDRootUTXOSetAndBlock to RequestPruningPointUTXOSetAndBlock.

* Rename IBDRootUTXOSetChunk to PruningPointUTXOSetChunk.

* Rename RequestNextIBDRootUTXOSetChunk to RequestNextPruningPointUTXOSetChunk.

* Rename DoneIBDRootUTXOSetChunks to DonePruningPointUTXOSetChunks.

* Rename remaining references to IBD root.

* Fix an error message.

* Add a check for HadStartedImportingPruningPointUTXOSet in commitVirtualUTXODiff.

* Add a check for HadStartedImportingPruningPointUTXOSet in ImportPruningPointUTXOSetIntoVirtualUTXOSet.

* Move FinishImportingPruningPointUTXOSet closer to HadStartedImportingPruningPointUTXOSet.

* Remove reference to pruningStore in utxoSetIterator.

* Pointerify utxoSetIterator receivers.

* Fix bad insert in CommitImportedPruningPointUTXOSet.

* Rename commitImportedPruningPointUTXOSetAll to applyImportedPruningPointUTXOSet.

* Simplify PruningPointUTXOs.

* Add populateTransactionWithUTXOEntriesFromUTXOSet.

* Fix a TODO comment.

* Rename InsertImportedPruningPointUTXOs to AppendImportedPruningPointUTXOs.

* Extract handleRequestPruningPointUTXOSetAndBlockMessage to a separate method.

* Rename stuff in readOnlyUTXOIteratorWithDiff.First().

* Address toAddIterator in readOnlyUTXOIteratorWithDiff.First().

* Call First() before any full iteration on ReadOnlyUTXOSetIterator.

* Call First() before any full iteration on a database Cursor.

* Put StartImportingPruningPointUTXOSet inside the pruning point transaction.

* Make serializeOutpoint and serializeUTXOEntry free functions in pruningStore.

* Fix readOnlyUTXOIteratorWithDiff.First().

* Fix bad validations in importPruningPoint.

* Remove superfluous call to validateBlockTransactionsAgainstPastUTXO.
2021-01-21 17:24:52 +02:00
Elichai Turkel
6a03d31f98 Remove accidental pointer indirection in dbKey (#1441) 2021-01-21 12:14:52 +02:00
Ori Newman
319ab6cfcd Always request orphan roots, even when you get an inv of a known orphan (#1436)
Co-authored-by: Svarog <feanorr@gmail.com>
2021-01-20 10:58:45 +02:00
Ori Newman
abef96e3de Add TestIBDWithPruning (#1425)
* Add TestIBDWithPruning

* Test block count

* Fix a typo

Co-authored-by: Elichai Turkel <elichai.turkel@gmail.com>
Co-authored-by: Mike Zak <feanorr@gmail.com>
2021-01-20 10:07:32 +02:00
stasatdaglabs
2e0bc0f8c4 Increase P2P connections' dial timeouts to 5 seconds (#1437)
Co-authored-by: Elichai Turkel <elichai.turkel@gmail.com>
2021-01-20 09:48:48 +02:00
Ori Newman
acf5423c63 Remove docker test parallelism (#1434)
* Remove docker parallelism

* Remove redundant dash
2021-01-19 17:30:24 +02:00
Ori Newman
effb545d20 Fix wrong condition and add logs (#1435) 2021-01-19 17:20:25 +02:00
Svarog
ad9c213a06 Restructure database to prevent double-slashes in keys, causing bugs in cursors (#1432)
* Add TestValidateAndInsertPruningPointWithSideBlocks

* Optimize infrastructure bucket paths

* Update infrastructure tests

* Refactor the consensus/database layer

* Remove utils/dbkeys

* Use consensus/database in consensus instead of infrastructure

* Fix a bug in dbBucketToDatabaseBucket and MakeBucket combination

Co-authored-by: Elichai Turkel <elichai.turkel@gmail.com>
Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
2021-01-19 14:19:08 +02:00
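
A sketch of the hazard this restructure addresses, with an illustrative join helper: naive concatenation of bucket components can produce double separators, which a cursor seeded with the clean prefix will never match:

```go
package dbkeysketch

import "bytes"

const separator = "/"

// bucketPath joins bucket components with exactly one separator between
// and after each, no matter how the inputs are terminated. Naive
// concatenation can yield "parent//child", and a cursor seeking the
// clean prefix "parent/child/" then silently skips those keys.
func bucketPath(components ...[]byte) []byte {
	var path []byte
	for _, component := range components {
		component = bytes.TrimRight(component, separator)
		path = append(path, component...)
		path = append(path, separator...)
	}
	return path
}
```
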
Svarog
a4adbabf96 TestBuildBlockErrorCases and remove redundant check of coinbase script length (#1427)
* Write structure of TestBlockBuilderErrorCases

* Remove double verification of script length in serializeCoinbasePayload

* Remove redundant code in TestBuildBlockErrorCases

* Rename povTransactionHash -> povBlockHash

* Convert coinbasePayloadScriptPublicKeyMaxLength to uint8

* Re-use consensus in TestBuildBlockErrorCases
2021-01-19 10:37:51 +02:00
Ori Newman
799eb7515c Test validateAndInsertPruningPoint (#1420)
* Add TestValidateAndInsertPruningPoint

* Check fake UTXO set and validate that the pruning point changed
2021-01-18 18:17:13 +02:00
Mike Zak
0769705b37 Update to version 0.8.6 2021-01-18 15:17:15 +02:00
Svarog
189e3b6be9 Fix missing utxo notifications (#1428)
* fix missing UTXO notifications (#1426)

* Remove redundant semicolon

Co-authored-by: aspect <anton.yemelyanov@gmail.com>
2021-01-18 13:08:16 +02:00
Mike Zak
e8dfbc8367 Merge remote-tracking branch 'origin/master' into v0.8.5-dev 2021-01-18 11:36:25 +02:00
Ori Newman
d70740331a Remove hashesQueueSet (#1424)
Co-authored-by: Svarog <feanorr@gmail.com>
2021-01-18 09:10:26 +02:00
Svarog
9a81b1328a Add the address of the connected node to the send/receiveVersion logs (#1423)
* Add the address of the connected node to the send/receiveVersion logs

* Don't call functions before LogAndMeasureExecutionTime

Co-authored-by: Ori Newman <orinewman1@gmail.com>
2021-01-17 16:31:48 +02:00
Ori Newman
d4f3a252ff Add TestIsFinalizedTransaction (#1422)
Co-authored-by: Svarog <feanorr@gmail.com>
2021-01-17 15:47:49 +02:00
Ori Newman
f14527de4c Give different limit to the RPC server (#1421) 2021-01-17 13:58:42 +02:00
Ori Newman
dd57e6abe6 Fix checkParentHeadersExist and cover pruning_violation_proof_of_work_and_difficulty.go with tests (#1418)
* Fix checkParentHeadersExist and cover pruning_violation_proof_of_work_and_difficulty.go with tests

* Remove unused variable

* Change consensus violation

* Change condition order

* Get rid of irrelevant error codes in extractRejectCode

* Fix wrong test db names

* Fix checkParentHeadersExist
2021-01-17 11:27:04 +02:00
Ori Newman
67be4d82bf Don't mark bad merkle root as invalid (#1419)
* Don't mark bad merkle root as invalid

* Fix TestBlockStatus

* Move discardAllChanges inside the inner if
2021-01-17 10:40:05 +02:00
Ori Newman
a1381d6768 Add TestCheckParentBlockBodiesExist (#1405)
* Add TestCheckParentBlockBodiesExist

* Use block in pruning point's anticone for the test

* Fix test db name
2021-01-14 13:31:17 +02:00
Ori Newman
10b519a3e2 Add tests to ValidateHeaderInIsolation (#1415)
* Add tests to ValidateHeaderInIsolation

* Fix tests db names

Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
2021-01-14 11:06:08 +02:00
Ori Newman
a35f8269ea Add checkBlockIsNotPruned (#1413)
* Add checkBlockIsNotPruned

* Fix test name and comment

Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
2021-01-14 10:56:22 +02:00
stasatdaglabs
15af6641fc Send the IBD root UTXO set in chunks instead of a massive monolithic message (#1412)
* Extract syncPruningPointUTXOSet to a separate method.

* Implement logic to send pruning point utxo set chunks in a loop.

* Replace IBDRootUTXOSetAndBlockMessage with IbdRootUtxoSetChunkMessage.

* Add a new message: RequestNextIBDRootUTXOSetChunk.

* Add a new message: DoneIBDRootUTXOSetChunks.

* Protect HandleRequestIBDRootUTXOSetAndBlock from rogue messages.

* Reimplement receiveIBDRootUTXOSetAndBlock.

* Add CmdDoneIBDRootUTXOSetChunks to the HandleRelayInvs flow.

* Decrease the max message size to 10mb.

* Fix bad step.

* Fix confusion between outgoing/incoming routes.

* Measure how long it takes to send/receive the UTXO set.

* Use LogAndMeasure in handleRequestIBDRootUTXOSetAndBlockFlow.
2021-01-13 18:03:07 +02:00
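
A sketch of the chunking handshake these commits introduce, over hypothetical message types: the sender emits bounded chunks, waits for the peer's request-next message between chunks, and closes with a done message:

```go
package chunksketch

// Hypothetical message and pair types standing in for kaspad's appmessages.
type message interface{}
type pair struct{ outpoint, utxoEntry []byte }
type chunkMessage struct{ pairs []pair }
type doneMessage struct{}

const maxPairsPerChunk = 1000 // illustrative; the real bound is size-based

// sendUTXOSetInChunks streams the UTXO set in bounded chunks so no single
// message approaches the lowered message-size limit. Each subsequent chunk
// is gated on the peer requesting it.
func sendUTXOSetInChunks(outgoing chan<- message, incoming <-chan message, pairs []pair) {
	for len(pairs) > 0 {
		n := maxPairsPerChunk
		if len(pairs) < n {
			n = len(pairs)
		}
		outgoing <- chunkMessage{pairs: pairs[:n]}
		pairs = pairs[n:]
		if len(pairs) > 0 {
			<-incoming // the peer's RequestNextChunk gates the next send
		}
	}
	outgoing <- doneMessage{}
}
```
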
Svarog
1b97cfb302 Prevent a race condition in findHighestSharedBlockHash where we get headersSelectedTip and then pass it as highHash to GetBlockLocator, without locking consensus (#1410)
* Prevent a race condition in findHighestSharedBlockHash where we get headersSelectedTip and then pass it as highHash to GetBlockLocator, without locking consensus

* Restart findHighestSharedBlockHash if lowHash or highHash are no longer in selectedParentChain

* Test for specifically ErrBlockNotInSelectedParentChain instead of database NotFound error

* Fix TestCreateHeadersSelectedChainBlockLocator

Co-authored-by: Ori Newman <orinewman1@gmail.com>
2021-01-13 17:55:37 +02:00
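
A sketch of the race this PR fixes, with a hypothetical consensus stand-in: reading the tip and building the locator in two separate critical sections lets a reorg slip in between, so both steps must share one lock:

```go
package racesketch

import "sync"

type hash [32]byte

// consensus is a hypothetical stand-in; kaspad guards its consensus
// object with a similar lock.
type consensus struct {
	lock sync.Mutex
	tip  hash
}

func (c *consensus) locatorNoLock(low, high hash) []hash {
	return []hash{high, low} // placeholder body
}

// Racy: the tip read and the locator build are two critical sections,
// and a reorg can land between them.
func (c *consensus) racyLocator(low hash) []hash {
	c.lock.Lock()
	high := c.tip
	c.lock.Unlock()

	c.lock.Lock()
	defer c.lock.Unlock()
	return c.locatorNoLock(low, high) // high may be stale here
}

// Fixed: one critical section covers both steps.
func (c *consensus) safeLocator(low hash) []hash {
	c.lock.Lock()
	defer c.lock.Unlock()
	return c.locatorNoLock(low, c.tip)
}
```
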
Ori Newman
61be80a60c Add TestCheckMergeSizeLimit (#1408)
Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
2021-01-13 16:19:52 +02:00
Elichai Turkel
83134cc2b5 Add a codecov yml, disable patch checks and make status checks always pass (#1414) 2021-01-13 15:57:57 +02:00
Svarog
4988817da1 Reject SubmitBlock if the node is in IBD (#1409)
* Reject SubmitBlock if the node is in IBD

* Add comments

* Don't use iota for RejectReason constants, since in .proto those are hard-coded
2021-01-13 15:04:55 +02:00
Elichai Turkel
68bd8330ac Log the network's hashrate (#1406)
* Log the hashrate of each block

* Add a test for GetHashrateString

* Move difficulty related functions to its own package

* Convert the validated log in validateAndInsertBlock to a log function

* Add tests for max/min int
2021-01-13 12:51:23 +02:00
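
The arithmetic behind hashrate logging, as a hedged sketch rather than kaspad's GetHashrateString: a block mined at target T takes about 2^256/(T+1) hash attempts on average, and dividing by the block interval gives hashes per second:

```go
package hashratesketch

import (
	"math/big"
	"time"
)

// estimateHashesPerSecond estimates network hashrate from a block's
// difficulty target. It assumes blockInterval is at least one second.
func estimateHashesPerSecond(target *big.Int, blockInterval time.Duration) *big.Int {
	expectedHashes := new(big.Int).Lsh(big.NewInt(1), 256) // 2^256
	expectedHashes.Div(expectedHashes, new(big.Int).Add(target, big.NewInt(1)))

	seconds := big.NewInt(int64(blockInterval / time.Second))
	return expectedHashes.Div(expectedHashes, seconds)
}
```
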
Elichai Turkel
192dd2ba8f Add codecov to github (#1358) 2021-01-13 10:35:12 +02:00
Ori Newman
cc49b1826a Reset windowExpectedEndTime after each window (#1407) 2021-01-12 21:34:26 +02:00
Svarog
ce348373c6 Delete existing UTXOSet when committing VirtualUTXOSet. (#1403) 2021-01-12 16:51:56 +02:00
talelbaz
8ad5725421 Add a test for the error cases of the function checkBlockStatus() (#1398)
* Add a test for the error cases of the function checkBlockStatus.

* Address review comments.

* Move test to validateandinsertblock_test.go

Co-authored-by: tal <tal@daglabs.com>
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2021-01-12 15:55:02 +02:00
Ori Newman
23a2fbf401 Remove erroneous finality optimization from LowestChainBlockAboveOrEqualToBlueScore (#1402)
* Remove erroneous finality optimization from LowestChainBlockAboveOrEqualToBlueScore

* Add TestLowestChainBlockAboveOrEqualToBlueScore

* Remove unnecessary fields from dagTraversalManager
2021-01-12 15:27:08 +02:00
Elichai Turkel
c1361e5b3e Change log sizes and add some new features to logger (#1400)
* Increase default log sizes, and increase kaspad log sizes

* Add an option to not print logs to stdout

* Allow logs to be printed in the current working directory

* Add more pruning related logs

* Add comment and increase log rotations to save last 64 logs
2021-01-12 13:26:29 +02:00
Ori Newman
53744ceb45 Compare transaction IDs with Equal (#1401) 2021-01-12 12:53:33 +02:00
Ori Newman
bcf2302460 Add high hash to block locator, and add block locator tests (#1397)
* Include high hash in the block locator

* Add tests for block locator

* Remove redundant function

* Remove redundant assignments
2021-01-12 11:16:25 +02:00
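
A sketch of the locator shape these tests pin down, over a hypothetical genesis-to-tip chain slice: exponentially growing steps keep the locator logarithmic while both the high and low hashes are always included:

```go
package locatorsketch

type hash [32]byte

// blockLocator walks the selected chain from highHash toward lowHash with
// doubling step sizes, producing an O(log n) list of hashes that a peer
// can use to find the highest shared block.
func blockLocator(chain []hash, lowIndex, highIndex int) []hash {
	locator := []hash{}
	step := 1
	for index := highIndex; index > lowIndex; index -= step {
		locator = append(locator, chain[index])
		step *= 2
	}
	return append(locator, chain[lowIndex]) // lowHash is always included
}
```
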
Ori Newman
6101e6bdb6 Fix UTXO serialization, its test, and the static check that missed it (#1396)
* Fix UTXO serialization, its test, and the static check that missed it

* Remove duplicate case

* Use one line for static check

Co-authored-by: Elichai Turkel <elichai.turkel@gmail.com>
2021-01-11 17:45:17 +02:00
stasatdaglabs
d9b97afb92 Don't swallow errors in HandleNewBlockTransactions. (#1390)
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2021-01-11 17:16:15 +02:00
Ori Newman
b8ca33d91d Add selected chain store and optimize block locator with it (#1394)
* Add selected chain store and optimize block locator with it

* Fix build error

* Fix comments

* Fix IsStaged

* Rename CalculateSelectedParentChainChanges to CalculateChainPath and SelectedParentChainChanges->SelectedChainPath

* Use binary.LittleEndian directly to allow compiler optimizations

* Remove boolean from HeadersSelectedChainStore interface

* Prevent endless loop in block locator
2021-01-11 15:51:45 +02:00
Svarog
c7deda41c6 Fix deserialization of script version in UTXOSet deserialization (#1395)
* Initialize protoUTXOSetIterator with index = -1

* Handle error when failed to deserialize Script version

* Add support for (de)serialization of (u)int16

* Log the error when converting it into ErrMalformedUTXO
2021-01-11 15:23:27 +02:00
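
A minimal sketch of the (u)int16 support the third bullet adds, using the standard library's binary.LittleEndian; the helper names are illustrative:

```go
package u16sketch

import (
	"encoding/binary"
	"io"
)

// serializeUint16 writes a uint16 as two little-endian bytes.
func serializeUint16(w io.Writer, value uint16) error {
	var buf [2]byte
	binary.LittleEndian.PutUint16(buf[:], value)
	_, err := w.Write(buf[:])
	return err
}

// deserializeUint16 reads exactly two bytes and decodes them.
func deserializeUint16(r io.Reader) (uint16, error) {
	var buf [2]byte
	if _, err := io.ReadFull(r, buf[:]); err != nil {
		return 0, err
	}
	return binary.LittleEndian.Uint16(buf[:]), nil
}
```
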
talelbaz
434cf45112 Add a new test to validate POW, and fix mainnet and testnet genesis block data. (#1389)
* Commit in order to do fetch & merge

* Add a new test to validate POW, and fix mainnet and testnet genesis block data.

* Fix the window test for testnet and change the expected pruning point for mainnet and testnet.

* Delete the function "solveBlock" in proof_of_work_test.go and call mining.SolveBlock instead. Also, remove the use of randomness in the "solveBlockWithWrongPOW" function.

* Replace 0xFFFFFFFFFFFFFFFF with math.MaxUint64 in the "solveBlockWithWrongPOW" function and change the comment of the "TestPOW" function

* Replace 0xFFFFFFFFFFFFFFFF with math.MaxUint64 in the "solveBlockWithWrongPOW" function and change the comment of the "TestPOW" function

* Change from <= to < in the for statement in "solveBlockWithWrongPOW" function

* Add one argument to the "NewTestConsensus" function call (the function signature has changed).

Co-authored-by: tal <tal@daglabs.com>
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2021-01-11 13:15:26 +02:00
Elichai Turkel
2cc0bf1639 Optimize block locator using finality store (#1386)
* Make sure block locator doesn't include a hash lower than the lowHash in
the block locator

* Use finalityStore to optimize LowestChainBlockAboveOrEqualToBlueScore
2021-01-10 13:36:02 +02:00
Ori Newman
0f2d0d45b5 Add TargetBlocksPerSecond for kaspaminer (#1385)
Co-authored-by: Svarog <feanorr@gmail.com>
2021-01-10 12:44:24 +02:00
Svarog
09e1a73340 Added some logs to block-relay and IBD flows (#1384)
Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
2021-01-10 12:05:34 +02:00
Svarog
c6d20c1f6f Start IBDBlockLocator from PruningPoint instead of Genesis (#1383) 2021-01-10 11:06:06 +02:00
Svarog
49e0a2a2e7 Add basic support for archival node (#1370)
* Add archival cli flag

* If --archival was activated - don't delete anything

* Fix tests

* Still change block status to StatusHeaderOnly even in archival nodes
2021-01-10 10:25:15 +02:00
Ori Newman
285ae5cd40 Update READMEs and add CONTRIBUTING.md (#1381)
* Update READMEs and add CONTRIBUTING.md

* Update go version

* Update READMEs and CONTRIBUTING.md

* Update README.md

* Update README.md

* Update README.md
2021-01-10 09:13:00 +02:00
stasatdaglabs
541205904e Add RPC documentation (#1379)
* Split messages.proto to p2p and rpc.

* Split messages.proto to p2p and rpc.

* Write a short intro to the RPC docs.

* Start documenting RPC calls.

* Use a custom protoc-gen-doc.

* Continue writing RPC documentation.

* Finish writing RPC documentation.

* Fix a formatting error.

* Fix merge errors.

* Fix formatting into protowire/README.md.

* Rerun go generate ..

Co-authored-by: Ori Newman <orinewman1@gmail.com>
2021-01-07 16:55:47 +02:00
Ori Newman
256b7f25f1 Decrease grpc client dial timeout to one second (#1378) 2021-01-07 16:26:37 +02:00
Elichai Turkel
82d95c59a3 Increase log files sizes (#1374) 2021-01-07 11:17:13 +02:00
Ori Newman
79c5d4595e Remove gencerts (#1371) 2021-01-07 10:00:13 +02:00
stasatdaglabs
b195301a99 Add IsIBDPeer to GetConnectedPeerInfoResponse. (#1367)
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2021-01-06 17:33:51 +02:00
Ori Newman
4ad89056c2 Implement getMempoolEntries (#1369)
* Implement getMempoolEntries

* Fix comment

* Fix comment
2021-01-06 17:26:21 +02:00
Elichai Turkel
64f6cd2178 Merge pull request #1368 from kaspanet/v0.8.4-dev
Upgrade master to 0.8.4rc0
2021-01-06 14:43:39 +02:00
Mike Zak
26368cd674 Update to version 0.8.5 2021-01-06 14:04:46 +02:00
alexandratran
f6dfce8180 Update README.md 2020-11-26 21:46:57 -08:00
Yuval Shaul
a361d62945 Merge remote-tracking branch 'origin/v0.6.2-dev' 2020-08-16 13:51:25 +03:00
Yuval Shaul
aac173ed72 Merge remote-tracking branch 'origin/v0.6.1-dev' 2020-08-12 10:48:51 +03:00
stasatdaglabs
5f3fb0bf9f [NOD-1238] Fix acceptance index never being initialized. (#859) 2020-08-11 12:04:54 +03:00
Mike Zak
61f383a713 Merge remote-tracking branch 'origin/v0.6.0-dev' 2020-08-09 09:09:27 +03:00
Mike Zak
c62bdb2fa1 Merge remote-tracking branch 'origin/v0.5.0-dev' 2020-07-01 15:05:02 +03:00
Svarog
c88869778d [NOD-869] Add a print after os.Exit(1) to see if it is ever called (#701) 2020-04-22 11:37:30 +03:00
Ori Newman
3fd647b291 [NOD-858] Don't switch sync peer if the syncing process hasn't yet started with the current sync peer (#700)
* [NOD-858] Don't switch sync peer if the syncing process hasn't yet started with the current sync peer

* [NOD-858] SetShouldSendBlockLocator(false) on OnBlockLocator

* [NOD-858] Rename shouldSendBlockLocator->wasBlockLocatorRequested

* [NOD-858] Move panic to shouldReplaceSyncPeer
2020-04-13 15:49:46 +03:00
Mike Zak
2f255952b7 Updated to version v0.3.1 2020-04-13 15:10:27 +03:00
273 changed files with 21275 additions and 13305 deletions

.codecov.yml (new file, 9 lines)

@@ -0,0 +1,9 @@
coverage:
status:
patch: off
project:
default:
informational: true
ignore:
- "**/*.pb.go" # Ignore all auto generated protobuf structures.


@@ -49,3 +49,22 @@ jobs:
- name: Test
shell: bash
run: ./build_and_test.sh
coverage:
runs-on: ubuntu-20.04
name: Produce code coverage
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: 1.14
- name: Create coverage file
# Because of https://github.com/golang/go/issues/27333 this seems to "fail" even though nothing is wrong, so ignore the failure
run: go test -json -covermode=atomic -coverpkg=./... -coverprofile coverage.txt ./... || true
- name: Upload coverage file
run: bash <(curl -s https://codecov.io/bash)

CONTRIBUTING.md (new file, 19 lines)

@@ -0,0 +1,19 @@
# Contributing to Kaspad
Any contribution to Kaspad is very welcome.
## Getting started
If you want to start contributing to Kaspad and don't know where to start, you can pick an issue from
the [list](https://github.com/kaspanet/kaspad/issues).
If you want to make a big change, it's better to discuss it first by opening an issue or talking about it in
[Discord](https://discord.gg/WmGhhzk) to avoid duplicate work.
## Pull Request process
Any pull request should be opened against the development branch of the target version. The development branch format is
as follows: `vx.y.z-dev`, for example: `v0.8.5-dev`.
All pull requests should pass the checks written in `build_and_test.sh`, so it's recommended to run this script before
submitting your PR.


@@ -9,12 +9,16 @@ Warning: This is pre-alpha software. There's no guarantee anything works.
Kaspad is the reference full node Kaspa implementation written in Go (golang).
This project is currently under active development and is in a pre-Alpha state.
Some things still don't work and APIs are far from finalized. The code is provided for reference only.
## What is kaspa
Kaspa is an attempt at a proof-of-work cryptocurrency with instant confirmations and sub-second block times. It is based on [the PHANTOM protocol](https://eprint.iacr.org/2018/104.pdf), a generalization of Nakamoto consensus.
## Requirements
Latest version of [Go](http://golang.org) (currently 1.13).
Go 1.14 or later.
## Installation
@@ -27,23 +31,17 @@ Latest version of [Go](http://golang.org) (currently 1.13).
```bash
$ go version
$ go env GOROOT GOPATH
```
NOTE: The `GOROOT` and `GOPATH` above must not be the same path. It is
recommended that `GOPATH` is set to a directory in your home directory such as
`~/dev/go` to avoid write permission issues. It is also recommended to add
`$GOPATH/bin` to your `PATH` at this point.
- Run the following commands to obtain and install kaspad including all dependencies:
```bash
$ git clone https://github.com/kaspanet/kaspad $GOPATH/src/github.com/kaspanet/kaspad
$ cd $GOPATH/src/github.com/kaspanet/kaspad
$ git clone https://github.com/kaspanet/kaspad
$ cd kaspad
$ go install . ./cmd/...
```
- Kaspad (and utilities) should now be installed in `$GOPATH/bin`. If you did
- Kaspad (and utilities) should now be installed in `$(go env GOPATH)/bin`. If you did
not already add the bin directory to your system path during Go installation,
you are encouraged to do so now.
@@ -53,10 +51,8 @@ $ go install . ./cmd/...
Kaspad has several configuration options available to tweak how it runs, but all
of the basic operations work with zero configuration.
#### Linux/BSD/POSIX/Source
```bash
$ ./kaspad
$ kaspad
```
## Discord
@@ -69,9 +65,8 @@ is used for this project.
## Documentation
The documentation is a work-in-progress. It is located in the [docs](https://github.com/kaspanet/kaspad/tree/master/docs) folder.
The documentation is a work-in-progress.
## License
Kaspad is licensed under the copyfree [ISC License](https://choosealicense.com/licenses/isc/).


@@ -3,6 +3,7 @@ package appmessage
import (
"encoding/hex"
"github.com/kaspanet/kaspad/domain/consensus/utils/blockheader"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/subnetworks"
@@ -268,3 +269,49 @@ func DomainTransactionToRPCTransaction(transaction *externalapi.DomainTransactio
Payload: payload,
}
}
// OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs converts
// OutpointAndUTXOEntryPairs to domain OutpointAndUTXOEntryPairs
func OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs(
outpointAndUTXOEntryPairs []*OutpointAndUTXOEntryPair) []*externalapi.OutpointAndUTXOEntryPair {
domainOutpointAndUTXOEntryPairs := make([]*externalapi.OutpointAndUTXOEntryPair, len(outpointAndUTXOEntryPairs))
for i, outpointAndUTXOEntryPair := range outpointAndUTXOEntryPairs {
domainOutpointAndUTXOEntryPairs[i] = &externalapi.OutpointAndUTXOEntryPair{
Outpoint: &externalapi.DomainOutpoint{
TransactionID: outpointAndUTXOEntryPair.Outpoint.TxID,
Index: outpointAndUTXOEntryPair.Outpoint.Index,
},
UTXOEntry: utxo.NewUTXOEntry(
outpointAndUTXOEntryPair.UTXOEntry.Amount,
outpointAndUTXOEntryPair.UTXOEntry.ScriptPublicKey,
outpointAndUTXOEntryPair.UTXOEntry.IsCoinbase,
outpointAndUTXOEntryPair.UTXOEntry.BlockBlueScore,
),
}
}
return domainOutpointAndUTXOEntryPairs
}
// DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs converts
// domain OutpointAndUTXOEntryPairs to OutpointAndUTXOEntryPairs
func DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs(
outpointAndUTXOEntryPairs []*externalapi.OutpointAndUTXOEntryPair) []*OutpointAndUTXOEntryPair {
domainOutpointAndUTXOEntryPairs := make([]*OutpointAndUTXOEntryPair, len(outpointAndUTXOEntryPairs))
for i, outpointAndUTXOEntryPair := range outpointAndUTXOEntryPairs {
domainOutpointAndUTXOEntryPairs[i] = &OutpointAndUTXOEntryPair{
Outpoint: &Outpoint{
TxID: outpointAndUTXOEntryPair.Outpoint.TransactionID,
Index: outpointAndUTXOEntryPair.Outpoint.Index,
},
UTXOEntry: &UTXOEntry{
Amount: outpointAndUTXOEntryPair.UTXOEntry.Amount(),
ScriptPublicKey: outpointAndUTXOEntryPair.UTXOEntry.ScriptPublicKey(),
IsCoinbase: outpointAndUTXOEntryPair.UTXOEntry.IsCoinbase(),
BlockBlueScore: outpointAndUTXOEntryPair.UTXOEntry.BlockBlueScore(),
},
}
}
return domainOutpointAndUTXOEntryPairs
}


@@ -51,15 +51,17 @@ const (
CmdReject
CmdHeader
CmdRequestNextHeaders
CmdRequestIBDRootUTXOSetAndBlock
CmdIBDRootUTXOSetAndBlock
CmdRequestPruningPointUTXOSetAndBlock
CmdPruningPointUTXOSetChunk
CmdRequestIBDBlocks
CmdIBDRootNotFound
CmdRequestIBDRootHash
CmdIBDRootHash
CmdUnexpectedPruningPoint
CmdRequestPruningPointHash
CmdPruningPointHash
CmdIBDBlockLocator
CmdIBDBlockLocatorHighestHash
CmdBlockHeaders
CmdRequestNextPruningPointUTXOSetChunk
CmdDonePruningPointUTXOSetChunks
// rpc
CmdGetCurrentNetworkRequestMessage
@@ -125,36 +127,38 @@ const (
// ProtocolMessageCommandToString maps all MessageCommands to their string representation
var ProtocolMessageCommandToString = map[MessageCommand]string{
CmdVersion: "Version",
CmdVerAck: "VerAck",
CmdRequestAddresses: "RequestAddresses",
CmdAddresses: "Addresses",
CmdRequestHeaders: "RequestHeaders",
CmdBlock: "Block",
CmdTx: "Tx",
CmdPing: "Ping",
CmdPong: "Pong",
CmdRequestBlockLocator: "RequestBlockLocator",
CmdBlockLocator: "BlockLocator",
CmdInvRelayBlock: "InvRelayBlock",
CmdRequestRelayBlocks: "RequestRelayBlocks",
CmdInvTransaction: "InvTransaction",
CmdRequestTransactions: "RequestTransactions",
CmdIBDBlock: "IBDBlock",
CmdDoneHeaders: "DoneHeaders",
CmdTransactionNotFound: "TransactionNotFound",
CmdReject: "Reject",
CmdHeader: "Header",
CmdRequestNextHeaders: "RequestNextHeaders",
CmdRequestIBDRootUTXOSetAndBlock: "RequestPruningUTXOSetAndBlock",
CmdIBDRootUTXOSetAndBlock: "IBDRootUTXOSetAndBlock",
CmdRequestIBDBlocks: "RequestIBDBlocks",
CmdIBDRootNotFound: "IBDRootNotFound",
CmdRequestIBDRootHash: "IBDRequestIBDRootHash",
CmdIBDRootHash: "IBDIBDRootHash",
CmdIBDBlockLocator: "IBDBlockLocator",
CmdIBDBlockLocatorHighestHash: "IBDBlockLocatorHighestHash",
CmdBlockHeaders: "BlockHeaders",
CmdVersion: "Version",
CmdVerAck: "VerAck",
CmdRequestAddresses: "RequestAddresses",
CmdAddresses: "Addresses",
CmdRequestHeaders: "RequestHeaders",
CmdBlock: "Block",
CmdTx: "Tx",
CmdPing: "Ping",
CmdPong: "Pong",
CmdRequestBlockLocator: "RequestBlockLocator",
CmdBlockLocator: "BlockLocator",
CmdInvRelayBlock: "InvRelayBlock",
CmdRequestRelayBlocks: "RequestRelayBlocks",
CmdInvTransaction: "InvTransaction",
CmdRequestTransactions: "RequestTransactions",
CmdIBDBlock: "IBDBlock",
CmdDoneHeaders: "DoneHeaders",
CmdTransactionNotFound: "TransactionNotFound",
CmdReject: "Reject",
CmdHeader: "Header",
CmdRequestNextHeaders: "RequestNextHeaders",
CmdRequestPruningPointUTXOSetAndBlock: "RequestPruningPointUTXOSetAndBlock",
CmdPruningPointUTXOSetChunk: "PruningPointUTXOSetChunk",
CmdRequestIBDBlocks: "RequestIBDBlocks",
CmdUnexpectedPruningPoint: "UnexpectedPruningPoint",
CmdRequestPruningPointHash: "RequestPruningPointHashHash",
CmdPruningPointHash: "PruningPointHash",
CmdIBDBlockLocator: "IBDBlockLocator",
CmdIBDBlockLocatorHighestHash: "IBDBlockLocatorHighestHash",
CmdBlockHeaders: "BlockHeaders",
CmdRequestNextPruningPointUTXOSetChunk: "RequestNextPruningPointUTXOSetChunk",
CmdDonePruningPointUTXOSetChunks: "DonePruningPointUTXOSetChunks",
}
// RPCMessageCommandToString maps all MessageCommands to their string representation


@@ -1,22 +0,0 @@
package appmessage
// MsgIBDRootNotFound implements the Message interface and represents a kaspa
// IBDRootNotFound message. It is used to notify that the IBD root that was
// requested by the other peer was not found.
//
// This message has no payload.
type MsgIBDRootNotFound struct {
baseMessage
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgIBDRootNotFound) Command() MessageCommand {
return CmdIBDRootNotFound
}
// NewMsgIBDRootNotFound returns a new kaspa IBDRootNotFound message that conforms to the
// Message interface.
func NewMsgIBDRootNotFound() *MsgIBDRootNotFound {
return &MsgIBDRootNotFound{}
}


@@ -0,0 +1,16 @@
package appmessage
// MsgDonePruningPointUTXOSetChunks represents a kaspa DonePruningPointUTXOSetChunks message
type MsgDonePruningPointUTXOSetChunks struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *MsgDonePruningPointUTXOSetChunks) Command() MessageCommand {
return CmdDonePruningPointUTXOSetChunks
}
// NewMsgDonePruningPointUTXOSetChunks returns a new MsgDonePruningPointUTXOSetChunks.
func NewMsgDonePruningPointUTXOSetChunks() *MsgDonePruningPointUTXOSetChunks {
return &MsgDonePruningPointUTXOSetChunks{}
}


@@ -1,26 +0,0 @@
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgIBDRootHashMessage implements the Message interface and represents a kaspa
// IBDRootHash message. It is used as a reply to IBD root hash requests.
type MsgIBDRootHashMessage struct {
baseMessage
Hash *externalapi.DomainHash
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgIBDRootHashMessage) Command() MessageCommand {
return CmdIBDRootHash
}
// NewMsgIBDRootHashMessage returns a new kaspa IBDRootHash message that conforms to
// the Message interface. See MsgIBDRootHashMessage for details.
func NewMsgIBDRootHashMessage(hash *externalapi.DomainHash) *MsgIBDRootHashMessage {
return &MsgIBDRootHashMessage{
Hash: hash,
}
}


@@ -1,23 +0,0 @@
package appmessage
// MsgIBDRootUTXOSetAndBlock implements the Message interface and represents a kaspa
// IBDRootUTXOSetAndBlock message. It is used to answer RequestIBDRootUTXOSetAndBlock messages.
type MsgIBDRootUTXOSetAndBlock struct {
baseMessage
UTXOSet []byte
Block *MsgBlock
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgIBDRootUTXOSetAndBlock) Command() MessageCommand {
return CmdIBDRootUTXOSetAndBlock
}
// NewMsgIBDRootUTXOSetAndBlock returns a new MsgIBDRootUTXOSetAndBlock.
func NewMsgIBDRootUTXOSetAndBlock(utxoSet []byte, block *MsgBlock) *MsgIBDRootUTXOSetAndBlock {
return &MsgIBDRootUTXOSetAndBlock{
UTXOSet: utxoSet,
Block: block,
}
}


@@ -0,0 +1,23 @@
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgPruningPointHashMessage represents a kaspa PruningPointHash message
type MsgPruningPointHashMessage struct {
baseMessage
Hash *externalapi.DomainHash
}
// Command returns the protocol command string for the message
func (msg *MsgPruningPointHashMessage) Command() MessageCommand {
return CmdPruningPointHash
}
// NewPruningPointHashMessage returns a new kaspa PruningPointHash message
func NewPruningPointHashMessage(hash *externalapi.DomainHash) *MsgPruningPointHashMessage {
return &MsgPruningPointHashMessage{
Hash: hash,
}
}


@@ -0,0 +1,36 @@
package appmessage
import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// MsgPruningPointUTXOSetChunk represents a kaspa PruningPointUTXOSetChunk message
type MsgPruningPointUTXOSetChunk struct {
baseMessage
OutpointAndUTXOEntryPairs []*OutpointAndUTXOEntryPair
}
// Command returns the protocol command string for the message
func (msg *MsgPruningPointUTXOSetChunk) Command() MessageCommand {
return CmdPruningPointUTXOSetChunk
}
// NewMsgPruningPointUTXOSetChunk returns a new MsgPruningPointUTXOSetChunk.
func NewMsgPruningPointUTXOSetChunk(outpointAndUTXOEntryPairs []*OutpointAndUTXOEntryPair) *MsgPruningPointUTXOSetChunk {
return &MsgPruningPointUTXOSetChunk{
OutpointAndUTXOEntryPairs: outpointAndUTXOEntryPairs,
}
}
// OutpointAndUTXOEntryPair is an outpoint along with its
// respective UTXO entry
type OutpointAndUTXOEntryPair struct {
Outpoint *Outpoint
UTXOEntry *UTXOEntry
}
// UTXOEntry houses details about an individual transaction output in a UTXO
type UTXOEntry struct {
Amount uint64
ScriptPublicKey *externalapi.ScriptPublicKey
BlockBlueScore uint64
IsCoinbase bool
}


@@ -1,26 +0,0 @@
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgRequestIBDRootUTXOSetAndBlock implements the Message interface and represents a kaspa
// RequestIBDRootUTXOSetAndBlock message. It is used to request the UTXO set and block body
// of the IBD root block.
type MsgRequestIBDRootUTXOSetAndBlock struct {
baseMessage
IBDRoot *externalapi.DomainHash
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestIBDRootUTXOSetAndBlock) Command() MessageCommand {
return CmdRequestIBDRootUTXOSetAndBlock
}
// NewMsgRequestIBDRootUTXOSetAndBlock returns a new MsgRequestIBDRootUTXOSetAndBlock.
func NewMsgRequestIBDRootUTXOSetAndBlock(ibdRoot *externalapi.DomainHash) *MsgRequestIBDRootUTXOSetAndBlock {
return &MsgRequestIBDRootUTXOSetAndBlock{
IBDRoot: ibdRoot,
}
}


@@ -0,0 +1,16 @@
package appmessage
// MsgRequestNextPruningPointUTXOSetChunk represents a kaspa RequestNextPruningPointUTXOSetChunk message
type MsgRequestNextPruningPointUTXOSetChunk struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *MsgRequestNextPruningPointUTXOSetChunk) Command() MessageCommand {
return CmdRequestNextPruningPointUTXOSetChunk
}
// NewMsgRequestNextPruningPointUTXOSetChunk returns a new MsgRequestNextPruningPointUTXOSetChunk.
func NewMsgRequestNextPruningPointUTXOSetChunk() *MsgRequestNextPruningPointUTXOSetChunk {
return &MsgRequestNextPruningPointUTXOSetChunk{}
}


@@ -0,0 +1,23 @@
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgRequestPruningPointUTXOSetAndBlock represents a kaspa RequestPruningPointUTXOSetAndBlock message
type MsgRequestPruningPointUTXOSetAndBlock struct {
baseMessage
PruningPointHash *externalapi.DomainHash
}
// Command returns the protocol command string for the message
func (msg *MsgRequestPruningPointUTXOSetAndBlock) Command() MessageCommand {
return CmdRequestPruningPointUTXOSetAndBlock
}
// NewMsgRequestPruningPointUTXOSetAndBlock returns a new MsgRequestPruningPointUTXOSetAndBlock
func NewMsgRequestPruningPointUTXOSetAndBlock(pruningPointHash *externalapi.DomainHash) *MsgRequestPruningPointUTXOSetAndBlock {
return &MsgRequestPruningPointUTXOSetAndBlock{
PruningPointHash: pruningPointHash,
}
}


@@ -1,22 +0,0 @@
package appmessage
// MsgRequestIBDRootHashMessage implements the Message interface and represents a kaspa
// MsgRequestIBDRootHashMessage message. It is used to request the IBD root hash
// from a peer during IBD.
//
// This message has no payload.
type MsgRequestIBDRootHashMessage struct {
baseMessage
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestIBDRootHashMessage) Command() MessageCommand {
return CmdRequestIBDRootHash
}
// NewMsgRequestIBDRootHashMessage returns a new kaspa RequestIBDRootHash message that conforms to the
// Message interface.
func NewMsgRequestIBDRootHashMessage() *MsgRequestIBDRootHashMessage {
return &MsgRequestIBDRootHashMessage{}
}


@@ -0,0 +1,16 @@
package appmessage
// MsgRequestPruningPointHashMessage represents a kaspa RequestPruningPointHashMessage message
type MsgRequestPruningPointHashMessage struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *MsgRequestPruningPointHashMessage) Command() MessageCommand {
return CmdRequestPruningPointHash
}
// NewMsgRequestPruningPointHashMessage returns a new kaspa RequestPruningPointHash message
func NewMsgRequestPruningPointHashMessage() *MsgRequestPruningPointHashMessage {
return &MsgRequestPruningPointHashMessage{}
}


@@ -0,0 +1,16 @@
package appmessage
// MsgUnexpectedPruningPoint represents a kaspa UnexpectedPruningPoint message
type MsgUnexpectedPruningPoint struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *MsgUnexpectedPruningPoint) Command() MessageCommand {
return CmdUnexpectedPruningPoint
}
// NewMsgUnexpectedPruningPoint returns a new kaspa UnexpectedPruningPoint message
func NewMsgUnexpectedPruningPoint() *MsgUnexpectedPruningPoint {
return &MsgUnexpectedPruningPoint{}
}


@@ -5,7 +5,6 @@ package appmessage
type GetBlockRequestMessage struct {
baseMessage
Hash string
SubnetworkID string
IncludeTransactionVerboseData bool
}
@@ -15,10 +14,9 @@ func (msg *GetBlockRequestMessage) Command() MessageCommand {
}
// NewGetBlockRequestMessage returns an instance of the message
func NewGetBlockRequestMessage(hash string, subnetworkID string, includeTransactionVerboseData bool) *GetBlockRequestMessage {
func NewGetBlockRequestMessage(hash string, includeTransactionVerboseData bool) *GetBlockRequestMessage {
return &GetBlockRequestMessage{
Hash: hash,
SubnetworkID: subnetworkID,
IncludeTransactionVerboseData: includeTransactionVerboseData,
}
}


@@ -46,4 +46,5 @@ type GetConnectedPeerInfoMessage struct {
UserAgent string
AdvertisedProtocolVersion uint32
TimeConnected int64
IsIBDPeer bool
}


@@ -19,11 +19,33 @@ func NewSubmitBlockRequestMessage(block *MsgBlock) *SubmitBlockRequestMessage {
}
}
// RejectReason describes the reason why a block sent by SubmitBlock was rejected
type RejectReason byte
// RejectReason constants
// Not using iota, since in the .proto file those are hardcoded
const (
RejectReasonNone RejectReason = 0
RejectReasonBlockInvalid RejectReason = 1
RejectReasonIsInIBD RejectReason = 2
)
var rejectReasonToString = map[RejectReason]string{
RejectReasonNone: "None",
RejectReasonBlockInvalid: "Block is invalid",
RejectReasonIsInIBD: "Node is in IBD",
}
func (rr RejectReason) String() string {
return rejectReasonToString[rr]
}
// SubmitBlockResponseMessage is an appmessage corresponding to
// its respective RPC message
type SubmitBlockResponseMessage struct {
baseMessage
Error *RPCError
RejectReason RejectReason
Error *RPCError
}
// Command returns the protocol command string for the message


@@ -2,9 +2,10 @@ package app
import (
"fmt"
"github.com/kaspanet/kaspad/domain/utxoindex"
"sync/atomic"
"github.com/kaspanet/kaspad/domain/utxoindex"
infrastructuredatabase "github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/domain"
@@ -79,7 +80,7 @@ func (a *ComponentManager) Stop() {
func NewComponentManager(cfg *config.Config, db infrastructuredatabase.Database, interrupt chan<- struct{}) (
*ComponentManager, error) {
domain, err := domain.New(cfg.ActiveNetParams, db)
domain, err := domain.New(cfg.ActiveNetParams, db, cfg.IsArchivalNode)
if err != nil {
return nil, err
}


@@ -1,9 +1,8 @@
package flowcontext
import (
"sync/atomic"
"github.com/kaspanet/kaspad/app/protocol/blocklogger"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/pkg/errors"
@@ -42,7 +41,10 @@ func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock,
blocklogger.LogBlock(block)
log.Debugf("OnNewBlock: passing block %s transactions to mining manager", hash)
_ = f.Domain().MiningManager().HandleNewBlockTransactions(newBlock.Transactions)
_, err = f.Domain().MiningManager().HandleNewBlockTransactions(newBlock.Transactions)
if err != nil {
return err
}
if f.onBlockAddedToDAGHandler != nil {
log.Debugf("OnNewBlock: calling f.onBlockAddedToDAGHandler for block %s", hash)
@@ -115,24 +117,45 @@ func (f *FlowContext) AddBlock(block *externalapi.DomainBlock) error {
// IsIBDRunning returns true if IBD is currently marked as running
func (f *FlowContext) IsIBDRunning() bool {
return atomic.LoadUint32(&f.isInIBD) != 0
f.ibdPeerMutex.RLock()
defer f.ibdPeerMutex.RUnlock()
return f.ibdPeer != nil
}
// TrySetIBDRunning attempts to set `isInIBD`. Returns false
// if it is already set
func (f *FlowContext) TrySetIBDRunning() bool {
succeeded := atomic.CompareAndSwapUint32(&f.isInIBD, 0, 1)
if succeeded {
log.Infof("IBD started")
func (f *FlowContext) TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool {
f.ibdPeerMutex.Lock()
defer f.ibdPeerMutex.Unlock()
if f.ibdPeer != nil {
return false
}
return succeeded
f.ibdPeer = ibdPeer
log.Infof("IBD started")
return true
}
// UnsetIBDRunning unsets isInIBD
func (f *FlowContext) UnsetIBDRunning() {
succeeded := atomic.CompareAndSwapUint32(&f.isInIBD, 1, 0)
if !succeeded {
f.ibdPeerMutex.Lock()
defer f.ibdPeerMutex.Unlock()
if f.ibdPeer == nil {
panic("attempted to unset isInIBD when it was not set to begin with")
}
f.ibdPeer = nil
log.Infof("IBD finished")
}
// IBDPeer returns the current IBD peer or null if the node is not
// in IBD
func (f *FlowContext) IBDPeer() *peerpkg.Peer {
f.ibdPeerMutex.RLock()
defer f.ibdPeerMutex.RUnlock()
return f.ibdPeer
}


@@ -48,7 +48,8 @@ type FlowContext struct {
sharedRequestedBlocks *blockrelay.SharedRequestedBlocks
isInIBD uint32
ibdPeer *peerpkg.Peer
ibdPeerMutex sync.RWMutex
peers map[id.ID]*peerpkg.Peer
peersMutex sync.RWMutex


@@ -4,6 +4,8 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/pkg/errors"
)
@@ -160,3 +162,49 @@ func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (*externa
log.Infof("Unorphaned block %s", orphanHash)
return blockInsertionResult, true, nil
}
// GetOrphanRoots returns the roots of the missing ancestors DAG of the given orphan
func (f *FlowContext) GetOrphanRoots(orphan *externalapi.DomainHash) ([]*externalapi.DomainHash, bool, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "GetOrphanRoots")
defer onEnd()
f.orphansMutex.RLock()
defer f.orphansMutex.RUnlock()
_, ok := f.orphans[*orphan]
if !ok {
return nil, false, nil
}
queue := []*externalapi.DomainHash{orphan}
addedToQueueSet := hashset.New()
addedToQueueSet.Add(orphan)
roots := []*externalapi.DomainHash{}
for len(queue) > 0 {
var current *externalapi.DomainHash
current, queue = queue[0], queue[1:]
block, ok := f.orphans[*current]
if !ok {
blockInfo, err := f.domain.Consensus().GetBlockInfo(current)
if err != nil {
return nil, false, err
}
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
roots = append(roots, current)
}
continue
}
for _, parent := range block.Header.ParentHashes() {
if !addedToQueueSet.Contains(parent) {
queue = append(queue, parent)
addedToQueueSet.Add(parent)
}
}
}
return roots, true, nil
}


@@ -3,7 +3,7 @@ package flowcontext
import "github.com/kaspanet/kaspad/util/mstime"
const (
maxSelectedParentTimeDiffToAllowMiningInMilliSeconds = 300_000
maxSelectedParentTimeDiffToAllowMiningInMilliSeconds = 60 * 60 * 1000 // 1 Hour
)
// ShouldMine returns whether it's ok to use block template from this node


@@ -5,14 +5,12 @@ import (
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// ReceiveAddressesContext is the interface for the context needed for the ReceiveAddresses flow.
type ReceiveAddressesContext interface {
Config() *config.Config
AddressManager() *addressmanager.AddressManager
}


@@ -1,7 +1,6 @@
package addressexchange
import (
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"math/rand"
"github.com/kaspanet/kaspad/app/appmessage"
@@ -17,16 +16,11 @@ type SendAddressesContext interface {
// SendAddresses sends addresses to a peer that requests it.
func SendAddresses(context SendAddressesContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
for {
message, err := incomingRoute.Dequeue()
_, err := incomingRoute.Dequeue()
if err != nil {
return err
}
_, ok := message.(*appmessage.MsgRequestAddresses)
if !ok {
return protocolerrors.Errorf(true, "unexpected message. "+
"Expected: %s, got: %s", appmessage.CmdRequestAddresses, message.Command())
}
addresses := context.AddressManager().Addresses()
msgAddresses := appmessage.NewMsgAddresses(shuffleAddresses(addresses))


@@ -25,7 +25,8 @@ func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute
return err
}
msgRequestIBDBlocks := message.(*appmessage.MsgRequestIBDBlocks)
for _, hash := range msgRequestIBDBlocks.Hashes {
log.Debugf("Got request for %d ibd blocks", len(msgRequestIBDBlocks.Hashes))
for i, hash := range msgRequestIBDBlocks.Hashes {
// Fetch the block from the database.
blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
if err != nil {
@@ -47,6 +48,7 @@ func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute
if err != nil {
return err
}
log.Debugf("sent %d out of %d", i, len(msgRequestIBDBlocks.Hashes))
}
}
}


@@ -1,49 +0,0 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// HandleIBDRootHashRequestsFlowContext is the interface for the context needed for the handleIBDRootHashRequestsFlow flow.
type HandleIBDRootHashRequestsFlowContext interface {
Domain() domain.Domain
}
type handleIBDRootHashRequestsFlow struct {
HandleIBDRootHashRequestsFlowContext
incomingRoute, outgoingRoute *router.Route
}
// HandleIBDRootHashRequests listens to appmessage.MsgRequestIBDRootHashMessage messages and sends
// the IBD root hash as response.
func HandleIBDRootHashRequests(context HandleIBDRootHashRequestsFlowContext, incomingRoute,
outgoingRoute *router.Route) error {
flow := &handleIBDRootHashRequestsFlow{
HandleIBDRootHashRequestsFlowContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *handleIBDRootHashRequestsFlow) start() error {
for {
_, err := flow.incomingRoute.Dequeue()
if err != nil {
return err
}
pruningPoint, err := flow.Domain().Consensus().PruningPoint()
if err != nil {
return err
}
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgIBDRootHashMessage(pruningPoint))
if err != nil {
return err
}
}
}

View File

@@ -0,0 +1,51 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// HandlePruningPointHashRequestsFlowContext is the interface for the context needed for the handlePruningPointHashRequestsFlow flow.
type HandlePruningPointHashRequestsFlowContext interface {
Domain() domain.Domain
}
type handlePruningPointHashRequestsFlow struct {
HandlePruningPointHashRequestsFlowContext
incomingRoute, outgoingRoute *router.Route
}
// HandlePruningPointHashRequests listens to appmessage.MsgRequestPruningPointHashMessage messages and sends
// the pruning point hash as response.
func HandlePruningPointHashRequests(context HandlePruningPointHashRequestsFlowContext, incomingRoute,
outgoingRoute *router.Route) error {
flow := &handlePruningPointHashRequestsFlow{
HandlePruningPointHashRequestsFlowContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *handlePruningPointHashRequestsFlow) start() error {
for {
_, err := flow.incomingRoute.Dequeue()
if err != nil {
return err
}
log.Debugf("Got request for a pruning point hash")
pruningPoint, err := flow.Domain().Consensus().PruningPoint()
if err != nil {
return err
}
err = flow.outgoingRoute.Enqueue(appmessage.NewPruningPointHashMessage(pruningPoint))
if err != nil {
return err
}
log.Debugf("Sent pruning point hash %s", pruningPoint)
}
}


@@ -26,6 +26,7 @@ func HandleRelayBlockRequests(context RelayBlockRequestsContext, incomingRoute *
return err
}
getRelayBlocksMessage := message.(*appmessage.MsgRequestRelayBlocks)
log.Debugf("Got request for relay blocks with hashes %s", getRelayBlocksMessage.Hashes)
for _, hash := range getRelayBlocksMessage.Hashes {
// Fetch the block from the database.
blockInfo, err := context.Domain().Consensus().GetBlockInfo(hash)
@@ -46,6 +47,7 @@ func HandleRelayBlockRequests(context RelayBlockRequestsContext, incomingRoute *
if err != nil {
return err
}
log.Debugf("Relayed block with hash %s", hash)
}
}
}


@@ -10,7 +10,6 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)
@@ -24,14 +23,14 @@ var orphanResolutionRange uint32 = 5
type RelayInvsContext interface {
Domain() domain.Domain
Config() *config.Config
NetAdapter() *netadapter.NetAdapter
OnNewBlock(block *externalapi.DomainBlock, blockInsertionResult *externalapi.BlockInsertionResult) error
SharedRequestedBlocks() *SharedRequestedBlocks
Broadcast(message appmessage.Message) error
AddOrphan(orphanBlock *externalapi.DomainBlock)
GetOrphanRoots(orphanHash *externalapi.DomainHash) ([]*externalapi.DomainHash, bool, error)
IsOrphan(blockHash *externalapi.DomainHash) bool
IsIBDRunning() bool
TrySetIBDRunning() bool
TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool
UnsetIBDRunning()
}
@@ -71,7 +70,7 @@ func (flow *handleRelayInvsFlow) start() error {
if err != nil {
return err
}
if blockInfo.Exists {
if blockInfo.Exists && blockInfo.BlockStatus != externalapi.StatusHeaderOnly {
if blockInfo.BlockStatus == externalapi.StatusInvalid {
return protocolerrors.Errorf(true, "sent inv of an invalid block %s",
inv.Hash)
@@ -81,7 +80,11 @@ func (flow *handleRelayInvsFlow) start() error {
}
if flow.IsOrphan(inv.Hash) {
log.Debugf("Block %s is a known orphan. continuing...", inv.Hash)
log.Debugf("Block %s is a known orphan. Requesting its missing ancestors", inv.Hash)
err := flow.AddOrphanRootsToQueue(inv.Hash)
if err != nil {
return err
}
continue
}
@@ -104,10 +107,14 @@ func (flow *handleRelayInvsFlow) start() error {
log.Debugf("Processing block %s", inv.Hash)
missingParents, blockInsertionResult, err := flow.processBlock(block)
if err != nil {
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
log.Infof("Ignoring duplicate block %s", inv.Hash)
continue
}
return err
}
if len(missingParents) > 0 {
log.Debugf("Block %s contains orphans: %s", inv.Hash, missingParents)
log.Debugf("Block %s is orphan and has missing parents: %s", inv.Hash, missingParents)
err := flow.processOrphan(block, missingParents)
if err != nil {
return err
@@ -238,9 +245,10 @@ func (flow *handleRelayInvsFlow) processOrphan(block *externalapi.DomainBlock, m
}
if isBlockInOrphanResolutionRange {
log.Debugf("Block %s is within orphan resolution range. "+
"Adding it to the orphan set and requesting its missing parents", blockHash)
flow.addToOrphanSetAndRequestMissingParents(block, missingParents)
return nil
"Adding it to the orphan set", blockHash)
flow.AddOrphan(block)
log.Debugf("Requesting block %s missing ancestors", blockHash)
return flow.AddOrphanRootsToQueue(blockHash)
}
// Start IBD unless we already are in IBD
@@ -277,13 +285,25 @@ func (flow *handleRelayInvsFlow) isBlockInOrphanResolutionRange(blockHash *exter
return false, nil
}
func (flow *handleRelayInvsFlow) addToOrphanSetAndRequestMissingParents(
block *externalapi.DomainBlock, missingParents []*externalapi.DomainHash) {
flow.AddOrphan(block)
invMessages := make([]*appmessage.MsgInvRelayBlock, len(missingParents))
for i, missingParent := range missingParents {
invMessages[i] = appmessage.NewMsgInvBlock(missingParent)
func (flow *handleRelayInvsFlow) AddOrphanRootsToQueue(orphan *externalapi.DomainHash) error {
orphanRoots, orphanExists, err := flow.GetOrphanRoots(orphan)
if err != nil {
return err
}
if !orphanExists {
log.Infof("Orphan block %s was missing from the orphan pool while requesting for its roots. This "+
"probably happened because it was randomly evicted immediately after it was added.", orphan)
}
log.Infof("Block %s has %d missing ancestors. Adding them to the invs queue...", orphan, len(orphanRoots))
invMessages := make([]*appmessage.MsgInvRelayBlock, len(orphanRoots))
for i, root := range orphanRoots {
log.Debugf("Adding block %s missing ancestor %s to the invs queue", orphan, root)
invMessages[i] = appmessage.NewMsgInvBlock(root)
}
flow.invsQueue = append(invMessages, flow.invsQueue...)
return nil
}
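
AddOrphanRootsToQueue prepends the roots' inv messages to the front of the invs queue, so the missing ancestors are processed before anything already queued. A minimal sketch of that prepend semantics, with a hypothetical inv type standing in for appmessage.MsgInvRelayBlock:

```go
package main

import "fmt"

// inv stands in for appmessage.MsgInvRelayBlock; it only carries a hash.
type inv struct{ hash string }

// prependToQueue mirrors `flow.invsQueue = append(invMessages, flow.invsQueue...)`:
// the orphan's missing ancestors go ahead of everything already queued, so they
// are dequeued and requested first.
func prependToQueue(queue []inv, roots []string) []inv {
	invMessages := make([]inv, len(roots))
	for i, root := range roots {
		invMessages[i] = inv{hash: root}
	}
	return append(invMessages, queue...)
}

func main() {
	queue := []inv{{hash: "orphan-tip"}}
	queue = prependToQueue(queue, []string{"root-1", "root-2"})
	fmt.Println(queue) // [{root-1} {root-2} {orphan-tip}]
}
```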


@@ -36,6 +36,8 @@ func (flow *handleRequestBlockLocatorFlow) start() error {
if err != nil {
return err
}
log.Debugf("Received getBlockLocator with lowHash: %s, highHash: %s, limit: %d",
lowHash, highHash, limit)
locator, err := flow.Domain().Consensus().CreateBlockLocator(lowHash, highHash, limit)
if err != nil || len(locator) == 0 {


@@ -42,9 +42,10 @@ func (flow *handleRequestBlocksFlow) start() error {
if err != nil {
return err
}
log.Debugf("Recieved requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash)
for !lowHash.Equal(highHash) {
log.Debugf("Getting block hashes between %s and %s to %s", lowHash, highHash, flow.peer)
log.Debugf("Getting block headers between %s and %s to %s", lowHash, highHash, flow.peer)
// GetHashesBetween is a relatively heavy operation so we limit it
// in order to avoid locking the consensus for too long


@@ -1,65 +0,0 @@
package blockrelay
import (
"errors"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// HandleRequestIBDRootUTXOSetAndBlockContext is the interface for the context needed for the HandleRequestIBDRootUTXOSetAndBlock flow.
type HandleRequestIBDRootUTXOSetAndBlockContext interface {
Domain() domain.Domain
}
type handleRequestIBDRootUTXOSetAndBlockFlow struct {
HandleRequestIBDRootUTXOSetAndBlockContext
incomingRoute, outgoingRoute *router.Route
}
// HandleRequestIBDRootUTXOSetAndBlock listens to appmessage.MsgRequestIBDRootUTXOSetAndBlock messages and sends
// the IBD root UTXO set and block body.
func HandleRequestIBDRootUTXOSetAndBlock(context HandleRequestIBDRootUTXOSetAndBlockContext, incomingRoute,
outgoingRoute *router.Route) error {
flow := &handleRequestIBDRootUTXOSetAndBlockFlow{
HandleRequestIBDRootUTXOSetAndBlockContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *handleRequestIBDRootUTXOSetAndBlockFlow) start() error {
for {
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return err
}
msgRequestIBDRootUTXOSetAndBlock := message.(*appmessage.MsgRequestIBDRootUTXOSetAndBlock)
utxoSet, err := flow.Domain().Consensus().GetPruningPointUTXOSet(msgRequestIBDRootUTXOSetAndBlock.IBDRoot)
if err != nil {
if errors.Is(err, ruleerrors.ErrWrongPruningPointHash) {
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgIBDRootNotFound())
if err != nil {
return err
}
continue
}
}
block, err := flow.Domain().Consensus().GetBlock(msgRequestIBDRootUTXOSetAndBlock.IBDRoot)
if err != nil {
return err
}
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgIBDRootUTXOSetAndBlock(utxoSet,
appmessage.DomainBlockToMsgBlock(block)))
if err != nil {
return err
}
}
}


@@ -0,0 +1,144 @@
package blockrelay
import (
"errors"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// HandleRequestPruningPointUTXOSetAndBlockContext is the interface for the context needed for the HandleRequestPruningPointUTXOSetAndBlock flow.
type HandleRequestPruningPointUTXOSetAndBlockContext interface {
Domain() domain.Domain
}
type handleRequestPruningPointUTXOSetAndBlockFlow struct {
HandleRequestPruningPointUTXOSetAndBlockContext
incomingRoute, outgoingRoute *router.Route
}
// HandleRequestPruningPointUTXOSetAndBlock listens to appmessage.MsgRequestPruningPointUTXOSetAndBlock messages and sends
// the pruning point UTXO set and block body.
func HandleRequestPruningPointUTXOSetAndBlock(context HandleRequestPruningPointUTXOSetAndBlockContext, incomingRoute,
outgoingRoute *router.Route) error {
flow := &handleRequestPruningPointUTXOSetAndBlockFlow{
HandleRequestPruningPointUTXOSetAndBlockContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *handleRequestPruningPointUTXOSetAndBlockFlow) start() error {
for {
msgRequestPruningPointUTXOSetAndBlock, err := flow.waitForRequestPruningPointUTXOSetAndBlockMessages()
if err != nil {
return err
}
err = flow.handleRequestPruningPointUTXOSetAndBlockMessage(msgRequestPruningPointUTXOSetAndBlock)
if err != nil {
return err
}
}
}
func (flow *handleRequestPruningPointUTXOSetAndBlockFlow) handleRequestPruningPointUTXOSetAndBlockMessage(
msgRequestPruningPointUTXOSetAndBlock *appmessage.MsgRequestPruningPointUTXOSetAndBlock) error {
onEnd := logger.LogAndMeasureExecutionTime(log, "handleRequestPruningPointUTXOSetAndBlockFlow")
defer onEnd()
log.Debugf("Got request for PruningPointHash UTXOSet and Block")
err := flow.sendPruningPointBlock(msgRequestPruningPointUTXOSetAndBlock)
if err != nil {
return err
}
return flow.sendPruningPointUTXOSet(msgRequestPruningPointUTXOSetAndBlock)
}
func (flow *handleRequestPruningPointUTXOSetAndBlockFlow) waitForRequestPruningPointUTXOSetAndBlockMessages() (
*appmessage.MsgRequestPruningPointUTXOSetAndBlock, error) {
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return nil, err
}
msgRequestPruningPointUTXOSetAndBlock, ok := message.(*appmessage.MsgRequestPruningPointUTXOSetAndBlock)
if !ok {
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestPruningPointUTXOSetAndBlock, message.Command())
}
return msgRequestPruningPointUTXOSetAndBlock, nil
}
func (flow *handleRequestPruningPointUTXOSetAndBlockFlow) sendPruningPointBlock(
msgRequestPruningPointUTXOSetAndBlock *appmessage.MsgRequestPruningPointUTXOSetAndBlock) error {
block, err := flow.Domain().Consensus().GetBlock(msgRequestPruningPointUTXOSetAndBlock.PruningPointHash)
if err != nil {
return err
}
log.Debugf("Retrieved pruning block %s", msgRequestPruningPointUTXOSetAndBlock.PruningPointHash)
return flow.outgoingRoute.Enqueue(appmessage.NewMsgIBDBlock(appmessage.DomainBlockToMsgBlock(block)))
}
func (flow *handleRequestPruningPointUTXOSetAndBlockFlow) sendPruningPointUTXOSet(
msgRequestPruningPointUTXOSetAndBlock *appmessage.MsgRequestPruningPointUTXOSetAndBlock) error {
// Send the UTXO set in `step`-sized chunks
const step = 1000
var fromOutpoint *externalapi.DomainOutpoint
chunksSent := 0
for {
pruningPointUTXOs, err := flow.Domain().Consensus().GetPruningPointUTXOs(
msgRequestPruningPointUTXOSetAndBlock.PruningPointHash, fromOutpoint, step)
if err != nil {
if errors.Is(err, ruleerrors.ErrWrongPruningPointHash) {
return flow.outgoingRoute.Enqueue(appmessage.NewMsgUnexpectedPruningPoint())
}
return err
}
log.Debugf("Retrieved %d UTXOs for pruning block %s",
len(pruningPointUTXOs), msgRequestPruningPointUTXOSetAndBlock.PruningPointHash)
outpointAndUTXOEntryPairs :=
appmessage.DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs(pruningPointUTXOs)
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgPruningPointUTXOSetChunk(outpointAndUTXOEntryPairs))
if err != nil {
return err
}
if len(pruningPointUTXOs) < step {
log.Debugf("Finished sending UTXOs for pruning block %s",
msgRequestPruningPointUTXOSetAndBlock.PruningPointHash)
return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
}
fromOutpoint = pruningPointUTXOs[len(pruningPointUTXOs)-1].Outpoint
chunksSent++
// Wait for the peer to request more chunks every `ibdBatchSize` chunks
if chunksSent%ibdBatchSize == 0 {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return err
}
_, ok := message.(*appmessage.MsgRequestNextPruningPointUTXOSetChunk)
if !ok {
return protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointUTXOSetChunk, message.Command())
}
}
}
}
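
sendPruningPointUTXOSet streams the UTXO set in `step`-sized chunks and pauses every `ibdBatchSize` chunks until the peer asks for more, which bounds how many unacknowledged chunks are ever in flight. A minimal sketch of that windowed send, assuming a hypothetical ibdBatchSize value and using channels in place of the router routes:

```go
package main

import "fmt"

const step = 1000       // UTXOs per chunk, as in the flow above
const ibdBatchSize = 99 // assumed window size; the real constant is defined elsewhere in the package

// sendChunks streams total items in step-sized chunks over out, and blocks on an
// acknowledgement from acks every ibdBatchSize chunks, mirroring how the flow
// waits for MsgRequestNextPruningPointUTXOSetChunk before sending more.
func sendChunks(total int, out chan<- int, acks <-chan struct{}) {
	chunksSent := 0
	for sent := 0; sent < total; sent += step {
		size := step
		if total-sent < step {
			size = total - sent
		}
		out <- size
		chunksSent++
		if chunksSent%ibdBatchSize == 0 {
			<-acks
		}
	}
	close(out)
}

func main() {
	out := make(chan int)
	acks := make(chan struct{})
	go sendChunks(250_000, out, acks)

	received := 0
	for i := 1; ; i++ {
		size, ok := <-out
		if !ok {
			break
		}
		received += size
		if i%ibdBatchSize == 0 {
			acks <- struct{}{} // the peer requests the next window
		}
	}
	fmt.Println("received UTXOs:", received) // received UTXOs: 250000
}
```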


@@ -1,37 +0,0 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
type hashesQueueSet struct {
queue []*externalapi.DomainHash
set map[externalapi.DomainHash]struct{}
}
func (r *hashesQueueSet) enqueueIfNotExists(hash *externalapi.DomainHash) {
if _, ok := r.set[*hash]; ok {
return
}
r.queue = append(r.queue, hash)
r.set[*hash] = struct{}{}
}
func (r *hashesQueueSet) dequeue(numItems int) []*externalapi.DomainHash {
var hashes []*externalapi.DomainHash
hashes, r.queue = r.queue[:numItems], r.queue[numItems:]
for _, hash := range hashes {
delete(r.set, *hash)
}
return hashes
}
func (r *hashesQueueSet) len() int {
return len(r.queue)
}
func newHashesQueueSet() *hashesQueueSet {
return &hashesQueueSet{
set: make(map[externalapi.DomainHash]struct{}),
}
}
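
The deleted hashesQueueSet pairs a slice (for FIFO order) with a map (for O(1) de-duplication); note that its dequeue panics if asked for more items than len() reports. A self-contained sketch of the same pattern over strings, with the bounds clamped defensively:

```go
package main

import "fmt"

// queueSet keeps FIFO order in a slice and O(1) membership in a map,
// mirroring the deleted hashesQueueSet.
type queueSet struct {
	queue []string
	set   map[string]struct{}
}

func newQueueSet() *queueSet {
	return &queueSet{set: make(map[string]struct{})}
}

func (q *queueSet) enqueueIfNotExists(s string) {
	if _, ok := q.set[s]; ok {
		return
	}
	q.queue = append(q.queue, s)
	q.set[s] = struct{}{}
}

// dequeue pops up to numItems items; unlike the original, it clamps numItems
// to the queue length instead of panicking on out-of-range slice bounds.
func (q *queueSet) dequeue(numItems int) []string {
	if numItems > len(q.queue) {
		numItems = len(q.queue)
	}
	items := q.queue[:numItems]
	q.queue = q.queue[numItems:]
	for _, s := range items {
		delete(q.set, s)
	}
	return items
}

func main() {
	q := newQueueSet()
	q.enqueueIfNotExists("a")
	q.enqueueIfNotExists("b")
	q.enqueueIfNotExists("a") // deduplicated
	fmt.Println(q.dequeue(5)) // [a b]
}
```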


@@ -1,8 +1,13 @@
package blockrelay
import (
"fmt"
"time"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
@@ -13,7 +18,7 @@ import (
)
func (flow *handleRelayInvsFlow) runIBDIfNotRunning(highHash *externalapi.DomainHash) error {
wasIBDNotRunning := flow.TrySetIBDRunning()
wasIBDNotRunning := flow.TrySetIBDRunning(flow.peer)
if !wasIBDNotRunning {
log.Debugf("IBD is already running")
return nil
@@ -22,67 +27,24 @@ func (flow *handleRelayInvsFlow) runIBDIfNotRunning(highHash *externalapi.Domain
log.Debugf("IBD started with peer %s and highHash %s", flow.peer, highHash)
// Fetch all the headers if we don't already have them
log.Debugf("Downloading headers up to %s", highHash)
log.Debugf("Syncing headers up to %s", highHash)
err := flow.syncHeaders(highHash)
if err != nil {
return err
}
log.Debugf("Finished downloading headers up to %s", highHash)
log.Debugf("Finished syncing headers up to %s", highHash)
// Fetch the UTXO set if we don't already have it
log.Debugf("Checking if there's a new pruning point under %s", highHash)
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestIBDRootHashMessage())
log.Debugf("Syncing the current pruning point UTXO set")
syncedPruningPointUTXOSetSuccessfully, err := flow.syncPruningPointUTXOSet()
if err != nil {
return err
}
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return err
if !syncedPruningPointUTXOSetSuccessfully {
log.Debugf("Aborting IBD because the pruning point UTXO set failed to sync")
return nil
}
log.Debugf("Finished syncing the current pruning point UTXO set")
msgIBDRootHash, ok := message.(*appmessage.MsgIBDRootHashMessage)
if !ok {
return protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdIBDRootHash, message.Command())
}
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(msgIBDRootHash.Hash)
if err != nil {
return err
}
if blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
log.Infof("Checking if the suggested pruning point %s is compatible to the node DAG", msgIBDRootHash.Hash)
isValid, err := flow.Domain().Consensus().IsValidPruningPoint(msgIBDRootHash.Hash)
if err != nil {
return err
}
if !isValid {
log.Infof("The suggested pruning point %s is incompatible to this node DAG, so stopping IBD with this"+
" peer", msgIBDRootHash.Hash)
return nil
}
log.Info("Fetching the pruning point UTXO set")
succeed, err := flow.fetchMissingUTXOSet(msgIBDRootHash.Hash)
if err != nil {
return err
}
if !succeed {
log.Infof("Couldn't successfully fetch the pruning point UTXO set. Stopping IBD.")
return nil
}
log.Info("Fetched the new pruning point UTXO set")
} else {
log.Debugf("Already has the block data of the new suggested pruning point %s", msgIBDRootHash.Hash)
}
// Fetch the block bodies
log.Debugf("Downloading block bodies up to %s", highHash)
err = flow.syncMissingBlockBodies(highHash)
if err != nil {
@@ -120,60 +82,104 @@ func (flow *handleRelayInvsFlow) syncHeaders(highHash *externalapi.DomainHash) e
}
func (flow *handleRelayInvsFlow) findHighestSharedBlockHash(targetHash *externalapi.DomainHash) (*externalapi.DomainHash, error) {
lowHash := flow.Config().ActiveNetParams.GenesisHash
highHash, err := flow.Domain().Consensus().GetHeadersSelectedTip()
log.Debugf("Sending a blockLocator to %s between pruning point and headers selected tip", flow.peer)
blockLocator, err := flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
if err != nil {
return nil, err
}
for !lowHash.Equal(highHash) {
log.Debugf("Sending a blockLocator to %s between %s and %s", flow.peer, lowHash, highHash)
blockLocator, err := flow.Domain().Consensus().CreateBlockLocator(lowHash, highHash, 0)
for {
highestHash, err := flow.fetchHighestHash(targetHash, blockLocator)
if err != nil {
return nil, err
}
highestHashIndex, err := flow.findHighestHashIndex(highestHash, blockLocator)
if err != nil {
return nil, err
}
ibdBlockLocatorMessage := appmessage.NewMsgIBDBlockLocator(targetHash, blockLocator)
err = flow.outgoingRoute.Enqueue(ibdBlockLocatorMessage)
if err != nil {
return nil, err
}
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return nil, err
}
ibdBlockLocatorHighestHashMessage, ok := message.(*appmessage.MsgIBDBlockLocatorHighestHash)
if !ok {
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdIBDBlockLocatorHighestHash, message.Command())
}
highestHash := ibdBlockLocatorHighestHashMessage.HighestHash
log.Debugf("The highest hash the peer %s knows is %s", flow.peer, highestHash)
if highestHashIndex == 0 ||
// If the block locator contains only two adjacent chain blocks, the
// syncer will always find the same highest chain block, so to avoid
// an endless loop, we explicitly stop the loop in such situation.
(len(blockLocator) == 2 && highestHashIndex == 1) {
highestHashIndex := 0
highestHashIndexFound := false
for i, blockLocatorHash := range blockLocator {
if highestHash.Equal(blockLocatorHash) {
highestHashIndex = i
highestHashIndexFound = true
break
}
return highestHash, nil
}
if !highestHashIndexFound {
return nil, protocolerrors.Errorf(true, "highest hash %s "+
"returned from peer %s is not in the original blockLocator", highestHash, flow.peer)
}
log.Debugf("The index of the highest hash in the original "+
"blockLocator sent to %s is %d", flow.peer, highestHashIndex)
locatorHashAboveHighestHash := highestHash
if highestHashIndex > 0 {
locatorHashAboveHighestHash = blockLocator[highestHashIndex-1]
}
highHash = locatorHashAboveHighestHash
lowHash = highestHash
blockLocator, err = flow.nextBlockLocator(highestHash, locatorHashAboveHighestHash)
if err != nil {
return nil, err
}
}
return highHash, nil
}
func (flow *handleRelayInvsFlow) nextBlockLocator(lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
log.Debugf("Sending a blockLocator to %s between %s and %s", flow.peer, lowHash, highHash)
blockLocator, err := flow.Domain().Consensus().CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
if err != nil {
if !errors.Is(err, model.ErrBlockNotInSelectedParentChain) {
return nil, err
}
log.Debugf("Headers selected parent chain moved since findHighestSharedBlockHash - " +
"restarting with full block locator")
blockLocator, err = flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
if err != nil {
return nil, err
}
}
return blockLocator, nil
}
func (flow *handleRelayInvsFlow) findHighestHashIndex(
highestHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (int, error) {
highestHashIndex := 0
highestHashIndexFound := false
for i, blockLocatorHash := range blockLocator {
if highestHash.Equal(blockLocatorHash) {
highestHashIndex = i
highestHashIndexFound = true
break
}
}
if !highestHashIndexFound {
return 0, protocolerrors.Errorf(true, "highest hash %s "+
"returned from peer %s is not in the original blockLocator", highestHash, flow.peer)
}
log.Debugf("The index of the highest hash in the original "+
"blockLocator sent to %s is %d", flow.peer, highestHashIndex)
return highestHashIndex, nil
}
func (flow *handleRelayInvsFlow) fetchHighestHash(
targetHash *externalapi.DomainHash, blockLocator externalapi.BlockLocator) (*externalapi.DomainHash, error) {
ibdBlockLocatorMessage := appmessage.NewMsgIBDBlockLocator(targetHash, blockLocator)
err := flow.outgoingRoute.Enqueue(ibdBlockLocatorMessage)
if err != nil {
return nil, err
}
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return nil, err
}
ibdBlockLocatorHighestHashMessage, ok := message.(*appmessage.MsgIBDBlockLocatorHighestHash)
if !ok {
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdIBDBlockLocatorHighestHash, message.Command())
}
highestHash := ibdBlockLocatorHighestHashMessage.HighestHash
log.Debugf("The highest hash the peer %s knows is %s", flow.peer, highestHash)
return highestHash, nil
}
func (flow *handleRelayInvsFlow) downloadHeaders(highestSharedBlockHash *externalapi.DomainHash,
@@ -274,59 +280,186 @@ func (flow *handleRelayInvsFlow) processHeader(msgBlockHeader *appmessage.MsgBlo
if !errors.As(err, &ruleerrors.RuleError{}) {
return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
}
log.Infof("Rejected block header %s from %s during IBD: %s", blockHash, flow.peer, err)
return protocolerrors.Wrapf(true, err, "got invalid block %s during IBD", blockHash)
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
log.Debugf("Skipping block header %s as it is a duplicate", blockHash)
} else {
log.Infof("Rejected block header %s from %s during IBD: %s", blockHash, flow.peer, err)
return protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash)
}
}
return nil
}
func (flow *handleRelayInvsFlow) fetchMissingUTXOSet(ibdRootHash *externalapi.DomainHash) (succeed bool, err error) {
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestIBDRootUTXOSetAndBlock(ibdRootHash))
func (flow *handleRelayInvsFlow) syncPruningPointUTXOSet() (bool, error) {
log.Debugf("Checking if a new pruning point is available")
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointHashMessage())
if err != nil {
return false, err
}
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return false, err
}
msgPruningPointHash, ok := message.(*appmessage.MsgPruningPointHashMessage)
if !ok {
return false, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdPruningPointHash, message.Command())
}
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(msgPruningPointHash.Hash)
if err != nil {
return false, err
}
utxoSet, block, found, err := flow.receiveIBDRootUTXOSetAndBlock()
if !blockInfo.Exists {
return false, errors.Errorf("The pruning point header is missing")
}
if blockInfo.BlockStatus != externalapi.StatusHeaderOnly {
log.Debugf("Already has the block data of the new suggested pruning point %s", msgPruningPointHash.Hash)
return true, nil
}
log.Infof("Checking if the suggested pruning point %s is compatible to the node DAG", msgPruningPointHash.Hash)
isValid, err := flow.Domain().Consensus().IsValidPruningPoint(msgPruningPointHash.Hash)
if err != nil {
return false, err
}
if !found {
if !isValid {
log.Infof("The suggested pruning point %s is incompatible to this node DAG, so stopping IBD with this"+
" peer", msgPruningPointHash.Hash)
return false, nil
}
err = flow.Domain().Consensus().ValidateAndInsertPruningPoint(block, utxoSet)
log.Info("Fetching the pruning point UTXO set")
succeed, err := flow.fetchMissingUTXOSet(msgPruningPointHash.Hash)
if err != nil {
return false, err
}
if !succeed {
log.Infof("Couldn't successfully fetch the pruning point UTXO set. Stopping IBD.")
return false, nil
}
log.Info("Fetched the new pruning point UTXO set")
return true, nil
}
func (flow *handleRelayInvsFlow) fetchMissingUTXOSet(pruningPointHash *externalapi.DomainHash) (succeed bool, err error) {
defer func() {
err := flow.Domain().Consensus().ClearImportedPruningPointData()
if err != nil {
panic(fmt.Sprintf("failed to clear imported pruning point data: %s", err))
}
}()
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointUTXOSetAndBlock(pruningPointHash))
if err != nil {
return false, err
}
block, err := flow.receivePruningPointBlock()
if err != nil {
return false, err
}
receivedAll, err := flow.receiveAndInsertPruningPointUTXOSet(pruningPointHash)
if err != nil {
return false, err
}
if !receivedAll {
return false, nil
}
err = flow.Domain().Consensus().ValidateAndInsertImportedPruningPoint(block)
if err != nil {
// TODO: Find a better way to deal with finality conflicts.
if errors.Is(err, ruleerrors.ErrSuggestedPruningViolatesFinality) {
return false, nil
}
return false, protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "error with IBD root UTXO set")
return false, protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "error with pruning point UTXO set")
}
return true, nil
}
func (flow *handleRelayInvsFlow) receiveIBDRootUTXOSetAndBlock() ([]byte, *externalapi.DomainBlock, bool, error) {
func (flow *handleRelayInvsFlow) receivePruningPointBlock() (*externalapi.DomainBlock, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "receivePruningPointBlock")
defer onEnd()
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return nil, nil, false, err
return nil, err
}
switch message := message.(type) {
case *appmessage.MsgIBDRootUTXOSetAndBlock:
return message.UTXOSet,
appmessage.MsgBlockToDomainBlock(message.Block), true, nil
case *appmessage.MsgIBDRootNotFound:
return nil, nil, false, nil
default:
return nil, nil, false,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s or %s, got: %s",
appmessage.CmdIBDRootUTXOSetAndBlock, appmessage.CmdIBDRootNotFound, message.Command(),
ibdBlockMessage, ok := message.(*appmessage.MsgIBDBlock)
if !ok {
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdIBDBlock, message.Command())
}
block := appmessage.MsgBlockToDomainBlock(ibdBlockMessage.MsgBlock)
log.Debugf("Received pruning point block %s", consensushashing.BlockHash(block))
return block, nil
}
func (flow *handleRelayInvsFlow) receiveAndInsertPruningPointUTXOSet(
pruningPointHash *externalapi.DomainHash) (bool, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "receiveAndInsertPruningPointUTXOSet")
defer onEnd()
receivedChunkCount := 0
receivedUTXOCount := 0
for {
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return false, err
}
switch message := message.(type) {
case *appmessage.MsgPruningPointUTXOSetChunk:
receivedUTXOCount += len(message.OutpointAndUTXOEntryPairs)
domainOutpointAndUTXOEntryPairs :=
appmessage.OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs(message.OutpointAndUTXOEntryPairs)
err := flow.Domain().Consensus().AppendImportedPruningPointUTXOs(domainOutpointAndUTXOEntryPairs)
if err != nil {
return false, err
}
receivedChunkCount++
if receivedChunkCount%ibdBatchSize == 0 {
log.Debugf("Received %d UTXO set chunks so far, totaling in %d UTXOs",
receivedChunkCount, receivedUTXOCount)
requestNextPruningPointUTXOSetChunkMessage := appmessage.NewMsgRequestNextPruningPointUTXOSetChunk()
err := flow.outgoingRoute.Enqueue(requestNextPruningPointUTXOSetChunkMessage)
if err != nil {
return false, err
}
}
case *appmessage.MsgDonePruningPointUTXOSetChunks:
log.Infof("Finished receiving the UTXO set. Total UTXOs: %d", receivedUTXOCount)
return true, nil
case *appmessage.MsgUnexpectedPruningPoint:
log.Infof("Could not receive the next UTXO chunk because the pruning point %s "+
"is no longer the pruning point of peer %s", pruningPointHash, flow.peer)
return false, nil
default:
return false, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s or %s or %s, got: %s", appmessage.CmdPruningPointUTXOSetChunk,
appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdUnexpectedPruningPoint, message.Command(),
)
}
}
}
@@ -369,6 +502,10 @@ func (flow *handleRelayInvsFlow) syncMissingBlockBodies(highHash *externalapi.Do
blockInsertionResult, err := flow.Domain().Consensus().ValidateAndInsertBlock(block)
if err != nil {
if errors.Is(err, ruleerrors.ErrDuplicateBlock) {
log.Debugf("Skipping IBD Block %s as it has already been added to the DAG", blockHash)
continue
}
return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash)
}
err = flow.OnNewBlock(block, blockInsertionResult)
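
The rewritten findHighestSharedBlockHash above is a locator-narrowing search: the syncer sends a block locator, the peer reports the highest entry it knows, and the next locator spans only the gap between that hash and the entry just above it, so the shared tip is pinned down in logarithmically many round trips. A minimal sketch of that narrowing loop over integer heights standing in for hashes; the exponential spacing mimics a real block locator, and all names here are hypothetical:

```go
package main

import "fmt"

// locator returns exponentially spaced heights from high down to low, always
// including both endpoints, like a headers-selected-chain block locator.
func locator(low, high int) []int {
	var loc []int
	step := 1
	for h := high; h > low; h -= step {
		loc = append(loc, h)
		step *= 2
	}
	return append(loc, low)
}

// peerHighest plays the remote peer: it reports the first (highest) locator
// entry at or below its own tip.
func peerHighest(loc []int, peerTip int) int {
	for _, h := range loc {
		if h <= peerTip {
			return h
		}
	}
	return loc[len(loc)-1]
}

// findHighestShared narrows the locator until the peer's answer is the
// locator's top entry (index 0) or the locator is two adjacent heights.
func findHighestShared(low, high, peerTip int) int {
	loc := locator(low, high)
	for {
		highest := peerHighest(loc, peerTip)
		index := 0
		for i, h := range loc {
			if h == highest {
				index = i
				break
			}
		}
		if index == 0 || (len(loc) == 2 && index == 1) {
			return highest
		}
		above := loc[index-1] // the locator entry just above the highest shared hash
		loc = locator(highest, above)
	}
}

func main() {
	fmt.Println(findHighestShared(0, 1_000_000, 731_337)) // 731337
}
```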


@@ -82,7 +82,7 @@ func HandleHandshake(context HandleHandshakeContext, netConnection *netadapter.N
err := context.AddToPeers(peer)
if err != nil {
if errors.As(err, &common.ErrPeerWithSameIDExists) {
if errors.Is(err, common.ErrPeerWithSameIDExists) {
return nil, protocolerrors.Wrap(false, err, "peer already exists")
}
return nil, err


@@ -42,9 +42,11 @@ func ReceiveVersion(context HandleHandshakeContext, incomingRoute *router.Route,
}
func (flow *receiveVersionFlow) start() (*appmessage.NetAddress, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "receiveVersionFlow.start()")
onEnd := logger.LogAndMeasureExecutionTime(log, "receiveVersionFlow.start")
defer onEnd()
log.Debugf("Starting receiveVersionFlow with %s", flow.peer.Address())
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, err


@@ -51,6 +51,8 @@ func (flow *sendVersionFlow) start() error {
onEnd := logger.LogAndMeasureExecutionTime(log, "sendVersionFlow.start")
defer onEnd()
log.Debugf("Starting sendVersionFlow with %s", flow.peer.Address())
// Version message.
localAddress := flow.AddressManager().BestLocalAddress(flow.peer.Connection().NetAddress())
subnetworkID := flow.Config().SubnetworkID


@@ -0,0 +1,23 @@
package testing
import (
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/pkg/errors"
"strings"
"testing"
)
func checkFlowError(t *testing.T, err error, isProtocolError bool, shouldBan bool, contains string) {
pErr := &protocolerrors.ProtocolError{}
if errors.As(err, &pErr) != isProtocolError {
t.Fatalf("Unexepcted error %+v", err)
}
if pErr.ShouldBan != shouldBan {
t.Fatalf("Exepcted shouldBan %t but got %t", shouldBan, pErr.ShouldBan)
}
if !strings.Contains(err.Error(), contains) {
t.Fatalf("Unexpected error. Expected error to contain '%s' but got: %+v", contains, err)
}
}

File diff suppressed because it is too large


@@ -0,0 +1,50 @@
package testing
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/flows/addressexchange"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"testing"
"time"
)
type fakeReceiveAddressesContext struct{}
func (f fakeReceiveAddressesContext) AddressManager() *addressmanager.AddressManager {
return nil
}
func TestReceiveAddressesErrors(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, params *dagconfig.Params) {
incomingRoute := router.NewRoute()
outgoingRoute := router.NewRoute()
peer := peerpkg.New(nil)
errChan := make(chan error)
go func() {
errChan <- addressexchange.ReceiveAddresses(fakeReceiveAddressesContext{}, incomingRoute, outgoingRoute, peer)
}()
_, err := outgoingRoute.DequeueWithTimeout(time.Second)
if err != nil {
t.Fatalf("DequeueWithTimeout: %+v", err)
}
// Sending addressmanager.GetAddressesMax+1 addresses should trigger a ban
err = incomingRoute.Enqueue(appmessage.NewMsgAddresses(make([]*appmessage.NetAddress,
addressmanager.GetAddressesMax+1)))
if err != nil {
t.Fatalf("Enqueue: %+v", err)
}
select {
case err := <-errChan:
checkFlowError(t, err, true, true, "address count exceeded")
case <-time.After(time.Second):
t.Fatalf("timed out after %s", time.Second)
}
})
}


@@ -159,7 +159,7 @@ func (flow *handleRelayedTransactionsFlow) receiveTransactions(requestedTransact
return err
}
if msgTxNotFound != nil {
if msgTxNotFound.ID != expectedID {
if !msgTxNotFound.ID.Equal(expectedID) {
return protocolerrors.Errorf(true, "expected transaction %s, but got %s",
expectedID, msgTxNotFound.ID)
}


@@ -37,6 +37,12 @@ func (m *Manager) Peers() []*peerpkg.Peer {
return m.context.Peers()
}
// IBDPeer returns the current IBD peer or nil if the node is not
// in IBD
func (m *Manager) IBDPeer() *peerpkg.Peer {
return m.context.IBDPeer()
}
// AddTransaction adds transaction to the mempool and propagates it.
func (m *Manager) AddTransaction(tx *externalapi.DomainTransaction) error {
return m.context.AddTransaction(tx)
@@ -73,3 +79,8 @@ func (m *Manager) SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMemp
func (m *Manager) ShouldMine() (bool, error) {
return m.context.ShouldMine()
}
// IsIBDRunning returns true if IBD is currently marked as running
func (m *Manager) IsIBDRunning() bool {
return m.context.IsIBDRunning()
}


@@ -44,6 +44,7 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
panic(err)
}
if isBanned {
log.Infof("Peer %s is banned. Disconnecting...", netConnection)
netConnection.Disconnect()
return
}
@@ -87,6 +88,7 @@ func (m *Manager) handleError(err error, netConnection *netadapter.NetConnection
panic(err)
}
}
log.Debugf("Disconnecting from %s (reason: %s)", netConnection, protocolErr.Cause)
netConnection.Disconnect()
return
}
@@ -135,8 +137,9 @@ func (m *Manager) registerBlockRelayFlows(router *routerpkg.Router, isStopping *
return []*flow{
m.registerFlow("HandleRelayInvs", router, []appmessage.MessageCommand{
appmessage.CmdInvRelayBlock, appmessage.CmdBlock, appmessage.CmdBlockLocator, appmessage.CmdIBDBlock,
appmessage.CmdDoneHeaders, appmessage.CmdIBDRootNotFound, appmessage.CmdIBDRootUTXOSetAndBlock,
appmessage.CmdBlockHeaders, appmessage.CmdIBDRootHash, appmessage.CmdIBDBlockLocatorHighestHash},
appmessage.CmdDoneHeaders, appmessage.CmdUnexpectedPruningPoint, appmessage.CmdPruningPointUTXOSetChunk,
appmessage.CmdBlockHeaders, appmessage.CmdPruningPointHash, appmessage.CmdIBDBlockLocatorHighestHash,
appmessage.CmdDonePruningPointUTXOSetChunks},
isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRelayInvs(m.context, incomingRoute,
outgoingRoute, peer)
@@ -163,10 +166,11 @@ func (m *Manager) registerBlockRelayFlows(router *routerpkg.Router, isStopping *
},
),
m.registerFlow("HandleRequestIBDRootUTXOSetAndBlock", router,
[]appmessage.MessageCommand{appmessage.CmdRequestIBDRootUTXOSetAndBlock}, isStopping, errChan,
m.registerFlow("HandleRequestPruningPointUTXOSetAndBlock", router,
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointUTXOSetAndBlock,
appmessage.CmdRequestNextPruningPointUTXOSetChunk}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRequestIBDRootUTXOSetAndBlock(m.context, incomingRoute, outgoingRoute)
return blockrelay.HandleRequestPruningPointUTXOSetAndBlock(m.context, incomingRoute, outgoingRoute)
},
),
@@ -177,10 +181,10 @@ func (m *Manager) registerBlockRelayFlows(router *routerpkg.Router, isStopping *
},
),
m.registerFlow("HandleIBDRootHashRequests", router,
[]appmessage.MessageCommand{appmessage.CmdRequestIBDRootHash}, isStopping, errChan,
m.registerFlow("HandlePruningPointHashRequests", router,
[]appmessage.MessageCommand{appmessage.CmdRequestPruningPointHash}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleIBDRootHashRequests(m.context, incomingRoute, outgoingRoute)
return blockrelay.HandlePruningPointHashRequests(m.context, incomingRoute, outgoingRoute)
},
),


@@ -9,7 +9,7 @@ import (
// ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage converts
// VirtualSelectedParentChainChanges to VirtualSelectedParentChainChangedNotificationMessage
func (ctx *Context) ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage(
selectedParentChainChanges *externalapi.SelectedParentChainChanges) (*appmessage.VirtualSelectedParentChainChangedNotificationMessage, error) {
selectedParentChainChanges *externalapi.SelectedChainPath) (*appmessage.VirtualSelectedParentChainChangedNotificationMessage, error) {
removedChainBlockHashes := make([]string, len(selectedParentChainChanges.Removed))
for i, removed := range selectedParentChainChanges.Removed {


@@ -229,7 +229,8 @@ func (nl *NotificationListener) convertUTXOChangesToUTXOsChangedNotification(
for _, listenerAddress := range nl.propagateUTXOsChangedNotificationAddresses {
listenerScriptPublicKeyString := listenerAddress.ScriptPublicKeyString
if addedPairs, ok := utxoChanges.Added[listenerScriptPublicKeyString]; ok {
notification.Added = ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, addedPairs)
notification.Added = append(notification.Added,
ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, addedPairs)...)
}
if removedOutpoints, ok := utxoChanges.Removed[listenerScriptPublicKeyString]; ok {
for outpoint := range removedOutpoints {


@@ -4,6 +4,7 @@ import (
"encoding/hex"
"fmt"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"github.com/kaspanet/kaspad/util/difficulty"
"math"
"math/big"
"strconv"
@@ -19,7 +20,6 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/pointers"
)
@@ -83,7 +83,7 @@ func (ctx *Context) GetDifficultyRatio(bits uint32, params *dagconfig.Params) fl
// converted back to a number. Note this is not the same as the proof of
// work limit directly because the block difficulty is encoded in a block
// with the compact form which loses precision.
target := util.CompactToBig(bits)
target := difficulty.CompactToBig(bits)
difficulty := new(big.Rat).SetFrac(params.PowMax, target)
diff, _ := difficulty.Float64()


@@ -9,6 +9,7 @@ import (
// HandleGetConnectedPeerInfo handles the respectively named RPC command
func HandleGetConnectedPeerInfo(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
peers := context.ProtocolManager.Peers()
ibdPeer := context.ProtocolManager.IBDPeer()
infos := make([]*appmessage.GetConnectedPeerInfoMessage, 0, len(peers))
for _, peer := range peers {
info := &appmessage.GetConnectedPeerInfoMessage{
@@ -20,6 +21,7 @@ func HandleGetConnectedPeerInfo(context *rpccontext.Context, _ *router.Router, _
UserAgent: peer.UserAgent(),
AdvertisedProtocolVersion: peer.AdvertisedProtocolVersion(),
TimeConnected: peer.TimeConnected().Milliseconds(),
IsIBDPeer: peer == ibdPeer,
}
infos = append(infos, info)
}


@@ -3,12 +3,27 @@ package rpchandlers
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// HandleGetMempoolEntries handles the respectively named RPC command
func HandleGetMempoolEntries(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
response := &appmessage.GetMempoolEntriesResponseMessage{}
response.Error = appmessage.RPCErrorf("not implemented")
return response, nil
transactions := context.Domain.MiningManager().AllTransactions()
entries := make([]*appmessage.MempoolEntry, 0, len(transactions))
for _, tx := range transactions {
transactionVerboseData, err := context.BuildTransactionVerboseData(
tx, consensushashing.TransactionID(tx).String(), nil, "")
if err != nil {
return nil, err
}
entries = append(entries, &appmessage.MempoolEntry{
Fee: tx.Fee,
TransactionVerboseData: transactionVerboseData,
})
}
return appmessage.NewGetMempoolEntriesResponseMessage(entries), nil
}


@@ -16,14 +16,22 @@ func HandleSubmitBlock(context *rpccontext.Context, _ *router.Router, request ap
msgBlock := submitBlockRequest.Block
domainBlock := appmessage.MsgBlockToDomainBlock(msgBlock)
if context.ProtocolManager.IsIBDRunning() {
return &appmessage.SubmitBlockResponseMessage{
Error: appmessage.RPCErrorf("Block not submitted - IBD is running"),
RejectReason: appmessage.RejectReasonIsInIBD,
}, nil
}
err := context.ProtocolManager.AddBlock(domainBlock)
if err != nil {
if !errors.As(err, &ruleerrors.RuleError{}) {
return nil, err
}
errorMessage := &appmessage.SubmitBlockResponseMessage{}
errorMessage.Error = appmessage.RPCErrorf("Block rejected. Reason: %s", err)
return errorMessage, nil
return &appmessage.SubmitBlockResponseMessage{
Error: appmessage.RPCErrorf("Block rejected. Reason: %s", err),
RejectReason: appmessage.RejectReasonBlockInvalid,
}, nil
}
log.Infof("Accepted block %s via submitBlock", consensushashing.BlockHash(domainBlock))


@@ -13,14 +13,15 @@ test -z "$(go fmt ./...)"
golint -set_exit_status ./...
staticcheck -checks "\
SA4006,SA4008,SA4009,SA4010,SA5003,SA1004,SA1014,SA1021,SA1023,SA1024,SA1025,SA1026,SA1027,SA1028,SA2000,SA2001, \
SA2003,SA4000,SA4001,SA4003,SA4004,SA4011,SA4012,SA4013,SA4014,SA4015,SA4016,SA4017,SA4018,SA4019,SA4020,SA4021, \
SA4022,SA4023,SA5000,SA5002,SA5004,SA5005,SA5007,SA5008,SA5009,SA5010,SA5011,SA5012,SA6001,SA6002,SA9001,SA9002, \
SA9003,SA9004,SA9005,SA9006,ST1019" ./...
staticcheck -checks SA4006,SA4008,SA4009,SA4010,SA5003,SA1004,SA1014,SA1021,SA1023,SA1024,SA1025,SA1026,SA1027,SA1028,SA2000,SA2001,SA2003,SA4000,SA4001,SA4003,SA4004,SA4011,SA4012,SA4013,SA4014,SA4015,SA4016,SA4017,SA4018,SA4019,SA4020,SA4021,SA4022,SA4023,SA5000,SA5002,SA5004,SA5005,SA5007,SA5008,SA5009,SA5010,SA5011,SA5012,SA6001,SA6002,SA9001,SA9002,SA9003,SA9004,SA9005,SA9006,ST1019 ./...
go vet -composites=false $FLAGS ./...
go build $FLAGS -o kaspad .
go test $FLAGS ./...
if [ -n "${NO_PARALLEL}" ]
then
go test -parallel=1 $FLAGS ./...
else
go test $FLAGS ./...
fi


@@ -1,104 +0,0 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"fmt"
"github.com/pkg/errors"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
flags "github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/util"
)
type config struct {
Directory string `short:"d" long:"directory" description:"Directory to write certificate pair"`
Years int `short:"y" long:"years" description:"How many years a certificate is valid for"`
Organization string `short:"o" long:"org" description:"Organization in certificate"`
ExtraHosts []string `short:"H" long:"host" description:"Additional hosts/IPs to create certificate for"`
Force bool `short:"f" long:"force" description:"Force overwriting of any old certs and keys"`
}
func main() {
cfg := config{
Years: 10,
Organization: "gencerts",
}
parser := flags.NewParser(&cfg, flags.Default)
_, err := parser.Parse()
if err != nil {
var flagsErr *flags.Error
if ok := errors.As(err, &flagsErr); !ok || flagsErr.Type != flags.ErrHelp {
parser.WriteHelp(os.Stderr)
}
return
}
if cfg.Directory == "" {
var err error
cfg.Directory, err = os.Getwd()
if err != nil {
fmt.Fprintf(os.Stderr, "no directory specified and cannot get working directory\n")
os.Exit(1)
}
}
cfg.Directory = cleanAndExpandPath(cfg.Directory)
certFile := filepath.Join(cfg.Directory, "rpc.cert")
keyFile := filepath.Join(cfg.Directory, "rpc.key")
if !cfg.Force {
if fileExists(certFile) || fileExists(keyFile) {
fmt.Fprintf(os.Stderr, "%s: certificate and/or key files exist; use -f to force\n", cfg.Directory)
os.Exit(1)
}
}
validUntil := time.Now().Add(time.Duration(cfg.Years) * 365 * 24 * time.Hour)
cert, key, err := util.NewTLSCertPair(cfg.Organization, validUntil, cfg.ExtraHosts)
if err != nil {
fmt.Fprintf(os.Stderr, "cannot generate certificate pair: %s\n", err)
os.Exit(1)
}
// Write cert and key files.
if err = ioutil.WriteFile(certFile, cert, 0666); err != nil {
fmt.Fprintf(os.Stderr, "cannot write cert: %s\n", err)
os.Exit(1)
}
if err = ioutil.WriteFile(keyFile, key, 0600); err != nil {
os.Remove(certFile)
fmt.Fprintf(os.Stderr, "cannot write key: %s\n", err)
os.Exit(1)
}
}
// cleanAndExpandPath expands environment variables and leading ~ in the
// passed path, cleans the result, and returns it.
func cleanAndExpandPath(path string) string {
// Expand initial ~ to OS specific home directory.
if strings.HasPrefix(path, "~") {
appHomeDir := util.AppDataDir("gencerts", false)
homeDir := filepath.Dir(appHomeDir)
path = strings.Replace(path, "~", homeDir, 1)
}
// NOTE: The os.ExpandEnv doesn't work with Windows-style %VARIABLE%,
// but the variables can still be expanded via POSIX-style $VARIABLE.
return filepath.Clean(os.ExpandEnv(path))
}
// fileExists reports whether the named file or directory exists.
func fileExists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}

cmd/kaspactl/README.md

@@ -0,0 +1,53 @@
# kaspactl
kaspactl is an RPC client for kaspad
## Requirements
Go 1.14 or later.
## Installation
#### Build from Source
- Install Go according to the installation instructions here:
http://golang.org/doc/install
- Ensure Go was installed properly and is a supported version:
```bash
$ go version
```
- Run the following commands to obtain and install kaspad including all dependencies:
```bash
$ git clone https://github.com/kaspanet/kaspad
$ cd kaspad/cmd/kaspactl
$ go install .
```
- Kaspactl should now be installed in `$(go env GOPATH)/bin`. If you did not already add the bin directory to your
system path during Go installation, you are encouraged to do so now.
## Usage
The full kaspactl configuration options can be seen with:
```bash
$ kaspactl --help
```
But the minimum configuration needed to run it is:
```bash
$ kaspactl <REQUEST_JSON>
```
For example:
```
$ kaspactl '{"getBlockDagInfoRequest":{}}'
```
For a list of all available requests, check out the [RPC documentation](infrastructure/network/netadapter/server/grpcserver/protowire/rpc.md)


@@ -0,0 +1,171 @@
package main
import (
"reflect"
"strconv"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/server/grpcserver/protowire"
"github.com/pkg/errors"
)
func parseCommand(args []string, commandDescs []*commandDescription) (*protowire.KaspadMessage, error) {
commandName, parameterStrings := args[0], args[1:]
var commandDesc *commandDescription
for _, cd := range commandDescs {
if cd.name == commandName {
commandDesc = cd
break
}
}
if commandDesc == nil {
return nil, errors.Errorf("unknown command: %s. Use --list-commands to list all commands", commandName)
}
if len(parameterStrings) != len(commandDesc.parameters) {
return nil, errors.Errorf("command '%s' expects %d parameters but got %d",
commandName, len(commandDesc.parameters), len(parameterStrings))
}
commandValue := reflect.New(unwrapCommandType(commandDesc.typeof))
for i, parameterDesc := range commandDesc.parameters {
parameterValue, err := stringToValue(parameterDesc, parameterStrings[i])
if err != nil {
return nil, err
}
setField(commandValue, parameterValue, parameterDesc)
}
return generateKaspadMessage(commandValue, commandDesc)
}
func setField(commandValue reflect.Value, parameterValue reflect.Value, parameterDesc *parameterDescription) {
parameterField := commandValue.Elem().FieldByName(parameterDesc.name)
parameterField.Set(parameterValue)
}
func stringToValue(parameterDesc *parameterDescription, valueStr string) (reflect.Value, error) {
var value interface{}
var err error
switch parameterDesc.typeof.Kind() {
case reflect.Bool:
value, err = strconv.ParseBool(valueStr)
if err != nil {
return reflect.Value{}, errors.WithStack(err)
}
case reflect.Int8:
var valueInt64 int64
valueInt64, err = strconv.ParseInt(valueStr, 10, 8)
if err != nil {
return reflect.Value{}, errors.WithStack(err)
}
value = int8(valueInt64)
case reflect.Int16:
var valueInt64 int64
valueInt64, err = strconv.ParseInt(valueStr, 10, 16)
if err != nil {
return reflect.Value{}, errors.WithStack(err)
}
value = int16(valueInt64)
case reflect.Int32:
var valueInt64 int64
valueInt64, err = strconv.ParseInt(valueStr, 10, 32)
if err != nil {
return reflect.Value{}, errors.WithStack(err)
}
value = int32(valueInt64)
case reflect.Int64:
value, err = strconv.ParseInt(valueStr, 10, 64)
if err != nil {
return reflect.Value{}, errors.WithStack(err)
}
case reflect.Uint8:
var valueUInt64 uint64
valueUInt64, err = strconv.ParseUint(valueStr, 10, 8)
if err != nil {
return reflect.Value{}, errors.WithStack(err)
}
value = uint8(valueUInt64)
case reflect.Uint16:
var valueUInt64 uint64
valueUInt64, err = strconv.ParseUint(valueStr, 10, 16)
if err != nil {
return reflect.Value{}, errors.WithStack(err)
}
value = uint16(valueUInt64)
case reflect.Uint32:
var valueUInt64 uint64
valueUInt64, err = strconv.ParseUint(valueStr, 10, 32)
if err != nil {
return reflect.Value{}, errors.WithStack(err)
}
value = uint32(valueUInt64)
case reflect.Uint64:
value, err = strconv.ParseUint(valueStr, 10, 64)
if err != nil {
return reflect.Value{}, errors.WithStack(err)
}
case reflect.Float32:
var valueFloat64 float64
valueFloat64, err = strconv.ParseFloat(valueStr, 32)
if err != nil {
return reflect.Value{}, errors.WithStack(err)
}
value = float32(valueFloat64)
case reflect.Float64:
value, err = strconv.ParseFloat(valueStr, 64)
if err != nil {
return reflect.Value{}, errors.WithStack(err)
}
case reflect.String:
value = valueStr
case reflect.Struct:
pointer := reflect.New(parameterDesc.typeof) // create pointer to this type
fieldInterface := pointer.Interface().(proto.Message)
err := protojson.Unmarshal([]byte(valueStr), fieldInterface)
if err != nil {
return reflect.Value{}, errors.WithStack(err)
}
// Unpointer the value once it's ready
fieldInterfaceValue := reflect.ValueOf(fieldInterface)
value = fieldInterfaceValue.Elem().Interface()
case reflect.Ptr:
dummyParameterDesc := &parameterDescription{
name: "valuePointedTo",
typeof: parameterDesc.typeof.Elem(),
}
valuePointedTo, err := stringToValue(dummyParameterDesc, valueStr)
if err != nil {
return reflect.Value{}, errors.WithStack(err)
}
pointer := pointerToValue(valuePointedTo)
value = pointer.Interface()
// Int and uint are not supported because their size is platform-dependent
case reflect.Int,
reflect.Uint,
// Other types are not supported simply because they are not used in any command right now
// but support can be added if and when needed
reflect.Slice,
reflect.Func,
reflect.Interface,
reflect.Map,
reflect.UnsafePointer,
reflect.Invalid,
reflect.Uintptr,
reflect.Complex64,
reflect.Complex128,
reflect.Array,
reflect.Chan:
fallthrough
default:
return reflect.Value{},
errors.Errorf("Unsupported type '%s' for parameter '%s'", parameterDesc.typeof.Kind(), parameterDesc.name)
}
return reflect.ValueOf(value), nil
}
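
stringToValue dispatches on reflect.Kind and maps each scalar kind to the matching strconv parser, recursing for pointers and unmarshaling JSON for structs. A minimal sketch of the same kind-switch for a few kinds, plus the reflect.Value.Set step that setField performs; the target struct here is hypothetical:

```go
package main

import (
	"fmt"
	"reflect"
	"strconv"
)

// parseByKind converts a string into a reflect.Value of the requested kind,
// following the same strconv-based dispatch as stringToValue above
// (only a few kinds are shown).
func parseByKind(kind reflect.Kind, s string) (reflect.Value, error) {
	switch kind {
	case reflect.Bool:
		v, err := strconv.ParseBool(s)
		if err != nil {
			return reflect.Value{}, err
		}
		return reflect.ValueOf(v), nil
	case reflect.Uint64:
		v, err := strconv.ParseUint(s, 10, 64)
		if err != nil {
			return reflect.Value{}, err
		}
		return reflect.ValueOf(v), nil
	case reflect.String:
		return reflect.ValueOf(s), nil
	default:
		return reflect.Value{}, fmt.Errorf("unsupported kind %s", kind)
	}
}

func main() {
	v, err := parseByKind(reflect.Uint64, "42")
	if err != nil {
		panic(err)
	}
	// The returned value is assigned to a matching struct field via Set,
	// exactly as setField does with parameterField.Set(parameterValue).
	var target struct{ Amount uint64 }
	reflect.ValueOf(&target).Elem().FieldByName("Amount").Set(v)
	fmt.Println(target.Amount) // 42
}
```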

cmd/kaspactl/commands.go

@@ -0,0 +1,87 @@
package main
import (
"fmt"
"reflect"
"strings"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/server/grpcserver/protowire"
)
var commandTypes = []reflect.Type{
reflect.TypeOf(protowire.KaspadMessage_AddPeerRequest{}),
reflect.TypeOf(protowire.KaspadMessage_GetConnectedPeerInfoRequest{}),
reflect.TypeOf(protowire.KaspadMessage_GetPeerAddressesRequest{}),
reflect.TypeOf(protowire.KaspadMessage_GetCurrentNetworkRequest{}),
reflect.TypeOf(protowire.KaspadMessage_GetBlockRequest{}),
reflect.TypeOf(protowire.KaspadMessage_GetBlocksRequest{}),
reflect.TypeOf(protowire.KaspadMessage_GetHeadersRequest{}),
reflect.TypeOf(protowire.KaspadMessage_GetBlockCountRequest{}),
reflect.TypeOf(protowire.KaspadMessage_GetBlockDagInfoRequest{}),
reflect.TypeOf(protowire.KaspadMessage_GetSelectedTipHashRequest{}),
reflect.TypeOf(protowire.KaspadMessage_GetVirtualSelectedParentBlueScoreRequest{}),
reflect.TypeOf(protowire.KaspadMessage_GetVirtualSelectedParentChainFromBlockRequest{}),
reflect.TypeOf(protowire.KaspadMessage_ResolveFinalityConflictRequest{}),
reflect.TypeOf(protowire.KaspadMessage_GetBlockTemplateRequest{}),
reflect.TypeOf(protowire.KaspadMessage_SubmitBlockRequest{}),
reflect.TypeOf(protowire.KaspadMessage_GetMempoolEntryRequest{}),
reflect.TypeOf(protowire.KaspadMessage_GetMempoolEntriesRequest{}),
reflect.TypeOf(protowire.KaspadMessage_SubmitTransactionRequest{}),
reflect.TypeOf(protowire.KaspadMessage_GetUtxosByAddressesRequest{}),
}
type commandDescription struct {
name string
parameters []*parameterDescription
typeof reflect.Type
}
type parameterDescription struct {
name string
typeof reflect.Type
}
func commandDescriptions() []*commandDescription {
commandDescriptions := make([]*commandDescription, len(commandTypes))
for i, commandTypeWrapped := range commandTypes {
commandType := unwrapCommandType(commandTypeWrapped)
name := strings.TrimSuffix(commandType.Name(), "RequestMessage")
numFields := commandType.NumField()
var parameters []*parameterDescription
for i := 0; i < numFields; i++ {
field := commandType.Field(i)
if !isFieldExported(field) {
continue
}
parameters = append(parameters, &parameterDescription{
name: field.Name,
typeof: field.Type,
})
}
commandDescriptions[i] = &commandDescription{
name: name,
parameters: parameters,
typeof: commandTypeWrapped,
}
}
return commandDescriptions
}
func (cd *commandDescription) help() string {
sb := &strings.Builder{}
sb.WriteString(cd.name)
for _, parameter := range cd.parameters {
_, _ = fmt.Fprintf(sb, " [%s]", parameter.name)
}
return sb.String()
}
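
commandDescriptions derives one CLI help line per request type by trimming the RequestMessage suffix and listing the exported fields. A minimal sketch of that derivation against a hypothetical request struct (the real types come from protowire):

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
	"unicode"
)

// fakeRequestMessage is a hypothetical request type used only to show how a
// help line is derived; the real types come from the protowire package.
type fakeRequestMessage struct {
	Hash                string
	IncludeTransactions bool
	state               int // unexported: skipped, like protobuf's internal fields
}

func main() {
	t := reflect.TypeOf(fakeRequestMessage{})
	name := strings.TrimSuffix(t.Name(), "RequestMessage")
	sb := &strings.Builder{}
	sb.WriteString(name)
	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		if !unicode.IsUpper(rune(field.Name[0])) { // isFieldExported's check
			continue
		}
		fmt.Fprintf(sb, " [%s]", field.Name)
	}
	fmt.Println(sb.String()) // fake [Hash] [IncludeTransactions]
}
```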


@@ -12,9 +12,11 @@ var (
)
type configFlags struct {
RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
Timeout uint64 `short:"t" long:"timeout" description:"Timeout for the request (in seconds)"`
RequestJSON string `description:"The request in JSON format"`
RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
Timeout uint64 `short:"t" long:"timeout" description:"Timeout for the request (in seconds)"`
RequestJSON string `short:"j" long:"json" description:"The request in JSON format"`
ListCommands bool `short:"l" long:"list-commands" description:"List all commands and exit"`
CommandAndParameters []string
config.NetworkFlags
}
@@ -23,21 +25,29 @@ func parseConfig() (*configFlags, error) {
RPCServer: defaultRPCServer,
Timeout: defaultTimeout,
}
parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag)
args, err := parser.Parse()
parser := flags.NewParser(cfg, flags.HelpFlag)
parser.Usage = "kaspactl [OPTIONS] [COMMAND] [COMMAND PARAMETERS].\n\nCommand can be supplied only if --json is not used." +
"\n\nUse `kaspactl --list-commands` to get a list of all commands and their parameters"
remainingArgs, err := parser.Parse()
if err != nil {
return nil, err
}
if cfg.ListCommands {
return cfg, nil
}
err = cfg.ResolveNetwork(parser)
if err != nil {
return nil, err
}
if len(args) != 1 {
return nil, errors.New("the last parameter must be the request in JSON format")
cfg.CommandAndParameters = remainingArgs
if len(cfg.CommandAndParameters) == 0 && cfg.RequestJSON == "" ||
len(cfg.CommandAndParameters) > 0 && cfg.RequestJSON != "" {
return nil, errors.New("Exactly one of --json or a command must be specified")
}
cfg.RequestJSON = args[0]
return cfg, nil
}


@@ -2,9 +2,14 @@ package main
import (
"fmt"
"github.com/kaspanet/kaspad/infrastructure/network/rpcclient/grpcclient"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/server/grpcserver/protowire"
"os"
"time"
"github.com/pkg/errors"
"google.golang.org/protobuf/encoding/protojson"
"github.com/kaspanet/kaspad/infrastructure/network/rpcclient/grpcclient"
)
func main() {
@@ -12,6 +17,10 @@ func main() {
if err != nil {
printErrorAndExit(fmt.Sprintf("error parsing command-line arguments: %s", err))
}
if cfg.ListCommands {
printAllCommands()
return
}
rpcAddress, err := cfg.NetParams().NormalizeRPCServerAddress(cfg.RPCServer)
if err != nil {
@@ -23,28 +32,69 @@ func main() {
}
defer client.Disconnect()
var responseString string
done := make(chan struct{})
responseChan := make(chan string)
go func() {
requestString := cfg.RequestJSON
var err error
responseString, err = client.PostJSON(requestString)
if err != nil {
printErrorAndExit(fmt.Sprintf("error posting the request to the RPC server: %s", err))
}
done <- struct{}{}
}()
if cfg.RequestJSON != "" {
go postJSON(cfg, client, responseChan)
} else {
go postCommand(cfg, client, responseChan)
}
timeout := time.Duration(cfg.Timeout) * time.Second
select {
case <-done:
fmt.Println(responseString)
case responseString := <-responseChan:
prettyResponseString := prettifyResponse(responseString)
fmt.Println(prettyResponseString)
case <-time.After(timeout):
printErrorAndExit(fmt.Sprintf("timeout of %s has been exceeded", timeout))
}
}
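The select above is the standard Go idiom for bounding a blocking call with a timeout. A minimal standalone sketch of the same pattern (names are illustrative, not taken from kaspactl):
```go
package main

import (
	"fmt"
	"time"
)

// fetch simulates a slow RPC call whose result arrives on a channel.
func fetch(responseChan chan<- string) {
	time.Sleep(2 * time.Second) // pretend to wait for the server
	responseChan <- "response"
}

func main() {
	responseChan := make(chan string)
	go fetch(responseChan)

	timeout := time.Second
	select {
	case response := <-responseChan:
		fmt.Println(response)
	case <-time.After(timeout):
		// The fetch goroutine is abandoned here; acceptable for a
		// one-shot CLI like kaspactl, which exits right after.
		fmt.Printf("timeout of %s has been exceeded\n", timeout)
	}
}
```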
func printAllCommands() {
requestDescs := commandDescriptions()
for _, requestDesc := range requestDescs {
fmt.Printf("\t%s\n", requestDesc.help())
}
}
func postCommand(cfg *configFlags, client *grpcclient.GRPCClient, responseChan chan string) {
message, err := parseCommand(cfg.CommandAndParameters, commandDescriptions())
if err != nil {
printErrorAndExit(fmt.Sprintf("error parsing command: %s", err))
}
response, err := client.Post(message)
if err != nil {
printErrorAndExit(fmt.Sprintf("error posting the request to the RPC server: %s", err))
}
responseBytes, err := protojson.Marshal(response)
if err != nil {
printErrorAndExit(errors.Wrapf(err, "error parsing the response from the RPC server").Error())
}
responseChan <- string(responseBytes)
}
func postJSON(cfg *configFlags, client *grpcclient.GRPCClient, doneChan chan string) {
responseString, err := client.PostJSON(cfg.RequestJSON)
if err != nil {
printErrorAndExit(fmt.Sprintf("error posting the request to the RPC server: %s", err))
}
doneChan <- responseString
}
func prettifyResponse(response string) string {
kaspadMessage := &protowire.KaspadMessage{}
err := protojson.Unmarshal([]byte(response), kaspadMessage)
if err != nil {
printErrorAndExit(fmt.Sprintf("error parsing the response from the RPC server: %s", err))
}
marshalOptions := &protojson.MarshalOptions{}
marshalOptions.Indent = " "
return marshalOptions.Format(kaspadMessage)
}
func printErrorAndExit(message string) {
fmt.Fprintf(os.Stderr, fmt.Sprintf("%s\n", message))
os.Exit(1)

View File

@@ -0,0 +1,46 @@
package main
import (
"reflect"
"unicode"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/server/grpcserver/protowire"
)
// For each command, protobuf generates two types:
// 1. A concrete type that holds the fields of the command, named after the command with a `RequestMessage` suffix
// 2. A wrapper that implements isKaspadMessage_Payload and has a single field pointing to the concrete command,
// named after the command with a `KaspadMessage_` prefix and a `Request` suffix
// unwrapCommandType converts a reflect.Type signifying a wrapper type into the concrete request type
func unwrapCommandType(requestTypeWrapped reflect.Type) reflect.Type {
return requestTypeWrapped.Field(0).Type.Elem()
}
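To make the wrapper relationship concrete, this is roughly the shape protobuf generates for a hypothetical GetBlockCount command (a sketch for illustration; the real generated types live in the protowire package):
```go
// Concrete type: holds the command's fields.
type GetBlockCountRequestMessage struct {
	// command fields go here
}

// Wrapper type: implements isKaspadMessage_Payload and points at the concrete type.
type KaspadMessage_GetBlockCountRequest struct {
	GetBlockCountRequest *GetBlockCountRequestMessage
}
```
Given the wrapper type, Field(0) is the single pointer field and Elem() dereferences its type, which is how unwrapCommandType above recovers GetBlockCountRequestMessage.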
// unwrapCommandValue converts a reflect.Value of a pointer to a wrapped command into a concrete command
func unwrapCommandValue(commandValueWrapped reflect.Value) reflect.Value {
return commandValueWrapped.Elem().Field(0)
}
// isFieldExported returns true if the given field is exported.
// Currently the only way to check this is to test whether the first rune in the field's name is upper case.
func isFieldExported(field reflect.StructField) bool {
return unicode.IsUpper(rune(field.Name[0]))
}
// generateKaspadMessage generates a wrapped KaspadMessage with the given `commandValue`
func generateKaspadMessage(commandValue reflect.Value, commandDesc *commandDescription) (*protowire.KaspadMessage, error) {
commandWrapper := reflect.New(commandDesc.typeof)
unwrapCommandValue(commandWrapper).Set(commandValue)
kaspadMessage := reflect.New(reflect.TypeOf(protowire.KaspadMessage{}))
kaspadMessage.Elem().FieldByName("Payload").Set(commandWrapper)
return kaspadMessage.Interface().(*protowire.KaspadMessage), nil
}
// pointerToValue returns a reflect.Value that represents a pointer to the given value
func pointerToValue(valuePointedTo reflect.Value) reflect.Value {
pointer := reflect.New(valuePointedTo.Type())
pointer.Elem().Set(valuePointedTo)
return pointer
}

45
cmd/kaspaminer/README.md Normal file
View File

@@ -0,0 +1,45 @@
# kaspaminer
Kaspaminer is a CPU-based miner for kaspad.
## Requirements
Go 1.14 or later.
## Installation
#### Build from Source
- Install Go according to the installation instructions here:
http://golang.org/doc/install
- Ensure Go was installed properly and is a supported version:
```bash
$ go version
```
- Run the following commands to obtain and install kaspad including all dependencies:
```bash
$ git clone https://github.com/kaspanet/kaspad
$ cd kaspad/cmd/kaspaminer
$ go install .
```
- Kaspaminer should now be installed in `$(go env GOPATH)/bin`. If you did
not already add the bin directory to your system path during Go installation,
you are encouraged to do so now.
## Usage
The full kaspaminer configuration options can be seen with:
```bash
$ kaspaminer --help
```
But the minimum configuration needed to run it is:
```bash
$ kaspaminer --miningaddr=<YOUR_MINING_ADDRESS>
```

View File

@@ -30,12 +30,13 @@ var (
)
type configFlags struct {
ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"`
RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
MiningAddr string `long:"miningaddr" description:"Address to mine to"`
NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine. If omitted, will mine until the process is interrupted."`
MineWhenNotSynced bool `long:"mine-when-not-synced" description:"Mine even if the node is not synced with the rest of the network."`
Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"`
RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
MiningAddr string `long:"miningaddr" description:"Address to mine to"`
NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine. If omitted, will mine until the process is interrupted."`
MineWhenNotSynced bool `long:"mine-when-not-synced" description:"Mine even if the node is not synced with the rest of the network."`
Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
TargetBlocksPerSecond float64 `long:"target-blocks-per-second" description:"Sets a maximum block rate. This flag is for debugging purposes."`
config.NetworkFlags
}

View File

@@ -48,7 +48,7 @@ func main() {
doneChan := make(chan struct{})
spawn("mineLoop", func() {
err = mineLoop(client, cfg.NumberOfBlocks, cfg.MineWhenNotSynced, miningAddr)
err = mineLoop(client, cfg.NumberOfBlocks, cfg.TargetBlocksPerSecond, cfg.MineWhenNotSynced, miningAddr)
if err != nil {
panic(errors.Wrap(err, "error in mine loop"))
}

View File

@@ -2,6 +2,7 @@ package main
import (
nativeerrors "errors"
"github.com/kaspanet/kaspad/util/difficulty"
"math/rand"
"sync/atomic"
"time"
@@ -19,13 +20,13 @@ import (
"github.com/pkg/errors"
)
var random = rand.New(rand.NewSource(time.Now().UnixNano()))
var hashesTried uint64
const logHashRateInterval = 10 * time.Second
func mineLoop(client *minerClient, numberOfBlocks uint64, mineWhenNotSynced bool,
func mineLoop(client *minerClient, numberOfBlocks uint64, targetBlocksPerSecond float64, mineWhenNotSynced bool,
miningAddr util.Address) error {
rand.Seed(time.Now().UnixNano()) // Seed the global concurrent-safe random source.
errChan := make(chan error)
@@ -33,7 +34,18 @@ func mineLoop(client *minerClient, numberOfBlocks uint64, mineWhenNotSynced bool
doneChan := make(chan struct{})
spawn("mineLoop-internalLoop", func() {
const windowSize = 10
var expectedDurationForWindow time.Duration
var windowExpectedEndTime time.Time
hasBlockRateTarget := targetBlocksPerSecond != 0
if hasBlockRateTarget {
expectedDurationForWindow = time.Duration(float64(windowSize)/targetBlocksPerSecond) * time.Second
windowExpectedEndTime = time.Now().Add(expectedDurationForWindow)
}
blockInWindowIndex := 0
for i := uint64(0); numberOfBlocks == 0 || i < numberOfBlocks; i++ {
foundBlock := make(chan *externalapi.DomainBlock)
mineNextBlock(client, miningAddr, foundBlock, mineWhenNotSynced, templateStopChan, errChan)
block := <-foundBlock
@@ -42,6 +54,21 @@ func mineLoop(client *minerClient, numberOfBlocks uint64, mineWhenNotSynced bool
if err != nil {
errChan <- err
}
if hasBlockRateTarget {
blockInWindowIndex++
if blockInWindowIndex == windowSize-1 {
deviation := windowExpectedEndTime.Sub(time.Now())
if deviation > 0 {
log.Infof("Finished to mine %d blocks %s earlier than expected. Sleeping %s to compensate",
windowSize, deviation, deviation)
time.Sleep(deviation)
}
blockInWindowIndex = 0
windowExpectedEndTime = time.Now().Add(expectedDurationForWindow)
}
}
}
doneChan <- struct{}{}
})
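The additions above throttle mining over a window of blocks rather than sleeping after each block, which absorbs per-block timing variance. A self-contained sketch of the same windowed pacing technique, with a no-op stand-in for the mining work:
```go
package main

import (
	"fmt"
	"time"
)

// paceWork runs doWork repeatedly, sleeping at the end of each window of
// windowSize iterations so the average rate approaches targetPerSecond.
func paceWork(doWork func(), targetPerSecond float64, windowSize, iterations int) {
	expectedDurationForWindow := time.Duration(float64(windowSize)/targetPerSecond) * time.Second
	windowExpectedEndTime := time.Now().Add(expectedDurationForWindow)
	indexInWindow := 0
	for i := 0; i < iterations; i++ {
		doWork()
		indexInWindow++
		if indexInWindow == windowSize {
			// Finished the window early: sleep off the difference.
			if deviation := time.Until(windowExpectedEndTime); deviation > 0 {
				time.Sleep(deviation)
			}
			indexInWindow = 0
			windowExpectedEndTime = time.Now().Add(expectedDurationForWindow)
		}
	}
}

func main() {
	start := time.Now()
	paceWork(func() {}, 10, 5, 20) // 20 no-op "blocks" at ~10 per second
	fmt.Printf("elapsed: %s (expected ~2s)\n", time.Since(start))
}
```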
@@ -60,7 +87,7 @@ func logHashRate() {
spawn("logHashRate", func() {
lastCheck := time.Now()
for range time.Tick(logHashRateInterval) {
currentHashesTried := hashesTried
currentHashesTried := atomic.LoadUint64(&hashesTried)
currentTime := time.Now()
kiloHashesTried := float64(currentHashesTried) / 1000.0
hashRate := kiloHashesTried / currentTime.Sub(lastCheck).Seconds()
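Reading the counter with atomic.LoadUint64 only helps if the mining side increments it with atomic.AddUint64; a plain `hashesTried++` next to an atomic load would still race. A minimal sketch of the counter pattern, assuming the workers use the atomic increment:
```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

var hashesTried uint64

func main() {
	var wg sync.WaitGroup
	for w := 0; w < 4; w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < 1000; i++ {
				atomic.AddUint64(&hashesTried, 1) // worker side: atomic increment
			}
		}()
	}
	wg.Wait()
	// Reader side: atomic load is safe even while workers are running.
	fmt.Println(atomic.LoadUint64(&hashesTried)) // 4000
}
```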
@@ -88,17 +115,21 @@ func handleFoundBlock(client *minerClient, block *externalapi.DomainBlock) error
blockHash := consensushashing.BlockHash(block)
log.Infof("Found block %s with parents %s. Submitting to %s", blockHash, block.Header.ParentHashes(), client.Address())
err := client.SubmitBlock(block)
rejectReason, err := client.SubmitBlock(block)
if err != nil {
if rejectReason == appmessage.RejectReasonIsInIBD {
log.Warnf("Block %s was rejected because the node is in IBD", blockHash)
return nil
}
return errors.Errorf("Error submitting block %s to %s: %s", blockHash, client.Address(), err)
}
return nil
}
func solveBlock(block *externalapi.DomainBlock, stopChan chan struct{}, foundBlock chan *externalapi.DomainBlock) {
targetDifficulty := util.CompactToBig(block.Header.Bits())
targetDifficulty := difficulty.CompactToBig(block.Header.Bits())
headerForMining := block.Header.ToMutable()
initialNonce := random.Uint64()
initialNonce := rand.Uint64() // Use the global concurrent-safe random source.
for i := initialNonce; i != initialNonce-1; i++ {
select {
case <-stopChan:
@@ -160,8 +191,9 @@ func solveLoop(newTemplateChan chan *appmessage.GetBlockTemplateResponseMessage,
stopOldTemplateSolving = make(chan struct{})
block := appmessage.MsgBlockToDomainBlock(template.MsgBlock)
stopOldTemplateSolvingCopy := stopOldTemplateSolving
spawn("solveBlock", func() {
solveBlock(block, stopOldTemplateSolving, foundBlock)
solveBlock(block, stopOldTemplateSolvingCopy, foundBlock)
})
}
if stopOldTemplateSolving != nil {

View File

@@ -8,6 +8,35 @@ WALLET
`wallet` is a simple, no-frills wallet program operated via the command line.\
It is capable of generating wallet key-pairs, printing a wallet's current balance, and sending simple transactions.
## Requirements
Go 1.14 or later.
## Installation
#### Build from Source
- Install Go according to the installation instructions here:
http://golang.org/doc/install
- Ensure Go was installed properly and is a supported version:
```bash
$ go version
```
- Run the following commands to obtain and install kaspad including all dependencies:
```bash
$ git clone https://github.com/kaspanet/kaspad
$ cd kaspad/cmd/wallet
$ go install .
```
- Wallet should now be installed in `$(go env GOPATH)/bin`. If you did
not already add the bin directory to your system path during Go installation,
you are encouraged to do so now.
Usage
-----

View File

@@ -18,7 +18,7 @@ COPY go.sum .
COPY . .
RUN ./build_and_test.sh
RUN NO_PARALLEL=1 ./build_and_test.sh
# --- multistage docker build: stage #2: runtime image
FROM alpine

View File

@@ -1,9 +1,9 @@
package consensus
import (
"github.com/kaspanet/kaspad/infrastructure/db/database"
"sync"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
@@ -32,19 +32,20 @@ type consensus struct {
reachabilityManager model.ReachabilityManager
finalityManager model.FinalityManager
acceptanceDataStore model.AcceptanceDataStore
blockStore model.BlockStore
blockHeaderStore model.BlockHeaderStore
pruningStore model.PruningStore
ghostdagDataStore model.GHOSTDAGDataStore
blockRelationStore model.BlockRelationStore
blockStatusStore model.BlockStatusStore
consensusStateStore model.ConsensusStateStore
headersSelectedTipStore model.HeaderSelectedTipStore
multisetStore model.MultisetStore
reachabilityDataStore model.ReachabilityDataStore
utxoDiffStore model.UTXODiffStore
finalityStore model.FinalityStore
acceptanceDataStore model.AcceptanceDataStore
blockStore model.BlockStore
blockHeaderStore model.BlockHeaderStore
pruningStore model.PruningStore
ghostdagDataStore model.GHOSTDAGDataStore
blockRelationStore model.BlockRelationStore
blockStatusStore model.BlockStatusStore
consensusStateStore model.ConsensusStateStore
headersSelectedTipStore model.HeaderSelectedTipStore
multisetStore model.MultisetStore
reachabilityDataStore model.ReachabilityDataStore
utxoDiffStore model.UTXODiffStore
finalityStore model.FinalityStore
headersSelectedChainStore model.HeadersSelectedChainStore
}
// BuildBlock builds a block over the current state, with the transactions
@@ -198,7 +199,9 @@ func (s *consensus) GetMissingBlockBodyHashes(highHash *externalapi.DomainHash)
return s.syncManager.GetMissingBlockBodyHashes(highHash)
}
func (s *consensus) GetPruningPointUTXOSet(expectedPruningPointHash *externalapi.DomainHash) ([]byte, error) {
func (s *consensus) GetPruningPointUTXOs(expectedPruningPointHash *externalapi.DomainHash,
fromOutpoint *externalapi.DomainOutpoint, limit int) ([]*externalapi.OutpointAndUTXOEntryPair, error) {
s.lock.Lock()
defer s.lock.Unlock()
@@ -213,11 +216,11 @@ func (s *consensus) GetPruningPointUTXOSet(expectedPruningPointHash *externalapi
pruningPointHash)
}
serializedUTXOSet, err := s.pruningStore.PruningPointSerializedUTXOSet(s.databaseContext)
pruningPointUTXOs, err := s.pruningStore.PruningPointUTXOs(s.databaseContext, fromOutpoint, limit)
if err != nil {
return nil, err
}
return serializedUTXOSet, nil
return pruningPointUTXOs, nil
}
func (s *consensus) PruningPoint() (*externalapi.DomainHash, error) {
@@ -227,11 +230,25 @@ func (s *consensus) PruningPoint() (*externalapi.DomainHash, error) {
return s.pruningStore.PruningPoint(s.databaseContext)
}
func (s *consensus) ValidateAndInsertPruningPoint(newPruningPoint *externalapi.DomainBlock, serializedUTXOSet []byte) error {
func (s *consensus) ClearImportedPruningPointData() error {
s.lock.Lock()
defer s.lock.Unlock()
return s.blockProcessor.ValidateAndInsertPruningPoint(newPruningPoint, serializedUTXOSet)
return s.pruningManager.ClearImportedPruningPointData()
}
func (s *consensus) AppendImportedPruningPointUTXOs(outpointAndUTXOEntryPairs []*externalapi.OutpointAndUTXOEntryPair) error {
s.lock.Lock()
defer s.lock.Unlock()
return s.pruningManager.AppendImportedPruningPointUTXOs(outpointAndUTXOEntryPairs)
}
func (s *consensus) ValidateAndInsertImportedPruningPoint(newPruningPoint *externalapi.DomainBlock) error {
s.lock.Lock()
defer s.lock.Unlock()
return s.blockProcessor.ValidateAndInsertImportedPruningPoint(newPruningPoint)
}
func (s *consensus) GetVirtualSelectedParent() (*externalapi.DomainHash, error) {
@@ -297,15 +314,29 @@ func (s *consensus) CreateBlockLocator(lowHash, highHash *externalapi.DomainHash
return s.syncManager.CreateBlockLocator(lowHash, highHash, limit)
}
func (s *consensus) FindNextBlockLocatorBoundaries(blockLocator externalapi.BlockLocator) (lowHash, highHash *externalapi.DomainHash, err error) {
func (s *consensus) CreateFullHeadersSelectedChainBlockLocator() (externalapi.BlockLocator, error) {
s.lock.Lock()
defer s.lock.Unlock()
if len(blockLocator) == 0 {
return nil, nil, errors.Errorf("empty block locator")
lowHash, err := s.pruningStore.PruningPoint(s.databaseContext)
if err != nil {
return nil, err
}
return s.syncManager.FindNextBlockLocatorBoundaries(blockLocator)
highHash, err := s.headersSelectedTipStore.HeadersSelectedTip(s.databaseContext)
if err != nil {
return nil, err
}
return s.syncManager.CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
}
func (s *consensus) CreateHeadersSelectedChainBlockLocator(lowHash,
highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
s.lock.Lock()
defer s.lock.Unlock()
return s.syncManager.CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
}
func (s *consensus) GetSyncInfo() (*externalapi.SyncInfo, error) {
@@ -327,7 +358,7 @@ func (s *consensus) IsValidPruningPoint(blockHash *externalapi.DomainHash) (bool
return s.pruningManager.IsValidPruningPoint(blockHash)
}
func (s *consensus) GetVirtualSelectedParentChainFromBlock(blockHash *externalapi.DomainHash) (*externalapi.SelectedParentChainChanges, error) {
func (s *consensus) GetVirtualSelectedParentChainFromBlock(blockHash *externalapi.DomainHash) (*externalapi.SelectedChainPath, error) {
s.lock.Lock()
defer s.lock.Unlock()

View File

@@ -14,7 +14,7 @@ import (
func TestConsensus_GetBlockInfo(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, params *dagconfig.Params) {
factory := NewFactory()
consensus, teardown, err := factory.NewTestConsensus(params, "TestConsensus_GetBlockInfo")
consensus, teardown, err := factory.NewTestConsensus(params, false, "TestConsensus_GetBlockInfo")
if err != nil {
t.Fatalf("Error setting up consensus: %+v", err)
}

View File

@@ -0,0 +1,13 @@
package binaryserialization
import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// SerializeHash serializes hash to a slice of bytes
func SerializeHash(hash *externalapi.DomainHash) []byte {
return hash.ByteSlice()
}
// DeserializeHash deserializes a slice of bytes into a hash
func DeserializeHash(hashBytes []byte) (*externalapi.DomainHash, error) {
return externalapi.NewDomainHashFromByteSlice(hashBytes)
}

View File

@@ -0,0 +1,15 @@
package binaryserialization
import "encoding/binary"
// SerializeChainBlockIndex serializes chain block index
func SerializeChainBlockIndex(index uint64) []byte {
var keyBytes [8]byte
binary.LittleEndian.PutUint64(keyBytes[:], index)
return keyBytes[:]
}
// DeserializeChainBlockIndex deserializes chain block index to uint64
func DeserializeChainBlockIndex(indexBytes []byte) uint64 {
return binary.LittleEndian.Uint64(indexBytes)
}

View File

@@ -0,0 +1,52 @@
package binaryserialization
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
"github.com/kaspanet/kaspad/util/binaryserializer"
"io"
)
// SerializeUTXOCollection serializes the given utxoCollection into the given writer
func SerializeUTXOCollection(writer io.Writer, utxoCollection model.UTXOCollection) error {
length := uint64(utxoCollection.Len())
err := binaryserializer.PutUint64(writer, length)
if err != nil {
return err
}
utxoIterator := utxoCollection.Iterator()
for ok := utxoIterator.First(); ok; ok = utxoIterator.Next() {
outpoint, utxoEntry, err := utxoIterator.Get()
if err != nil {
return err
}
err = utxo.SerializeUTXOIntoWriter(writer, utxoEntry, outpoint)
if err != nil {
return err
}
}
return nil
}
// DeserializeUTXOCollection deserializes a utxoCollection out of the given reader
func DeserializeUTXOCollection(reader io.Reader) (model.UTXOCollection, error) {
length, err := binaryserializer.Uint64(reader)
if err != nil {
return nil, err
}
utxoMap := make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry, length)
for i := uint64(0); i < length; i++ {
utxoEntry, outpoint, err := utxo.DeserializeUTXOOutOfReader(reader)
if err != nil {
return nil, err
}
utxoMap[*outpoint] = utxoEntry
}
utxoCollection := utxo.NewUTXOCollection(utxoMap)
return utxoCollection, nil
}
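Both functions use the classic count-then-records layout: a uint64 element count followed by the serialized entries. A self-contained sketch of the same layout over plain byte-slice records (generic stand-ins; the real entry encoding is utxo.SerializeUTXOIntoWriter, and binaryserializer's integer encoding is assumed little-endian here):
```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// writeRecords writes a uint64 count followed by length-prefixed records.
func writeRecords(w io.Writer, records [][]byte) error {
	if err := binary.Write(w, binary.LittleEndian, uint64(len(records))); err != nil {
		return err
	}
	for _, record := range records {
		if err := binary.Write(w, binary.LittleEndian, uint64(len(record))); err != nil {
			return err
		}
		if _, err := w.Write(record); err != nil {
			return err
		}
	}
	return nil
}

// readRecords reads back exactly what writeRecords wrote.
func readRecords(r io.Reader) ([][]byte, error) {
	var count uint64
	if err := binary.Read(r, binary.LittleEndian, &count); err != nil {
		return nil, err
	}
	records := make([][]byte, 0, count)
	for i := uint64(0); i < count; i++ {
		var length uint64
		if err := binary.Read(r, binary.LittleEndian, &length); err != nil {
			return nil, err
		}
		record := make([]byte, length)
		if _, err := io.ReadFull(r, record); err != nil {
			return nil, err
		}
		records = append(records, record)
	}
	return records, nil
}

func main() {
	var buf bytes.Buffer
	_ = writeRecords(&buf, [][]byte{[]byte("toAdd"), []byte("toRemove")})
	records, _ := readRecords(&buf)
	fmt.Printf("%s %s\n", records[0], records[1]) // toAdd toRemove
}
```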

View File

@@ -0,0 +1,29 @@
package binaryserialization
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
"io"
)
// SerializeUTXODiff serializes the given utxoDiff into the given writer
func SerializeUTXODiff(writer io.Writer, utxoDiff model.UTXODiff) error {
err := SerializeUTXOCollection(writer, utxoDiff.ToAdd())
if err != nil {
return err
}
return SerializeUTXOCollection(writer, utxoDiff.ToRemove())
}
// DeserializeUTXODiff deserializes a utxoDiff out of the given reader
func DeserializeUTXODiff(reader io.Reader) (model.UTXODiff, error) {
toAdd, err := DeserializeUTXOCollection(reader)
if err != nil {
return nil, err
}
toRemove, err := DeserializeUTXOCollection(reader)
if err != nil {
return nil, err
}
return utxo.NewUTXODiffFromCollections(toAdd, toRemove)
}

View File

@@ -6,9 +6,18 @@ import (
)
func dbBucketToDatabaseBucket(bucket model.DBBucket) *database.Bucket {
if bucket, ok := bucket.(dbBucket); ok {
return bucket.bucket
}
// This assumes that MakeBucket(src).Path() == src, which is not promised anywhere.
return database.MakeBucket(bucket.Path())
}
// MakeBucket creates a new Bucket using the given path of buckets.
func MakeBucket(path []byte) model.DBBucket {
return dbBucket{bucket: database.MakeBucket(path)}
}
type dbBucket struct {
bucket *database.Bucket
}
@@ -26,5 +35,5 @@ func (d dbBucket) Path() []byte {
}
func newDBBucket(bucket *database.Bucket) model.DBBucket {
return &dbBucket{bucket: bucket}
return dbBucket{bucket: bucket}
}

View File

@@ -17,6 +17,14 @@ func (dbw *dbManager) Has(key model.DBKey) (bool, error) {
return dbw.db.Has(dbKeyToDatabaseKey(key))
}
func (dbw *dbManager) Put(key model.DBKey, value []byte) error {
return dbw.db.Put(dbKeyToDatabaseKey(key), value)
}
func (dbw *dbManager) Delete(key model.DBKey) error {
return dbw.db.Delete(dbKeyToDatabaseKey(key))
}
func (dbw *dbManager) Cursor(bucket model.DBBucket) (model.DBCursor, error) {
cursor, err := dbw.db.Cursor(dbBucketToDatabaseBucket(bucket))
if err != nil {

View File

@@ -0,0 +1,14 @@
package database
import (
"github.com/kaspanet/kaspad/infrastructure/db/database"
)
// ErrNotFound denotes that the requested item was not
// found in the database.
var ErrNotFound = database.ErrNotFound
// IsNotFoundError checks whether an error is an ErrNotFound.
func IsNotFoundError(err error) bool {
return database.IsNotFoundError(err)
}

View File

@@ -6,6 +6,12 @@ import (
)
func dbKeyToDatabaseKey(key model.DBKey) *database.Key {
if key, ok := key.(dbKey); ok {
return key.key
}
if key, ok := key.(*dbKey); ok {
return key.key
}
return dbBucketToDatabaseBucket(key.Bucket()).Key(key.Suffix())
}
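Both assertions are needed because an interface value holding a dbKey and one holding a *dbKey fail each other's type assertion, and after this change newDBKey returns the value type while older callers may still pass pointers. A tiny illustration:
```go
package main

import "fmt"

type key struct{ id int }

func unwrap(k interface{}) int {
	if k, ok := k.(key); ok { // matches only the value type
		return k.id
	}
	if k, ok := k.(*key); ok { // matches only the pointer type
		return k.id
	}
	return -1
}

func main() {
	fmt.Println(unwrap(key{1}), unwrap(&key{2})) // 1 2
}
```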
@@ -26,5 +32,5 @@ func (d dbKey) Suffix() []byte {
}
func newDBKey(key *database.Key) model.DBKey {
return &dbKey{key: key}
return dbKey{key: key}
}

View File

@@ -10,7 +10,7 @@ func utxoCollectionToDBUTXOCollection(utxoCollection model.UTXOCollection) ([]*D
items := make([]*DbUtxoCollectionItem, utxoCollection.Len())
i := 0
utxoIterator := utxoCollection.Iterator()
for utxoIterator.Next() {
for ok := utxoIterator.First(); ok; ok = utxoIterator.Next() {
outpoint, entry, err := utxoIterator.Get()
if err != nil {
return nil, err
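This hunk, and several like it below, moves iteration to an explicit First()/Next() contract: First() positions the iterator at the first element and reports whether one exists, which makes iterators restartable and the empty case explicit. A sketch of the contract over a slice-backed iterator (illustrative; not the real model iterator types):
```go
package main

import "fmt"

// sliceIterator follows the First/Next contract: First positions at the
// first element and reports whether there is one; Next advances.
type sliceIterator struct {
	items []int
	pos   int
}

func (it *sliceIterator) First() bool { it.pos = 0; return len(it.items) > 0 }
func (it *sliceIterator) Next() bool  { it.pos++; return it.pos < len(it.items) }
func (it *sliceIterator) Get() int    { return it.items[it.pos] }

func main() {
	it := &sliceIterator{items: []int{10, 20, 30}}
	for ok := it.First(); ok; ok = it.Next() {
		fmt.Println(it.Get())
	}
}
```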

View File

@@ -9,6 +9,22 @@ type dbTransaction struct {
transaction database.Transaction
}
func (d *dbTransaction) Get(key model.DBKey) ([]byte, error) {
return d.transaction.Get(dbKeyToDatabaseKey(key))
}
func (d *dbTransaction) Has(key model.DBKey) (bool, error) {
return d.transaction.Has(dbKeyToDatabaseKey(key))
}
func (d *dbTransaction) Cursor(bucket model.DBBucket) (model.DBCursor, error) {
cursor, err := d.transaction.Cursor(dbBucketToDatabaseBucket(bucket))
if err != nil {
return nil, err
}
return newDBCursor(cursor), nil
}
func (d *dbTransaction) Put(key model.DBKey, value []byte) error {
return d.transaction.Put(dbKeyToDatabaseKey(key), value)
}

View File

@@ -1,15 +1,15 @@
package acceptancedatastore
import (
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
"google.golang.org/protobuf/proto"
)
var bucket = dbkeys.MakeBucket([]byte("acceptance-data"))
var bucket = database.MakeBucket([]byte("acceptance-data"))
// acceptanceDataStore represents a store of AcceptanceData
type acceptanceDataStore struct {

View File

@@ -2,15 +2,15 @@ package blockheaderstore
import (
"github.com/golang/protobuf/proto"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
)
var bucket = dbkeys.MakeBucket([]byte("block-headers"))
var countKey = dbkeys.MakeBucket().Key([]byte("block-headers-count"))
var bucket = database.MakeBucket([]byte("block-headers"))
var countKey = database.MakeBucket(nil).Key([]byte("block-headers-count"))
// blockHeaderStore represents a store of blocks
type blockHeaderStore struct {

View File

@@ -2,14 +2,14 @@ package blockrelationstore
import (
"github.com/golang/protobuf/proto"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
)
var bucket = dbkeys.MakeBucket([]byte("block-relations"))
var bucket = database.MakeBucket([]byte("block-relations"))
// blockRelationStore represents a store of BlockRelations
type blockRelationStore struct {

View File

@@ -2,14 +2,14 @@ package blockstatusstore
import (
"github.com/golang/protobuf/proto"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
)
var bucket = dbkeys.MakeBucket([]byte("block-statuses"))
var bucket = database.MakeBucket([]byte("block-statuses"))
// blockStatusStore represents a store of BlockStatuses
type blockStatusStore struct {

View File

@@ -2,15 +2,15 @@ package blockstore
import (
"github.com/golang/protobuf/proto"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
)
var bucket = dbkeys.MakeBucket([]byte("blocks"))
var countKey = dbkeys.MakeBucket().Key([]byte("blocks-count"))
var bucket = database.MakeBucket([]byte("blocks"))
var countKey = database.MakeBucket(nil).Key([]byte("blocks-count"))
// blockStore represents a store of blocks
type blockStore struct {

View File

@@ -11,7 +11,6 @@ type consensusStateStore struct {
tipsStaging []*externalapi.DomainHash
virtualDiffParentsStaging []*externalapi.DomainHash
virtualUTXODiffStaging model.UTXODiff
virtualUTXOSetStaging model.UTXOCollection
virtualUTXOSetCache *utxolrucache.LRUCache
@@ -30,7 +29,6 @@ func (css *consensusStateStore) Discard() {
css.tipsStaging = nil
css.virtualUTXODiffStaging = nil
css.virtualDiffParentsStaging = nil
css.virtualUTXOSetStaging = nil
}
func (css *consensusStateStore) Commit(dbTx model.DBTransaction) error {
@@ -48,11 +46,6 @@ func (css *consensusStateStore) Commit(dbTx model.DBTransaction) error {
return err
}
err = css.commitVirtualUTXOSet(dbTx)
if err != nil {
return err
}
css.Discard()
return nil

View File

@@ -2,13 +2,13 @@ package consensusstatestore
import (
"github.com/golang/protobuf/proto"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
)
var tipsKey = dbkeys.MakeBucket().Key([]byte("tips"))
var tipsKey = database.MakeBucket(nil).Key([]byte("tips"))
func (css *consensusStateStore) Tips(dbContext model.DBReader) ([]*externalapi.DomainHash, error) {
if css.tipsStaging != nil {

View File

@@ -1,14 +1,14 @@
package consensusstatestore
import (
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
"github.com/pkg/errors"
)
var utxoSetBucket = dbkeys.MakeBucket([]byte("virtual-utxo-set"))
var utxoSetBucket = database.MakeBucket([]byte("virtual-utxo-set"))
func utxoKey(outpoint *externalapi.DomainOutpoint) (model.DBKey, error) {
serializedOutpoint, err := serializeOutpoint(outpoint)
@@ -19,22 +19,25 @@ func utxoKey(outpoint *externalapi.DomainOutpoint) (model.DBKey, error) {
return utxoSetBucket.Key(serializedOutpoint), nil
}
func (css *consensusStateStore) StageVirtualUTXODiff(virtualUTXODiff model.UTXODiff) error {
if css.virtualUTXOSetStaging != nil {
return errors.New("cannot stage virtual UTXO diff while virtual UTXO set is staged")
}
func (css *consensusStateStore) StageVirtualUTXODiff(virtualUTXODiff model.UTXODiff) {
css.virtualUTXODiffStaging = virtualUTXODiff
return nil
}
func (css *consensusStateStore) commitVirtualUTXODiff(dbTx model.DBTransaction) error {
hadStartedImportingPruningPointUTXOSet, err := css.HadStartedImportingPruningPointUTXOSet(dbTx)
if err != nil {
return err
}
if hadStartedImportingPruningPointUTXOSet {
return errors.New("cannot commit virtual UTXO diff after starting to import the pruning point UTXO set")
}
if css.virtualUTXODiffStaging == nil {
return nil
}
toRemoveIterator := css.virtualUTXODiffStaging.ToRemove().Iterator()
for toRemoveIterator.Next() {
for ok := toRemoveIterator.First(); ok; ok = toRemoveIterator.Next() {
toRemoveOutpoint, _, err := toRemoveIterator.Get()
if err != nil {
return err
@@ -53,7 +56,7 @@ func (css *consensusStateStore) commitVirtualUTXODiff(dbTx model.DBTransaction)
}
toAddIterator := css.virtualUTXODiffStaging.ToAdd().Iterator()
for toAddIterator.Next() {
for ok := toAddIterator.First(); ok; ok = toAddIterator.Next() {
toAddOutpoint, toAddEntry, err := toAddIterator.Get()
if err != nil {
return err
@@ -80,46 +83,9 @@ func (css *consensusStateStore) commitVirtualUTXODiff(dbTx model.DBTransaction)
return nil
}
func (css *consensusStateStore) commitVirtualUTXOSet(dbTx model.DBTransaction) error {
if css.virtualUTXOSetStaging == nil {
return nil
}
css.virtualUTXOSetCache.Clear()
iterator := css.virtualUTXOSetStaging.Iterator()
for iterator.Next() {
outpoint, utxoEntry, err := iterator.Get()
if err != nil {
return err
}
css.virtualUTXOSetCache.Add(outpoint, utxoEntry)
dbKey, err := utxoKey(outpoint)
if err != nil {
return err
}
serializedEntry, err := serializeUTXOEntry(utxoEntry)
if err != nil {
return err
}
err = dbTx.Put(dbKey, serializedEntry)
if err != nil {
return err
}
}
// Note: we don't discard the staging here since that's
// being done at the end of Commit()
return nil
}
func (css *consensusStateStore) UTXOByOutpoint(dbContext model.DBReader, outpoint *externalapi.DomainOutpoint) (
externalapi.UTXOEntry, error) {
if css.virtualUTXOSetStaging != nil {
return css.utxoByOutpointFromStagedVirtualUTXOSet(outpoint)
}
return css.utxoByOutpointFromStagedVirtualUTXODiff(dbContext, outpoint)
}
@@ -159,20 +125,7 @@ func (css *consensusStateStore) utxoByOutpointFromStagedVirtualUTXODiff(dbContex
return entry, nil
}
func (css *consensusStateStore) utxoByOutpointFromStagedVirtualUTXOSet(outpoint *externalapi.DomainOutpoint) (
externalapi.UTXOEntry, error) {
if utxoEntry, ok := css.virtualUTXOSetStaging.Get(outpoint); ok {
return utxoEntry, nil
}
return nil, errors.Errorf("outpoint was not found")
}
func (css *consensusStateStore) HasUTXOByOutpoint(dbContext model.DBReader, outpoint *externalapi.DomainOutpoint) (bool, error) {
if css.virtualUTXOSetStaging != nil {
return css.hasUTXOByOutpointFromStagedVirtualUTXOSet(outpoint), nil
}
return css.hasUTXOByOutpointFromStagedVirtualUTXODiff(dbContext, outpoint)
}
@@ -196,10 +149,6 @@ func (css *consensusStateStore) hasUTXOByOutpointFromStagedVirtualUTXODiff(dbCon
return dbContext.Has(key)
}
func (css *consensusStateStore) hasUTXOByOutpointFromStagedVirtualUTXOSet(outpoint *externalapi.DomainOutpoint) bool {
return css.virtualUTXOSetStaging.Contains(outpoint)
}
func (css *consensusStateStore) VirtualUTXOSetIterator(dbContext model.DBReader) (model.ReadOnlyUTXOSetIterator, error) {
cursor, err := dbContext.Cursor(utxoSetBucket)
if err != nil {
@@ -222,6 +171,10 @@ func newCursorUTXOSetIterator(cursor model.DBCursor) model.ReadOnlyUTXOSetIterat
return &utxoSetIterator{cursor: cursor}
}
func (u utxoSetIterator) First() bool {
return u.cursor.First()
}
func (u utxoSetIterator) Next() bool {
return u.cursor.Next()
}
@@ -249,25 +202,3 @@ func (u utxoSetIterator) Get() (outpoint *externalapi.DomainOutpoint, utxoEntry
return outpoint, utxoEntry, nil
}
func (css *consensusStateStore) StageVirtualUTXOSet(virtualUTXOSetIterator model.ReadOnlyUTXOSetIterator) error {
if css.virtualUTXODiffStaging != nil {
return errors.New("cannot stage virtual UTXO set while virtual UTXO diff is staged")
}
utxoMap := make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry)
for virtualUTXOSetIterator.Next() {
outpoint, entry, err := virtualUTXOSetIterator.Get()
if err != nil {
return err
}
if _, exists := utxoMap[*outpoint]; exists {
return errors.Errorf("outpoint %s is found more than once in the given iterator", outpoint)
}
utxoMap[*outpoint] = entry
}
css.virtualUTXOSetStaging = utxo.NewUTXOCollection(utxoMap)
return nil
}

View File

@@ -2,13 +2,13 @@ package consensusstatestore
import (
"github.com/golang/protobuf/proto"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
)
var virtualDiffParentsKey = dbkeys.MakeBucket().Key([]byte("virtual-diff-parents"))
var virtualDiffParentsKey = database.MakeBucket(nil).Key([]byte("virtual-diff-parents"))
func (css *consensusStateStore) VirtualDiffParents(dbContext model.DBReader) ([]*externalapi.DomainHash, error) {
if css.virtualDiffParentsStaging != nil {

View File

@@ -0,0 +1,81 @@
package consensusstatestore
import (
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/pkg/errors"
)
var importingPruningPointUTXOSetKey = database.MakeBucket(nil).Key([]byte("importing-pruning-point-utxo-set"))
func (css *consensusStateStore) StartImportingPruningPointUTXOSet(dbContext model.DBWriter) error {
return dbContext.Put(importingPruningPointUTXOSetKey, []byte{0})
}
func (css *consensusStateStore) HadStartedImportingPruningPointUTXOSet(dbContext model.DBWriter) (bool, error) {
return dbContext.Has(importingPruningPointUTXOSetKey)
}
func (css *consensusStateStore) FinishImportingPruningPointUTXOSet(dbContext model.DBWriter) error {
return dbContext.Delete(importingPruningPointUTXOSetKey)
}
func (css *consensusStateStore) ImportPruningPointUTXOSetIntoVirtualUTXOSet(dbContext model.DBWriter,
pruningPointUTXOSetIterator model.ReadOnlyUTXOSetIterator) error {
if css.virtualUTXODiffStaging != nil {
return errors.New("cannot import virtual UTXO set while virtual UTXO diff is staged")
}
hadStartedImportingPruningPointUTXOSet, err := css.HadStartedImportingPruningPointUTXOSet(dbContext)
if err != nil {
return err
}
if !hadStartedImportingPruningPointUTXOSet {
return errors.New("cannot import pruning point UTXO set " +
"without calling StartImportingPruningPointUTXOSet first")
}
// Clear the cache
css.virtualUTXOSetCache.Clear()
// Delete all the old UTXOs from the database
deleteCursor, err := dbContext.Cursor(utxoSetBucket)
if err != nil {
return err
}
for ok := deleteCursor.First(); ok; ok = deleteCursor.Next() {
key, err := deleteCursor.Key()
if err != nil {
return err
}
err = dbContext.Delete(key)
if err != nil {
return err
}
}
// Insert all the new UTXOs into the database
for ok := pruningPointUTXOSetIterator.First(); ok; ok = pruningPointUTXOSetIterator.Next() {
outpoint, entry, err := pruningPointUTXOSetIterator.Get()
if err != nil {
return err
}
key, err := utxoKey(outpoint)
if err != nil {
return err
}
serializedUTXOEntry, err := serializeUTXOEntry(entry)
if err != nil {
return err
}
err = dbContext.Put(key, serializedUTXOEntry)
if err != nil {
return err
}
}
return nil
}
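Start/HadStarted/Finish form a simple crash-recovery marker: a sentinel key is written before the non-atomic delete-and-reimport begins and removed only once it completes, so a restart can detect a half-finished import and redo it. A minimal sketch of the pattern over a map-backed store (an assumed stand-in, not the real model.DBWriter):
```go
package main

import "fmt"

const importMarker = "importing-pruning-point-utxo-set"

type store map[string][]byte

func (s store) startImport()           { s[importMarker] = []byte{0} }
func (s store) importInProgress() bool { _, ok := s[importMarker]; return ok }
func (s store) finishImport()          { delete(s, importMarker) }

func main() {
	s := store{}
	s.startImport()
	// ...a long, non-atomic import runs here; a crash would leave the marker set...
	if s.importInProgress() {
		fmt.Println("import in progress; a restart at this point would know to redo it")
	}
	s.finishImport()
	fmt.Println("in progress after finish:", s.importInProgress()) // false
}
```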

View File

@@ -1,13 +1,13 @@
package finalitystore
import (
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
)
var bucket = dbkeys.MakeBucket([]byte("finality-points"))
var bucket = database.MakeBucket([]byte("finality-points"))
type finalityStore struct {
staging map[externalapi.DomainHash]*externalapi.DomainHash

View File

@@ -2,14 +2,14 @@ package ghostdagdatastore
import (
"github.com/golang/protobuf/proto"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
)
var bucket = dbkeys.MakeBucket([]byte("block-ghostdag-data"))
var bucket = database.MakeBucket([]byte("block-ghostdag-data"))
// ghostdagDataStore represents a store of BlockGHOSTDAGData
type ghostdagDataStore struct {

View File

@@ -0,0 +1,229 @@
package headersselectedchainstore
import (
"encoding/binary"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/binaryserialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucacheuint64tohash"
"github.com/pkg/errors"
)
var bucketChainBlockHashByIndex = database.MakeBucket([]byte("chain-block-hash-by-index"))
var bucketChainBlockIndexByHash = database.MakeBucket([]byte("chain-block-index-by-hash"))
var highestChainBlockIndexKey = database.MakeBucket(nil).Key([]byte("highest-chain-block-index"))
type headersSelectedChainStore struct {
stagingAddedByHash map[externalapi.DomainHash]uint64
stagingRemovedByHash map[externalapi.DomainHash]struct{}
stagingAddedByIndex map[uint64]*externalapi.DomainHash
stagingRemovedByIndex map[uint64]struct{}
cacheByIndex *lrucacheuint64tohash.LRUCache
cacheByHash *lrucache.LRUCache
cacheHighestChainBlockIndex uint64
}
// New instantiates a new HeadersSelectedChainStore
func New(cacheSize int) model.HeadersSelectedChainStore {
return &headersSelectedChainStore{
stagingAddedByHash: make(map[externalapi.DomainHash]uint64),
stagingRemovedByHash: make(map[externalapi.DomainHash]struct{}),
stagingAddedByIndex: make(map[uint64]*externalapi.DomainHash),
stagingRemovedByIndex: make(map[uint64]struct{}),
cacheByIndex: lrucacheuint64tohash.New(cacheSize),
cacheByHash: lrucache.New(cacheSize),
}
}
// Stage stages the given chain changes
func (hscs *headersSelectedChainStore) Stage(dbContext model.DBReader,
chainChanges *externalapi.SelectedChainPath) error {
if hscs.IsStaged() {
return errors.Errorf("can't stage when there's already staged data")
}
for _, blockHash := range chainChanges.Removed {
index, err := hscs.GetIndexByHash(dbContext, blockHash)
if err != nil {
return err
}
hscs.stagingRemovedByIndex[index] = struct{}{}
hscs.stagingRemovedByHash[*blockHash] = struct{}{}
}
currentIndex := uint64(0)
highestChainBlockIndex, exists, err := hscs.highestChainBlockIndex(dbContext)
if err != nil {
return err
}
if exists {
currentIndex = highestChainBlockIndex - uint64(len(chainChanges.Removed)) + 1
}
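// Example: if the highest stored chain index is 10 and chainChanges removes
// 3 blocks (indices 8, 9 and 10), currentIndex becomes 10-3+1 = 8, so the
// first added block reuses the lowest freed index.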
for _, blockHash := range chainChanges.Added {
hscs.stagingAddedByIndex[currentIndex] = blockHash
hscs.stagingAddedByHash[*blockHash] = currentIndex
currentIndex++
}
return nil
}
func (hscs *headersSelectedChainStore) IsStaged() bool {
return len(hscs.stagingAddedByHash) != 0 ||
len(hscs.stagingRemovedByHash) != 0 ||
len(hscs.stagingAddedByIndex) != 0 ||
len(hscs.stagingRemovedByIndex) != 0
}
func (hscs *headersSelectedChainStore) Discard() {
hscs.stagingAddedByHash = make(map[externalapi.DomainHash]uint64)
hscs.stagingRemovedByHash = make(map[externalapi.DomainHash]struct{})
hscs.stagingAddedByIndex = make(map[uint64]*externalapi.DomainHash)
hscs.stagingRemovedByIndex = make(map[uint64]struct{})
}
func (hscs *headersSelectedChainStore) Commit(dbTx model.DBTransaction) error {
if !hscs.IsStaged() {
return nil
}
for hash := range hscs.stagingRemovedByHash {
hashCopy := hash
err := dbTx.Delete(hscs.hashAsKey(&hashCopy))
if err != nil {
return err
}
hscs.cacheByHash.Remove(&hashCopy)
}
for index := range hscs.stagingRemovedByIndex {
err := dbTx.Delete(hscs.indexAsKey(index))
if err != nil {
return err
}
hscs.cacheByIndex.Remove(index)
}
highestIndex := uint64(0)
for hash, index := range hscs.stagingAddedByHash {
hashCopy := hash
err := dbTx.Put(hscs.hashAsKey(&hashCopy), hscs.serializeIndex(index))
if err != nil {
return err
}
err = dbTx.Put(hscs.indexAsKey(index), binaryserialization.SerializeHash(&hashCopy))
if err != nil {
return err
}
hscs.cacheByHash.Add(&hashCopy, index)
hscs.cacheByIndex.Add(index, &hashCopy)
if index > highestIndex {
highestIndex = index
}
}
err := dbTx.Put(highestChainBlockIndexKey, hscs.serializeIndex(highestIndex))
if err != nil {
return err
}
hscs.cacheHighestChainBlockIndex = highestIndex
hscs.Discard()
return nil
}
// GetIndexByHash gets the chain block index for the given blockHash
func (hscs *headersSelectedChainStore) GetIndexByHash(dbContext model.DBReader, blockHash *externalapi.DomainHash) (uint64, error) {
if index, ok := hscs.stagingAddedByHash[*blockHash]; ok {
return index, nil
}
if _, ok := hscs.stagingRemovedByHash[*blockHash]; ok {
return 0, errors.Wrapf(database.ErrNotFound, "couldn't find block %s", blockHash)
}
if index, ok := hscs.cacheByHash.Get(blockHash); ok {
return index.(uint64), nil
}
indexBytes, err := dbContext.Get(hscs.hashAsKey(blockHash))
if err != nil {
return 0, err
}
index := hscs.deserializeIndex(indexBytes)
hscs.cacheByHash.Add(blockHash, index)
return index, nil
}
func (hscs *headersSelectedChainStore) GetHashByIndex(dbContext model.DBReader, index uint64) (*externalapi.DomainHash, error) {
if blockHash, ok := hscs.stagingAddedByIndex[index]; ok {
return blockHash, nil
}
if _, ok := hscs.stagingRemovedByIndex[index]; ok {
return nil, errors.Wrapf(database.ErrNotFound, "couldn't find chain block with index %d", index)
}
if blockHash, ok := hscs.cacheByIndex.Get(index); ok {
return blockHash, nil
}
hashBytes, err := dbContext.Get(hscs.indexAsKey(index))
if err != nil {
return nil, err
}
blockHash, err := binaryserialization.DeserializeHash(hashBytes)
if err != nil {
return nil, err
}
hscs.cacheByIndex.Add(index, blockHash)
return blockHash, nil
}
func (hscs *headersSelectedChainStore) serializeIndex(index uint64) []byte {
return binaryserialization.SerializeChainBlockIndex(index)
}
func (hscs *headersSelectedChainStore) deserializeIndex(indexBytes []byte) uint64 {
return binaryserialization.DeserializeChainBlockIndex(indexBytes)
}
func (hscs *headersSelectedChainStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey {
return bucketChainBlockIndexByHash.Key(hash.ByteSlice())
}
func (hscs *headersSelectedChainStore) indexAsKey(index uint64) model.DBKey {
var keyBytes [8]byte
binary.BigEndian.PutUint64(keyBytes[:], index)
return bucketChainBlockHashByIndex.Key(keyBytes[:])
}
func (hscs *headersSelectedChainStore) highestChainBlockIndex(dbContext model.DBReader) (uint64, bool, error) {
if hscs.cacheHighestChainBlockIndex != 0 {
return hscs.cacheHighestChainBlockIndex, true, nil
}
indexBytes, err := dbContext.Get(highestChainBlockIndexKey)
if err != nil {
if errors.Is(err, database.ErrNotFound) {
return 0, false, nil
}
return 0, false, err
}
index := hscs.deserializeIndex(indexBytes)
hscs.cacheHighestChainBlockIndex = index
return index, true, nil
}

View File

@@ -2,13 +2,13 @@ package headersselectedtipstore
import (
"github.com/golang/protobuf/proto"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
)
var headerSelectedTipKey = dbkeys.MakeBucket().Key([]byte("headers-selected-tip"))
var headerSelectedTipKey = database.MakeBucket(nil).Key([]byte("headers-selected-tip"))
type headerSelectedTipStore struct {
staging *externalapi.DomainHash

View File

@@ -2,14 +2,14 @@ package multisetstore
import (
"github.com/golang/protobuf/proto"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
)
var bucket = dbkeys.MakeBucket([]byte("multisets"))
var bucket = database.MakeBucket([]byte("multisets"))
// multisetStore represents a store of Multisets
type multisetStore struct {

View File

@@ -0,0 +1,213 @@
package pruningstore
import (
"github.com/golang/protobuf/proto"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
var importedPruningPointUTXOsBucket = database.MakeBucket([]byte("imported-pruning-point-utxos"))
var importedPruningPointMultiset = database.MakeBucket(nil).Key([]byte("imported-pruning-point-multiset"))
func (ps *pruningStore) ClearImportedPruningPointUTXOs(dbContext model.DBWriter) error {
cursor, err := dbContext.Cursor(importedPruningPointUTXOsBucket)
if err != nil {
return err
}
for ok := cursor.First(); ok; ok = cursor.Next() {
key, err := cursor.Key()
if err != nil {
return err
}
err = dbContext.Delete(key)
if err != nil {
return err
}
}
return nil
}
func (ps *pruningStore) AppendImportedPruningPointUTXOs(dbTx model.DBTransaction,
outpointAndUTXOEntryPairs []*externalapi.OutpointAndUTXOEntryPair) error {
for _, outpointAndUTXOEntryPair := range outpointAndUTXOEntryPairs {
key, err := ps.importedPruningPointUTXOKey(outpointAndUTXOEntryPair.Outpoint)
if err != nil {
return err
}
serializedUTXOEntry, err := serializeUTXOEntry(outpointAndUTXOEntryPair.UTXOEntry)
if err != nil {
return err
}
err = dbTx.Put(key, serializedUTXOEntry)
if err != nil {
return err
}
}
return nil
}
func (ps *pruningStore) ImportedPruningPointUTXOIterator(dbContext model.DBReader) (model.ReadOnlyUTXOSetIterator, error) {
cursor, err := dbContext.Cursor(importedPruningPointUTXOsBucket)
if err != nil {
return nil, err
}
return ps.newCursorUTXOSetIterator(cursor), nil
}
type utxoSetIterator struct {
cursor model.DBCursor
}
func (ps *pruningStore) newCursorUTXOSetIterator(cursor model.DBCursor) model.ReadOnlyUTXOSetIterator {
return &utxoSetIterator{cursor: cursor}
}
func (u *utxoSetIterator) First() bool {
return u.cursor.First()
}
func (u *utxoSetIterator) Next() bool {
return u.cursor.Next()
}
func (u *utxoSetIterator) Get() (outpoint *externalapi.DomainOutpoint, utxoEntry externalapi.UTXOEntry, err error) {
key, err := u.cursor.Key()
if err != nil {
panic(err)
}
utxoEntryBytes, err := u.cursor.Value()
if err != nil {
return nil, nil, err
}
outpoint, err = deserializeOutpoint(key.Suffix())
if err != nil {
return nil, nil, err
}
utxoEntry, err = deserializeUTXOEntry(utxoEntryBytes)
if err != nil {
return nil, nil, err
}
return outpoint, utxoEntry, nil
}
func (ps *pruningStore) importedPruningPointUTXOKey(outpoint *externalapi.DomainOutpoint) (model.DBKey, error) {
serializedOutpoint, err := serializeOutpoint(outpoint)
if err != nil {
return nil, err
}
return importedPruningPointUTXOsBucket.Key(serializedOutpoint), nil
}
func serializeOutpoint(outpoint *externalapi.DomainOutpoint) ([]byte, error) {
return proto.Marshal(serialization.DomainOutpointToDbOutpoint(outpoint))
}
func serializeUTXOEntry(entry externalapi.UTXOEntry) ([]byte, error) {
return proto.Marshal(serialization.UTXOEntryToDBUTXOEntry(entry))
}
func deserializeOutpoint(outpointBytes []byte) (*externalapi.DomainOutpoint, error) {
dbOutpoint := &serialization.DbOutpoint{}
err := proto.Unmarshal(outpointBytes, dbOutpoint)
if err != nil {
return nil, err
}
return serialization.DbOutpointToDomainOutpoint(dbOutpoint)
}
func deserializeUTXOEntry(entryBytes []byte) (externalapi.UTXOEntry, error) {
dbEntry := &serialization.DbUtxoEntry{}
err := proto.Unmarshal(entryBytes, dbEntry)
if err != nil {
return nil, err
}
return serialization.DBUTXOEntryToUTXOEntry(dbEntry)
}
func (ps *pruningStore) ClearImportedPruningPointMultiset(dbContext model.DBWriter) error {
return dbContext.Delete(importedPruningPointMultiset)
}
func (ps *pruningStore) ImportedPruningPointMultiset(dbContext model.DBReader) (model.Multiset, error) {
multisetBytes, err := dbContext.Get(importedPruningPointMultiset)
if err != nil {
return nil, err
}
return ps.deserializeMultiset(multisetBytes)
}
func (ps *pruningStore) UpdateImportedPruningPointMultiset(dbTx model.DBTransaction, multiset model.Multiset) error {
multisetBytes, err := ps.serializeMultiset(multiset)
if err != nil {
return err
}
return dbTx.Put(importedPruningPointMultiset, multisetBytes)
}
func (ps *pruningStore) serializeMultiset(multiset model.Multiset) ([]byte, error) {
return proto.Marshal(serialization.MultisetToDBMultiset(multiset))
}
func (ps *pruningStore) deserializeMultiset(multisetBytes []byte) (model.Multiset, error) {
dbMultiset := &serialization.DbMultiset{}
err := proto.Unmarshal(multisetBytes, dbMultiset)
if err != nil {
return nil, err
}
return serialization.DBMultisetToMultiset(dbMultiset)
}
func (ps *pruningStore) CommitImportedPruningPointUTXOSet(dbContext model.DBWriter) error {
// Delete all the old UTXOs from the database
deleteCursor, err := dbContext.Cursor(pruningPointUTXOSetBucket)
if err != nil {
return err
}
for ok := deleteCursor.First(); ok; ok = deleteCursor.Next() {
key, err := deleteCursor.Key()
if err != nil {
return err
}
err = dbContext.Delete(key)
if err != nil {
return err
}
}
// Insert all the new UTXOs into the database
insertCursor, err := dbContext.Cursor(importedPruningPointUTXOsBucket)
if err != nil {
return err
}
for ok := insertCursor.First(); ok; ok = insertCursor.Next() {
importedPruningPointUTXOSetKey, err := insertCursor.Key()
if err != nil {
return err
}
pruningPointUTXOSetKey := pruningPointUTXOSetBucket.Key(importedPruningPointUTXOSetKey.Suffix())
serializedUTXOEntry, err := insertCursor.Value()
if err != nil {
return err
}
err = dbContext.Put(pruningPointUTXOSetKey, serializedUTXOEntry)
if err != nil {
return err
}
}
return nil
}

View File

@@ -2,23 +2,30 @@ package pruningstore
import (
"github.com/golang/protobuf/proto"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
)
var pruningBlockHashKey = dbkeys.MakeBucket().Key([]byte("pruning-block-hash"))
var candidatePruningPointHashKey = dbkeys.MakeBucket().Key([]byte("candidate-pruning-point-hash"))
var pruningSerializedUTXOSetKey = dbkeys.MakeBucket().Key([]byte("pruning-utxo-set"))
var pruningBlockHashKey = database.MakeBucket(nil).Key([]byte("pruning-block-hash"))
var candidatePruningPointHashKey = database.MakeBucket(nil).Key([]byte("candidate-pruning-point-hash"))
var pruningPointUTXOSetBucket = database.MakeBucket([]byte("pruning-point-utxo-set"))
var updatingPruningPointUTXOSetKey = database.MakeBucket(nil).Key([]byte("updating-pruning-point-utxo-set"))
// pruningStore represents a store for the current pruning state
type pruningStore struct {
pruningPointStaging *externalapi.DomainHash
pruningPointCandidateStaging *externalapi.DomainHash
serializedUTXOSetStaging []byte
pruningPointCandidateCache *externalapi.DomainHash
pruningPointCache *externalapi.DomainHash
pruningPointCandidateStaging *externalapi.DomainHash
pruningPointCandidateCache *externalapi.DomainHash
startUpdatingPruningPointUTXOSetStaging bool
}
// New instantiates a new PruningStore
func New() model.PruningStore {
return &pruningStore{}
}
func (ps *pruningStore) StagePruningPointCandidate(candidate *externalapi.DomainHash) {
@@ -59,24 +66,18 @@ func (ps *pruningStore) HasPruningPointCandidate(dbContext model.DBReader) (bool
return dbContext.Has(candidatePruningPointHashKey)
}
// New instantiates a new PruningStore
func New() model.PruningStore {
return &pruningStore{}
}
// Stage stages the pruning state
func (ps *pruningStore) StagePruningPoint(pruningPointBlockHash *externalapi.DomainHash, pruningPointUTXOSetBytes []byte) {
func (ps *pruningStore) StagePruningPoint(pruningPointBlockHash *externalapi.DomainHash) {
ps.pruningPointStaging = pruningPointBlockHash
ps.serializedUTXOSetStaging = pruningPointUTXOSetBytes
}
func (ps *pruningStore) IsStaged() bool {
return ps.pruningPointStaging != nil || ps.serializedUTXOSetStaging != nil
return ps.pruningPointStaging != nil || ps.startUpdatingPruningPointUTXOSetStaging
}
func (ps *pruningStore) Discard() {
ps.pruningPointStaging = nil
ps.serializedUTXOSetStaging = nil
ps.startUpdatingPruningPointUTXOSetStaging = false
}
func (ps *pruningStore) Commit(dbTx model.DBTransaction) error {
@@ -104,12 +105,8 @@ func (ps *pruningStore) Commit(dbTx model.DBTransaction) error {
ps.pruningPointCandidateCache = ps.pruningPointCandidateStaging
}
if ps.serializedUTXOSetStaging != nil {
utxoSetBytes, err := ps.serializeUTXOSetBytes(ps.serializedUTXOSetStaging)
if err != nil {
return err
}
err = dbTx.Put(pruningSerializedUTXOSetKey, utxoSetBytes)
if ps.startUpdatingPruningPointUTXOSetStaging {
err := dbTx.Put(updatingPruningPointUTXOSetKey, []byte{0})
if err != nil {
return err
}
@@ -119,6 +116,49 @@ func (ps *pruningStore) Commit(dbTx model.DBTransaction) error {
return nil
}
func (ps *pruningStore) UpdatePruningPointUTXOSet(dbContext model.DBWriter,
utxoSetIterator model.ReadOnlyUTXOSetIterator) error {
// Delete all the old UTXOs from the database
deleteCursor, err := dbContext.Cursor(pruningPointUTXOSetBucket)
if err != nil {
return err
}
for ok := deleteCursor.First(); ok; ok = deleteCursor.Next() {
key, err := deleteCursor.Key()
if err != nil {
return err
}
err = dbContext.Delete(key)
if err != nil {
return err
}
}
// Insert all the new UTXOs into the database
for ok := utxoSetIterator.First(); ok; ok = utxoSetIterator.Next() {
outpoint, entry, err := utxoSetIterator.Get()
if err != nil {
return err
}
serializedOutpoint, err := serializeOutpoint(outpoint)
if err != nil {
return err
}
key := pruningPointUTXOSetBucket.Key(serializedOutpoint)
serializedUTXOEntry, err := serializeUTXOEntry(entry)
if err != nil {
return err
}
err = dbContext.Put(key, serializedUTXOEntry)
if err != nil {
return err
}
}
return nil
}
// PruningPoint gets the current pruning point
func (ps *pruningStore) PruningPoint(dbContext model.DBReader) (*externalapi.DomainHash, error) {
if ps.pruningPointStaging != nil {
@@ -142,24 +182,6 @@ func (ps *pruningStore) PruningPoint(dbContext model.DBReader) (*externalapi.Dom
return pruningPoint, nil
}
-// PruningPointSerializedUTXOSet returns the serialized UTXO set of the current pruning point
-func (ps *pruningStore) PruningPointSerializedUTXOSet(dbContext model.DBReader) ([]byte, error) {
-	if ps.serializedUTXOSetStaging != nil {
-		return ps.serializedUTXOSetStaging, nil
-	}
-	dbPruningPointUTXOSetBytes, err := dbContext.Get(pruningSerializedUTXOSetKey)
-	if err != nil {
-		return nil, err
-	}
-	pruningPointUTXOSet, err := ps.deserializeUTXOSetBytes(dbPruningPointUTXOSetBytes)
-	if err != nil {
-		return nil, err
-	}
-	return pruningPointUTXOSet, nil
-}
func (ps *pruningStore) serializeHash(hash *externalapi.DomainHash) ([]byte, error) {
return proto.Marshal(serialization.DomainHashToDbHash(hash))
}
@@ -199,3 +221,51 @@ func (ps *pruningStore) HasPruningPoint(dbContext model.DBReader) (bool, error)
return dbContext.Has(pruningBlockHashKey)
}
func (ps *pruningStore) PruningPointUTXOs(dbContext model.DBReader,
fromOutpoint *externalapi.DomainOutpoint, limit int) ([]*externalapi.OutpointAndUTXOEntryPair, error) {
cursor, err := dbContext.Cursor(pruningPointUTXOSetBucket)
if err != nil {
return nil, err
}
if fromOutpoint != nil {
serializedFromOutpoint, err := serializeOutpoint(fromOutpoint)
if err != nil {
return nil, err
}
seekKey := pruningPointUTXOSetBucket.Key(serializedFromOutpoint)
err = cursor.Seek(seekKey)
if err != nil {
return nil, err
}
}
pruningPointUTXOIterator := ps.newCursorUTXOSetIterator(cursor)
outpointAndUTXOEntryPairs := make([]*externalapi.OutpointAndUTXOEntryPair, 0, limit)
for len(outpointAndUTXOEntryPairs) < limit && pruningPointUTXOIterator.Next() {
outpoint, utxoEntry, err := pruningPointUTXOIterator.Get()
if err != nil {
return nil, err
}
outpointAndUTXOEntryPairs = append(outpointAndUTXOEntryPairs, &externalapi.OutpointAndUTXOEntryPair{
Outpoint: outpoint,
UTXOEntry: utxoEntry,
})
}
return outpointAndUTXOEntryPairs, nil
}
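Note that cursor.Seek positions the cursor on fromOutpoint itself, and the iterator's first Next call then steps past it, so fromOutpoint is excluded from the results. That makes resuming from the last outpoint of the previous batch overlap-free, which suggests a paging pattern like the following hedged sketch (the helper and its step parameter are illustrative, not part of this diff):

package pruningstore

import (
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

// collectAllPruningPointUTXOs pages through the stored UTXO set in batches of
// step entries; a short batch signals that the set is exhausted. Hypothetical.
func collectAllPruningPointUTXOs(store model.PruningStore, dbContext model.DBReader,
	step int) ([]*externalapi.OutpointAndUTXOEntryPair, error) {

	var all []*externalapi.OutpointAndUTXOEntryPair
	var fromOutpoint *externalapi.DomainOutpoint
	for {
		batch, err := store.PruningPointUTXOs(dbContext, fromOutpoint, step)
		if err != nil {
			return nil, err
		}
		all = append(all, batch...)
		if len(batch) < step {
			return all, nil
		}
		// Resume after the last outpoint of this batch.
		fromOutpoint = batch[len(batch)-1].Outpoint
	}
}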
func (ps *pruningStore) StageStartUpdatingPruningPointUTXOSet() {
ps.startUpdatingPruningPointUTXOSetStaging = true
}
func (ps *pruningStore) HadStartedUpdatingPruningPointUTXOSet(dbContext model.DBWriter) (bool, error) {
return dbContext.Has(updatingPruningPointUTXOSetKey)
}
func (ps *pruningStore) FinishUpdatingPruningPointUTXOSet(dbContext model.DBWriter) error {
return dbContext.Delete(updatingPruningPointUTXOSetKey)
}
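Together with the updatingPruningPointUTXOSetKey marker that Commit persists, these three methods give the UTXO set rewrite crash-recovery semantics: the marker goes in before the destructive delete-and-reinsert begins and is removed only after it completes, so a node that died mid-rewrite finds the marker on restart and knows the stored set is torn. A sketch of the intended ordering, under the assumption that a model.DBTransaction can serve as the model.DBWriter these methods take (the helper is hypothetical):

package pruningstore

import "github.com/kaspanet/kaspad/domain/consensus/model"

// replacePruningPointUTXOSet is a hypothetical helper showing the intended
// ordering of the marker around the rewrite; it is not part of this diff.
func replacePruningPointUTXOSet(store model.PruningStore, dbTx model.DBTransaction,
	utxoSetIterator model.ReadOnlyUTXOSetIterator) error {

	// 1. Persist the "update in progress" marker before touching the UTXO set.
	store.StageStartUpdatingPruningPointUTXOSet()
	err := store.Commit(dbTx)
	if err != nil {
		return err
	}

	// 2. Rewrite the UTXO set. A crash here leaves the marker in place, and
	// HadStartedUpdatingPruningPointUTXOSet reports true on the next startup.
	err = store.UpdatePruningPointUTXOSet(dbTx, utxoSetIterator)
	if err != nil {
		return err
	}

	// 3. Remove the marker only once the rewrite has fully completed.
	return store.FinishUpdatingPruningPointUTXOSet(dbTx)
}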


@@ -2,15 +2,15 @@ package reachabilitydatastore
import (
	"github.com/golang/protobuf/proto"
+	"github.com/kaspanet/kaspad/domain/consensus/database"
	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-	"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
)
-var reachabilityDataBucket = dbkeys.MakeBucket([]byte("reachability-data"))
-var reachabilityReindexRootKey = dbkeys.MakeBucket().Key([]byte("reachability-reindex-root"))
+var reachabilityDataBucket = database.MakeBucket([]byte("reachability-data"))
+var reachabilityReindexRootKey = database.MakeBucket(nil).Key([]byte("reachability-reindex-root"))
// reachabilityDataStore represents a store of ReachabilityData
type reachabilityDataStore struct {


@@ -1,17 +1,19 @@
package utxodiffstore
import (
+	"bytes"
	"github.com/golang/protobuf/proto"
+	"github.com/kaspanet/kaspad/domain/consensus/database"
+	"github.com/kaspanet/kaspad/domain/consensus/database/binaryserialization"
	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-	"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
	"github.com/pkg/errors"
)
-var utxoDiffBucket = dbkeys.MakeBucket([]byte("utxo-diffs"))
-var utxoDiffChildBucket = dbkeys.MakeBucket([]byte("utxo-diff-children"))
+var utxoDiffBucket = database.MakeBucket([]byte("utxo-diffs"))
+var utxoDiffChildBucket = database.MakeBucket([]byte("utxo-diff-children"))
// utxoDiffStore represents a store of UTXODiffs
type utxoDiffStore struct {
@@ -188,27 +190,17 @@ func (uds *utxoDiffStore) utxoDiffChildHashAsKey(hash *externalapi.DomainHash) m
}
func (uds *utxoDiffStore) serializeUTXODiff(utxoDiff model.UTXODiff) ([]byte, error) {
-	dbUtxoDiff, err := serialization.UTXODiffToDBUTXODiff(utxoDiff)
+	writer := &bytes.Buffer{}
+	err := binaryserialization.SerializeUTXODiff(writer, utxoDiff)
	if err != nil {
		return nil, err
	}
-	bytes, err := proto.Marshal(dbUtxoDiff)
-	if err != nil {
-		return nil, errors.WithStack(err)
-	}
-	return bytes, nil
+	return writer.Bytes(), nil
}
func (uds *utxoDiffStore) deserializeUTXODiff(utxoDiffBytes []byte) (model.UTXODiff, error) {
-	dbUTXODiff := &serialization.DbUtxoDiff{}
-	err := proto.Unmarshal(utxoDiffBytes, dbUTXODiff)
-	if err != nil {
-		return nil, errors.WithStack(err)
-	}
-	return serialization.DBUTXODiffToUTXODiff(dbUTXODiff)
+	reader := bytes.NewReader(utxoDiffBytes)
+	return binaryserialization.DeserializeUTXODiff(reader)
}
func (uds *utxoDiffStore) serializeUTXODiffChild(utxoDiffChild *externalapi.DomainHash) ([]byte, error) {


@@ -0,0 +1,168 @@
package utxodiffstore
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
"math/rand"
"testing"
)
func TestUTXODiffSerializationAndDeserialization(t *testing.T) {
utxoDiffStore := New(0).(*utxoDiffStore)
testUTXODiff, err := buildTestUTXODiff()
if err != nil {
t.Fatalf("Could not create UTXODiff from toAdd and toRemove collections: %s", err)
}
serializedUTXODiff, err := utxoDiffStore.serializeUTXODiff(testUTXODiff)
if err != nil {
t.Fatalf("Could not serialize UTXO diff: %s", err)
}
deserializedUTXODiff, err := utxoDiffStore.deserializeUTXODiff(serializedUTXODiff)
if err != nil {
t.Fatalf("Could not deserialize UTXO diff: %s", err)
}
if testUTXODiff.ToAdd().Len() != deserializedUTXODiff.ToAdd().Len() {
t.Fatalf("Unexpected toAdd length in deserialized utxoDiff. Want: %d, got: %d",
testUTXODiff.ToAdd().Len(), deserializedUTXODiff.ToAdd().Len())
}
if testUTXODiff.ToRemove().Len() != deserializedUTXODiff.ToRemove().Len() {
t.Fatalf("Unexpected toRemove length in deserialized utxoDiff. Want: %d, got: %d",
testUTXODiff.ToRemove().Len(), deserializedUTXODiff.ToRemove().Len())
}
testToAddIterator := testUTXODiff.ToAdd().Iterator()
for ok := testToAddIterator.First(); ok; ok = testToAddIterator.Next() {
testOutpoint, testUTXOEntry, err := testToAddIterator.Get()
if err != nil {
t.Fatalf("Could not get an outpoint-utxoEntry pair out of the toAdd iterator: %s", err)
}
deserializedUTXOEntry, ok := deserializedUTXODiff.ToAdd().Get(testOutpoint)
if !ok {
t.Fatalf("Outpoint %s:%d not found in the deserialized toAdd collection",
testOutpoint.TransactionID, testOutpoint.Index)
}
if !testUTXOEntry.Equal(deserializedUTXOEntry) {
t.Fatalf("Deserialized UTXO entry is not equal to the original UTXO entry for outpoint %s:%d "+
"in the toAdd collection", testOutpoint.TransactionID, testOutpoint.Index)
}
}
testToRemoveIterator := testUTXODiff.ToRemove().Iterator()
for ok := testToRemoveIterator.First(); ok; ok = testToRemoveIterator.Next() {
testOutpoint, testUTXOEntry, err := testToRemoveIterator.Get()
if err != nil {
t.Fatalf("Could not get an outpoint-utxoEntry pair out of the toRemove iterator: %s", err)
}
deserializedUTXOEntry, ok := deserializedUTXODiff.ToRemove().Get(testOutpoint)
if !ok {
t.Fatalf("Outpoint %s:%d not found in the deserialized toRemove collection",
testOutpoint.TransactionID, testOutpoint.Index)
}
if !testUTXOEntry.Equal(deserializedUTXOEntry) {
t.Fatalf("Deserialized UTXO entry is not equal to the original UTXO entry for outpoint %s:%d "+
"in the toRemove collection", testOutpoint.TransactionID, testOutpoint.Index)
}
}
}
func BenchmarkUTXODiffSerialization(b *testing.B) {
utxoDiffStore := New(0).(*utxoDiffStore)
testUTXODiff, err := buildTestUTXODiff()
if err != nil {
b.Fatalf("Could not create UTXODiff from toAdd and toRemove collections: %s", err)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := utxoDiffStore.serializeUTXODiff(testUTXODiff)
if err != nil {
b.Fatalf("Could not serialize UTXO diff: %s", err)
}
}
}
func BenchmarkUTXODiffDeserialization(b *testing.B) {
utxoDiffStore := New(0).(*utxoDiffStore)
testUTXODiff, err := buildTestUTXODiff()
if err != nil {
b.Fatalf("Could not create UTXODiff from toAdd and toRemove collections: %s", err)
}
serializedUTXODiff, err := utxoDiffStore.serializeUTXODiff(testUTXODiff)
if err != nil {
b.Fatalf("Could not serialize UTXO diff: %s", err)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err = utxoDiffStore.deserializeUTXODiff(serializedUTXODiff)
if err != nil {
b.Fatalf("Could not deserialize UTXO diff: %s", err)
}
}
}
func BenchmarkUTXODiffSerializationAndDeserialization(b *testing.B) {
utxoDiffStore := New(0).(*utxoDiffStore)
testUTXODiff, err := buildTestUTXODiff()
if err != nil {
b.Fatalf("Could not create UTXODiff from toAdd and toRemove collections: %s", err)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
serializedUTXODiff, err := utxoDiffStore.serializeUTXODiff(testUTXODiff)
if err != nil {
b.Fatalf("Could not serialize UTXO diff: %s", err)
}
_, err = utxoDiffStore.deserializeUTXODiff(serializedUTXODiff)
if err != nil {
b.Fatalf("Could not deserialize UTXO diff: %s", err)
}
}
}
func buildTestUTXODiff() (model.UTXODiff, error) {
toAdd := buildTestUTXOCollection()
toRemove := buildTestUTXOCollection()
utxoDiff, err := utxo.NewUTXODiffFromCollections(toAdd, toRemove)
if err != nil {
return nil, err
}
return utxoDiff, nil
}
func buildTestUTXOCollection() model.UTXOCollection {
utxoMap := make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry)
for i := 0; i < 100_000; i++ {
var outpointTransactionIDBytes [32]byte
rand.Read(outpointTransactionIDBytes[:])
outpointTransactionID := externalapi.NewDomainTransactionIDFromByteArray(&outpointTransactionIDBytes)
outpointIndex := rand.Uint32()
outpoint := externalapi.NewDomainOutpoint(outpointTransactionID, outpointIndex)
utxoEntryAmount := rand.Uint64()
var utxoEntryScriptPublicKeyScript [256]byte
rand.Read(utxoEntryScriptPublicKeyScript[:])
utxoEntryScriptPublicKeyVersion := uint16(rand.Uint32())
utxoEntryScriptPublicKey := &externalapi.ScriptPublicKey{
Script: utxoEntryScriptPublicKeyScript[:],
Version: utxoEntryScriptPublicKeyVersion,
}
utxoEntryIsCoinbase := rand.Float32() > 0.5
utxoEntryBlockBlueScore := rand.Uint64()
utxoEntry := utxo.NewUTXOEntry(utxoEntryAmount, utxoEntryScriptPublicKey, utxoEntryIsCoinbase, utxoEntryBlockBlueScore)
utxoMap[*outpoint] = utxoEntry
}
return utxo.NewUTXOCollection(utxoMap)
}
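One property of buildTestUTXOCollection worth noting: the global math/rand source is seeded deterministically by default (in Go releases predating 1.20), so every run generates the same 100,000 entries, which keeps the benchmark numbers above comparable across runs. To deliberately vary the data instead, one could seed explicitly; this init function is a hypothetical addition, not part of this diff:

package utxodiffstore

import (
	"math/rand"
	"time"
)

// init seeds the global source so each run sees different UTXO data.
// Hypothetical; trades run-to-run comparability for input variety.
func init() {
	rand.Seed(time.Now().UnixNano())
}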

Some files were not shown because too many files have changed in this diff.