Compare commits

...

109 Commits

Author SHA1 Message Date
Svarog
4a25ef90ea [NOD-1064] Don't send GetBlockInvsMsg with lowHash = nil (#769) 2020-06-21 09:09:27 +03:00
Ori Newman
6219b93430 [NOD-1018] Exit after 2 minutes if graceful shutdown fails (#732)
* [NOD-1018] Exit after 2 minutes if graceful shutdown fails

* [NOD-1018] Change time.Tick to time.After
2020-05-25 14:30:43 +03:00
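The NOD-1018 entry above describes a shutdown watchdog that forces the process to exit if graceful shutdown takes longer than two minutes, using time.After rather than time.Tick. A minimal Go sketch of that pattern follows; the function and channel names are illustrative, not kaspad's actual API.

```go
package main

import (
	"fmt"
	"os"
	"time"
)

// waitForShutdown blocks until done is closed or the timeout elapses.
// If graceful shutdown does not finish in time, the process exits.
func waitForShutdown(done <-chan struct{}, timeout time.Duration) {
	select {
	case <-done:
		fmt.Println("graceful shutdown finished")
	case <-time.After(timeout): // unlike time.Tick, time.After doesn't leak a ticker
		fmt.Println("graceful shutdown timed out, exiting")
		os.Exit(1)
	}
}

func main() {
	done := make(chan struct{})
	go func() {
		// Simulate shutdown work, then signal completion.
		time.Sleep(100 * time.Millisecond)
		close(done)
	}()
	waitForShutdown(done, 2*time.Minute)
}
```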
Ori Newman
6463a4b5d0 [NOD-1011] Don't cache isSynced on getBlockTemplate (#728) 2020-05-20 14:38:24 +03:00
Svarog
0ca127853d [NOD-974] UTXO-Commitments shouldn't include the new block's transactions (#727)
* [NOD-975] Don't include block transactions inside its UTXO commitment (#711)

* [NOD-975] Don't include block transactions inside its UTXO commitment.

* Revert "[NOD-975] Don't include block transactions inside its UTXO commitment."

This reverts commit b1a2ae66

* [NOD-975] Implement a (currently failing) TestUTXOCommitment.

* [NOD-975] Remove the block's own transactions from calcMultiset.

* [NOD-975] Simplify calcMultiset.

* [NOD-975] Add a comment on top of selectedParentMultiset.

* [NOD-975] Use pastUTXO instead of selectedParentUTXO in calcMultiset.

* [NOD-975] Use selected parent's pastUTXO instead of this block's pastUTXO in calcMultiset.

* [NOD-975] Extract selectedParentPastUTXO to a separate function.

* [NOD-975] Remove selectedParentUTXO from pastUTXO's return values.

* [NOD-975] Add txs to TestUTXOCommitment.

* [NOD-975] Remove debug code.

* [NOD-975] In pastUTXOMultiSet, copy the multiset to avoid modifying the original.

* [NOD-975] Add a test: TestPastUTXOMultiSet.

* [NOD-975] Improve TestPastUTXOMultiSet.

* [NOD-976] Implement tests for UTXO commitments (#715)

* [NOD-975] Don't include block transactions inside its UTXO commitment.

* Revert "[NOD-975] Don't include block transactions inside its UTXO commitment."

This reverts commit b1a2ae66

* [NOD-975] Implement a (currently failing) TestUTXOCommitment.

* [NOD-975] Remove the block's own transactions from calcMultiset.

* [NOD-975] Simplify calcMultiset.

* [NOD-975] Add a comment on top of selectedParentMultiset.

* [NOD-975] Use pastUTXO instead of selectedParentUTXO in calcMultiset.

* [NOD-975] Use selected parent's pastUTXO instead of this block's pastUTXO in calcMultiset.

* [NOD-975] Extract selectedParentPastUTXO to a separate function.

* [NOD-975] Remove selectedParentUTXO from pastUTXO's return values.

* [NOD-975] Add txs to TestUTXOCommitment.

* [NOD-976] Generate new blockDB blocks for tests.

* [NOD-976] Fix TestBlueBlockWindow.

* [NOD-976] Fix TestIsKnownBlock.

* [NOD-976] Fix TestGHOSTDAG.

* [NOD-976] Fix TestUTXOCommitment.

* [NOD-976] Remove kaka.

* [NOD-990] Save utxo diffs of past UTXO (#724)

* [NOD-990] Save UTXO diffs of past UTXO

* [NOD-990] Check for block double spends with its past instead of building its UTXO

* [NOD-990] Call resetExtraNonceForTest in TestUTXOCommitment

* [NOD-990] Remove redundant functions diffFromTx and diffFromAcceptedTx

* [NOD-990] Rename i->j to avoid confusion

* [NOD-990] Break long lines

* [NOD-990] Rename ErrDoubleSpendsWithBlockTransaction -> ErrDoubleSpendInSameBlock

* [NOD-990] Make ErrDoubleSpendInSameBlock more detailed

* [NOD-990] Add testProcessBlockRuleError

* [NOD-990] Fix comment

* [NOD-990] Add test for duplicate transactions on the same block

* [NOD-990] Use pkg/errors on panic

* [NOD-990] Make cloneWithoutBase method

* [NOD-990] Break long lines

* [NOD-990] Fix comment

* [NOD-990] Fix wrong variable names

* [NOD-990] Fix comment

* [NOD-974] Generate new test blocks.

* [NOD-974] Fix TestIsKnownBlock and TestGHOSTDAG.

* [NOD-974] Fix TestUTXOCommitment.

* [NOD-974] Fix comments

Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
Co-authored-by: stasatdaglabs <stas@daglabs.com>
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2020-05-20 12:43:52 +03:00
stasatdaglabs
b884ba128e [NOD-1008] In utxoDiffStore, keep diffData in memory for blocks whose blueScore is at least virtualBlueScore - X (#726)
* [NOD-1008] Use *blockNode as keys in utxoDiffStore.loaded and .dirty.

* [NOD-1008] Implement clearOldEntries.

* [NOD-1008] Increase maxBlueScoreDifferenceToKeepLoaded to 100.

* [NOD-1008] Fix a typo.

* [NOD-1008] Add clearOldEntries to saveChangesFromBlock.

* [NOD-1008] Begin implementing TestClearOldEntries.

* [NOD-1008] Finish implementing TestClearOldEntries.

* [NOD-1008] Fix a comment.

* [NOD-1008] Rename diffDataByHash to diffDataByBlockNode.

* [NOD-1008] Use dag.TipHashes instead of tracking tips manually.
2020-05-20 10:47:01 +03:00
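The NOD-1008 entry above keeps utxoDiffStore diff data in memory only for blocks whose blue score is within some distance of the virtual's blue score, evicting the rest via clearOldEntries. A hedged sketch of that eviction idea follows, with hypothetical types and field names standing in for kaspad's internals.

```go
package main

import "fmt"

// maxBlueScoreDifferenceToKeepLoaded mirrors the constant described in the
// commit: diff data for blocks far enough behind the virtual is evicted.
const maxBlueScoreDifferenceToKeepLoaded = 100

type blockNode struct {
	hash      string
	blueScore uint64
}

type utxoDiffStore struct {
	loaded map[*blockNode][]byte // diff data kept in memory
}

// clearOldEntries drops in-memory diff data for blocks whose blueScore is more
// than maxBlueScoreDifferenceToKeepLoaded behind the virtual's blueScore.
func (s *utxoDiffStore) clearOldEntries(virtualBlueScore uint64) {
	var minBlueScoreToKeep uint64
	if virtualBlueScore > maxBlueScoreDifferenceToKeepLoaded {
		minBlueScoreToKeep = virtualBlueScore - maxBlueScoreDifferenceToKeepLoaded
	}
	for node := range s.loaded {
		if node.blueScore < minBlueScoreToKeep {
			delete(s.loaded, node)
		}
	}
}

func main() {
	old := &blockNode{hash: "old", blueScore: 5}
	recent := &blockNode{hash: "recent", blueScore: 950}
	s := &utxoDiffStore{loaded: map[*blockNode][]byte{old: nil, recent: nil}}
	s.clearOldEntries(1000)
	fmt.Println(len(s.loaded)) // 1: only the recent node remains loaded
}
```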
Svarog
fe25ea3d8c [NOD-1001] Make an error in Peer.start() stop the connection process from continuing. (#723)
* [NOD-1001] Move side-effects of connection out of OnVersion

* [NOD-1001] Make AssociateConnection synchronous

* [NOD-1001] Wait for 2 veracks in TestPeerListeners

* [NOD-1001] Made AssociateConnection return error

* [NOD-1001] Remove temporary logs

* [NOD-1001] Fix typos and find-and-replace errors

* [NOD-1001] Move example_test back out of peer package + fix some typos

* [NOD-1001] Use correct remote address in setupPeersWithConns and return to address string literals

* [NOD-1001] Use separate verack channels for inPeer and outPeer

* [NOD-1001] Make verack channels buffered

* [NOD-1001] Removed temporary sleep of 1 second

* [NOD-1001] Removed redundant //
2020-05-20 10:36:44 +03:00
stasatdaglabs
e0f587f599 [NOD-877] Separate UTXO header code to two fields in serialization: blue score and packed flags (#725)
* [NOD-877] In UTXOEntry serialization, extract packedFlags out to a separate Uint8.

* [NOD-877] Generate new test blocks.

* [NOD-877] Fix TestIsKnownBlock.

* [NOD-877] Fix TestBlueBlockWindow.

* [NOD-877] Fix TestUTXOSerialization and TestGHOSTDAG.

* [NOD-877] Fix TestVirtualBlock.
2020-05-19 17:56:07 +03:00
stasatdaglabs
e9e1ef4772 [NOD-1006] Make use of a pool to avoid excessive allocation of big.Ints (#722)
* [NOD-1006] Make CompactToBig take an out param so that we can reuse the same big.Int in averageTarget.

* [NOD-1006] Fix merge errors.

* [NOD-1006] Use CompactToBigWithDestination only in averageTarget.

* [NOD-1006] Fix refactor errors.

* [NOD-1006] Fix refactor errors.

* [NOD-1006] Optimize averageTarget with a big.Int pool.

* [NOD-1006] Defer releasing bigInts.

* [NOD-1006] Use a pool for requiredDifficulty as well.

* [NOD-1006] Move the big int pool to utils.

* [NOD-1006] Remove unnecessary line.
2020-05-19 16:29:21 +03:00
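The NOD-1006 entry above moves difficulty math onto a pool of big.Ints so hot paths such as averageTarget and requiredDifficulty stop allocating a fresh big.Int on every call. A minimal sketch of that pooling pattern using sync.Pool follows; averageTarget here is a simplified stand-in, not the real kaspad function.

```go
package main

import (
	"fmt"
	"math/big"
	"sync"
)

// bigIntPool recycles big.Int values so hot paths avoid repeated allocations.
var bigIntPool = sync.Pool{
	New: func() interface{} { return new(big.Int) },
}

func acquireBigInt() *big.Int  { return bigIntPool.Get().(*big.Int) }
func releaseBigInt(n *big.Int) { bigIntPool.Put(n.SetInt64(0)) }

// averageTarget sums the targets into a pooled accumulator and divides by the count.
func averageTarget(targets []*big.Int) *big.Int {
	sum := acquireBigInt()
	defer releaseBigInt(sum)

	for _, t := range targets {
		sum.Add(sum, t)
	}
	return new(big.Int).Div(sum, big.NewInt(int64(len(targets))))
}

func main() {
	targets := []*big.Int{big.NewInt(100), big.NewInt(200), big.NewInt(300)}
	fmt.Println(averageTarget(targets)) // 200
}
```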
Svarog
eb8b841850 [NOD-1005] Use sm.isSynced to check whether should request blocks from invs (#721)
* [NOD-1005] Moved isSyncedForMining to netsync manager, and renamed to isSynced + removed isCurrent

* [NOD-1005] Use sm.isSynced to check whether should request blocks from invs

* [NOD-1005] Use private version of isSynced to avoid infinite loop

* [NOD-1005] Fix a few typos
2020-05-18 10:42:58 +03:00
Svarog
28681affda [NOD-994] Greatly increase the amount of logs kaspad keeps before rotating them away (#720)
* [NOD-994] Greatly increased the amount of logs kaspad keeps before rotating them away

* [NOD-994] Actually increase the log file

* [NOD-994] Update comments

* [NOD-994] Fix typo
2020-05-14 10:58:46 +03:00
Svarog
378f0b659a [NOD-993] Get rid of redundant error types + Use %+v when printing startup errors (#719)
* [NOD-993] Use %+v when printing errors

* [NOD-993] Get rid of AssertError

* [NOD-993] Made ruleError use github.com/pkg/errors

* [NOD-993] remove redundant TODO

* [NOD-993] remove redundant Comment

* [NOD-993] Removed DeploymentError
2020-05-13 17:27:53 +03:00
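The NOD-993 entry above consolidates error handling onto github.com/pkg/errors and prints startup errors with %+v so their recorded stack traces show up in the output. A small sketch of why %+v matters with that library; loadConfig is a hypothetical example function.

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func loadConfig() error {
	// errors.New from github.com/pkg/errors records a stack trace at this point.
	return errors.New("missing rpccert")
}

func main() {
	if err := loadConfig(); err != nil {
		// %v prints only the message; %+v also prints the recorded stack trace,
		// which is what makes startup failures easier to diagnose.
		fmt.Printf("error starting kaspad: %+v\n", err)
	}
}
```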
stasatdaglabs
35b943e04f [NOD-996] Disable kaspad logs in TestScripts (#718)
* [NOD-996] Disable kaspad logs in TestScripts.

* [NOD-996] Return the log level to its original state after TestScripts is done.
2020-05-13 15:57:30 +03:00
stasatdaglabs
65f75c17fc [NOD-982] Log message with level WARN when getting MsgReject (#717)
* [NOD-982] Log message with level WARN when getting MsgReject.

* [NOD-982] Fix wrong logLevel in Write and Writef.

* [NOD-982] Use Write and Writef inside Trace, Tracef, Debug, Debugf, etc...

* [NOD-982] Move peer message logging to a separate file.
2020-05-13 10:03:37 +03:00
stasatdaglabs
806eab817c [NOD-820] When the node isn't synced, make getBlockTemplate return a boolean isSynced instead of an error (#716)
* [NOD-820] Add IsSynced to GetBlockTemplateResult.

* [NOD-820] Add isSynced to the help file.

* [NOD-820] Add MineWhenNotSynced to the kaspaminer config.

* [NOD-820] Implement miner MineWhenNotSynced logic.

* [NOD-820] Fixed capitalization in an error message.
2020-05-12 15:08:24 +03:00
Ori Newman
585510d76c [NOD-847] Fix CIDR protection and prevent connecting to the same address twice (#714)
* [NOD-847] Fix CIDR protection and prevent connecting to the same address twice

* [NOD-847] Fix Tests

* [NOD-847] Add TestDuplicateOutboundConnections and TestSameOutboundGroupConnections

* [NOD-847] Fix TestRetryPermanent, TestNetworkFailure and wait 10 ms before restoring the previous active config

* [NOD-847] Add "is" before boolean methods

* [NOD-847] Fix Connect's lock

* [NOD-847] Make numAddressesInAddressManager an argument

* [NOD-847] Add teardown function for address manager

* [NOD-847] Add stack trace to ConnManager errors

* [NOD-847] Change emptyAddressManagerForTest->createEmptyAddressManagerForTest and fix typos

* [NOD-847] Fix wrong test name for addressManagerForTest

* [NOD-847] Change error message if New fails

* [NOD-847] Add new line on releaseAddress

* [NOD-847] Always try to reconnect on disconnect
2020-05-12 13:47:15 +03:00
Svarog
c8a381d5bb [NOD-981] Fixed error message when both --notls and --rpccert omitted (#713) 2020-05-06 13:05:48 +03:00
Ori Newman
3d04e6bded [NOD-943] Add acceptedBlockHashes to GetBlockVerboseResult (#708)
* [NOD-943] Add acceptedBlockHashes to GetBlockVerboseResult

* [NOD-943] Remove intermediate variables

* [NOD-943] Add block hash to error message

* [NOD-943] Change comment
2020-05-05 17:26:54 +03:00
Svarog
f8e851a6ed [NOD-968] Wrap all ldb errors with pkg/errors (#712) 2020-05-04 16:33:23 +03:00
stasatdaglabs
e70a615135 [NOD-872] Defer all currently undeferred unlocks in the database package (#706)
* [NOD-872] Defer unlocks in write.go.

* [NOD-872] Defer unlocks in rollback.go.

* [NOD-872] Defer unlocks in read.go.

* [NOD-872] Fix duplicate RUnlock.

* [NOD-872] Remove a redundant empty line.

* [NOD-872] Extract closeCurrentWriteCursorFile to a separate method.
2020-05-04 13:07:40 +03:00
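The NOD-872 entry above converts the database package's remaining manual Unlock/RUnlock calls into deferred ones, which guarantees the lock is released on every return path. A generic sketch of the pattern follows; the store type here is illustrative.

```go
package main

import (
	"fmt"
	"sync"
)

type store struct {
	mu   sync.RWMutex
	data map[string][]byte
}

// get takes the read lock and releases it with defer, so every return path
// (including early returns and panics) unlocks exactly once.
func (s *store) get(key string) ([]byte, bool) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	value, ok := s.data[key]
	return value, ok
}

func main() {
	s := &store{data: map[string][]byte{"tip": []byte("0xabc")}}
	if v, ok := s.get("tip"); ok {
		fmt.Printf("%s\n", v)
	}
}
```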
Ori Newman
73ad0adf72 [NOD-913] Use sync rate in getBlockTemplate (#705)
* [NOD-913] Use sync rate in getBlockTemplate

* [NOD-913] Rename addBlockProcessTime->addBlockProcessTimestamp, maxDiff->maxTipAge

* [NOD-913] Pass maxDeviation as an argument

* [NOD-913] Change maxDeviation to +5%

* [NOD-913] Rename variables

* [NOD-913] Rename variables and functions and change comments

* [NOD-913] Split addBlockProcessingTimestamp
2020-05-04 09:09:23 +03:00
stasatdaglabs
5b74e51db1 [NOD-956] Increase K to 15. (#710) 2020-05-03 14:56:47 +03:00
stasatdaglabs
2e2492cc5d [NOD-849] Database tests (#695)
* [NOD-849] Cover ffldb/transaction with tests.

* [NOD-849] Cover cursor.go with tests.

* [NOD-849] Cover ldb/transaction with tests.

* [NOD-849] Cover location.go with tests.

* [NOD-849] Write TestFlatFileMultiFileRollback.

* [NOD-849] Fix merge errors.

* [NOD-849] Fix a comment.

* [NOD-849] Fix a comment.

* [NOD-849] Add a test that makes sure that files get deleted on rollback.

* [NOD-849] Add a test that makes sure that serializeLocation serialized to an expected value.

* [NOD-849] Improve TestFlatFileLocationDeserializationErrors.

* [NOD-849] Fix a copy+paste error.

* [NOD-849] Explain maxFileSize = 16.

* [NOD-849] Remove redundant RollbackUnlessClosed call.

* [NOD-849] Extract bucket to a variable in TestCursorSanity.

* [NOD-849] Rename TestKeyValueTransactionCommit to TestTransactionCommitForLevelDBMethods.

* [NOD-849] Extract prepareXXX into separate functions.

* [NOD-849] Simplify function calls in TestTransactionCloseErrors.

* [NOD-849] Extract validateCurrentCursorKeyAndValue to a separate function.

* [NOD-849] Add a comment over TestCursorSanity.

* [NOD-849] Add a comment over function in TestCursorCloseErrors.

* [NOD-849] Add a comment over function in TestTransactionCloseErrors.

* [NOD-849] Separate TestTransactionCloseErrors to TestTransactionCommitErrors and TestTransactionRollbackErrors.

* [NOD-849] Separate TestTransactionCloseErrors to TestTransactionCommitErrors and TestTransactionRollbackErrors.

* [NOD-849] Fix copy+paste error in comments.

* [NOD-849] Fix merge errors.

* [NOD-849] Merge TestTransactionCommitErrors and TestTransactionRollbackErrors into TestTransactionCloseErrors.

* [NOD-849] Move prepareDatabaseForTest into ffldb_test.go.

* [NOD-849] Add cursorKey to Value error messages in validateCurrentCursorKeyAndValue.
2020-05-03 12:19:09 +03:00
Ori Newman
2ef5c2cbac [NOD-915] Check if lockableFile underlying file is nil before closing it (#709) 2020-04-30 14:43:38 +03:00
Ori Newman
3c89e1f7b3 [NOD-952] Fix nil dereference bug on outboundPeerConnectionFailed (#704) 2020-04-27 13:50:09 +03:00
stasatdaglabs
2910724b49 [NOD-934] Fix addresses not getting their retry attempt counter incremented if they fail to connect (#702)
* [NOD-934] Fix addresses not getting their retry attempt counter incremented if they fail to connect.

* [NOD-922] Inline parseNetAddress.

* [NOD-922] Fix debug logs.
2020-04-23 17:01:09 +03:00
stasatdaglabs
3af945692e [NOD-922] Panic from cursor Next and First (#703)
* [NOD-922] Panic in Cursor First and Next if the cursor is closed.

* [NOD-922] Fix broken tests.

* [NOD-922] Fix a comment.
2020-04-23 16:55:25 +03:00
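The NOD-922 entry above makes a closed cursor panic when First or Next is called, so misuse fails loudly instead of silently returning stale data. A toy sketch of that guard follows; the types and messages are illustrative.

```go
package main

import "fmt"

// cursor panics when used after Close, turning silent misuse into an
// immediate, visible failure.
type cursor struct {
	isClosed bool
	keys     []string
	position int
}

func (c *cursor) Close() { c.isClosed = true }

func (c *cursor) First() bool {
	if c.isClosed {
		panic("cannot call First on a closed cursor")
	}
	c.position = 0
	return len(c.keys) > 0
}

func (c *cursor) Next() bool {
	if c.isClosed {
		panic("cannot call Next on a closed cursor")
	}
	c.position++
	return c.position < len(c.keys)
}

func main() {
	c := &cursor{keys: []string{"a", "b"}}
	fmt.Println(c.First(), c.Next(), c.Next()) // true true false
}
```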
stasatdaglabs
5fe9dae557 [NOD-863] Write interface tests for the new database (#697)
* [NOD-863] Write TestCursorNext.

* [NOD-863] Write TestCursorFirst.

* [NOD-863] Fix merge errors.

* [NOD-863] Add TestCursorSeek.

* [NOD-863] Add TestCursorCloseErrors.

* [NOD-863] Add TestCursorCloseFirstAndNext.

* [NOD-863] Add TestDataAccessorPut.

* [NOD-863] Add TestDataAccessorGet.

* [NOD-863] Add TestDataAccessorHas.

* [NOD-863] Add TestDatabaseDelete.

* [NOD-863] Add TestDatabaseAppendToStoreAndRetrieveFromStore.

* [NOD-863] Add TestTransactionAppendToStoreAndRetrieveFromStore.

* [NOD-863] Add TestTransactionDelete.

* [NOD-863] Add TestTransactionHas.

* [NOD-863] Add TestTransactionGet.

* [NOD-863] Add TestTransactionPut.

* [NOD-863] Move cursor tests to the bottom of interface_test.go.

* [NOD-863] Move interface_test.go to a database_test package.

* [NOD-863] Make each test in interface_test.go run for every database driver. Currently, only ffldb.

* [NOD-863] Make each cursor test in interface_test.go run for every database driver. Currently, only ffldb.

* [NOD-863] Split interface_test.go into separate files.

* [NOD-863] Rename interface_test.go to common_test.go.

* [NOD-863] Extract testForAllDatabaseTypes to a separate function.

* [NOD-863] Reorganize how test data gets added to the database.

* [NOD-863] Add explanations about testForAllDatabaseTypes.

* [NOD-863] Add tests that make sure that database changes don't affect previously opened transactions.

* [NOD-863] Extract databasePrepareFunc to a type alias.

* [NOD-863] Fix comments.

* [NOD-863] Add cursor exhaustion test to testCursorFirst.

* [NOD-863] Add cursor Next clause to testCursorSeek.

* [NOD-863] Add additional verification to testDatabasePut.

* [NOD-863] Add an additional verification to testTransactionGet.

* [NOD-863] Add TestTransactionCommit.

* [NOD-863] Add TestTransactionRollback.

* [NOD-863] Add TestTransactionRollbackUnlessClosed.

* [NOD-863] Remove equals sign from databasePrepareFunc declaration.
2020-04-20 12:14:55 +03:00
Svarog
42c53ec3e2 [NOD-869] Add a print after os.Exit(1) to see if it is ever called (#701) 2020-04-16 16:08:32 +03:00
Ori Newman
291df8bfef [NOD-858] Don't switch sync peer if the syncing process hasn't yet started with the current sync peer (#700)
* [NOD-858] Don't switch sync peer if the syncing process hasn't yet started with the current sync peer

* [NOD-858] SetShouldSendBlockLocator(false) on OnBlockLocator

* [NOD-858] Rename shouldSendBlockLocator->wasBlockLocatorRequested

* [NOD-858] Move panic to shouldReplaceSyncPeer
2020-04-13 15:50:55 +03:00
Ori Newman
d015286f65 [NOD-909] Add tests for double spends (#694)
* [NOD-909] Add tests for double spends

* [NOD-909] Add prepareAndProcessBlock that gets parent hashes and transactions as argument

* [NOD-909] Use PrepareAndProcessBlockForTest where possible

* [NOD-909] Use more meaningful names

* [NOD-909] Change a comment

* [NOD-909] Fix comment

* [NOD-909] Fix comment
2020-04-13 12:28:59 +03:00
Ori Newman
fe91b4c878 [NOD-914] Make LevelDB.Cursor receive bucket instead of prefix (#696) 2020-04-12 09:25:40 +03:00
Ori Newman
7609c50641 [NOD-885] Use database.Key and database.Bucket instead of byte slices (#692)
* [NOD-885] Create database.Key type

* [NOD-885] Rename FullKey()->FullKeyBytes() and Key()->KeyBytes()

* [NOD-885] Make Key.String return a hex string

* [NOD-885] Rename key parts

* [NOD-885] Rename separator->bucketSeparator

* [NOD-885] Rename SuffixBytes->Suffix and PrefixBytes->Prefix

* [NOD-885] Change comments

* [NOD-885] Change key prefix to bucket

* [NOD-885] Don't use database.NewKey inside dbaccess

* [NOD-885] Fix nil bug in Bucket.Path()

* [NOD-885] Rename helpers.go -> keys.go

* [NOD-885] Unexport database.NewKey

* [NOD-885] Remove redundant code in Bucket.Path()
2020-04-08 12:12:21 +03:00
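The NOD-885 entry above replaces raw byte-slice keys with database.Key and database.Bucket types, where bucket path components joined by a separator form key prefixes. A rough sketch of how such a bucket/key scheme can work follows; this is an assumption about the general shape, not kaspad's exact API.

```go
package main

import (
	"bytes"
	"fmt"
)

var bucketSeparator = []byte("/")

// Bucket is a sketch of a typed key namespace: an ordered list of path
// components that are joined with a separator to form key prefixes.
type Bucket struct {
	path [][]byte
}

func MakeBucket(path ...[]byte) *Bucket { return &Bucket{path: path} }

// Bucket returns a nested bucket under the current one.
func (b *Bucket) Bucket(name []byte) *Bucket {
	newPath := make([][]byte, len(b.path), len(b.path)+1)
	copy(newPath, b.path)
	return &Bucket{path: append(newPath, name)}
}

// Key builds the full key for suffix under this bucket.
func (b *Bucket) Key(suffix []byte) []byte {
	var key bytes.Buffer
	for _, component := range b.path {
		key.Write(component)
		key.Write(bucketSeparator)
	}
	key.Write(suffix)
	return key.Bytes()
}

func main() {
	blockIndex := MakeBucket([]byte("block-index"))
	fmt.Printf("%s\n", blockIndex.Key([]byte("000a")))                         // block-index/000a
	fmt.Printf("%s\n", blockIndex.Bucket([]byte("by-hash")).Key([]byte("ff"))) // block-index/by-hash/ff
}
```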
Ori Newman
df934990d7 [NOD-822] Don't return rule errors from utxoset code (#693)
* [NOD-822] Remove rule errors from the UTXO diff code

* [NOD-822] Rename applyTransactions -> applyAndVerifyBlockTransactionsToPastUTXO

* [NOD-822] Fix comment
2020-04-07 12:45:12 +03:00
stasatdaglabs
3c4a80f16d [NOD-899] Inside the database, in case we're out of disk space, panic without printing the stack trace (#691)
* [NOD-899] Inside the database, in case we're out of disk space, panic without printing the stack trace.

* [NOD-899] Fix bad variable name.

* [NOD-899] Reduce code duplication.
2020-04-06 16:00:48 +03:00
stasatdaglabs
a31139d4a5 [NOD-895] Break down initDAGState to sub-routines (#690) 2020-04-06 11:08:57 +03:00
Mike Zak
6da3606721 Update to version 0.4.0 2020-04-05 16:23:01 +03:00
Ori Newman
bfbc72724d [NOD-873] Reuse allocated space when updating the UTXO set in database (#688) 2020-04-05 11:46:16 +03:00
stasatdaglabs
956b6f7d95 [NOD-900] Fix bad key in Seek (#687)
* [NOD-900] Fix Seek not working as expected.

* [NOD-900] Wrap error messages.

* [NOD-900] Use ldbIterator.Key instead of LevelDBCursor.Key.

* [NOD-900] Add a comment.
2020-04-02 17:47:51 +03:00
stasatdaglabs
c1a039de3f [NOD-900] Fix Seek not working as expected (#686)
* [NOD-900] Fix Seek not working as expected.

* [NOD-900] Wrap error messages.
2020-04-02 17:05:58 +03:00
stasatdaglabs
f8b18e09d6 [NOD-805] Redesign the database (#685)
* [NOD-828] Reimplement FFLDB (#663)

* [NOD-828] Create the database2 package that will some day replace the database package.

* [NOD-828] Implement a "bucket" key mechanism.

* [NOD-828] Move bucket.go into the ffldb2 package.

* [NOD-828] Delete the un-interfaced ffldb package from database2, since we aren't going to be using it anyway.

* [NOD-828] Copy over + fixup flat file structs from the old ffldb.

* [NOD-828] Implement flatFilePath.

* [NOD-828] Implement flatFileStore.write().

* [NOD-828] Implement flatFileStore.read().

* [NOD-828] Implement flatFileStore.rollback().

* [NOD-828] Sync the file to disk at the end of write().

* [NOD-828] Extract crc32ByteOrder to a separate variable.

* [NOD-828] Add a sanity test.

* [NOD-828] Remove context-unrelated methods from the Database interface.

* [NOD-828] Create an ffldb object. Simply work against a context.

* [NOD-828] Open the new database on start.

* [NOD-828] Create the leveldb package.

* [NOD-828] Implement opening/closing leveldb.

* [NOD-828] Implement get/put out of/into leveldb.

* [NOD-828] Implement transactions and make them implement a generic database interface.

* [NOD-828] Write sanity tests for leveldb with and without transactions.

* [NOD-828] Add another case to the transaction sanity test.

* [NOD-828] Implement AppendBlock/RetrieveBlock.

* [NOD-828] Refactor so that concepts such as "block" and "metadata" don't leak into the database package.

* [NOD-828] Add RollbackFlatData to the database interface.

* [NOD-828] Remove anything from dbaccess that I'm not planning to implement as part of this ticket.

* [NOD-828] Implement StoreBlock.

* [NOD-828] Implement FetchBlock.

* [NOD-828] Implement HasBlock.

* [NOD-828] Write a sanity test for block insertion.

* [NOD-828] Implement CurrentFlatDataLocation.

* [NOD-828] Implement storing the current block location.

* [NOD-828] Implement initializing/syncing the flat file block store and the "metadata".

* [NOD-828] Add InitBlockStore to TestBlockStoreSanity.

* [NOD-828] Fix rename errors.

* [NOD-828] Fix lint errors in the root database package.

* [NOD-828] Fix lint errors in the ffldb.go.

* [NOD-828] Fix lint errors in the flatfile/db.go.

* [NOD-828] Rename packages in such a way to make the linter happy.

* [NOD-828] Finish satisfying the linter.

* [NOD-828] Fix doc.go.

* [NOD-828] Fix comments in block.go.

* [NOD-828] Move dbaccess out of the database package.

* [NOD-828] Move opening/closing the database to dbaccess.

* [NOD-828] Move the Database interface to the root database package, since it's meant to be generic.

* [NOD-828] Make ffldb generic to later support additional databases.

* [NOD-828] Make ffldb.Open return DatabaseHandle, since ffldb is no longer exported.

* [NOD-828] Fix comments.

* [NOD-828] Rename AppendFlatData to AppendToStore and RetrieveFlatData to RetrieveFromStore.

* [NOD-828] Make buckets nicer to use.

* [NOD-828] Implement cursors that iterate over some bucket.

* [NOD-828] Generalize flat-file repairing and move block database repairing into ffldb.

* [NOD-828] Write a test making sure that flat file repair works.

* [NOD-828] Properly close the database in TestRepairFlatFiles.

* [NOD-828] Add a comment warning against putting and getting the same data within the same transaction.

* [NOD-828] Fix the flatFilesBucket description.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Fix the ffldb description.

* [NOD-828] Rename Database to DataAccessor and rename Handle to Database.

* [NOD-828] Make Get return nil if the value doesn't exist.

* [NOD-828] Attempt to close leveldb even if closing ffdb failed.

* [NOD-828] Fix a bug where the wrong location would be written to the current store location bucket.

* [NOD-828] Fix not updating the store location in ffldb transactions.

* [NOD-828] Make scanFlatFiles return an error if os.Stat fails for any reason other than file-not-found.

* [NOD-828] Update the README and doc.go.

* [NOD-828] Simplify Bucket.Path().

* [NOD-828] Since LevelDBCursor satisfied the database2.Cursor interface, use it directly.

* [NOD-828] Combine two lines into one.

* [NOD-828] Combine another two lines into one.

* [NOD-828] Move a misplaced comment.

* [NOD-828] Use Wrapf instead of Errorf where appropriate.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Fix Wrapf calls.

* [NOD-828] Fix comments.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Add a comment explaining the use of batches and snapshots.

* [NOD-828] Implement RollbackUnlessClosed().

* [NOD-828] Return both errors in StoreBlock rollback.

* [NOD-828] Move rollback-on-error logic into ffldb. Remove CurrentStoreLocation and RollbackStore from DataAccessor.

* [NOD-828] Make bucket a type alias instead of a struct.

* [NOD-828] Fix a typo.

* [NOD-828] Use copy instead of append in Bucket.

* [NOD-828] Extract flatFileLocationSerializedSize to a const.

* [NOD-828] Debugf -> Warnf in rollback.go.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Remove data length from flat file data format.

* [NOD-828] Rearrange TestLevelDBTransactionSanity a bit.

* [NOD-828] Add stack traces to all errors that come out of library functions.

* [NOD-828] Return errors from rollback().

* [NOD-828] Remove an irrelevant comment.

* [NOD-828] Remove redundant whitespace.

* [NOD-828] Handle nil in FetchBlock.

* [NOD-828] Move the explanation about batches and snapshots to the LevelDBTransaction struct.

* Revert "[NOD-828] Make bucket a type alias instead of a struct."

This reverts commit 1fd39652

* [NOD-828] Fix revert errors.

* Revert "[NOD-828] Remove data length from flat file data format."

This reverts commit ef408e32

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-828] Add Delete to DataAccessor.

* [NOD-828] Fix a couple of places that erroneously referenced blocks.

* [NOD-828] Add a comment on top of flatFileLocationSerializedSize.

* [NOD-828] Add Seek to Cursor.

* [NOD-828] Add First to Cursor.

* [NOD-828] Rename db to accessor in Context.

* [NOD-828] Make Get/Fetch calls return a boolean to indicate whether the requested item was found.

* [NOD-828] Name the output parameters of all Get functions.

* [NOD-828] Make RetrieveFromStore return whether the data was found.

* [NOD-887] Add a couple of QoL features to Cursor (#674)

* [NOD-887] Changed First to not return an error.

* [NOD-887] Fix merge error.

* [NOD-887] Make Cursor.Key not return the entire key path.

* [NOD-888] Add RollbackUnlessClosed to Context (#676)

* [NOD-888] Add RollbackUnlessClosed to Context.

* [NOD-888] Fix copy+paste error.

* [NOD-889] Instead of returning a boolean for not-found, return an error (#677)

* [NOD-889] Instead of returning a boolean for not-found, return an error.

* [NOD-889] Wrapped ErrNotFound for Get calls with nicer error messages.

* [NOD-889] Fix format.

* [NOD-889] Fix double space in a comment.

* [NOD-889] Add IsNotFoundError to dbaccess.

* [NOD-862] Replace calls to Tx.StoreBlock, Tx.HasBlock, Tx.FetchBlock with appropriate calls in dbaccess (#672)

* [NOD-828] Create the database2 package that will some day replace the database package.

* [NOD-828] Implement a "bucket" key mechanism.

* [NOD-828] Move bucket.go into the ffldb2 package.

* [NOD-828] Delete the un-interfaced ffldb package from database2, since we aren't going to be using it anyway.

* [NOD-828] Copy over + fixup flat file structs from the old ffldb.

* [NOD-828] Implement flatFilePath.

* [NOD-828] Implement flatFileStore.write().

* [NOD-828] Implement flatFileStore.read().

* [NOD-828] Implement flatFileStore.rollback().

* [NOD-828] Sync the file to disk at the end of write().

* [NOD-828] Extract crc32ByteOrder to a separate variable.

* [NOD-828] Add a sanity test.

* [NOD-828] Remove context-unrelated methods from the Database interface.

* [NOD-828] Create an ffldb object. Simply work against a context.

* [NOD-828] Open the new database on start.

* [NOD-828] Create the leveldb package.

* [NOD-828] Implement opening/closing leveldb.

* [NOD-828] Implement get/put out of/into leveldb.

* [NOD-828] Implement transactions and make them implement a generic database interface.

* [NOD-828] Write sanity tests for leveldb with and without transactions.

* [NOD-828] Add another case to the transaction sanity test.

* [NOD-828] Implement AppendBlock/RetrieveBlock.

* [NOD-828] Refactor so that concepts such as "block" and "metadata" don't leak into the database package.

* [NOD-828] Add RollbackFlatData to the database interface.

* [NOD-828] Remove anything from dbaccess that I'm not planning to implement as part of this ticket.

* [NOD-828] Implement StoreBlock.

* [NOD-828] Implement FetchBlock.

* [NOD-828] Implement HasBlock.

* [NOD-828] Write a sanity test for block insertion.

* [NOD-828] Implement CurrentFlatDataLocation.

* [NOD-828] Implement storing the current block location.

* [NOD-828] Implement initializing/syncing the flat file block store and the "metadata".

* [NOD-828] Add InitBlockStore to TestBlockStoreSanity.

* [NOD-828] Fix rename errors.

* [NOD-828] Fix lint errors in the root database package.

* [NOD-828] Fix lint errors in the ffldb.go.

* [NOD-828] Fix lint errors in the flatfile/db.go.

* [NOD-828] Rename packages in such a way to make the linter happy.

* [NOD-828] Finish satisfying the linter.

* [NOD-828] Fix doc.go.

* [NOD-828] Fix comments in block.go.

* [NOD-828] Move dbaccess out of the database package.

* [NOD-828] Move opening/closing the database to dbaccess.

* [NOD-828] Move the Database interface to the root database package, since it's meant to be generic.

* [NOD-828] Make ffldb generic to later support additional databases.

* [NOD-828] Make ffldb.Open return DatabaseHandle, since ffldb is no longer exported.

* [NOD-828] Fix comments.

* [NOD-828] Rename AppendFlatData to AppendToStore and RetrieveFlatData to RetrieveFromStore.

* [NOD-828] Make buckets nicer to use.

* [NOD-828] Implement cursors that iterate over some bucket.

* [NOD-828] Generalize flat-file repairing and move block database repairing into ffldb.

* [NOD-828] Write a test making sure that flat file repair works.

* [NOD-828] Properly close the database in TestRepairFlatFiles.

* [NOD-828] Add a comment warning against putting and getting the same data within the same transaction.

* [NOD-862] Use dbaccess.HasBlock instead of Tx.HasBlock in initDAGState.

* [NOD-862] Use dbaccess.StoreBlock instead of dbStoreBlock.

* [NOD-862] Use dbaccess.FetchBlock instead of various block fetching mechanisms.

* [NOD-828] Fix the flatFilesBucket description.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Fix the ffldb description.

* [NOD-828] Rename Database to DataAccessor and rename Handle to Database.

* [NOD-828] Make Get return nil if the value doesn't exist.

* [NOD-828] Attempt to close leveldb even if closing ffdb failed.

* [NOD-828] Fix a bug where the wrong location would be written to the current store location bucket.

* [NOD-828] Fix not updating the store location in ffldb transactions.

* [NOD-828] Make scanFlatFiles return an error if os.Stat fails for any reason other than file-not-found.

* [NOD-828] Update the README and doc.go.

* [NOD-828] Simplify Bucket.Path().

* [NOD-828] Since LevelDBCursor satisfied the database2.Cursor interface, use it directly.

* [NOD-828] Combine two lines into one.

* [NOD-828] Combine another two lines into one.

* [NOD-828] Move a misplaced comment.

* [NOD-828] Use Wrapf instead of Errorf where appropriate.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Fix Wrapf calls.

* [NOD-828] Fix comments.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Add a comment explaining the use of batches and snapshots.

* [NOD-828] Implement RollbackUnlessClosed().

* [NOD-828] Return both errors in StoreBlock rollback.

* [NOD-828] Move rollback-on-error logic into ffldb. Remove CurrentStoreLocation and RollbackStore from DataAccessor.

* [NOD-828] Make bucket a type alias instead of a struct.

* [NOD-828] Fix a typo.

* [NOD-828] Use copy instead of append in Bucket.

* [NOD-828] Extract flatFileLocationSerializedSize to a const.

* [NOD-828] Debugf -> Warnf in rollback.go.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Remove data length from flat file data format.

* [NOD-828] Rearrange TestLevelDBTransactionSanity a bit.

* [NOD-828] Add stack traces to all errors that come out of library functions.

* [NOD-828] Return errors from rollback().

* [NOD-828] Remove an irrelevant comment.

* [NOD-828] Remove redundant whitespace.

* [NOD-828] Handle nil in FetchBlock.

* [NOD-828] Implement a dbaccess.BlockNode struct.

* [NOD-828] Move the explanation about batches and snapshots to the LevelDBTransaction struct.

* [NOD-828] Implement toDBBlockNode and fromDBBlockNode.

* Revert "[NOD-828] Make bucket a type alias instead of a struct."

This reverts commit 1fd39652

* [NOD-828] Fix revert errors.

* Revert "[NOD-828] Remove data length from flat file data format."

This reverts commit ef408e32

* [NOD-862] Implement storing index blocks.

* [NOD-862] Use database transactions where appropriate.

* [NOD-862] Fix tests failing on DAGSetup.

* [NOD-862] Fix bad make call.

* [NOD-862] Fix remaining database opening problems in tests.

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-862] Iterate over the new block index in dagio.

* [NOD-862] Fix block index key.

* [NOD-828] Add Delete to DataAccessor.

* [NOD-862] Pass byte slices to dbaccess instead of objects.

* [NOD-862] Fix errors.

* [NOD-862] Fix maybeAcceptBlock not checking block existence.

* [NOD-862] Fix TestAcceptanceIndexRecover.

* [NOD-862] Add comments to StoreBlockIndex and BlockIndexCursor.

* [NOD-828] Fix a couple of places that erroneously referenced blocks.

* [NOD-828] Add a comment on top of flatFileLocationSerializedSize.

* [NOD-828] Add Seek to Cursor.

* [NOD-828] Add First to Cursor.

* [NOD-828] Rename db to accessor in Context.

* [NOD-828] Make Get/Fetch calls return a boolean to indicate whether the requested item was found.

* [NOD-828] Name the output parameters of all Get functions.

* [NOD-828] Make RetrieveFromStore return whether the data was found.

* [NOD-862] Fix merge errors.

* [NOD-862] Fix DAGSetup using bad temp directories.

* [NOD-862] Fix TestProcessDelayedBlocks not closing the database properly.

* [NOD-862] Fix merge errors.

* [NOD-862] Merge flushToDBWithContext and flushToDB.

* [NOD-862] Remove TODO.

* [NOD-862] Add prefix to the temp dir in DAGSetup.

* [NOD-862] Bring back dbFetchBlockByHash.

* [NOD-862] Use BlockDAG.BlockByHash in p2p and rpc.

* [NOD-862] Use daghash.Hash in dbaccess.

* [NOD-862] Add defer to RollbackUnlessClosed after NewTx().

* [NOD-862] Extract dbStoreBlock to a separate function.

* [NOD-862] Fix grammar in comment.

* [NOD-862] Fix merge errors.

* [NOD-867] Migrate database logic in blockdag/dagio.go to dbaccess (#675)

* [NOD-828] Create the database2 package that will some day replace the database package.

* [NOD-828] Implement a "bucket" key mechanism.

* [NOD-828] Move bucket.go into the ffldb2 package.

* [NOD-828] Delete the un-interfaced ffldb package from database2, since we aren't going to be using it anyway.

* [NOD-828] Copy over + fixup flat file structs from the old ffldb.

* [NOD-828] Implement flatFilePath.

* [NOD-828] Implement flatFileStore.write().

* [NOD-828] Implement flatFileStore.read().

* [NOD-828] Implement flatFileStore.rollback().

* [NOD-828] Sync the file to disk at the end of write().

* [NOD-828] Extract crc32ByteOrder to a separate variable.

* [NOD-828] Add a sanity test.

* [NOD-828] Remove context-unrelated methods from the Database interface.

* [NOD-828] Create an ffldb object. Simply work against a context.

* [NOD-828] Open the new database on start.

* [NOD-828] Create the leveldb package.

* [NOD-828] Implement opening/closing leveldb.

* [NOD-828] Implement get/put out of/into leveldb.

* [NOD-828] Implement transactions and make them implement a generic database interface.

* [NOD-828] Write sanity tests for leveldb with and without transactions.

* [NOD-828] Add another case to the transaction sanity test.

* [NOD-828] Implement AppendBlock/RetrieveBlock.

* [NOD-828] Refactor so that concepts such as "block" and "metadata" don't leak into the database package.

* [NOD-828] Add RollbackFlatData to the database interface.

* [NOD-828] Remove anything from dbaccess that I'm not planning to implement as part of this ticket.

* [NOD-828] Implement StoreBlock.

* [NOD-828] Implement FetchBlock.

* [NOD-828] Implement HasBlock.

* [NOD-828] Write a sanity test for block insertion.

* [NOD-828] Implement CurrentFlatDataLocation.

* [NOD-828] Implement storing the current block location.

* [NOD-828] Implement initializing/syncing the flat file block store and the "metadata".

* [NOD-828] Add InitBlockStore to TestBlockStoreSanity.

* [NOD-828] Fix rename errors.

* [NOD-828] Fix lint errors in the root database package.

* [NOD-828] Fix lint errors in the ffldb.go.

* [NOD-828] Fix lint errors in the flatfile/db.go.

* [NOD-828] Rename packages in such a way to make the linter happy.

* [NOD-828] Finish satisfying the linter.

* [NOD-828] Fix doc.go.

* [NOD-828] Fix comments in block.go.

* [NOD-828] Move dbaccess out of the database package.

* [NOD-828] Move opening/closing the database to dbaccess.

* [NOD-828] Move the Database interface to the root database package, since it's meant to be generic.

* [NOD-828] Make ffldb generic to later support additional databases.

* [NOD-828] Make ffldb.Open return DatabaseHandle, since ffldb is no longer exported.

* [NOD-828] Fix comments.

* [NOD-828] Rename AppendFlatData to AppendToStore and RetrieveFlatData to RetrieveFromStore.

* [NOD-828] Make buckets nicer to use.

* [NOD-828] Implement cursors that iterate over some bucket.

* [NOD-828] Generalize flat-file repairing and move block database repairing into ffldb.

* [NOD-828] Write a test making sure that flat file repair works.

* [NOD-828] Properly close the database in TestRepairFlatFiles.

* [NOD-828] Add a comment warning against putting and getting the same data within the same transaction.

* [NOD-862] Use dbaccess.HasBlock instead of Tx.HasBlock in initDAGState.

* [NOD-862] Use dbaccess.StoreBlock instead of dbStoreBlock.

* [NOD-862] Use dbaccess.FetchBlock instead of various block fetching mechanisms.

* [NOD-828] Fix the flatFilesBucket description.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Fix the ffldb description.

* [NOD-828] Rename Database to DataAccessor and rename Handle to Database.

* [NOD-828] Make Get return nil if the value doesn't exist.

* [NOD-828] Attempt to close leveldb even if closing ffdb failed.

* [NOD-828] Fix a bug where the wrong location would be written to the current store location bucket.

* [NOD-828] Fix not updating the store location in ffldb transactions.

* [NOD-828] Make scanFlatFiles return an error if os.Stat fails for any reason other than file-not-found.

* [NOD-828] Update the README and doc.go.

* [NOD-828] Simplify Bucket.Path().

* [NOD-828] Since LevelDBCursor satisfied the database2.Cursor interface, use it directly.

* [NOD-828] Combine two lines into one.

* [NOD-828] Combine another two lines into one.

* [NOD-828] Move a misplaced comment.

* [NOD-828] Use Wrapf instead of Errorf where appropriate.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Fix Wrapf calls.

* [NOD-828] Fix comments.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Add a comment explaining the use of batches and snapshots.

* [NOD-828] Implement RollbackUnlessClosed().

* [NOD-828] Return both errors in StoreBlock rollback.

* [NOD-828] Move rollback-on-error logic into ffldb. Remove CurrentStoreLocation and RollbackStore from DataAccessor.

* [NOD-828] Make bucket a type alias instead of a struct.

* [NOD-828] Fix a typo.

* [NOD-828] Use copy instead of append in Bucket.

* [NOD-828] Extract flatFileLocationSerializedSize to a const.

* [NOD-828] Debugf -> Warnf in rollback.go.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Remove data length from flat file data format.

* [NOD-828] Rearrange TestLevelDBTransactionSanity a bit.

* [NOD-828] Add stack traces to all errors that come out of library functions.

* [NOD-828] Return errors from rollback().

* [NOD-828] Remove an irrelevant comment.

* [NOD-828] Remove redundant whitespace.

* [NOD-828] Handle nil in FetchBlock.

* [NOD-828] Implement a dbaccess.BlockNode struct.

* [NOD-828] Move the explanation about batches and snapshots to the LevelDBTransaction struct.

* [NOD-828] Implement toDBBlockNode and fromDBBlockNode.

* Revert "[NOD-828] Make bucket a type alias instead of a struct."

This reverts commit 1fd39652

* [NOD-828] Fix revert errors.

* Revert "[NOD-828] Remove data length from flat file data format."

This reverts commit ef408e32

* [NOD-862] Implement storing index blocks.

* [NOD-862] Use database transactions where appropriate.

* [NOD-862] Fix tests failing on DAGSetup.

* [NOD-862] Fix bad make call.

* [NOD-862] Fix remaining database opening problems in tests.

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-862] Iterate over the new block index in dagio.

* [NOD-862] Fix block index key.

* [NOD-828] Add Delete to DataAccessor.

* [NOD-862] Pass byte slices to dbaccess instead of objects.

* [NOD-862] Fix errors.

* [NOD-862] Fix maybeAcceptBlock not checking block existence.

* [NOD-862] Fix TestAcceptanceIndexRecover.

* [NOD-862] Add comments to StoreBlockIndex and BlockIndexCursor.

* [NOD-828] Fix a couple of places that erroneously referenced blocks.

* [NOD-828] Add a comment on top of flatFileLocationSerializedSize.

* [NOD-828] Add Seek to Cursor.

* [NOD-828] Add First to Cursor.

* [NOD-828] Rename db to accessor in Context.

* [NOD-828] Make Get/Fetch calls return a boolean to indicate whether the requested item was found.

* [NOD-828] Name the output parameters of all Get functions.

* [NOD-828] Make RetrieveFromStore return whether the data was found.

* [NOD-862] Fix merge errors.

* [NOD-862] Fix DAGSetup using bad temp directories.

* [NOD-862] Fix TestProcessDelayedBlocks not closing the database properly.

* [NOD-867] Remove blockIndexBucket from dagio.

* [NOD-867] Fix wrong key in StoreIndexBucket.

* [NOD-867] Migrate DAG state to dbaccess.

* [NOD-867] Remove utxoSetVersionKeyName.

* [NOD-862] Fix merge errors.

* [NOD-867] Move localSubnetworkID into dagState.

* [NOD-867] Fix a comment.

* [NOD-867] Remove an unused function.

* [NOD-867] Migrate the database's UTXO set to dbaccess.

* [NOD-867] Add missing error check.

* [NOD-867] Changed First to not return an error.

* [NOD-867] Make Cursor.Key not return the entire key path.

* [NOD-887] Fix the comment above BlockIndexCursorFrom.

* [NOD-862] Merge flushToDBWithContext and flushToDB.

* [NOD-862] Remove TODO.

* [NOD-862] Add prefix to the temp dir in DAGSetup.

* [NOD-862] Bring back dbFetchBlockByHash.

* [NOD-862] Use BlockDAG.BlockByHash in p2p and rpc.

* [NOD-862] Use daghash.Hash in dbaccess.

* [NOD-862] Add defer to RollbackUnlessClosed after NewTx().

* [NOD-862] Extract dbStoreBlock to a separate function.

* [NOD-867] Remove TODOs.

* [NOD-867] Fix merge errors.

* [NOD-867] Fix comments and errors.

* [NOD-867] Unexport blockIndexKey.

* [NOD-867] Fix merge errors.

* [NOD-867] Move a misplaced comment.

* [NOD-867] Fix an error message.

* [NOD-867] Remove preallocation in initDAGState.

* [NOD-866] Migrate database logic in blockdag/indexers package to dbaccess (#682)

* [NOD-865] Delete blockidhash.go.

* [NOD-865] Remove a lot of no-longer relevant logic from indexers.

* [NOD-865] Pass TxContext to ConnectBlock.

* [NOD-865] Migrate the acceptance index to dbaccess.

* [NOD-865] Fix a block not being sent to ConnectBlock.

* [NOD-865] Pass the block's hash instead of the whole block.

* [NOD-865] Add forgotten Commit call.

* [NOD-865] Add comments.

* [NOD-866] Fix a comment.

* [NOD-866] Fix a comment.

* [NOD-866] Remove pointless indirection in acceptanceindex.

* [NOD-866] Fix comment over ForEachHash.

* [NOD-866] Rename ClearAcceptanceIndex to DropAcceptanceIndex.

* [NOD-866] Explain collecting keys before deleting them.

* [NOD-865] Move misc db logic to db access (#681)

* [NOD-828] Create the database2 package that will some day replace the database package.

* [NOD-828] Implement a "bucket" key mechanism.

* [NOD-828] Move bucket.go into the ffldb2 package.

* [NOD-828] Delete the un-interfaced ffldb package from database2, since we aren't going to be using it anyway.

* [NOD-828] Copy over + fixup flat file structs from the old ffldb.

* [NOD-828] Implement flatFilePath.

* [NOD-828] Implement flatFileStore.write().

* [NOD-828] Implement flatFileStore.read().

* [NOD-828] Implement flatFileStore.rollback().

* [NOD-828] Sync the file to disk at the end of write().

* [NOD-828] Extract crc32ByteOrder to a separate variable.

* [NOD-828] Add a sanity test.

* [NOD-828] Remove context-unrelated methods from the Database interface.

* [NOD-828] Create an ffldb object. Simply work against a context.

* [NOD-828] Open the new database on start.

* [NOD-828] Create the leveldb package.

* [NOD-828] Implement opening/closing leveldb.

* [NOD-828] Implement get/put out of/into leveldb.

* [NOD-828] Implement transactions and make them implement a generic database interface.

* [NOD-828] Write sanity tests for leveldb with and without transactions.

* [NOD-828] Add another case to the transaction sanity test.

* [NOD-828] Implement AppendBlock/RetrieveBlock.

* [NOD-828] Refactor so that concepts such as "block" and "metadata" don't leak into the database package.

* [NOD-828] Add RollbackFlatData to the database interface.

* [NOD-828] Remove anything from dbaccess that I'm not planning to implement as part of this ticket.

* [NOD-828] Implement StoreBlock.

* [NOD-828] Implement FetchBlock.

* [NOD-828] Implement HasBlock.

* [NOD-828] Write a sanity test for block insertion.

* [NOD-828] Implement CurrentFlatDataLocation.

* [NOD-828] Implement storing the current block location.

* [NOD-828] Implement initializing/syncing the flat file block store and the "metadata".

* [NOD-828] Add InitBlockStore to TestBlockStoreSanity.

* [NOD-828] Fix rename errors.

* [NOD-828] Fix lint errors in the root database package.

* [NOD-828] Fix lint errors in the ffldb.go.

* [NOD-828] Fix lint errors in the flatfile/db.go.

* [NOD-828] Rename packages in such a way to make the linter happy.

* [NOD-828] Finish satisfying the linter.

* [NOD-828] Fix doc.go.

* [NOD-828] Fix comments in block.go.

* [NOD-828] Move dbaccess out of the database package.

* [NOD-828] Move opening/closing the database to dbaccess.

* [NOD-828] Move the Database interface to the root database package, since it's meant to be generic.

* [NOD-828] Make ffldb generic to later support additional databases.

* [NOD-828] Make ffldb.Open return DatabaseHandle, since ffldb is no longer exported.

* [NOD-828] Fix comments.

* [NOD-828] Rename AppendFlatData to AppendToStore and RetrieveFlatData to RetrieveFromStore.

* [NOD-828] Make buckets nicer to use.

* [NOD-828] Implement cursors that iterate over some bucket.

* [NOD-828] Generalize flat-file repairing and move block database repairing into ffldb.

* [NOD-828] Write a test making sure that flat file repair works.

* [NOD-828] Properly close the database in TestRepairFlatFiles.

* [NOD-828] Add a comment warning against putting and getting the same data within the same transaction.

* [NOD-828] Fix the flatFilesBucket description.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Fix the ffldb description.

* [NOD-828] Rename Database to DataAccessor and rename Handle to Database.

* [NOD-828] Make Get return nil if the value doesn't exist.

* [NOD-828] Attempt to close leveldb even if closing ffdb failed.

* [NOD-828] Fix a bug where the wrong location would be written to the current store location bucket.

* [NOD-828] Fix not updating the store location in ffldb transactions.

* [NOD-828] Make scanFlatFiles return an error if os.Stat fails for any reason other than file-not-found.

* [NOD-828] Update the README and doc.go.

* [NOD-828] Simplify Bucket.Path().

* [NOD-828] Since LevelDBCursor satisfied the database2.Cursor interface, use it directly.

* [NOD-828] Combine two lines into one.

* [NOD-828] Combine another two lines into one.

* [NOD-828] Move a misplaced comment.

* [NOD-828] Use Wrapf instead of Errorf where appropriate.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Fix Wrapf calls.

* [NOD-828] Fix comments.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Add a comment explaining the use of batches and snapshots.

* [NOD-828] Implement RollbackUnlessClosed().

* [NOD-828] Return both errors in StoreBlock rollback.

* [NOD-828] Move rollback-on-error logic into ffldb. Remove CurrentStoreLocation and RollbackStore from DataAccessor.

* [NOD-828] Make bucket a type alias instead of a struct.

* [NOD-828] Fix a typo.

* [NOD-828] Use copy instead of append in Bucket.

* [NOD-828] Extract flatFileLocationSerializedSize to a const.

* [NOD-828] Debugf -> Warnf in rollback.go.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Remove data length from flat file data format.

* [NOD-828] Rearrange TestLevelDBTransactionSanity a bit.

* [NOD-828] Add stack traces to all errors that come out of library functions.

* [NOD-828] Return errors from rollback().

* [NOD-828] Remove an irrelevant comment.

* [NOD-828] Remove redundant whitespace.

* [NOD-828] Handle nil in FetchBlock.

* [NOD-828] Move the explanation about batches and snapshots to the LevelDBTransaction struct.

* Revert "[NOD-828] Make bucket a type alias instead of a struct."

This reverts commit 1fd39652

* [NOD-828] Fix revert errors.

* Revert "[NOD-828] Remove data length from flat file data format."

This reverts commit ef408e32

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-828] Add Delete to DataAccessor.

* [NOD-865] Move fee data db operations to dbaccess

* [NOD-865] Move reachability data db operations to dbaccess

* [NOD-865] Move UTXO diff data db operations to dbaccess

* [NOD-865] Move subnetwork data db operations to dbaccess

* [NOD-865] Fix createDAGState

* [NOD-865] Remove old Get signature with "exists"

* [NOD-865] Move multiset db operations to dbaccess

* [NOD-865] Use dbaccess transactions where possible

* [NOD-865] Remove old Get signature with "exists"

* [NOD-881] Recover TestGHOSTDAGErrors

* [NOD-865] Create function for db keys

* [NOD-865] Change Exists to Has, and use accessor.Has where possible

* [NOD-865] Make ClearReachabilityData transactive

* [NOD-865] Don't iterate cursors while changing db data

* [NOD-865] Rename RegisterSubnetwork -> StoreSubnetwork

* [NOD-865] Change bucket from utxodiffs to utxo-diffs

* [NOD-865] Rename SubnetworkExists->HasSubnetwork

* [NOD-865] Change a comment

* [NOD-865] Fix tests

* [NOD-865] Fix comment

* [NOD-865] Remove the prefix "db" from some functions

* [NOD-865] Remove redundant comments

* [NOD-865] Make clearBucket function

* [NOD-865] Make clear functions get a dbTx as an arg

* [NOD-865] Remove erroneous tx commit

Co-authored-by: stasatdaglabs <stas@daglabs.com>

* [NOD-868] Delete the old database package (#683)

* [NOD-828] Create the database2 package that will some day replace the database package.

* [NOD-828] Implement a "bucket" key mechanism.

* [NOD-828] Move bucket.go into the ffldb2 package.

* [NOD-828] Delete the un-interfaced ffldb package from database2, since we aren't going to be using it anyway.

* [NOD-828] Copy over + fixup flat file structs from the old ffldb.

* [NOD-828] Implement flatFilePath.

* [NOD-828] Implement flatFileStore.write().

* [NOD-828] Implement flatFileStore.read().

* [NOD-828] Implement flatFileStore.rollback().

* [NOD-828] Sync the file to disk at the end of write().

* [NOD-828] Extract crc32ByteOrder to a separate variable.

* [NOD-828] Add a sanity test.

* [NOD-828] Remove context-unrelated methods from the Database interface.

* [NOD-828] Create an ffldb object. Simply work against a context.

* [NOD-828] Open the new database on start.

* [NOD-828] Create the leveldb package.

* [NOD-828] Implement opening/closing leveldb.

* [NOD-828] Implement get/put out of/into leveldb.

* [NOD-828] Implement transactions and make them implement a generic database interface.

* [NOD-828] Write sanity tests for leveldb with and without transactions.

* [NOD-828] Add another case to the transaction sanity test.

* [NOD-828] Implement AppendBlock/RetrieveBlock.

* [NOD-828] Refactor so that concepts such as "block" and "metadata" don't leak into the database package.

* [NOD-828] Add RollbackFlatData to the database interface.

* [NOD-828] Remove anything from dbaccess that I'm not planning to implement as part of this ticket.

* [NOD-828] Implement StoreBlock.

* [NOD-828] Implement FetchBlock.

* [NOD-828] Implement HasBlock.

* [NOD-828] Write a sanity test for block insertion.

* [NOD-828] Implement CurrentFlatDataLocation.

* [NOD-828] Implement storing the current block location.

* [NOD-828] Implement initializing/syncing the flat file block store and the "metadata".

* [NOD-828] Add InitBlockStore to TestBlockStoreSanity.

* [NOD-828] Fix rename errors.

* [NOD-828] Fix lint errors in the root database package.

* [NOD-828] Fix lint errors in the ffldb.go.

* [NOD-828] Fix lint errors in the flatfile/db.go.

* [NOD-828] Rename packages in such a way to make the linter happy.

* [NOD-828] Finish satisfying the linter.

* [NOD-828] Fix doc.go.

* [NOD-828] Fix comments in block.go.

* [NOD-828] Move dbaccess out of the database package.

* [NOD-828] Move opening/closing the database to dbaccess.

* [NOD-828] Move the Database interface to the root database package, since it's meant to be generic.

* [NOD-828] Make ffldb generic to later support additional databases.

* [NOD-828] Make ffldb.Open return DatabaseHandle, since ffldb is no longer exported.

* [NOD-828] Fix comments.

* [NOD-828] Rename AppendFlatData to AppendToStore and RetrieveFlatData to RetrieveFromStore.

* [NOD-828] Make buckets nicer to use.

* [NOD-828] Implement cursors that iterate over some bucket.

* [NOD-828] Generalize flat-file repairing and move block database repairing into ffldb.

* [NOD-828] Write a test making sure that flat file repair works.

* [NOD-828] Properly close the database in TestRepairFlatFiles.

* [NOD-828] Add a comment warning against putting and getting the same data within the same transaction.

* [NOD-828] Fix the flatFilesBucket description.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Fix the ffldb description.

* [NOD-828] Rename Database to DataAccessor and rename Handle to Database.

* [NOD-828] Make Get return nil if the value doesn't exist.

* [NOD-828] Attempt to close leveldb even if closing ffdb failed.

* [NOD-828] Fix a bug where the wrong location would be written to the current store location bucket.

* [NOD-828] Fix not updating the store location in ffldb transactions.

* [NOD-828] Make scanFlatFiles return an error if os.Stat fails for any reason other than file-not-found.

* [NOD-828] Update the README and doc.go.

* [NOD-828] Simplify Bucket.Path().

* [NOD-828] Since LevelDBCursor satisfied the database2.Cursor interface, use it directly.

* [NOD-828] Combine two lines into one.

* [NOD-828] Combine another two lines into one.

* [NOD-828] Move a misplaced comment.

* [NOD-828] Use Wrapf instead of Errorf where appropriate.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Fix Wrapf calls.

* [NOD-828] Fix comments.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Add a comment explaining the use of batches and snapshots.

* [NOD-828] Implement RollbackUnlessClosed().

* [NOD-828] Return both errors in StoreBlock rollback.

* [NOD-828] Move rollback-on-error logic into ffldb. Remove CurrentStoreLocation and RollbackStore from DataAccessor.

* [NOD-828] Make bucket a type alias instead of a struct.

* [NOD-828] Fix a typo.

* [NOD-828] Use copy instead of append in Bucket.

* [NOD-828] Extract flatFileLocationSerializedSize to a const.

* [NOD-828] Debugf -> Warnf in rollback.go.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Remove data length from flat file data format.

* [NOD-828] Rearrange TestLevelDBTransactionSanity a bit.

* [NOD-828] Add stack traces to all errors that come out of library functions.

* [NOD-828] Return errors from rollback().

* [NOD-828] Remove an irrelevant comment.

* [NOD-828] Remove redundant whitespace.

* [NOD-828] Handle nil in FetchBlock.

* [NOD-828] Move the explanation about batches and snapshots to the LevelDBTransaction struct.

* Revert "[NOD-828] Make bucket a type alias instead of a struct."

This reverts commit 1fd39652

* [NOD-828] Fix revert errors.

* Revert "[NOD-828] Remove data length from flat file data format."

This reverts commit ef408e32

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-828] Add Delete to DataAccessor.

* [NOD-865] Move fee data db operations to dbaccess

* [NOD-865] Move reachability data db operations to dbaccess

* [NOD-865] Move UTXO diff data db operations to dbaccess

* [NOD-865] Move subnetwork data db operations to dbaccess

* [NOD-865] Fix createDAGState

* [NOD-865] Remove old Get signature with "exists"

* [NOD-865] Move multiset db operations to dbaccess

* [NOD-865] Use dbaccess transactions where possible

* [NOD-865] Remove old Get signature with "exists"

* [NOD-881] Recover TestGHOSTDAGErrors

* [NOD-865] Create function for db keys

* [NOD-865] Change Exists to Has, and use accessor.Has where possible

* [NOD-865] Make ClearReachabilityData transactive

* [NOD-865] Don't iterate cursors while changing db data

* [NOD-865] Rename RegisterSubnetwork -> StoreSubnetwork

* [NOD-865] Change bucket from utxodiffs to utxo-diffs

* [NOD-865] Rename SubnetworkExists->HasSubnetwork

* [NOD-865] Change a comment

* [NOD-868] Remove all tests from old database.

* [NOD-868] Remove all unused methods from the old database's interfaces.

* [NOD-865] Fix tests

* [NOD-868] Remove references to DB.

* [NOD-865] Fix comment

* [NOD-868] Remove the old ffldb besides the interface and errors.go.

* [NOD-868] Remove errors.go.

* [NOD-868] Remove the old database package.

* [NOD-868] Add openDB to DAGSetup to emulate the old dbpath in dag.config.

* [NOD-868] Rename database2 to database.

* [NOD-868] Use NewTx instead of NoTx where required.

* [NOD-868] Fix merge errors.

* [NOD-868] Rename dbXXX functions to just xxx.

* [NOD-868] Rename putDAGState to saveDAGState.

* [NOD-868] Replace comments in initDAGState with logs.

* [NOD-868] Explain the openDB parameter in DAGSetup.

* [NOD-868] Fixup doc.go and README.md.

* [NOD-868] Remove pointless transactions.

Co-authored-by: Ori Newman <orinewman1@gmail.com>

* [NOD-805] Fix merge errors.

* [NOD-805] Fix a comment.

* [NOD-805] Don't return virtualTxsAcceptanceData from applyDAGChanges.

* [NOD-805] Add missing error handling in TestAcceptanceDataIndexRecover.

* [NOD-805] Rename blockDAG to dag in indexers/manager.go.

* [NOD-805] Defer cursor.Close() everywhere.

* [NOD-805] Rename scanFlatFiles to findCurrentLocation.

* [NOD-805] Extract crc32ChecksumLength and dataLengthLength to constants.

* [NOD-805] Handle open files properly in rollback.go.

* [NOD-805] Remove unnecessary func wrapper.

* [NOD-805] Remove unnecessary trimming in initialize.

* [NOD-805] Made StoreBlock accept only TxContext.

* [NOD-805] Changed the log level of an error message to Error.

* [NOD-805] Add a note about holding mutexes over deleteFile.

* [NOD-805] Remove a false comment.

* [NOD-805] Fix a comment.

* [NOD-805] Rename blk to block.

* [NOD-805] Extract utxoKey to a separate function.

* [NOD-805] Move dbaccess.xxxKey functions to the tops of their respective files.

* [NOD-805] Fix grammar in dbaccess/db.go.

* [NOD-805] Wrap a failed database corruption recovery error.

* [NOD-805] Split lines with WithStack in them.

* [NOD-805] Fix the comment over initialize.

* [NOD-805] Rename ffdb to flatFileDB and ldb to levelDB.

* [NOD-805] Fix a comment.

* [NOD-805] Fix a comment.

* [NOD-805] Use s.writeCursor instead of cursor.

* [NOD-805] Embed file in lockableFile.

* [NOD-805] the the -> the

* [NOD-805] openDB -> db

* [NOD-805] Use TxContext in all flushToDB functions.

* [NOD-805] Rename context -> dbContext.

* [NOD-805] Reword the comment at the beginning on initDAGState.

* [NOD-805] Explain cursor key trimming.

* [NOD-805] Remove Error from Cursor.

* [NOD-805] Return ErrNotFound from done Cursor Key and Value.

* [NOD-805] Add missing error handling.

* [NOD-805] Fix a comment.

* [NOD-805] Fix a variable name.

* [NOD-805] Remove pointless underscore.

* [NOD-805] Fix a comment.

* [NOD-805] Fix a variable name.

Co-authored-by: Mike Zak <feanorr@gmail.com>
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2020-04-02 13:56:32 +03:00
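Editor's note: the NOD-828/NOD-865/NOD-868 items above describe splitting database access into a generic `database` package plus a `dbaccess` layer. The sketch below reconstructs the resulting interfaces from the commit messages only; the names come from the commits, but the exact signatures are assumptions, not the real kaspad code.

```go
// A minimal sketch of the interfaces implied by the commit messages above.
package database

// DataAccessor is the set of operations shared by the Database itself and by
// transactions over it.
type DataAccessor interface {
	Put(key, value []byte) error
	// Get returns the value for key, or nil if the key does not exist.
	Get(key []byte) ([]byte, error)
	Has(key []byte) (bool, error)
	Delete(key []byte) error
	// AppendToStore appends data to the named flat-file store and returns the
	// serialized location it was written to.
	AppendToStore(storeName string, data []byte) ([]byte, error)
	RetrieveFromStore(storeName string, location []byte) ([]byte, error)
	// Cursor iterates over the given bucket.
	Cursor(bucket []byte) (Cursor, error)
}

// Database is a DataAccessor that can also open transactions (the type the
// commits rename from Handle).
type Database interface {
	DataAccessor
	Begin() (Transaction, error)
	Close() error
}

// Transaction is a DataAccessor that must be committed or rolled back.
type Transaction interface {
	DataAccessor
	Commit() error
	Rollback() error
	RollbackUnlessClosed() error
}

// Cursor iterates over the key/value pairs of a single bucket.
type Cursor interface {
	Next() bool
	Key() ([]byte, error)
	Value() ([]byte, error)
	Close() error
}
```

The diffs further down show the resulting usage pattern on the caller side: open a transaction with dbaccess.NewTx(), defer RollbackUnlessClosed(), and Commit() on success.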
Ori Newman
b20a7a679b [NOD-874] If the node is not current call sm.restartSyncIfNeeded() on handleInvMsg (#684)
* [NOD-874] If the node is not current call sm.restartSyncIfNeeded() on handleInvMsg

* [NOD-874] Check haveUnknownInvBlock before restartSyncIfNeeded

* [NOD-874] Fix comment

* [NOD-874] Fix comment

* [NOD-874] Fix comment
2020-04-01 12:56:10 +03:00
Ori Newman
36d866375e [NOD-881] Don't recalculate subtreesize for children (#678)
* [NOD-881] Don't recalculate subtreesize for children

* [NOD-881] Make BenchmarkReindexInterval clearer

* [NOD-881] Use b.ResetTimer

* [NOD-881] Fix BenchmarkReindexInterval to use b.N
2020-03-31 12:43:02 +03:00
Svarog
024edc30a3 [NOD-857] Add generalized profiler package and use it everywhere (#679)
* [NOD-857] Add generalized profiler package and use it everywhere

* [NOD-857] Dependency-inject log into profiling.Start()
2020-03-31 12:41:21 +03:00
Ori Newman
6aa5e0b5a8 [NOD-882] Remove ecc and hdkeychain (#680)
* [NOD-882] Remove ecc and hdkeychain

* [NOD-882] Remove HDCoinType from dagParams
2020-03-31 10:58:11 +03:00
Mike Zak
1a38550fdd Update to version 0.3.0 2020-03-29 14:15:17 +03:00
stasatdaglabs
3e7ebb5a84 [NOD-861] Get rid of dbtool/fetchblockregion.go. (#667) 2020-03-29 12:47:13 +03:00
Svarog
4bca7342d3 [NOD-883] Fix dockerfile in kaspaminer + set real version for go-libsecp256k1 (#673) 2020-03-26 17:50:09 +02:00
Elichai Turkel
f80908fb4e [NOD-876] Replace ecc with go-secp256k1 for public keys (#670)
* Replace ecc with go-secp256k1 in txscript

* Replace ecc with go-secp256k1 in util and cmd

* Replace ecc.Multiset with secp256k1.MultiSet
2020-03-26 17:03:39 +02:00
stasatdaglabs
e000e10738 [NOD-880] Remove CGO_ENABLED=0 from Dockerfile. (#671) 2020-03-26 14:02:57 +02:00
Ori Newman
d83862f36c [NOD-855] Save ECMH for block utxo and not diff utxo (#669)
* [NOD-855] Save ECMH for each block UTXO

* [NOD-855] Remove UpdateExtraNonce method

* [NOD-855] Remove multiset data from UTXO diffs

* [NOD-855] Fix to fetch multiset of selected parent

* [NOD-855] Don't remove coinbase inputs from multiset

* [NOD-855] Create multisetBucketName on startup

* [NOD-855] Remove multiset from UTXO diff tests

* [NOD-855] clear new entries from multisetstore on saveChangesFromBlock

* [NOD-855] Fix tests

* [NOD-855] Use UnacceptedBlueScore when adding current block transactions to multiset

* [NOD-855] Hash utxo before adding it to multiset

* [NOD-855] Pass isCoinbase to NewUTXOEntry

* [NOD-855] Do not use hash when adding entries to multiset

* [NOD-855] When calculating multiset, replace the unaccepted blue score of selected parent transaction with the block blue score

* [NOD-855] Manually add a chained transaction to a block in TestChainedTransactions

* [NOD-855] Change name and comments

* [NOD-855] Use FindAcceptanceData to find a specific block acceptance data

* [NOD-855] Remove redundant copy of txIn.PreviousOutpoint

* [NOD-855] Use fmt.Sprintf when creating internalRPCError
2020-03-26 13:06:12 +02:00
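Editor's sketch of the per-block multiset bookkeeping the NOD-855 items above describe: start from the selected parent's multiset, remove spent entries (coinbase inputs are not removed), and add newly created outputs committed together with the accepting block's blue score. The map-backed multiset and the string serialization below are stand-ins, not the real ECMH multiset or the go-secp256k1 API.

```go
package main

import "fmt"

// fakeMultiset stands in for the ECMH multiset: only add/remove matter here.
type fakeMultiset map[string]struct{}

func (m fakeMultiset) add(entry string)    { m[entry] = struct{}{} }
func (m fakeMultiset) remove(entry string) { delete(m, entry) }

type outpoint struct {
	txID  string
	index uint32
}

// spentEntry is an outpoint plus the blue score its UTXO entry was accepted with.
type spentEntry struct {
	op        outpoint
	blueScore uint64
}

type tx struct {
	id         string
	isCoinbase bool
	spends     []spentEntry
	outputs    int
}

// applyTx folds one accepted transaction into the block's multiset.
func applyTx(ms fakeMultiset, t tx, blockBlueScore uint64) {
	if !t.isCoinbase {
		for _, s := range t.spends {
			// Spent entries leave the UTXO set, so they leave the multiset too.
			ms.remove(fmt.Sprintf("%s:%d@%d", s.op.txID, s.op.index, s.blueScore))
		}
	}
	for i := 0; i < t.outputs; i++ {
		// New entries carry the blue score of the block that accepts them.
		ms.add(fmt.Sprintf("%s:%d@%d", t.id, i, blockBlueScore))
	}
}

func main() {
	ms := fakeMultiset{}
	// Entry created by an earlier block, at blue score 3.
	ms.add("tx0:0@3")
	// The new block, at blue score 7: a coinbase (inputs skipped) and a spend.
	applyTx(ms, tx{id: "coinbase", isCoinbase: true, outputs: 1}, 7)
	applyTx(ms, tx{id: "tx1", spends: []spentEntry{{outpoint{"tx0", 0}, 3}}, outputs: 1}, 7)
	fmt.Println(len(ms)) // 2: tx0:0 was spent; coinbase:0 and tx1:0 were added
}
```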
Svarog
1020402b34 [NOD-869] Close panicHandlerDone instead of sending an empty struct + use time.After instead of time.Tick (#668) 2020-03-25 16:14:08 +02:00
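A small self-contained illustration (not kaspad code) of the two idioms the commit above switches to: closing a done-channel so every waiter is released, and time.After instead of time.Tick so no ticker is leaked for a one-shot timeout.

```go
package main

import (
	"fmt"
	"time"
)

func worker(done chan struct{}) {
	defer close(done) // closing releases every receiver; a send would release only one
	fmt.Println("doing work")
}

func main() {
	done := make(chan struct{})
	go worker(done)

	select {
	case <-done:
		fmt.Println("worker finished")
	case <-time.After(2 * time.Second): // one-shot timer; time.Tick would leak its ticker
		fmt.Println("timed out")
	}
}
```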
Mike Zak
bc6ce6ed53 Update version to v0.2.0 2020-03-25 11:51:14 +02:00
Ori Newman
d3b1953deb [NOD-848] optimize utxo diffs serialize allocations (#666)
* [NOD-848] Optimize allocations when serializing UTXO diffs

* [NOD-848] Use same UTXO serialization everywhere, and use compression as well

* [NOD-848] Fix usage of wrong buffer

* [NOD-848] Fix tests

* [NOD-848] Fix wire tests

* [NOD-848] Fix tests

* [NOD-848] Remove VLQ

* [NOD-848] Fix comments

* [NOD-848] Add varint for big endian encoding

* [NOD-848] In TestVarIntWire, assume the expected decoded value is the same as the serialization input

* [NOD-848] Serialize outpoint index with big endian varint

* [NOD-848] Remove p2pk from compression support

* [NOD-848] Fix comments

* [NOD-848] Remove p2pk from decompression support

* [NOD-848] Make entry compression optional

* [NOD-848] Fix tests

* [NOD-848] Fix comments and var names

* [NOD-848] Remove UTXO compression

* [NOD-848] Fix tests

* [NOD-848] Remove big endian varint

* [NOD-848] Fix comments

* [NOD-848] Rename ReadVarIntLittleEndian->ReadVarInt and fix WriteVarInt comment

* [NOD-848] Add outpointIndexByteOrder variable

* [NOD-848] Remove redundant comment

* [NOD-848] Fix outpointMaxSerializeSize to the correct value

* [NOD-848] Move subBuffer to utils
2020-03-24 16:44:41 +02:00
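The allocation trick named by the NOD-848 commit title above, in miniature: size the output once, allocate a single buffer, and hand each entry a sub-slice of it instead of allocating per entry. The entry layout here is invented for the example; kaspad's real serialization differs.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const entrySerializedSize = 8 + 8 // value + blue score, in this toy layout

type entry struct {
	value     uint64
	blueScore uint64
}

func serializeEntries(entries []entry) []byte {
	buf := make([]byte, entrySerializedSize*len(entries)) // single allocation
	for i, e := range entries {
		sub := buf[i*entrySerializedSize:][:entrySerializedSize] // sub-buffer, no copy
		binary.LittleEndian.PutUint64(sub[0:8], e.value)
		binary.LittleEndian.PutUint64(sub[8:16], e.blueScore)
	}
	return buf
}

func main() {
	fmt.Println(len(serializeEntries([]entry{{100, 1}, {200, 2}}))) // 32
}
```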
Svarog
3c67215e76 [NOD-796] Upgrade to go 1.14 (#665) 2020-03-22 14:50:13 +02:00
Svarog
586624c836 [NOD-853] Add profiler server to kaspaminer (#664) 2020-03-19 17:19:31 +02:00
Svarog
49855e6333 [NOD-823] Use WithDiffInPlace for the implementation of WithDiff (#657)
* [NOD-823] Use WithDiffInPlace for the implementation of WithDiff

* [NOD-823] Unexport withDiffInPlace
2020-03-17 11:19:02 +02:00
Ori Newman
624249c0f3 [NOD-842] Use flushToDB with the same transaction as everything else in saveChangesFromBlock and never ignore flushToDB errors (#662) 2020-03-16 11:05:17 +02:00
Ori Newman
1cf443a63b [NOD-841] Fix tests to not be dependent on block rate (#661)
* [NOD-841] Fix TestDifficulty

* [NOD-841] Fix TestProcessDelayedBlocks

* [NOD-841] Fix TestCheckBlockSanity

* [NOD-841] Fix TestProcessDelayedBlocks

* [NOD-841] Shorten long lines
2020-03-15 18:08:03 +02:00
Ori Newman
8909679f44 [NOD-818] Remove time adjustment (#658)
* [NOD-818] Remove time adjustment

* [NOD-818] Remove interface ensuring and copyright message

* [NOD-818] Update comment
2020-03-15 17:37:01 +02:00
Ori Newman
e58efbf0ea [NOD-839] Panic from non-rule error from ProcessBlock (#660) 2020-03-15 17:26:53 +02:00
Ori Newman
34fb066590 [NOD-518] Implement getmempoolentry (#656) 2020-03-12 16:00:18 +02:00
stasatdaglabs
299826f392 [NOD-827] Get rid of dbtools insecureimport.go and loadheaders.go (#655)
* [NOD-827] Get rid of dbtools insecureimport.go and loadheaders.go

* [NOD-827] Remove commands from realMain().
2020-03-10 16:31:13 +02:00
stasatdaglabs
3d8dd8724d [NOD-816] Remove TxIndex and AddrIndex (#653)
* [NOD-816] Remove TxIndex.

* [NOD-816] Remove AddrIndex.

* [NOD-816] Remove mentions of TxIndex and AddrIndex.

* [NOD-816] Remove mentions of getrawtransaction.

* [NOD-816] Remove mentions of searchrawtransaction.

* [NOD-816] Remove cmd/addsubnetwork.

* [NOD-816] Fix a comment.

* [NOD-816] Fix a comment.

* [NOD-816] Implement BlockDAG.TxConfirmations.

* [NOD-816] Return confirmations in getTxOut.

* [NOD-816] Rename TxConfirmations to UTXOConfirmations.

* [NOD-816] Rename txConfirmations to utxoConfirmations.

* [NOD-816] Fix capitalization in variable names.

* [NOD-816] Add acceptance index to addblock.

* [NOD-816] Get rid of txrawresult-confirmations.

* [NOD-816] Fix config flag.
2020-03-10 16:09:31 +02:00
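The NOD-816 items above rename TxConfirmations to UTXOConfirmations. One plausible reading, stated here as an assumption rather than a quote from the code, is that a UTXO's confirmation count is the blue-score distance between the virtual block and the block that accepted the entry:

```go
package blockdag

// utxoConfirmations is a hypothetical helper illustrating the assumed formula;
// it is not the actual kaspad implementation.
func utxoConfirmations(virtualBlueScore, entryBlueScore uint64) uint64 {
	if entryBlueScore > virtualBlueScore {
		return 0 // the entry is not yet covered by the virtual block
	}
	return virtualBlueScore - entryBlueScore + 1
}
```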
Svarog
b8a00f7519 [NOD-778] Optimize RestoreUTXO (#652)
* [NOD-778] Add WithDiffInPlace

* [NOD-778] Fix bug in WithDiffInPlace

* [NOD-778] Add comment to WithDiffInPlace

* [NOD-778] Add double dag.restoreUTXO to benchmark, to remove time for hard-disk loading

* [NOD-778] Also test WithDiffInPlace in TestUTXODiffRules

* [NOD-778] Add tests for all cases possible in TestUTXODiffRules

* [NOD-778] Fix test-case 'first in toAdd in this, second in toRemove in this and toAdd in other'

* [NOD-778] Fixed in WithDiffInPlace

* [NOD-778] Update error messages when diffFrom(withDiffResult) fails in TestUTXODiffRules

* [NOD-778] diffFrom: disallow utxos both in d.toAdd, other.toAdd, and only one of d.toRemove and other.toRemove

* [NOD-778] Fix expected value in 'first in toRemove in this, second in toRemove in other'

* [NOD-778] diffFrom: Disallow situations where utxo both in d.toRemove and other.toRemove with different blue scores and no corresponding utxo in d.toAdd

* [NOD-778] WithDiff: Fix faulty logic that allows updates to blue scores

* [NOD-778] Fix WithDiffInPlace to pass all tests

* [NOD-778] Deleted temporary prints

* [NOD-778] Sorted TestUTXODiffRules tests according to spreadsheet

* [NOD-778] Delete deeputxo_test.go

* [NOD-778] Updated comments

* [NOD-778] Re-order

* [NOD-778] Re-order test-cases to be according to spreadsheet

* [NOD-778] Simplified case when both d.toRemove and other.toRemove have the same outpoint in diffFrom

* [NOD-778] Change a few error messages that say 'transaction' instead of 'outpoint'

* [NOD-778] Rename: utxoToAdd/Remove -> entryToAdd/Remove

* [NOD-788] Remove redundant else

* [NOD-778] Rename: existingUTXO -> existingEntry + remove redundant else

* [NOD-778] Correct test name
2020-03-10 15:32:19 +02:00
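Editor's sketch of the toAdd/toRemove bookkeeping the NOD-778 commits above refine. Real kaspad entries carry blue scores, and several commits add extra rules for them; this self-contained version ignores those subtleties and only shows the shape of the in-place composition, plus the allocating WithDiff that the NOD-823 commit earlier in this list reimplements on top of it.

```go
package main

import "fmt"

type Outpoint struct {
	TxID  string
	Index uint32
}

type UTXODiff struct {
	toAdd    map[Outpoint]bool
	toRemove map[Outpoint]bool
}

func NewUTXODiff() *UTXODiff {
	return &UTXODiff{toAdd: map[Outpoint]bool{}, toRemove: map[Outpoint]bool{}}
}

// withDiffInPlace applies other on top of d without allocating a third diff.
func (d *UTXODiff) withDiffInPlace(other *UTXODiff) {
	for op := range other.toRemove {
		if d.toAdd[op] {
			// Added by d and removed by other: the two cancel out.
			delete(d.toAdd, op)
		} else {
			d.toRemove[op] = true
		}
	}
	for op := range other.toAdd {
		if d.toRemove[op] {
			// Removed by d and re-added by other: the two cancel out.
			delete(d.toRemove, op)
		} else {
			d.toAdd[op] = true
		}
	}
}

// WithDiff is the allocating variant: clone, then apply in place.
func (d *UTXODiff) WithDiff(other *UTXODiff) *UTXODiff {
	clone := NewUTXODiff()
	for op := range d.toAdd {
		clone.toAdd[op] = true
	}
	for op := range d.toRemove {
		clone.toRemove[op] = true
	}
	clone.withDiffInPlace(other)
	return clone
}

func main() {
	a, b := NewUTXODiff(), NewUTXODiff()
	a.toAdd[Outpoint{"tx1", 0}] = true
	b.toRemove[Outpoint{"tx1", 0}] = true
	fmt.Println(len(a.WithDiff(b).toAdd)) // 0: the add and the remove cancel
}
```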
stasatdaglabs
4dfc8cf5b0 [NOD-816] Remove addsubnetwork. (#654) 2020-03-10 11:09:33 +02:00
Ori Newman
5a99e4d2f3 [NOD-806] Exit early after panic (#650)
* [NOD-806] After panic, gracefully stop logs, and then exit immediately

* [NOD-806] Convert non-kaspad applications to use the new spawn

* [NOD-806] Fix disabled log at rpcclient

* [NOD-806] Refactor HandlePanic

* [NOD-806] Cancel Logger interface

* [NOD-806] Remove redundant spawn checks from waitgroup_test.go

* [NOD-806] Use caller subsystem when logging panics

* [NOD-806] Fix go vet errors
2020-03-08 11:24:37 +02:00
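A rough sketch (not the actual util/panics package) of what a spawn wrapper that turns a goroutine panic into a logged error followed by an immediate exit might look like, as the NOD-806 commits above describe.

```go
package main

import (
	"log"
	"os"
	"runtime/debug"
)

// spawn runs f in a goroutine and converts any panic into a logged error
// followed by an immediate exit, instead of the default crash.
func spawn(f func()) {
	go func() {
		defer func() {
			if r := recover(); r != nil {
				log.Printf("panic in goroutine: %v\n%s", r, debug.Stack())
				os.Exit(1)
			}
		}()
		f()
	}()
}

func main() {
	spawn(func() { panic("boom") })
	select {} // block so the goroutine runs; the recovered panic exits the process
}
```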
Svarog
606cd668ff [NOD-810] Fix error text in lookupParentNodes (#651) 2020-03-05 15:49:36 +02:00
Ori Newman
dd537f5143 [NOD-808] Use syndtr/goleveldb instead of btcsuite/goleveldb. (#649) 2020-03-05 12:26:48 +02:00
stasatdaglabs
a1c631be62 [NOD-798] Disconnect from a peer if a block received from it gets rejected (#648)
* [NOD-798] Disconnect from a peer if its block gets rejected.

* [NOD-798] Make a comment less ambiguous.
2020-03-03 09:47:22 +02:00
Ori Newman
707a728656 [NOD-552] Add NormalizeRPCServerAddress and use it where needed (#643)
* [NOD-552] Add NormalizeRPCServerAddress and use it where needed

* [NOD-552] Make NormalizeAddress return an error for an invalid address

* [NOD-552] Use longer lines for a comment
2020-03-01 16:37:26 +02:00
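A guess at what an address-normalization helper like the one in the NOD-552 commit above does: append a default RPC port when none is given, and reject addresses that cannot be parsed. This is not the actual kaspad implementation, and the port value is only an example.

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

func normalizeRPCServerAddress(addr, defaultPort string) (string, error) {
	if !strings.Contains(addr, ":") {
		// No port at all: append the default one.
		// (Bare IPv6 literals would need extra bracket handling, omitted here.)
		return net.JoinHostPort(addr, defaultPort), nil
	}
	host, port, err := net.SplitHostPort(addr)
	if err != nil {
		return "", fmt.Errorf("invalid address %q: %w", addr, err)
	}
	if port == "" {
		port = defaultPort
	}
	return net.JoinHostPort(host, port), nil
}

func main() {
	fmt.Println(normalizeRPCServerAddress("localhost", "16110"))
	fmt.Println(normalizeRPCServerAddress("127.0.0.1:18110", "16110"))
}
```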
stasatdaglabs
80b5631a48 [NOD-726] Only print "no sync peer" message when not current (#646)
* [NOD-726] Only print "no sync peer" message when not current.

* [NOD-726] Shorten duration in which "no sync peer" messages would not print.
2020-02-27 17:38:39 +02:00
Ori Newman
2373965551 [NOD-576] Rename NextHashes to ChildHashes in GetBlock/GetBlockHeaders rpc call (#645)
* [NOD-576] Rename NextHashes to ChildHashes in GetBlock/GetBlockHeaders rpc call

* [NOD-576] Fix typo
2020-02-27 17:34:38 +02:00
Ori Newman
65cbb6655b [NOD-661] Change BCDB subsystem tag (for logs) to KSDB (#644) 2020-02-27 17:30:08 +02:00
Ori Newman
cdd96d0670 [NOD-664] Remove version from everything inside kaspad/cmd - use kaspad version instead (#642)
* [NOD-664] Remove version from everything inside kaspad/cmd - use kaspad version instead

* [NOD-664] Fix broken import
2020-02-27 13:26:22 +02:00
Dan Aharoni
ad04bbde83 [NOD-782] Make sure errors.As gets parameter that implements error interface (#641)
* [NOD-782] Make sure errors.As gets parameter that implements error interface.

* [NOD-782] Pass pointer to errors.As
2020-02-27 12:27:38 +02:00
Ori Newman
5374d95416 [NOD-656] Log hashrate in kaspaminer (#632)
* [NOD-656] Log hashrate in kaspaminer

* [NOD-656] Measure hash rate in kilohashes

* [NOD-656] Show hash rate once in 10 seconds

* [NOD-656] Put hash rate logic in a separate function

* [NOD-656] Create logHashRateInterval constant
2020-02-24 11:59:02 +02:00
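A back-of-the-envelope sketch of the hash-rate logging the NOD-656 commits above add: count hashes and report kilohashes per second every ten seconds. The names are illustrative, not the miner's actual ones.

```go
package main

import (
	"log"
	"sync/atomic"
	"time"
)

const logHashRateInterval = 10 * time.Second

func logHashRate(hashCounter *uint64) {
	ticker := time.NewTicker(logHashRateInterval)
	defer ticker.Stop()
	for range ticker.C {
		hashes := atomic.SwapUint64(hashCounter, 0)
		kiloHashesPerSecond := float64(hashes) / logHashRateInterval.Seconds() / 1000
		log.Printf("Current hash rate is %.2f kilohashes/s", kiloHashesPerSecond)
	}
}

func main() {
	var hashCounter uint64
	go logHashRate(&hashCounter)
	for { // stand-in for the actual mining loop
		atomic.AddUint64(&hashCounter, 1)
	}
}
```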
Ori Newman
de9aa39cc5 [NOD-721] Add defers (#638)
* [NOD-721] Defer unlocks

* [NOD-721] Add functions with locks to rpcmodel

* [NOD-721] Defer unlocks

* [NOD-721] Add filterDataWithLock function

* [NOD-721] Defer unlocks

* [NOD-721] Defer .Close()

* [NOD-721] Fix access to wsc.filterData without a lock

* [NOD-721] De-anonymize some anonymous functions

* [NOD-721] Remove redundant assignments

* [NOD-721] Remove redundant assignments

* [NOD-721] Remove redundant assignments

* [NOD-721] Get rid of submitOld, and break handleGetBlockTemplateLongPoll to smaller functions

* [NOD-721] Rename existsUnspentOutpoint->existsUnspentOutpointNoLock, existsUnspentOutpointWithLock->existsUnspentOutpoint

* [NOD-721] Rename filterDataWithLock->FilterData

* [NOD-721] Fixed comments
2020-02-24 09:19:44 +02:00
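The pattern behind the NOD-721 commits above, in miniature: take the lock and defer the unlock in one place, and expose a locking wrapper around an unlocked helper. The types here are illustrative only.

```go
package main

import (
	"fmt"
	"sync"
)

type filter struct {
	mtx  sync.Mutex
	data map[string]bool
}

// filterDataNoLock assumes the caller already holds mtx.
func (f *filter) filterDataNoLock(key string) bool {
	return f.data[key]
}

// FilterData is the exported, locking wrapper.
func (f *filter) FilterData(key string) bool {
	f.mtx.Lock()
	defer f.mtx.Unlock() // deferred so every return path unlocks
	return f.filterDataNoLock(key)
}

func main() {
	f := &filter{data: map[string]bool{"a": true}}
	fmt.Println(f.FilterData("a"))
}
```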
Ori Newman
98987f4a8f [NOD-603] Update validateParents to use reachability (#640)
* [NOD-603] Update validateParents to use reachability

* [NOD-603] Break a long line

* [NOD-721] Remove redundant check if block parent is a tip
2020-02-24 08:59:12 +02:00
Ori Newman
9745f31b69 [NOD-693] Update link to license (#639) 2020-02-20 17:12:53 +02:00
Ori Newman
ee08531a52 [NOD-610] Rename newSet->newBlockSet and setFromSlice->blockSetFromSlice (#635) 2020-02-20 16:19:28 +02:00
stasatdaglabs
61baf7b260 [NOD-769] Add a log for when a reachability reindex occurs (#637)
*  [NOD-719] Added defers to unlocks  (#618)

* [NOD-719] Added defers to unlocks

* [NOD-719] Added another defer to another Unlock

* [NOD-719] Added yet another defer to yet another Unlock

* [NOD-747] Change FinalityInterval to be 24 hours, isCurrent to be true if the DAG's time is less than 12 hours behind the present, and change MaxInvPerMsg to be 1 << 17 (#625)

* [NOD-769] Add a log for when a reachability reindex occurs.

Co-authored-by: Svarog <feanorr@gmail.com>
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2020-02-19 13:39:45 +02:00
Ori Newman
650e4f735e [NOD-757] Readd addrmanager tests (#628) 2020-02-18 18:12:19 +02:00
Dan Aharoni
550b12b041 [NOD-772] Fix a bug where we ignore the return value of forAllOutboundPeers. (#636) 2020-02-18 18:02:15 +02:00
Ori Newman
a4bb070722 [NOD-754] Fix staticcheck errors (#627)
* [NOD-754] Fix staticcheck errors

* [NOD-754] Remove some unused exported functions

* [NOD-754] Fix staticcheck errors

* [NOD-754] Don't panic if out/in close fails

* [NOD-754] Wrap outside errors with custom message
2020-02-18 16:56:38 +02:00
Ori Newman
30fe0c279b [NOD-738] Move rpcmodel helper functions to pointers package (#629)
* [NOD-738] Move rpcmodel helper functions to copytopointer package

* [NOD-738] Rename copytopointer->pointers
2020-02-18 14:06:34 +02:00
stasatdaglabs
e405dd5981 [NOD-694] Fix requesting blocks that will surely be orphaned during netsync. (#630) 2020-02-18 12:12:34 +02:00
stasatdaglabs
243b4b8021 [NOD-765] Fix database corruption after restart in reachabilitystore and utxodiffstore. (#634) 2020-02-18 12:04:50 +02:00
Ori Newman
dd4c93e1ef [NOD-759] Merge v0.1.1-dev into v0.1.2-dev (#633)
*  [NOD-719] Added defers to unlocks  (#618)

* [NOD-719] Added defers to unlocks

* [NOD-719] Added another defer to another Unlock

* [NOD-719] Added yet another defer to yet another Unlock

* [NOD-747] Change FinalityInterval to be 24 hours, isCurrent to be true if the DAG's time is less than 12 hours behind the present, and change MaxInvPerMsg to be 1 << 17 (#625)
2020-02-18 11:02:25 +02:00
Ori Newman
a07335d74d [NOD-737] Remove btc prefix from util file names (#631) 2020-02-17 13:11:24 +02:00
Dan Aharoni
7567cd4cb9 [NOD-744] Wrap go routines with spawn (#626)
* [NOD-744] Wrap go routines with spawn

* [NOD-747] Wrap some more go routines with spawn

* [NOD-744] Some more missing go routines

* [NOD-744] Break lines to make the code more readable

* [NOD-744] Declare a local scope variable so the func would use it.

* [NOD-744] Fix type and update comment.

* [NOD-744] Declare local var so go routine would use it

* [NOD-744] Rename variable, use normal assignment;

* [NOD-744] Rename variable.
2020-02-13 13:10:07 +02:00
stasatdaglabs
51ff9e2562 [NOD-571] Cover ghostdag in tests where possible (#613)
* [NOD-571] Cover reachabilityInterval split methods.

* [NOD-571] Cover reindexInterval.

* [NOD-571] Cover reachability String() methods.

* [NOD-571] Cover blueAnticoneSize.

* [NOD-571] Remove unnecessary error from setTreeNode.

* [NOD-571] Add TestGHOSTDAGErrors.

* [NOD-571] Use PrepareBlockForTest in TestBlueAnticoneSizeErrors.

* [NOD-571] Use PrepareBlockForTest in TestGHOSTDAGErrors.

* [NOD-571] Add substring checks to TestSplitFractionErrors.

* [NOD-571] Add substring checks to TestSplitExactErrors and TestSplitWithExponentialBiasErrors.

* [NOD-571] Add comments to TestReindexIntervalErrors.

* [NOD-571] Add additional info in some error messages.

* [NOD-571] Fix error messages.
2020-02-09 11:27:10 +02:00
stasatdaglabs
5b8ab63890 [NOD-717] Fix nodes getting stuck in an infinite loop in addrManager.getAddress (#624)
* [NOD-717] Fix nodes getting stuck in an infinite loop in addrManager.getAddress.

* [NOD-717] Rename ResetFailedAttempts -> NotifyConnectionRequestComplete.
2020-02-06 18:17:10 +02:00
Dan Aharoni
3dd7dc4496 [NOD-727] Do not allow delayed blocks from RPC. (#623)
* [NOD-727] Do not allow delayed blocks from RPC.

* [NOD-727] Refactor sentFromRPC -> DisallowDelay

* [NOD-727] Clarify comment; Clarify error message.

* [NOD-727] Change error message.
2020-02-05 11:14:26 +02:00
Ori Newman
d90a08ecfa [NOD-722] Fix processBlockMsg case in blockHandler to send only one response to msg.reply, and rename blockHandler->messageHandler (#622) 2020-02-04 18:10:15 +02:00
Ori Newman
45dc1a3e7b [NOD-545] Remove headers first related logic (#621)
* [NOD-545] Remove headers first related logic

* [NOD-545] Fix tests

* [NOD-545] Change getTopHeadersMaxHeaders to be equal to getHeadersMaxHeaders
2020-02-04 14:54:42 +02:00
Ori Newman
4ffb5daa37 [NOD-622] Fix populateTemplateFromCandidates to sort txsForBlockTemplate.txMasses and txsForBlockTemplate.txFees (#617)
* [NOD-622] Fix populateTemplateFromCandidates to sort txsForBlockTemplate.txMasses and txsForBlockTemplate.txFees

* [NOD-622] Sort transactions in PrepareBlockForTest

* [NOD-622] Remove duplicate append of selected transactions
2020-02-03 13:42:40 +02:00
Ori Newman
b9138b720d [NOD-597] Make BlockIndex clear its dirty entries only after it has successfully written them to disk (#620) 2020-02-03 13:39:25 +02:00
Ori Newman
d8954f1339 [NOD-615] Make bluesAnticoneSizes a map with *blockNode as a key (#619) 2020-02-03 12:40:39 +02:00
Ori Newman
eb953286ec [NOD-641] Upgrade to github.com/pkg/errors v0.9.1 and use errors.As where needed (#614)
* [NOD-641] Upgrade to github.com/pkg/errors v0.9.1 and use errors.As where needed

* [NOD-641] Fix find and replace error

* [NOD-641] Use errors.As for error type checking

* [NOD-641] Fix errors.As for pointer types

* [NOD-641] Use errors.As where needed

* [NOD-641] Rename rErr->ruleErr

* [NOD-641] Rename derr->dbErr

* [NOD-641] e->flagsErr where necessary

* [NOD-641] change jerr to more appropriate name

* [NOD-641] Rename cerr->bdRuleErr

* [NOD-641] Rename serr->scriptErr

* [NOD-641] Use errors.Is instead of testutil.AreErrorsEqual in TestNewHashFromStr

* [NOD-641] Rename bdRuleErr->dagRuleErr

* [NOD-641] Rename mErr->msgErr

* [NOD-641] Rename dErr->deserializeErr
2020-02-03 12:38:33 +02:00
Ori Newman
41c8178ad3 [NOD-648] Add TestProcessDelayedBlocks (#612)
* [NOD-648] Add TestProcessDelayedBlocks

* [NOD-648] Add one second to secondsUntilDelayedBlockIsValid to make sure the delayedBlock timestamp will be valid, and add comments

* [NOD-648] Remove redundant import

* [NOD-648] Use fakeTimeSource instead of time.Sleep

* [NOD-648] Rename dag.HaveBlock->dag.IsKnownBlock,  dag.BlockExists->dag.IsInDAG

* [NOD-648] Add comment

* [NOD-641] Rename HaveBlock->IsKnownBlock, BlockExists->IsInDAG
2020-02-03 11:30:03 +02:00
Ori Newman
aa74b51e6f [NOD-687] Remove -gcflags='-l' from all tests (#616) 2020-02-02 15:26:26 +02:00
Mike Zak
f7800eb5c4 Merge remote-tracking branch 'origin/v0.1.1-dev' into v0.1.2-dev 2020-02-02 15:17:25 +02:00
Ori Newman
44c55900f8 [NOD-715] Replace testDbRoot with os.TempDir() (#611) 2020-01-30 12:54:15 +02:00
Ori Newman
4c0ea78026 [NOD-586] Remove subTreeSize from reachabilityTreeNode (#610)
* [NOD-586] Remove subTreeSize from reachabilityTreeNode

* [NOD-586] Convert else { if { ... } } to else if { ... }
2020-01-30 10:39:53 +02:00
stasatdaglabs
03a93fe51e [NOD-647] Create a default config file even if the sample default config file is missing (#609)
* [NOD-647] Create a default config file even if the sample default config file is missing.

* [NOD-647] Unfancify WriteString().
2020-01-29 17:40:59 +02:00
Mike Zak
eca0514465 Merge remote-tracking branch 'origin/v0.1.1-dev' into v0.1.2-dev 2020-01-28 14:39:22 +02:00
Mike Zak
5daab45947 Merge remote-tracking branch 'origin/v0.1.1-dev' into v0.1.2-dev 2020-01-28 12:25:52 +02:00
Mike Zak
25bdaeed31 Merge remote-tracking branch 'origin/v0.1.1-dev' into v0.1.2-dev 2020-01-28 11:35:21 +02:00
Mike Zak
a3dc2f7da7 Update version to v0.1.2 2020-01-28 10:53:10 +02:00
407 changed files with 12671 additions and 34533 deletions

View File

@@ -4,7 +4,7 @@ Kaspad
Warning: This is pre-alpha software. There's no guarantee anything works.
====
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad)
Kaspad is the reference full node Kaspa implementation written in Go (golang).
@@ -75,5 +75,5 @@ The documentation is a work-in-progress. It is located in the [docs](https://git
## License
Kaspad is licensed under the [copyfree](http://copyfree.org) ISC License.
Kaspad is licensed under the copyfree [ISC License](https://choosealicense.com/licenses/isc/).

View File

@@ -717,11 +717,7 @@ func (a *AddrManager) Start() {
// Start the address ticker to save addresses periodically.
a.wg.Add(1)
spawn(a.addressHandler, a.handlePanic)
}
func (a *AddrManager) handlePanic() {
atomic.AddInt32(&a.shutdown, 1)
spawn(a.addressHandler)
}
// Stop gracefully shuts down the address manager by stopping the main handler.

View File

@@ -5,8 +5,14 @@
package addrmgr
import (
"fmt"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util/subnetworkid"
"net"
"reflect"
"testing"
"time"
"github.com/pkg/errors"
@@ -108,6 +114,551 @@ func TestStartStop(t *testing.T) {
}
}
func TestAddAddressByIP(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
fmtErr := errors.Errorf("")
addrErr := &net.AddrError{}
var tests = []struct {
addrIP string
err error
}{
{
someIP + ":16111",
nil,
},
{
someIP,
addrErr,
},
{
someIP[:12] + ":8333",
fmtErr,
},
{
someIP + ":abcd",
fmtErr,
},
}
amgr := New("testaddressbyip", nil, nil)
for i, test := range tests {
err := amgr.AddAddressByIP(test.addrIP, nil)
if test.err != nil && err == nil {
t.Errorf("TestAddAddressByIP test %d failed expected an error and got none", i)
continue
}
if test.err == nil && err != nil {
t.Errorf("TestAddAddressByIP test %d failed expected no error and got one", i)
continue
}
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
t.Errorf("TestAddAddressByIP test %d failed got %v, want %v", i,
reflect.TypeOf(err), reflect.TypeOf(test.err))
continue
}
}
}
func TestAddLocalAddress(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
var tests = []struct {
address wire.NetAddress
priority AddressPriority
valid bool
}{
{
wire.NetAddress{IP: net.ParseIP("192.168.0.100")},
InterfacePrio,
false,
},
{
wire.NetAddress{IP: net.ParseIP("204.124.1.1")},
InterfacePrio,
true,
},
{
wire.NetAddress{IP: net.ParseIP("204.124.1.1")},
BoundPrio,
true,
},
{
wire.NetAddress{IP: net.ParseIP("::1")},
InterfacePrio,
false,
},
{
wire.NetAddress{IP: net.ParseIP("fe80::1")},
InterfacePrio,
false,
},
{
wire.NetAddress{IP: net.ParseIP("2620:100::1")},
InterfacePrio,
true,
},
}
amgr := New("testaddlocaladdress", nil, nil)
for x, test := range tests {
result := amgr.AddLocalAddress(&test.address, test.priority)
if result == nil && !test.valid {
t.Errorf("TestAddLocalAddress test #%d failed: %s should have "+
"been accepted", x, test.address.IP)
continue
}
if result != nil && test.valid {
t.Errorf("TestAddLocalAddress test #%d failed: %s should not have "+
"been accepted", x, test.address.IP)
continue
}
}
}
func TestAttempt(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
n := New("testattempt", lookupFunc, nil)
// Add a new address and get it
err := n.AddAddressByIP(someIP+":8333", nil)
if err != nil {
t.Fatalf("Adding address failed: %v", err)
}
ka := n.GetAddress()
if !ka.LastAttempt().IsZero() {
t.Errorf("Address should not have attempts, but does")
}
na := ka.NetAddress()
n.Attempt(na)
if ka.LastAttempt().IsZero() {
t.Errorf("Address should have an attempt, but does not")
}
}
func TestConnected(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
n := New("testconnected", lookupFunc, nil)
// Add a new address and get it
err := n.AddAddressByIP(someIP+":8333", nil)
if err != nil {
t.Fatalf("Adding address failed: %v", err)
}
ka := n.GetAddress()
na := ka.NetAddress()
// make it an hour ago
na.Timestamp = time.Unix(time.Now().Add(time.Hour*-1).Unix(), 0)
n.Connected(na)
if !ka.NetAddress().Timestamp.After(na.Timestamp) {
t.Errorf("Address should have a new timestamp, but does not")
}
}
func TestNeedMoreAddresses(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
n := New("testneedmoreaddresses", lookupFunc, nil)
addrsToAdd := 1500
b := n.NeedMoreAddresses()
if !b {
t.Errorf("Expected that we need more addresses")
}
addrs := make([]*wire.NetAddress, addrsToAdd)
var err error
for i := 0; i < addrsToAdd; i++ {
s := fmt.Sprintf("%d.%d.173.147:8333", i/128+60, i%128+60)
addrs[i], err = n.DeserializeNetAddress(s)
if err != nil {
t.Errorf("Failed to turn %s into an address: %v", s, err)
}
}
srcAddr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)
n.AddAddresses(addrs, srcAddr, nil)
numAddrs := n.TotalNumAddresses()
if numAddrs > addrsToAdd {
t.Errorf("Number of addresses is too many %d vs %d", numAddrs, addrsToAdd)
}
b = n.NeedMoreAddresses()
if b {
t.Errorf("Expected that we don't need more addresses")
}
}
func TestGood(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
n := New("testgood", lookupFunc, nil)
addrsToAdd := 64 * 64
addrs := make([]*wire.NetAddress, addrsToAdd)
subnetworkCount := 32
subnetworkIDs := make([]*subnetworkid.SubnetworkID, subnetworkCount)
var err error
for i := 0; i < addrsToAdd; i++ {
s := fmt.Sprintf("%d.173.147.%d:8333", i/64+60, i%64+60)
addrs[i], err = n.DeserializeNetAddress(s)
if err != nil {
t.Errorf("Failed to turn %s into an address: %v", s, err)
}
}
for i := 0; i < subnetworkCount; i++ {
subnetworkIDs[i] = &subnetworkid.SubnetworkID{0xff - byte(i)}
}
srcAddr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)
n.AddAddresses(addrs, srcAddr, nil)
for i, addr := range addrs {
n.Good(addr, subnetworkIDs[i%subnetworkCount])
}
numAddrs := n.TotalNumAddresses()
if numAddrs >= addrsToAdd {
t.Errorf("Number of addresses is too many: %d vs %d", numAddrs, addrsToAdd)
}
numCache := len(n.AddressCache(true, nil))
if numCache == 0 || numCache >= numAddrs/4 {
t.Errorf("Number of addresses in cache: got %d, want positive and less than %d",
numCache, numAddrs/4)
}
for i := 0; i < subnetworkCount; i++ {
numCache = len(n.AddressCache(false, subnetworkIDs[i]))
if numCache == 0 || numCache >= numAddrs/subnetworkCount {
t.Errorf("Number of addresses in subnetwork cache: got %d, want positive and less than %d",
numCache, numAddrs/4/subnetworkCount)
}
}
}
func TestGoodChangeSubnetworkID(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
n := New("test_good_change_subnetwork_id", lookupFunc, nil)
addr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)
addrKey := NetAddressKey(addr)
srcAddr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)
oldSubnetwork := subnetworkid.SubnetworkIDNative
n.AddAddress(addr, srcAddr, oldSubnetwork)
n.Good(addr, oldSubnetwork)
// make sure address was saved to addrIndex under oldSubnetwork
ka := n.find(addr)
if ka == nil {
t.Fatalf("Address was not found after first time .Good called")
}
if !ka.SubnetworkID().IsEqual(oldSubnetwork) {
t.Fatalf("Address index did not point to oldSubnetwork")
}
// make sure address was added to correct bucket under oldSubnetwork
bucket := n.addrTried[*oldSubnetwork][n.getTriedBucket(addr)]
wasFound := false
for e := bucket.Front(); e != nil; e = e.Next() {
if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey {
wasFound = true
}
}
if !wasFound {
t.Fatalf("Address was not found in the correct bucket in oldSubnetwork")
}
// now call .Good again with a different subnetwork
newSubnetwork := subnetworkid.SubnetworkIDRegistry
n.Good(addr, newSubnetwork)
// make sure address was updated in addrIndex under newSubnetwork
ka = n.find(addr)
if ka == nil {
t.Fatalf("Address was not found after second time .Good called")
}
if !ka.SubnetworkID().IsEqual(newSubnetwork) {
t.Fatalf("Address index did not point to newSubnetwork")
}
// make sure address was removed from bucket under oldSubnetwork
bucket = n.addrTried[*oldSubnetwork][n.getTriedBucket(addr)]
wasFound = false
for e := bucket.Front(); e != nil; e = e.Next() {
if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey {
wasFound = true
}
}
if wasFound {
t.Fatalf("Address was not removed from bucket in oldSubnetwork")
}
// make sure address was added to correct bucket under newSubnetwork
bucket = n.addrTried[*newSubnetwork][n.getTriedBucket(addr)]
wasFound = false
for e := bucket.Front(); e != nil; e = e.Next() {
if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey {
wasFound = true
}
}
if !wasFound {
t.Fatalf("Address was not found in the correct bucket in newSubnetwork")
}
}
func TestGetAddress(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
localSubnetworkID := &subnetworkid.SubnetworkID{0xff}
n := New("testgetaddress", lookupFunc, localSubnetworkID)
// Get an address from an empty set (should error)
if rv := n.GetAddress(); rv != nil {
t.Errorf("GetAddress failed: got: %v want: %v\n", rv, nil)
}
// Add a new address and get it
err := n.AddAddressByIP(someIP+":8332", localSubnetworkID)
if err != nil {
t.Fatalf("Adding address failed: %v", err)
}
ka := n.GetAddress()
if ka == nil {
t.Fatalf("Did not get an address where there is one in the pool")
}
n.Attempt(ka.NetAddress())
// Checks that we don't get it if we find that it has a different subnetwork ID than expected.
actualSubnetworkID := &subnetworkid.SubnetworkID{0xfe}
n.Good(ka.NetAddress(), actualSubnetworkID)
ka = n.GetAddress()
if ka != nil {
t.Errorf("Didn't expect to get an address because there shouldn't be any address from subnetwork ID %s or nil", localSubnetworkID)
}
// Checks that the total number of addresses was incremented although the new address is not a full node or a partial node of the same subnetwork as the local node.
numAddrs := n.TotalNumAddresses()
if numAddrs != 1 {
t.Errorf("Wrong number of addresses: got %d, want %d", numAddrs, 1)
}
// Now we repeat the same process, but now the address has the expected subnetwork ID.
// Add a new address and get it
err = n.AddAddressByIP(someIP+":8333", localSubnetworkID)
if err != nil {
t.Fatalf("Adding address failed: %v", err)
}
ka = n.GetAddress()
if ka == nil {
t.Fatalf("Did not get an address where there is one in the pool")
}
if ka.NetAddress().IP.String() != someIP {
t.Errorf("Wrong IP: got %v, want %v", ka.NetAddress().IP.String(), someIP)
}
if !ka.SubnetworkID().IsEqual(localSubnetworkID) {
t.Errorf("Wrong Subnetwork ID: got %v, want %v", *ka.SubnetworkID(), localSubnetworkID)
}
n.Attempt(ka.NetAddress())
// Mark this as a good address and get it
n.Good(ka.NetAddress(), localSubnetworkID)
ka = n.GetAddress()
if ka == nil {
t.Fatalf("Did not get an address where there is one in the pool")
}
if ka.NetAddress().IP.String() != someIP {
t.Errorf("Wrong IP: got %v, want %v", ka.NetAddress().IP.String(), someIP)
}
if *ka.SubnetworkID() != *localSubnetworkID {
t.Errorf("Wrong Subnetwork ID: got %v, want %v", ka.SubnetworkID(), localSubnetworkID)
}
numAddrs = n.TotalNumAddresses()
if numAddrs != 2 {
t.Errorf("Wrong number of addresses: got %d, want %d", numAddrs, 1)
}
}
func TestGetBestLocalAddress(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
localAddrs := []wire.NetAddress{
{IP: net.ParseIP("192.168.0.100")},
{IP: net.ParseIP("::1")},
{IP: net.ParseIP("fe80::1")},
{IP: net.ParseIP("2001:470::1")},
}
var tests = []struct {
remoteAddr wire.NetAddress
want0 wire.NetAddress
want1 wire.NetAddress
want2 wire.NetAddress
want3 wire.NetAddress
}{
{
// Remote connection from public IPv4
wire.NetAddress{IP: net.ParseIP("204.124.8.1")},
wire.NetAddress{IP: net.IPv4zero},
wire.NetAddress{IP: net.IPv4zero},
wire.NetAddress{IP: net.ParseIP("204.124.8.100")},
wire.NetAddress{IP: net.ParseIP("fd87:d87e:eb43:25::1")},
},
{
// Remote connection from private IPv4
wire.NetAddress{IP: net.ParseIP("172.16.0.254")},
wire.NetAddress{IP: net.IPv4zero},
wire.NetAddress{IP: net.IPv4zero},
wire.NetAddress{IP: net.IPv4zero},
wire.NetAddress{IP: net.IPv4zero},
},
{
// Remote connection from public IPv6
wire.NetAddress{IP: net.ParseIP("2602:100:abcd::102")},
wire.NetAddress{IP: net.IPv6zero},
wire.NetAddress{IP: net.ParseIP("2001:470::1")},
wire.NetAddress{IP: net.ParseIP("2001:470::1")},
wire.NetAddress{IP: net.ParseIP("2001:470::1")},
},
/* XXX
{
// Remote connection from Tor
wire.NetAddress{IP: net.ParseIP("fd87:d87e:eb43::100")},
wire.NetAddress{IP: net.IPv4zero},
wire.NetAddress{IP: net.ParseIP("204.124.8.100")},
wire.NetAddress{IP: net.ParseIP("fd87:d87e:eb43:25::1")},
},
*/
}
amgr := New("testgetbestlocaladdress", nil, nil)
// Test against default when there's no address
for x, test := range tests {
got := amgr.GetBestLocalAddress(&test.remoteAddr)
if !test.want0.IP.Equal(got.IP) {
t.Errorf("TestGetBestLocalAddress test1 #%d failed for remote address %s: want %s got %s",
x, test.remoteAddr.IP, test.want1.IP, got.IP)
continue
}
}
for _, localAddr := range localAddrs {
amgr.AddLocalAddress(&localAddr, InterfacePrio)
}
// Test against want1
for x, test := range tests {
got := amgr.GetBestLocalAddress(&test.remoteAddr)
if !test.want1.IP.Equal(got.IP) {
t.Errorf("TestGetBestLocalAddress test1 #%d failed for remote address %s: want %s got %s",
x, test.remoteAddr.IP, test.want1.IP, got.IP)
continue
}
}
// Add a public IP to the list of local addresses.
localAddr := wire.NetAddress{IP: net.ParseIP("204.124.8.100")}
amgr.AddLocalAddress(&localAddr, InterfacePrio)
// Test against want2
for x, test := range tests {
got := amgr.GetBestLocalAddress(&test.remoteAddr)
if !test.want2.IP.Equal(got.IP) {
t.Errorf("TestGetBestLocalAddress test2 #%d failed for remote address %s: want %s got %s",
x, test.remoteAddr.IP, test.want2.IP, got.IP)
continue
}
}
/*
// Add a Tor generated IP address
localAddr = wire.NetAddress{IP: net.ParseIP("fd87:d87e:eb43:25::1")}
amgr.AddLocalAddress(&localAddr, ManualPrio)
// Test against want3
for x, test := range tests {
got := amgr.GetBestLocalAddress(&test.remoteAddr)
if !test.want3.IP.Equal(got.IP) {
t.Errorf("TestGetBestLocalAddress test3 #%d failed for remote address %s: want %s got %s",
x, test.remoteAddr.IP, test.want3.IP, got.IP)
continue
}
}
*/
}
func TestNetAddressKey(t *testing.T) {
addNaTests()

View File

@@ -10,4 +10,4 @@ import (
)
var log, _ = logger.Get(logger.SubsystemTags.ADXR)
var spawn = panics.GoroutineWrapperFuncWithPanicHandler(log)
var spawn = panics.GoroutineWrapperFunc(log)

addrmgr/network_test.go Normal file (222 added lines)
View File

@@ -0,0 +1,222 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package addrmgr_test
import (
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/dagconfig"
"net"
"testing"
"github.com/kaspanet/kaspad/addrmgr"
"github.com/kaspanet/kaspad/wire"
)
// TestIPTypes ensures the various functions which determine the type of an IP
// address based on RFCs work as intended.
func TestIPTypes(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
type ipTest struct {
in wire.NetAddress
rfc1918 bool
rfc2544 bool
rfc3849 bool
rfc3927 bool
rfc3964 bool
rfc4193 bool
rfc4380 bool
rfc4843 bool
rfc4862 bool
rfc5737 bool
rfc6052 bool
rfc6145 bool
rfc6598 bool
local bool
valid bool
routable bool
}
newIPTest := func(ip string, rfc1918, rfc2544, rfc3849, rfc3927, rfc3964,
rfc4193, rfc4380, rfc4843, rfc4862, rfc5737, rfc6052, rfc6145, rfc6598,
local, valid, routable bool) ipTest {
nip := net.ParseIP(ip)
na := *wire.NewNetAddressIPPort(nip, 16111, wire.SFNodeNetwork)
test := ipTest{na, rfc1918, rfc2544, rfc3849, rfc3927, rfc3964, rfc4193, rfc4380,
rfc4843, rfc4862, rfc5737, rfc6052, rfc6145, rfc6598, local, valid, routable}
return test
}
tests := []ipTest{
newIPTest("10.255.255.255", true, false, false, false, false, false,
false, false, false, false, false, false, false, false, true, false),
newIPTest("192.168.0.1", true, false, false, false, false, false,
false, false, false, false, false, false, false, false, true, false),
newIPTest("172.31.255.1", true, false, false, false, false, false,
false, false, false, false, false, false, false, false, true, false),
newIPTest("172.32.1.1", false, false, false, false, false, false, false, false,
false, false, false, false, false, false, true, true),
newIPTest("169.254.250.120", false, false, false, true, false, false,
false, false, false, false, false, false, false, false, true, false),
newIPTest("0.0.0.0", false, false, false, false, false, false, false,
false, false, false, false, false, false, true, false, false),
newIPTest("255.255.255.255", false, false, false, false, false, false,
false, false, false, false, false, false, false, false, false, false),
newIPTest("127.0.0.1", false, false, false, false, false, false,
false, false, false, false, false, false, false, true, true, false),
newIPTest("fd00:dead::1", false, false, false, false, false, true,
false, false, false, false, false, false, false, false, true, false),
newIPTest("2001::1", false, false, false, false, false, false,
true, false, false, false, false, false, false, false, true, true),
newIPTest("2001:10:abcd::1:1", false, false, false, false, false, false,
false, true, false, false, false, false, false, false, true, false),
newIPTest("fe80::1", false, false, false, false, false, false,
false, false, true, false, false, false, false, false, true, false),
newIPTest("fe80:1::1", false, false, false, false, false, false,
false, false, false, false, false, false, false, false, true, true),
newIPTest("64:ff9b::1", false, false, false, false, false, false,
false, false, false, false, true, false, false, false, true, true),
newIPTest("::ffff:abcd:ef12:1", false, false, false, false, false, false,
false, false, false, false, false, false, false, false, true, true),
newIPTest("::1", false, false, false, false, false, false, false, false,
false, false, false, false, false, true, true, false),
newIPTest("198.18.0.1", false, true, false, false, false, false, false,
false, false, false, false, false, false, false, true, false),
newIPTest("100.127.255.1", false, false, false, false, false, false, false,
false, false, false, false, false, true, false, true, false),
newIPTest("203.0.113.1", false, false, false, false, false, false, false,
false, false, false, false, false, false, false, true, false),
}
t.Logf("Running %d tests", len(tests))
for _, test := range tests {
if rv := addrmgr.IsRFC1918(&test.in); rv != test.rfc1918 {
t.Errorf("IsRFC1918 %s\n got: %v want: %v", test.in.IP, rv, test.rfc1918)
}
if rv := addrmgr.IsRFC3849(&test.in); rv != test.rfc3849 {
t.Errorf("IsRFC3849 %s\n got: %v want: %v", test.in.IP, rv, test.rfc3849)
}
if rv := addrmgr.IsRFC3927(&test.in); rv != test.rfc3927 {
t.Errorf("IsRFC3927 %s\n got: %v want: %v", test.in.IP, rv, test.rfc3927)
}
if rv := addrmgr.IsRFC3964(&test.in); rv != test.rfc3964 {
t.Errorf("IsRFC3964 %s\n got: %v want: %v", test.in.IP, rv, test.rfc3964)
}
if rv := addrmgr.IsRFC4193(&test.in); rv != test.rfc4193 {
t.Errorf("IsRFC4193 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4193)
}
if rv := addrmgr.IsRFC4380(&test.in); rv != test.rfc4380 {
t.Errorf("IsRFC4380 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4380)
}
if rv := addrmgr.IsRFC4843(&test.in); rv != test.rfc4843 {
t.Errorf("IsRFC4843 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4843)
}
if rv := addrmgr.IsRFC4862(&test.in); rv != test.rfc4862 {
t.Errorf("IsRFC4862 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4862)
}
if rv := addrmgr.IsRFC6052(&test.in); rv != test.rfc6052 {
t.Errorf("isRFC6052 %s\n got: %v want: %v", test.in.IP, rv, test.rfc6052)
}
if rv := addrmgr.IsRFC6145(&test.in); rv != test.rfc6145 {
t.Errorf("IsRFC1918 %s\n got: %v want: %v", test.in.IP, rv, test.rfc6145)
}
if rv := addrmgr.IsLocal(&test.in); rv != test.local {
t.Errorf("IsLocal %s\n got: %v want: %v", test.in.IP, rv, test.local)
}
if rv := addrmgr.IsValid(&test.in); rv != test.valid {
t.Errorf("IsValid %s\n got: %v want: %v", test.in.IP, rv, test.valid)
}
if rv := addrmgr.IsRoutable(&test.in); rv != test.routable {
t.Errorf("IsRoutable %s\n got: %v want: %v", test.in.IP, rv, test.routable)
}
}
}
// TestGroupKey tests the GroupKey function to ensure it properly groups various
// IP addresses.
func TestGroupKey(t *testing.T) {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
defer config.SetActiveConfig(originalActiveCfg)
tests := []struct {
name string
ip string
expected string
}{
// Local addresses.
{name: "ipv4 localhost", ip: "127.0.0.1", expected: "local"},
{name: "ipv6 localhost", ip: "::1", expected: "local"},
{name: "ipv4 zero", ip: "0.0.0.0", expected: "local"},
{name: "ipv4 first octet zero", ip: "0.1.2.3", expected: "local"},
// Unroutable addresses.
{name: "ipv4 invalid bcast", ip: "255.255.255.255", expected: "unroutable"},
{name: "ipv4 rfc1918 10/8", ip: "10.1.2.3", expected: "unroutable"},
{name: "ipv4 rfc1918 172.16/12", ip: "172.16.1.2", expected: "unroutable"},
{name: "ipv4 rfc1918 192.168/16", ip: "192.168.1.2", expected: "unroutable"},
{name: "ipv6 rfc3849 2001:db8::/32", ip: "2001:db8::1234", expected: "unroutable"},
{name: "ipv4 rfc3927 169.254/16", ip: "169.254.1.2", expected: "unroutable"},
{name: "ipv6 rfc4193 fc00::/7", ip: "fc00::1234", expected: "unroutable"},
{name: "ipv6 rfc4843 2001:10::/28", ip: "2001:10::1234", expected: "unroutable"},
{name: "ipv6 rfc4862 fe80::/64", ip: "fe80::1234", expected: "unroutable"},
// IPv4 normal.
{name: "ipv4 normal class a", ip: "12.1.2.3", expected: "12.1.0.0"},
{name: "ipv4 normal class b", ip: "173.1.2.3", expected: "173.1.0.0"},
{name: "ipv4 normal class c", ip: "196.1.2.3", expected: "196.1.0.0"},
// IPv6/IPv4 translations.
{name: "ipv6 rfc3964 with ipv4 encap", ip: "2002:0c01:0203::", expected: "12.1.0.0"},
{name: "ipv6 rfc4380 toredo ipv4", ip: "2001:0:1234::f3fe:fdfc", expected: "12.1.0.0"},
{name: "ipv6 rfc6052 well-known prefix with ipv4", ip: "64:ff9b::0c01:0203", expected: "12.1.0.0"},
{name: "ipv6 rfc6145 translated ipv4", ip: "::ffff:0:0c01:0203", expected: "12.1.0.0"},
// Tor.
{name: "ipv6 tor onioncat", ip: "fd87:d87e:eb43:1234::5678", expected: "unroutable"},
{name: "ipv6 tor onioncat 2", ip: "fd87:d87e:eb43:1245::6789", expected: "unroutable"},
{name: "ipv6 tor onioncat 3", ip: "fd87:d87e:eb43:1345::6789", expected: "unroutable"},
// IPv6 normal.
{name: "ipv6 normal", ip: "2602:100::1", expected: "2602:100::"},
{name: "ipv6 normal 2", ip: "2602:0100::1234", expected: "2602:100::"},
{name: "ipv6 hurricane electric", ip: "2001:470:1f10:a1::2", expected: "2001:470:1000::"},
{name: "ipv6 hurricane electric 2", ip: "2001:0470:1f10:a1::2", expected: "2001:470:1000::"},
}
for i, test := range tests {
nip := net.ParseIP(test.ip)
na := *wire.NewNetAddressIPPort(nip, 8333, wire.SFNodeNetwork)
if key := addrmgr.GroupKey(&na); key != test.expected {
t.Errorf("TestGroupKey #%d (%s): unexpected group key "+
"- got '%s', want '%s'", i, test.name,
key, test.expected)
}
}
}

View File

@@ -1,7 +1,7 @@
blockchain
==========
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad/blockchain)
Package blockdag implements Kaspa block handling, organization of the blockDAG,

View File

@@ -6,16 +6,27 @@ package blockdag
import (
"fmt"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
)
func (dag *BlockDAG) addNodeToIndexWithInvalidAncestor(block *util.Block) error {
blockHeader := &block.MsgBlock().Header
newNode, _ := dag.newBlockNode(blockHeader, newSet())
newNode, _ := dag.newBlockNode(blockHeader, newBlockSet())
newNode.status = statusInvalidAncestor
dag.index.AddNode(newNode)
return dag.index.flushToDB()
dbTx, err := dbaccess.NewTx()
if err != nil {
return err
}
defer dbTx.RollbackUnlessClosed()
err = dag.index.flushToDB(dbTx)
if err != nil {
return err
}
return dbTx.Commit()
}
// maybeAcceptBlock potentially accepts a block into the block DAG. It
@@ -30,7 +41,8 @@ func (dag *BlockDAG) addNodeToIndexWithInvalidAncestor(block *util.Block) error
func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) error {
parents, err := lookupParentNodes(block, dag)
if err != nil {
if rErr, ok := err.(RuleError); ok && rErr.ErrorCode == ErrInvalidAncestorBlock {
var ruleErr RuleError
if ok := errors.As(err, &ruleErr); ok && ruleErr.ErrorCode == ErrInvalidAncestorBlock {
err := dag.addNodeToIndexWithInvalidAncestor(block)
if err != nil {
return err
@@ -60,13 +72,26 @@ func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) er
// expensive connection logic. It also has some other nice properties
// such as making blocks that never become part of the DAG or
// blocks that fail to connect available for further analysis.
err = dag.db.Update(func(dbTx database.Tx) error {
err := dbStoreBlock(dbTx, block)
dbTx, err := dbaccess.NewTx()
if err != nil {
return err
}
defer dbTx.RollbackUnlessClosed()
blockExists, err := dbaccess.HasBlock(dbTx, block.Hash())
if err != nil {
return err
}
if !blockExists {
err := storeBlock(dbTx, block)
if err != nil {
return err
}
return dag.index.flushToDBWithTx(dbTx)
})
}
err = dag.index.flushToDB(dbTx)
if err != nil {
return err
}
err = dbTx.Commit()
if err != nil {
return err
}
@@ -112,14 +137,14 @@ func lookupParentNodes(block *util.Block, blockDAG *BlockDAG) (blockSet, error)
header := block.MsgBlock().Header
parentHashes := header.ParentHashes
nodes := newSet()
nodes := newBlockSet()
for _, parentHash := range parentHashes {
node := blockDAG.index.LookupNode(parentHash)
if node == nil {
str := fmt.Sprintf("parent block %s is unknown", parentHashes)
str := fmt.Sprintf("parent block %s is unknown", parentHash)
return nil, ruleError(ErrParentBlockUnknown, str)
} else if blockDAG.index.NodeStatus(node).KnownInvalid() {
str := fmt.Sprintf("parent block %s is known to be invalid", parentHashes)
str := fmt.Sprintf("parent block %s is known to be invalid", parentHash)
return nil, ruleError(ErrInvalidAncestorBlock, str)
}

View File

@@ -1,6 +1,7 @@
package blockdag
import (
"errors"
"path/filepath"
"testing"
@@ -9,7 +10,7 @@ import (
func TestMaybeAcceptBlockErrors(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestMaybeAcceptBlockErrors", Config{
dag, teardownFunc, err := DAGSetup("TestMaybeAcceptBlockErrors", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
@@ -33,8 +34,8 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
"Expected: %s, got: <nil>", ErrParentBlockUnknown)
}
ruleErr, ok := err.(RuleError)
if !ok {
var ruleErr RuleError
if ok := errors.As(err, &ruleErr); !ok {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
"Expected RuleError but got %s", err)
} else if ruleErr.ErrorCode != ErrParentBlockUnknown {
@@ -71,8 +72,7 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are invalid: "+
"Expected: %s, got: <nil>", ErrInvalidAncestorBlock)
}
ruleErr, ok = err.(RuleError)
if !ok {
if ok := errors.As(err, &ruleErr); !ok {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are invalid: "+
"Expected RuleError but got %s", err)
} else if ruleErr.ErrorCode != ErrInvalidAncestorBlock {
@@ -91,8 +91,7 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block due to bad context: "+
"Expected: %s, got: <nil>", ErrUnexpectedDifficulty)
}
ruleErr, ok = err.(RuleError)
if !ok {
if ok := errors.As(err, &ruleErr); !ok {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block due to bad context: "+
"Expected RuleError but got %s", err)
} else if ruleErr.ErrorCode != ErrUnexpectedDifficulty {

View File

@@ -10,7 +10,7 @@ import (
// TestBlockHeap tests pushing, popping, and determining the length of the heap.
func TestBlockHeap(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestBlockHeap", Config{
dag, teardownFunc, err := DAGSetup("TestBlockHeap", true, Config{
DAGParams: &dagconfig.MainnetParams,
})
if err != nil {
@@ -19,12 +19,12 @@ func TestBlockHeap(t *testing.T) {
defer teardownFunc()
block0Header := dagconfig.MainnetParams.GenesisBlock.Header
block0, _ := dag.newBlockNode(&block0Header, newSet())
block0, _ := dag.newBlockNode(&block0Header, newBlockSet())
block100000Header := Block100000.Header
block100000, _ := dag.newBlockNode(&block100000Header, setFromSlice(block0))
block100000, _ := dag.newBlockNode(&block100000Header, blockSetFromSlice(block0))
block0smallHash, _ := dag.newBlockNode(&block0Header, newSet())
block0smallHash, _ := dag.newBlockNode(&block0Header, newBlockSet())
block0smallHash.hash = &daghash.Hash{}
tests := []struct {

View File

@@ -1,136 +0,0 @@
package blockdag
import (
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/pkg/errors"
)
var (
// idByHashIndexBucketName is the name of the db bucket used to house
// the block hash -> block id index.
idByHashIndexBucketName = []byte("idbyhashidx")
// hashByIDIndexBucketName is the name of the db bucket used to house
// the block id -> block hash index.
hashByIDIndexBucketName = []byte("hashbyididx")
currentBlockIDKey = []byte("currentblockid")
)
// -----------------------------------------------------------------------------
// This is a mapping between block hashes and unique IDs. The ID
// is simply a sequentially incremented uint64 that is used instead of block hash
// for the indexers. This is useful because it is only 8 bytes versus 32 bytes
// hashes and thus saves a ton of space when a block is referenced in an index.
// It consists of three buckets: the first bucket maps the hash of each
// block to the unique ID and the second maps that ID back to the block hash.
// The third bucket contains the last received block ID, and is used
// when starting the node to check that the enabled indexes are up to date
// with the latest received block, and if not, to initiate a recovery process.
//
// The serialized format for keys and values in the block hash to ID bucket is:
// <hash> = <ID>
//
// Field Type Size
// hash daghash.Hash 32 bytes
// ID uint64 8 bytes
// -----
// Total: 40 bytes
//
// The serialized format for keys and values in the ID to block hash bucket is:
// <ID> = <hash>
//
// Field Type Size
// ID uint64 8 bytes
// hash daghash.Hash 32 bytes
// -----
// Total: 40 bytes
//
// -----------------------------------------------------------------------------
const blockIDSize = 8 // 8 bytes for block ID
// DBFetchBlockIDByHash uses an existing database transaction to retrieve the
// block id for the provided hash from the index.
func DBFetchBlockIDByHash(dbTx database.Tx, hash *daghash.Hash) (uint64, error) {
hashIndex := dbTx.Metadata().Bucket(idByHashIndexBucketName)
serializedID := hashIndex.Get(hash[:])
if serializedID == nil {
return 0, errors.Errorf("no entry in the block ID index for block with hash %s", hash)
}
return DeserializeBlockID(serializedID), nil
}
// DBFetchBlockHashBySerializedID uses an existing database transaction to
// retrieve the hash for the provided serialized block id from the index.
func DBFetchBlockHashBySerializedID(dbTx database.Tx, serializedID []byte) (*daghash.Hash, error) {
idIndex := dbTx.Metadata().Bucket(hashByIDIndexBucketName)
hashBytes := idIndex.Get(serializedID)
if hashBytes == nil {
return nil, errors.Errorf("no entry in the block ID index for block with id %d", byteOrder.Uint64(serializedID))
}
var hash daghash.Hash
copy(hash[:], hashBytes)
return &hash, nil
}
// dbPutBlockIDIndexEntry uses an existing database transaction to update or add
// the index entries for the hash to id and id to hash mappings for the provided
// values.
func dbPutBlockIDIndexEntry(dbTx database.Tx, hash *daghash.Hash, serializedID []byte) error {
// Add the block hash to ID mapping to the index.
meta := dbTx.Metadata()
hashIndex := meta.Bucket(idByHashIndexBucketName)
if err := hashIndex.Put(hash[:], serializedID[:]); err != nil {
return err
}
// Add the block ID to hash mapping to the index.
idIndex := meta.Bucket(hashByIDIndexBucketName)
return idIndex.Put(serializedID[:], hash[:])
}
// DBFetchCurrentBlockID returns the last known block ID.
func DBFetchCurrentBlockID(dbTx database.Tx) uint64 {
serializedID := dbTx.Metadata().Get(currentBlockIDKey)
if serializedID == nil {
return 0
}
return DeserializeBlockID(serializedID)
}
// DeserializeBlockID returns a deserialized block id
func DeserializeBlockID(serializedID []byte) uint64 {
return byteOrder.Uint64(serializedID)
}
// SerializeBlockID returns a serialized block id
func SerializeBlockID(blockID uint64) []byte {
serializedBlockID := make([]byte, blockIDSize)
byteOrder.PutUint64(serializedBlockID, blockID)
return serializedBlockID
}
// DBFetchBlockHashByID uses an existing database transaction to retrieve the
// hash for the provided block id from the index.
func DBFetchBlockHashByID(dbTx database.Tx, id uint64) (*daghash.Hash, error) {
return DBFetchBlockHashBySerializedID(dbTx, SerializeBlockID(id))
}
func createBlockID(dbTx database.Tx, blockHash *daghash.Hash) (uint64, error) {
currentBlockID := DBFetchCurrentBlockID(dbTx)
newBlockID := currentBlockID + 1
serializedNewBlockID := SerializeBlockID(newBlockID)
err := dbTx.Metadata().Put(currentBlockIDKey, serializedNewBlockID)
if err != nil {
return 0, err
}
err = dbPutBlockIDIndexEntry(dbTx, blockHash, serializedNewBlockID)
if err != nil {
return 0, err
}
return newBlockID, nil
}
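The file removed above stored block IDs as fixed 8-byte values keyed both ways (hash to ID and ID to hash). A minimal sketch of the round trip for that 8-byte encoding, assuming little-endian byte order purely for illustration (the original package defines byteOrder elsewhere):

package main

import (
	"encoding/binary"
	"fmt"
)

// byteOrder stands in for the package-level byteOrder used above;
// little-endian is an assumption made for the sake of the example.
var byteOrder = binary.LittleEndian

const blockIDSize = 8 // 8 bytes for block ID

func serializeBlockID(blockID uint64) []byte {
	serialized := make([]byte, blockIDSize)
	byteOrder.PutUint64(serialized, blockID)
	return serialized
}

func deserializeBlockID(serialized []byte) uint64 {
	return byteOrder.Uint64(serialized)
}

func main() {
	id := uint64(42)
	// Round trip: 42 -> 8 bytes -> 42.
	fmt.Println(deserializeBlockID(serializeBlockID(id)) == id) // true
}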

View File

@@ -5,10 +5,10 @@
package blockdag
import (
"github.com/kaspanet/kaspad/dbaccess"
"sync"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/daghash"
)
@@ -18,7 +18,6 @@ type blockIndex struct {
// The following fields are set when the instance is created and can't
// be changed afterwards, so there is no need to protect them with a
// separate mutex.
db database.DB
dagParams *dagconfig.Params
sync.RWMutex
@@ -29,9 +28,8 @@ type blockIndex struct {
// newBlockIndex returns a new empty instance of a block index. The index will
// be dynamically populated as block nodes are loaded from the database and
// manually added.
func newBlockIndex(db database.DB, dagParams *dagconfig.Params) *blockIndex {
func newBlockIndex(dagParams *dagconfig.Params) *blockIndex {
return &blockIndex{
db: db,
dagParams: dagParams,
index: make(map[daghash.Hash]*blockNode),
dirty: make(map[*blockNode]struct{}),
@@ -43,8 +41,8 @@ func newBlockIndex(db database.DB, dagParams *dagconfig.Params) *blockIndex {
// This function is safe for concurrent access.
func (bi *blockIndex) HaveBlock(hash *daghash.Hash) bool {
bi.RLock()
defer bi.RUnlock()
_, hasBlock := bi.index[*hash]
bi.RUnlock()
return hasBlock
}
@@ -54,8 +52,8 @@ func (bi *blockIndex) HaveBlock(hash *daghash.Hash) bool {
// This function is safe for concurrent access.
func (bi *blockIndex) LookupNode(hash *daghash.Hash) *blockNode {
bi.RLock()
defer bi.RUnlock()
node := bi.index[*hash]
bi.RUnlock()
return node
}
@@ -65,9 +63,9 @@ func (bi *blockIndex) LookupNode(hash *daghash.Hash) *blockNode {
// This function is safe for concurrent access.
func (bi *blockIndex) AddNode(node *blockNode) {
bi.Lock()
defer bi.Unlock()
bi.addNode(node)
bi.dirty[node] = struct{}{}
bi.Unlock()
}
// addNode adds the provided node to the block index, but does not mark it as
@@ -83,8 +81,8 @@ func (bi *blockIndex) addNode(node *blockNode) {
// This function is safe for concurrent access.
func (bi *blockIndex) NodeStatus(node *blockNode) blockStatus {
bi.RLock()
defer bi.RUnlock()
status := node.status
bi.RUnlock()
return status
}
@@ -95,9 +93,9 @@ func (bi *blockIndex) NodeStatus(node *blockNode) blockStatus {
// This function is safe for concurrent access.
func (bi *blockIndex) SetStatusFlags(node *blockNode, flags blockStatus) {
bi.Lock()
defer bi.Unlock()
node.status |= flags
bi.dirty[node] = struct{}{}
bi.Unlock()
}
// UnsetStatusFlags flips the provided status flags on the block node to off,
@@ -106,22 +104,13 @@ func (bi *blockIndex) SetStatusFlags(node *blockNode, flags blockStatus) {
// This function is safe for concurrent access.
func (bi *blockIndex) UnsetStatusFlags(node *blockNode, flags blockStatus) {
bi.Lock()
defer bi.Unlock()
node.status &^= flags
bi.dirty[node] = struct{}{}
bi.Unlock()
}
// flushToDB writes all dirty block nodes to the database. If all writes
// succeed, this clears the dirty set.
func (bi *blockIndex) flushToDB() error {
return bi.db.Update(func(dbTx database.Tx) error {
return bi.flushToDBWithTx(dbTx)
})
}
// flushToDBWithTx writes all dirty block nodes to the database. If all
// writes succeed, this clears the dirty set.
func (bi *blockIndex) flushToDBWithTx(dbTx database.Tx) error {
// flushToDB writes all dirty block nodes to the database.
func (bi *blockIndex) flushToDB(dbContext *dbaccess.TxContext) error {
bi.Lock()
defer bi.Unlock()
if len(bi.dirty) == 0 {
@@ -129,13 +118,19 @@ func (bi *blockIndex) flushToDBWithTx(dbTx database.Tx) error {
}
for node := range bi.dirty {
err := dbStoreBlockNode(dbTx, node)
serializedBlockNode, err := serializeBlockNode(node)
if err != nil {
return err
}
key := blockIndexKey(node.hash, node.blueScore)
err = dbaccess.StoreIndexBlock(dbContext, key, serializedBlockNode)
if err != nil {
return err
}
}
// If write was successful, clear the dirty set.
bi.dirty = make(map[*blockNode]struct{})
return nil
}
func (bi *blockIndex) clearDirtyEntries() {
bi.dirty = make(map[*blockNode]struct{})
}
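The rewritten flushToDB above keeps the same dirty-set bookkeeping but writes through dbaccess instead of a database.Tx. A stripped-down sketch of that bookkeeping pattern, using illustrative types rather than the package's real ones:

package main

import "fmt"

type node struct{ name string }

// index tracks which nodes changed since the last successful flush.
type index struct {
	dirty map[*node]struct{}
}

// markDirty records a node so the next flush persists it.
func (ix *index) markDirty(n *node) { ix.dirty[n] = struct{}{} }

// flush writes every dirty node and clears the set only if all writes succeed,
// mirroring how flushToDB resets bi.dirty only after the loop completes.
func (ix *index) flush(store func(*node) error) error {
	for n := range ix.dirty {
		if err := store(n); err != nil {
			return err // keep the dirty set so a retry re-writes everything
		}
	}
	ix.dirty = make(map[*node]struct{})
	return nil
}

func main() {
	ix := &index{dirty: make(map[*node]struct{})}
	ix.markDirty(&node{name: "block A"})
	_ = ix.flush(func(n *node) error { fmt.Println("storing", n.name); return nil })
}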

View File

@@ -10,7 +10,7 @@ import (
func TestAncestorErrors(t *testing.T) {
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimnetParams
dag, teardownFunc, err := DAGSetup("TestAncestorErrors", Config{
dag, teardownFunc, err := DAGSetup("TestAncestorErrors", true, Config{
DAGParams: &params,
})
if err != nil {
@@ -18,7 +18,7 @@ func TestAncestorErrors(t *testing.T) {
}
defer teardownFunc()
node := newTestNode(dag, newSet(), int32(0x10000000), 0, time.Unix(0, 0))
node := newTestNode(dag, newBlockSet(), int32(0x10000000), 0, time.Unix(0, 0))
node.blueScore = 2
ancestor := node.SelectedAncestor(3)
if ancestor != nil {

View File

@@ -31,11 +31,6 @@ const (
// statusInvalidAncestor indicates that one of the block's ancestors
// has failed validation, thus the block is also invalid.
statusInvalidAncestor
// statusNone indicates that the block has no validation state flags set.
//
// NOTE: This must be defined last in order to avoid influencing iota.
statusNone blockStatus = 0
)
// KnownValid returns whether the block is known to be valid. This will return
@@ -80,7 +75,7 @@ type blockNode struct {
// bluesAnticoneSizes is a map holding the set of blues affected by this block and their
// modified blue anticone size.
bluesAnticoneSizes map[daghash.Hash]dagconfig.KType
bluesAnticoneSizes map[*blockNode]dagconfig.KType
// hash is the double sha 256 of the block.
hash *daghash.Hash
@@ -115,8 +110,8 @@ func (dag *BlockDAG) newBlockNode(blockHeader *wire.BlockHeader, parents blockSe
parents: parents,
children: make(blockSet),
blueScore: math.MaxUint64, // Initialized to the max value to avoid collisions with the genesis block
timestamp: dag.AdjustedTime().Unix(),
bluesAnticoneSizes: make(map[daghash.Hash]dagconfig.KType),
timestamp: dag.Now().Unix(),
bluesAnticoneSizes: make(map[*blockNode]dagconfig.KType),
}
// blockHeader is nil only for the virtual block
@@ -184,7 +179,7 @@ func (node *blockNode) Header() *wire.BlockHeader {
//
// This function is safe for concurrent access.
func (node *blockNode) SelectedAncestor(blueScore uint64) *blockNode {
if blueScore < 0 || blueScore > node.blueScore {
if blueScore > node.blueScore {
return nil
}

View File

@@ -9,7 +9,7 @@ import (
// This test ensures that the values of bluesAnticoneSizes are serialized to the size of KType.
// We verify that by serializing and deserializing the block while making sure that we stay within the expected range.
func TestBlueAnticoneSizesSize(t *testing.T) {
dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizesSize", Config{
dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizesSize", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
@@ -25,16 +25,17 @@ func TestBlueAnticoneSizesSize(t *testing.T) {
}
blockHeader := dagconfig.SimnetParams.GenesisBlock.Header
node, _ := dag.newBlockNode(&blockHeader, newSet())
hash := daghash.Hash{1}
node, _ := dag.newBlockNode(&blockHeader, newBlockSet())
fakeBlue := &blockNode{hash: &daghash.Hash{1}}
dag.index.AddNode(fakeBlue)
// Setting maxKType to maximum value of KType.
// As we verify above that KType is unsigned we can be sure that maxKType is indeed the maximum value of KType.
maxKType := ^dagconfig.KType(0)
node.bluesAnticoneSizes[hash] = maxKType
node.bluesAnticoneSizes[fakeBlue] = maxKType
serializedNode, _ := serializeBlockNode(node)
deserializedNode, _ := dag.deserializeBlockNode(serializedNode)
if deserializedNode.bluesAnticoneSizes[hash] != maxKType {
if deserializedNode.bluesAnticoneSizes[fakeBlue] != maxKType {
t.Fatalf("TestBlueAnticoneSizesSize: BlueAnticoneSize should not change when deserializing. Expected: %v but got %v",
maxKType, deserializedNode.bluesAnticoneSizes[hash])
maxKType, deserializedNode.bluesAnticoneSizes[fakeBlue])
}
}

View File

@@ -9,14 +9,14 @@ import (
// blockSet implements a basic unsorted set of blocks
type blockSet map[*blockNode]struct{}
// newSet creates a new, empty BlockSet
func newSet() blockSet {
// newBlockSet creates a new, empty BlockSet
func newBlockSet() blockSet {
return map[*blockNode]struct{}{}
}
// setFromSlice converts a slice of blockNodes into an unordered set represented as map
func setFromSlice(nodes ...*blockNode) blockSet {
set := newSet()
// blockSetFromSlice converts a slice of blockNodes into an unordered set represented as map
func blockSetFromSlice(nodes ...*blockNode) blockSet {
set := newBlockSet()
for _, node := range nodes {
set.add(node)
}
@@ -36,7 +36,7 @@ func (bs blockSet) remove(node *blockNode) {
// clone clones this block set
func (bs blockSet) clone() blockSet {
clone := newSet()
clone := newBlockSet()
for node := range bs {
clone.add(node)
}
@@ -45,7 +45,7 @@ func (bs blockSet) clone() blockSet {
// subtract returns the difference between the BlockSet and another BlockSet
func (bs blockSet) subtract(other blockSet) blockSet {
diff := newSet()
diff := newBlockSet()
for node := range bs {
if !other.contains(node) {
diff.add(node)
@@ -102,17 +102,6 @@ func (bs blockSet) String() string {
return strings.Join(nodeStrs, ",")
}
// anyChildInSet returns true iff any child of node is contained within this set
func (bs blockSet) anyChildInSet(node *blockNode) bool {
for child := range node.children {
if bs.contains(child) {
return true
}
}
return false
}
func (bs blockSet) bluest() *blockNode {
var bluestNode *blockNode
var maxScore uint64

View File

@@ -8,7 +8,7 @@ import (
)
func TestHashes(t *testing.T) {
bs := setFromSlice(
bs := blockSetFromSlice(
&blockNode{
hash: &daghash.Hash{3},
},
@@ -49,33 +49,33 @@ func TestBlockSetSubtract(t *testing.T) {
}{
{
name: "both sets empty",
setA: setFromSlice(),
setB: setFromSlice(),
expectedResult: setFromSlice(),
setA: blockSetFromSlice(),
setB: blockSetFromSlice(),
expectedResult: blockSetFromSlice(),
},
{
name: "subtract an empty set",
setA: setFromSlice(node1),
setB: setFromSlice(),
expectedResult: setFromSlice(node1),
setA: blockSetFromSlice(node1),
setB: blockSetFromSlice(),
expectedResult: blockSetFromSlice(node1),
},
{
name: "subtract from empty set",
setA: setFromSlice(),
setB: setFromSlice(node1),
expectedResult: setFromSlice(),
setA: blockSetFromSlice(),
setB: blockSetFromSlice(node1),
expectedResult: blockSetFromSlice(),
},
{
name: "subtract unrelated set",
setA: setFromSlice(node1),
setB: setFromSlice(node2),
expectedResult: setFromSlice(node1),
setA: blockSetFromSlice(node1),
setB: blockSetFromSlice(node2),
expectedResult: blockSetFromSlice(node1),
},
{
name: "typical case",
setA: setFromSlice(node1, node2),
setB: setFromSlice(node2, node3),
expectedResult: setFromSlice(node1),
setA: blockSetFromSlice(node1, node2),
setB: blockSetFromSlice(node2, node3),
expectedResult: blockSetFromSlice(node1),
},
}
@@ -101,33 +101,33 @@ func TestBlockSetAddSet(t *testing.T) {
}{
{
name: "both sets empty",
setA: setFromSlice(),
setB: setFromSlice(),
expectedResult: setFromSlice(),
setA: blockSetFromSlice(),
setB: blockSetFromSlice(),
expectedResult: blockSetFromSlice(),
},
{
name: "add an empty set",
setA: setFromSlice(node1),
setB: setFromSlice(),
expectedResult: setFromSlice(node1),
setA: blockSetFromSlice(node1),
setB: blockSetFromSlice(),
expectedResult: blockSetFromSlice(node1),
},
{
name: "add to empty set",
setA: setFromSlice(),
setB: setFromSlice(node1),
expectedResult: setFromSlice(node1),
setA: blockSetFromSlice(),
setB: blockSetFromSlice(node1),
expectedResult: blockSetFromSlice(node1),
},
{
name: "add already added member",
setA: setFromSlice(node1, node2),
setB: setFromSlice(node1),
expectedResult: setFromSlice(node1, node2),
setA: blockSetFromSlice(node1, node2),
setB: blockSetFromSlice(node1),
expectedResult: blockSetFromSlice(node1, node2),
},
{
name: "typical case",
setA: setFromSlice(node1, node2),
setB: setFromSlice(node2, node3),
expectedResult: setFromSlice(node1, node2, node3),
setA: blockSetFromSlice(node1, node2),
setB: blockSetFromSlice(node2, node3),
expectedResult: blockSetFromSlice(node1, node2, node3),
},
}
@@ -153,33 +153,33 @@ func TestBlockSetAddSlice(t *testing.T) {
}{
{
name: "add empty slice to empty set",
set: setFromSlice(),
set: blockSetFromSlice(),
slice: []*blockNode{},
expectedResult: setFromSlice(),
expectedResult: blockSetFromSlice(),
},
{
name: "add an empty slice",
set: setFromSlice(node1),
set: blockSetFromSlice(node1),
slice: []*blockNode{},
expectedResult: setFromSlice(node1),
expectedResult: blockSetFromSlice(node1),
},
{
name: "add to empty set",
set: setFromSlice(),
set: blockSetFromSlice(),
slice: []*blockNode{node1},
expectedResult: setFromSlice(node1),
expectedResult: blockSetFromSlice(node1),
},
{
name: "add already added member",
set: setFromSlice(node1, node2),
set: blockSetFromSlice(node1, node2),
slice: []*blockNode{node1},
expectedResult: setFromSlice(node1, node2),
expectedResult: blockSetFromSlice(node1, node2),
},
{
name: "typical case",
set: setFromSlice(node1, node2),
set: blockSetFromSlice(node1, node2),
slice: []*blockNode{node2, node3},
expectedResult: setFromSlice(node1, node2, node3),
expectedResult: blockSetFromSlice(node1, node2, node3),
},
}
@@ -205,33 +205,33 @@ func TestBlockSetUnion(t *testing.T) {
}{
{
name: "both sets empty",
setA: setFromSlice(),
setB: setFromSlice(),
expectedResult: setFromSlice(),
setA: blockSetFromSlice(),
setB: blockSetFromSlice(),
expectedResult: blockSetFromSlice(),
},
{
name: "union against an empty set",
setA: setFromSlice(node1),
setB: setFromSlice(),
expectedResult: setFromSlice(node1),
setA: blockSetFromSlice(node1),
setB: blockSetFromSlice(),
expectedResult: blockSetFromSlice(node1),
},
{
name: "union from an empty set",
setA: setFromSlice(),
setB: setFromSlice(node1),
expectedResult: setFromSlice(node1),
setA: blockSetFromSlice(),
setB: blockSetFromSlice(node1),
expectedResult: blockSetFromSlice(node1),
},
{
name: "union with subset",
setA: setFromSlice(node1, node2),
setB: setFromSlice(node1),
expectedResult: setFromSlice(node1, node2),
setA: blockSetFromSlice(node1, node2),
setB: blockSetFromSlice(node1),
expectedResult: blockSetFromSlice(node1, node2),
},
{
name: "typical case",
setA: setFromSlice(node1, node2),
setB: setFromSlice(node2, node3),
expectedResult: setFromSlice(node1, node2, node3),
setA: blockSetFromSlice(node1, node2),
setB: blockSetFromSlice(node2, node3),
expectedResult: blockSetFromSlice(node1, node2, node3),
},
}

View File

@@ -2,6 +2,7 @@ package blockdag
import (
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/bigintpool"
"github.com/pkg/errors"
"math"
"math/big"
@@ -53,13 +54,19 @@ func (window blockWindow) minMaxTimestamps() (min, max int64) {
return
}
func (window blockWindow) averageTarget() *big.Int {
averageTarget := big.NewInt(0)
func (window blockWindow) averageTarget(averageTarget *big.Int) {
averageTarget.SetInt64(0)
target := bigintpool.Acquire(0)
defer bigintpool.Release(target)
for _, node := range window {
target := util.CompactToBig(node.bits)
util.CompactToBigWithDestination(node.bits, target)
averageTarget.Add(averageTarget, target)
}
return averageTarget.Div(averageTarget, big.NewInt(int64(len(window))))
windowLen := bigintpool.Acquire(int64(len(window)))
defer bigintpool.Release(windowLen)
averageTarget.Div(averageTarget, windowLen)
}
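The averageTarget change above avoids allocating a fresh big.Int per block by acquiring and releasing pooled instances. A rough sketch of that acquire/release pattern built on sync.Pool; the real bigintpool package may differ in detail:

package main

import (
	"fmt"
	"math/big"
	"sync"
)

// pool reuses big.Int values across calls instead of reallocating them
// inside hot loops.
var pool = sync.Pool{New: func() interface{} { return new(big.Int) }}

func acquire(value int64) *big.Int {
	x := pool.Get().(*big.Int)
	x.SetInt64(value)
	return x
}

func release(x *big.Int) { pool.Put(x) }

func main() {
	sum := acquire(0)
	defer release(sum)

	for _, v := range []int64{10, 20, 30} {
		term := acquire(v)
		sum.Add(sum, term)
		release(term)
	}
	fmt.Println(sum) // 60
}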
func (window blockWindow) medianTimestamp() (int64, error) {

View File

@@ -12,7 +12,7 @@ import (
func TestBlueBlockWindow(t *testing.T) {
params := dagconfig.SimnetParams
params.K = 1
dag, teardownFunc, err := DAGSetup("TestBlueBlockWindow", Config{
dag, teardownFunc, err := DAGSetup("TestBlueBlockWindow", true, Config{
DAGParams: &params,
})
if err != nil {
@@ -53,12 +53,12 @@ func TestBlueBlockWindow(t *testing.T) {
{
parents: []string{"C", "D"},
id: "E",
expectedWindowWithGenesisPadding: []string{"C", "D", "B", "A", "A", "A", "A", "A", "A", "A"},
expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"},
},
{
parents: []string{"C", "D"},
id: "F",
expectedWindowWithGenesisPadding: []string{"C", "D", "B", "A", "A", "A", "A", "A", "A", "A"},
expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"},
},
{
parents: []string{"A"},
@@ -73,37 +73,37 @@ func TestBlueBlockWindow(t *testing.T) {
{
parents: []string{"H", "F"},
id: "I",
expectedWindowWithGenesisPadding: []string{"F", "C", "D", "B", "A", "A", "A", "A", "A", "A"},
expectedWindowWithGenesisPadding: []string{"F", "D", "C", "B", "A", "A", "A", "A", "A", "A"},
},
{
parents: []string{"I"},
id: "J",
expectedWindowWithGenesisPadding: []string{"I", "F", "C", "D", "B", "A", "A", "A", "A", "A"},
expectedWindowWithGenesisPadding: []string{"I", "F", "D", "C", "B", "A", "A", "A", "A", "A"},
},
{
parents: []string{"J"},
id: "K",
expectedWindowWithGenesisPadding: []string{"J", "I", "F", "C", "D", "B", "A", "A", "A", "A"},
expectedWindowWithGenesisPadding: []string{"J", "I", "F", "D", "C", "B", "A", "A", "A", "A"},
},
{
parents: []string{"K"},
id: "L",
expectedWindowWithGenesisPadding: []string{"K", "J", "I", "F", "C", "D", "B", "A", "A", "A"},
expectedWindowWithGenesisPadding: []string{"K", "J", "I", "F", "D", "C", "B", "A", "A", "A"},
},
{
parents: []string{"L"},
id: "M",
expectedWindowWithGenesisPadding: []string{"L", "K", "J", "I", "F", "C", "D", "B", "A", "A"},
expectedWindowWithGenesisPadding: []string{"L", "K", "J", "I", "F", "D", "C", "B", "A", "A"},
},
{
parents: []string{"M"},
id: "N",
expectedWindowWithGenesisPadding: []string{"M", "L", "K", "J", "I", "F", "C", "D", "B", "A"},
expectedWindowWithGenesisPadding: []string{"M", "L", "K", "J", "I", "F", "D", "C", "B", "A"},
},
{
parents: []string{"N"},
id: "O",
expectedWindowWithGenesisPadding: []string{"N", "M", "L", "K", "J", "I", "F", "C", "D", "B"},
expectedWindowWithGenesisPadding: []string{"N", "M", "L", "K", "J", "I", "F", "D", "C", "B"},
},
}

View File

@@ -4,12 +4,12 @@ import (
"bufio"
"bytes"
"encoding/binary"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/pkg/errors"
"io"
"math"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/txsort"
@@ -73,55 +73,24 @@ func (cfr *compactFeeIterator) next() (uint64, error) {
}
// The following functions relate to storing and retrieving fee data from the database
var feeBucket = []byte("fees")
// getBluesFeeData returns the compactFeeData for all of the node's blues,
// used to calculate the fees this blockNode needs to pay
func (node *blockNode) getBluesFeeData(dag *BlockDAG) (map[daghash.Hash]compactFeeData, error) {
bluesFeeData := make(map[daghash.Hash]compactFeeData)
err := dag.db.View(func(dbTx database.Tx) error {
for _, blueBlock := range node.blues {
feeData, err := dbFetchFeeData(dbTx, blueBlock.hash)
if err != nil {
return errors.Errorf("Error getting fee data for block %s: %s", blueBlock.hash, err)
}
bluesFeeData[*blueBlock.hash] = feeData
for _, blueBlock := range node.blues {
feeData, err := dbaccess.FetchFeeData(dbaccess.NoTx(), blueBlock.hash)
if err != nil {
return nil, err
}
return nil
})
if err != nil {
return nil, err
bluesFeeData[*blueBlock.hash] = feeData
}
return bluesFeeData, nil
}
func dbStoreFeeData(dbTx database.Tx, blockHash *daghash.Hash, feeData compactFeeData) error {
feeBucket, err := dbTx.Metadata().CreateBucketIfNotExists(feeBucket)
if err != nil {
return errors.Errorf("Error creating or retrieving fee bucket: %s", err)
}
return feeBucket.Put(blockHash.CloneBytes(), feeData)
}
func dbFetchFeeData(dbTx database.Tx, blockHash *daghash.Hash) (compactFeeData, error) {
feeBucket := dbTx.Metadata().Bucket(feeBucket)
if feeBucket == nil {
return nil, errors.New("Fee bucket does not exist")
}
feeData := feeBucket.Get(blockHash.CloneBytes())
if feeData == nil {
return nil, errors.Errorf("No fee data found for block %s", blockHash)
}
return feeData, nil
}
// The following functions deal with building and validating the coinbase transaction
func (node *blockNode) validateCoinbaseTransaction(dag *BlockDAG, block *util.Block, txsAcceptanceData MultiBlockTxsAcceptanceData) error {

View File

@@ -7,33 +7,21 @@ package blockdag
import (
"compress/bzip2"
"encoding/binary"
"github.com/pkg/errors"
"io"
"os"
"path/filepath"
"reflect"
"strings"
"testing"
"time"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/dagconfig"
_ "github.com/kaspanet/kaspad/database/ffldb"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
func loadBlocksWithLog(t *testing.T, filename string) ([]*util.Block, error) {
blocks, err := LoadBlocks(filename)
if err == nil {
t.Logf("Loaded %d blocks from file %s", len(blocks), filename)
for i, b := range blocks {
t.Logf("Block #%d: %s", i, b.Hash())
}
}
return blocks, err
}
// loadUTXOSet returns a utxo view loaded from a file.
func loadUTXOSet(filename string) (UTXOSet, error) {
// The utxostore file format is:
@@ -84,15 +72,8 @@ func loadUTXOSet(filename string) (UTXOSet, error) {
return nil, err
}
// Serialized utxo entry.
serialized := make([]byte, numBytes)
_, err = io.ReadAtLeast(r, serialized, int(numBytes))
if err != nil {
return nil, err
}
// Deserialize it and add it to the view.
entry, err := deserializeUTXOEntry(serialized)
// Deserialize the UTXO entry and add it to the UTXO set.
entry, err := deserializeUTXOEntry(r)
if err != nil {
return nil, err
}
@@ -113,11 +94,11 @@ func (dag *BlockDAG) TestSetCoinbaseMaturity(maturity uint64) {
// it is not usable with all functions and the tests must take care when making
// use of it.
func newTestDAG(params *dagconfig.Params) *BlockDAG {
index := newBlockIndex(nil, params)
index := newBlockIndex(params)
targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second)
dag := &BlockDAG{
dagParams: params,
timeSource: NewMedianTime(),
timeSource: NewTimeSource(),
targetTimePerBlock: targetTimePerBlock,
difficultyAdjustmentWindowSize: params.DifficultyAdjustmentWindowSize,
TimestampDeviationTolerance: params.TimestampDeviationTolerance,
@@ -129,10 +110,10 @@ func newTestDAG(params *dagconfig.Params) *BlockDAG {
// Create a genesis block node and block index index populated with it
// on the above fake DAG.
dag.genesis, _ = dag.newBlockNode(&params.GenesisBlock.Header, newSet())
dag.genesis, _ = dag.newBlockNode(&params.GenesisBlock.Header, newBlockSet())
index.AddNode(dag.genesis)
dag.virtual = newVirtualBlock(dag, setFromSlice(dag.genesis))
dag.virtual = newVirtualBlock(dag, blockSetFromSlice(dag.genesis))
return dag
}
@@ -163,56 +144,37 @@ func addNodeAsChildToParents(node *blockNode) {
// same type (either both nil or both of type RuleError) and their error codes
// match when not nil.
func checkRuleError(gotErr, wantErr error) error {
// Ensure the error code is of the expected type and the error
// code matches the value specified in the test instance.
if reflect.TypeOf(gotErr) != reflect.TypeOf(wantErr) {
return errors.Errorf("wrong error - got %T (%[1]v), want %T",
gotErr, wantErr)
}
if gotErr == nil {
if wantErr == nil && gotErr == nil {
return nil
}
// Ensure the want error type is a rule error.
werr, ok := wantErr.(RuleError)
if !ok {
return errors.Errorf("unexpected test error type %T", wantErr)
var gotRuleErr RuleError
if ok := errors.As(gotErr, &gotRuleErr); !ok {
return errors.Errorf("gotErr expected to be RuleError, but got %+v instead", gotErr)
}
var wantRuleErr RuleError
if ok := errors.As(wantErr, &wantRuleErr); !ok {
return errors.Errorf("wantErr expected to be RuleError, but got %+v instead", wantErr)
}
// Ensure the error codes match. It's safe to use a raw type assert
// here since the code above already proved they are the same type and
// the want error is a rule error.
gotErrorCode := gotErr.(RuleError).ErrorCode
if gotErrorCode != werr.ErrorCode {
if gotRuleErr.ErrorCode != wantRuleErr.ErrorCode {
return errors.Errorf("mismatched error code - got %v (%v), want %v",
gotErrorCode, gotErr, werr.ErrorCode)
gotRuleErr.ErrorCode, gotErr, wantRuleErr.ErrorCode)
}
return nil
}
func prepareAndProcessBlock(t *testing.T, dag *BlockDAG, parents ...*wire.MsgBlock) *wire.MsgBlock {
func prepareAndProcessBlockByParentMsgBlocks(t *testing.T, dag *BlockDAG, parents ...*wire.MsgBlock) *wire.MsgBlock {
parentHashes := make([]*daghash.Hash, len(parents))
for i, parent := range parents {
parentHashes[i] = parent.BlockHash()
}
daghash.Sort(parentHashes)
block, err := PrepareBlockForTest(dag, parentHashes, nil)
if err != nil {
t.Fatalf("error in PrepareBlockForTest: %s", err)
}
utilBlock := util.NewBlock(block)
isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, BFNoPoWCheck)
if err != nil {
t.Fatalf("unexpected error in ProcessBlock: %s", err)
}
if isDelayed {
t.Fatalf("block is too far in the future")
}
if isOrphan {
t.Fatalf("block was unexpectedly orphan")
}
return block
return PrepareAndProcessBlockForTest(t, dag, parentHashes, nil)
}
func nodeByMsgBlock(t *testing.T, dag *BlockDAG, block *wire.MsgBlock) *blockNode {
@@ -222,3 +184,15 @@ func nodeByMsgBlock(t *testing.T, dag *BlockDAG, block *wire.MsgBlock) *blockNod
}
return node
}
type fakeTimeSource struct {
time time.Time
}
func (fts *fakeTimeSource) Now() time.Time {
return time.Unix(fts.time.Unix(), 0)
}
func newFakeTimeSource(fakeTime time.Time) TimeSource {
return &fakeTimeSource{time: fakeTime}
}

View File

@@ -1,584 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/kaspad/txscript"
)
// -----------------------------------------------------------------------------
// A variable length quantity (VLQ) is an encoding that uses an arbitrary number
// of binary octets to represent an arbitrarily large integer. The scheme
// employs a most significant byte (MSB) base-128 encoding where the high bit in
// each byte indicates whether or not the byte is the final one. In addition,
// to ensure there are no redundant encodings, an offset is subtracted every
// time a group of 7 bits is shifted out. Therefore each integer can be
// represented in exactly one way, and each representation stands for exactly
// one integer.
//
// Another nice property of this encoding is that it provides a compact
// representation of values that are typically used to indicate sizes. For
// example, the values 0 - 127 are represented with a single byte, 128 - 16511
// with two bytes, and 16512 - 2113663 with three bytes.
//
// While the encoding allows arbitrarily large integers, it is artificially
// limited in this code to an unsigned 64-bit integer for efficiency purposes.
//
// Example encodings:
// 0 -> [0x00]
// 127 -> [0x7f] * Max 1-byte value
// 128 -> [0x80 0x00]
// 129 -> [0x80 0x01]
// 255 -> [0x80 0x7f]
// 256 -> [0x81 0x00]
// 16511 -> [0xff 0x7f] * Max 2-byte value
// 16512 -> [0x80 0x80 0x00]
// 32895 -> [0x80 0xff 0x7f]
// 2113663 -> [0xff 0xff 0x7f] * Max 3-byte value
// 270549119 -> [0xff 0xff 0xff 0x7f] * Max 4-byte value
// 2^64-1 -> [0x80 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0x7f]
//
// References:
// https://en.wikipedia.org/wiki/Variable-length_quantity
// http://www.codecodex.com/wiki/Variable-Length_Integers
// -----------------------------------------------------------------------------
// serializeSizeVLQ returns the number of bytes it would take to serialize the
// passed number as a variable-length quantity according to the format described
// above.
func serializeSizeVLQ(n uint64) int {
size := 1
for ; n > 0x7f; n = (n >> 7) - 1 {
size++
}
return size
}
// putVLQ serializes the provided number to a variable-length quantity according
// to the format described above and returns the number of bytes of the encoded
// value. The result is placed directly into the passed byte slice which must
// be at least large enough to handle the number of bytes returned by the
// serializeSizeVLQ function or it will panic.
func putVLQ(target []byte, n uint64) int {
offset := 0
for ; ; offset++ {
// The high bit is set when another byte follows.
highBitMask := byte(0x80)
if offset == 0 {
highBitMask = 0x00
}
target[offset] = byte(n&0x7f) | highBitMask
if n <= 0x7f {
break
}
n = (n >> 7) - 1
}
// Reverse the bytes so it is MSB-encoded.
for i, j := 0, offset; i < j; i, j = i+1, j-1 {
target[i], target[j] = target[j], target[i]
}
return offset + 1
}
// deserializeVLQ deserializes the provided variable-length quantity according
// to the format described above. It also returns the number of bytes
// deserialized.
func deserializeVLQ(serialized []byte) (uint64, int) {
var n uint64
var size int
for _, val := range serialized {
size++
n = (n << 7) | uint64(val&0x7f)
if val&0x80 != 0x80 {
break
}
n++
}
return n, size
}
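To make the offset-subtracting behaviour of the encoding concrete, here is a small decode-only check; deserializeVLQ is a condensed copy of the function above so the example runs on its own, and the values match the table in the comment:

package main

import "fmt"

// deserializeVLQ is copied from the removed file above, included only so
// this example is self-contained.
func deserializeVLQ(serialized []byte) (uint64, int) {
	var n uint64
	var size int
	for _, val := range serialized {
		size++
		n = (n << 7) | uint64(val&0x7f)
		if val&0x80 != 0x80 {
			break
		}
		n++
	}
	return n, size
}

func main() {
	// The offset trick makes [0xff 0x7f] the largest 2-byte value...
	v, _ := deserializeVLQ([]byte{0xff, 0x7f})
	fmt.Println(v) // 16511

	// ...and [0x80 0x80 0x00] the smallest 3-byte value.
	v, _ = deserializeVLQ([]byte{0x80, 0x80, 0x00})
	fmt.Println(v) // 16512
}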
// -----------------------------------------------------------------------------
// In order to reduce the size of stored scripts, a domain specific compression
// algorithm is used which recognizes standard scripts and stores them using
// less bytes than the original script.
//
// The general serialized format is:
//
// <script size or type><script data>
//
// Field Type Size
// script size or type VLQ variable
// script data []byte variable
//
// The specific serialized format for each recognized standard script is:
//
// - Pay-to-pubkey-hash: (21 bytes) - <0><20-byte pubkey hash>
// - Pay-to-script-hash: (21 bytes) - <1><20-byte script hash>
// - Pay-to-pubkey**: (33 bytes) - <2, 3, 4, or 5><32-byte pubkey X value>
// 2, 3 = compressed pubkey with bit 0 specifying the y coordinate to use
// 4, 5 = uncompressed pubkey with bit 0 specifying the y coordinate to use
// ** Only valid public keys starting with 0x02, 0x03, and 0x04 are supported.
//
// Any scripts which are not recognized as one of the aforementioned standard
// scripts are encoded using the general serialized format and encode the script
// size as the sum of the actual size of the script and the number of special
// cases.
// -----------------------------------------------------------------------------
// The following constants specify the special constants used to identify a
// special script type in the domain-specific compressed script encoding.
//
// NOTE: This section specifically does not use iota since these values are
// serialized and must be stable for long-term storage.
const (
// cstPayToPubKeyHash identifies a compressed pay-to-pubkey-hash script.
cstPayToPubKeyHash = 0
// cstPayToScriptHash identifies a compressed pay-to-script-hash script.
cstPayToScriptHash = 1
// cstPayToPubKeyComp2 identifies a compressed pay-to-pubkey script to
// a compressed pubkey. Bit 0 specifies which y-coordinate to use
// to reconstruct the full uncompressed pubkey.
cstPayToPubKeyComp2 = 2
// cstPayToPubKeyComp3 identifies a compressed pay-to-pubkey script to
// a compressed pubkey. Bit 0 specifies which y-coordinate to use
// to reconstruct the full uncompressed pubkey.
cstPayToPubKeyComp3 = 3
// cstPayToPubKeyUncomp4 identifies a compressed pay-to-pubkey script to
// an uncompressed pubkey. Bit 0 specifies which y-coordinate to use
// to reconstruct the full uncompressed pubkey.
cstPayToPubKeyUncomp4 = 4
// cstPayToPubKeyUncomp5 identifies a compressed pay-to-pubkey script to
// an uncompressed pubkey. Bit 0 specifies which y-coordinate to use
// to reconstruct the full uncompressed pubkey.
cstPayToPubKeyUncomp5 = 5
// numSpecialScripts is the number of special scripts recognized by the
// domain-specific script compression algorithm.
numSpecialScripts = 6
)
// isPubKeyHash returns whether or not the passed public key script is a
// standard pay-to-pubkey-hash script along with the pubkey hash it is paying to
// if it is.
func isPubKeyHash(script []byte) (bool, []byte) {
if len(script) == 25 && script[0] == txscript.OpDup &&
script[1] == txscript.OpHash160 &&
script[2] == txscript.OpData20 &&
script[23] == txscript.OpEqualVerify &&
script[24] == txscript.OpCheckSig {
return true, script[3:23]
}
return false, nil
}
// isScriptHash returns whether or not the passed public key script is a
// standard pay-to-script-hash script along with the script hash it is paying to
// if it is.
func isScriptHash(script []byte) (bool, []byte) {
if len(script) == 23 && script[0] == txscript.OpHash160 &&
script[1] == txscript.OpData20 &&
script[22] == txscript.OpEqual {
return true, script[2:22]
}
return false, nil
}
// isPubKey returns whether or not the passed public key script is a standard
// pay-to-pubkey script that pays to a valid compressed or uncompressed public
// key along with the serialized pubkey it is paying to if it is.
//
// NOTE: This function ensures the public key is actually valid since the
// compression algorithm requires valid pubkeys. It does not support hybrid
// pubkeys. This means that even if the script has the correct form for a
// pay-to-pubkey script, this function will only return true when it is paying
// to a valid compressed or uncompressed pubkey.
func isPubKey(script []byte) (bool, []byte) {
// Pay-to-compressed-pubkey script.
if len(script) == 35 && script[0] == txscript.OpData33 &&
script[34] == txscript.OpCheckSig && (script[1] == 0x02 ||
script[1] == 0x03) {
// Ensure the public key is valid.
serializedPubKey := script[1:34]
_, err := ecc.ParsePubKey(serializedPubKey, ecc.S256())
if err == nil {
return true, serializedPubKey
}
}
// Pay-to-uncompressed-pubkey script.
if len(script) == 67 && script[0] == txscript.OpData65 &&
script[66] == txscript.OpCheckSig && script[1] == 0x04 {
// Ensure the public key is valid.
serializedPubKey := script[1:66]
_, err := ecc.ParsePubKey(serializedPubKey, ecc.S256())
if err == nil {
return true, serializedPubKey
}
}
return false, nil
}
// compressedScriptSize returns the number of bytes the passed script would take
// when encoded with the domain specific compression algorithm described above.
func compressedScriptSize(scriptPubKey []byte) int {
// Pay-to-pubkey-hash script.
if valid, _ := isPubKeyHash(scriptPubKey); valid {
return 21
}
// Pay-to-script-hash script.
if valid, _ := isScriptHash(scriptPubKey); valid {
return 21
}
// Pay-to-pubkey (compressed or uncompressed) script.
if valid, _ := isPubKey(scriptPubKey); valid {
return 33
}
// When none of the above special cases apply, encode the script as is
// preceded by the sum of its size and the number of special cases
// encoded as a variable length quantity.
return serializeSizeVLQ(uint64(len(scriptPubKey)+numSpecialScripts)) +
len(scriptPubKey)
}
// decodeCompressedScriptSize treats the passed serialized bytes as a compressed
// script, possibly followed by other data, and returns the number of bytes it
// occupies taking into account the special encoding of the script size by the
// domain specific compression algorithm described above.
func decodeCompressedScriptSize(serialized []byte) int {
scriptSize, bytesRead := deserializeVLQ(serialized)
if bytesRead == 0 {
return 0
}
switch scriptSize {
case cstPayToPubKeyHash:
return 21
case cstPayToScriptHash:
return 21
case cstPayToPubKeyComp2, cstPayToPubKeyComp3, cstPayToPubKeyUncomp4,
cstPayToPubKeyUncomp5:
return 33
}
scriptSize -= numSpecialScripts
scriptSize += uint64(bytesRead)
return int(scriptSize)
}
// putCompressedScript compresses the passed script according to the domain
// specific compression algorithm described above directly into the passed
// target byte slice. The target byte slice must be at least large enough to
// handle the number of bytes returned by the compressedScriptSize function or
// it will panic.
func putCompressedScript(target, scriptPubKey []byte) int {
// Pay-to-pubkey-hash script.
if valid, hash := isPubKeyHash(scriptPubKey); valid {
target[0] = cstPayToPubKeyHash
copy(target[1:21], hash)
return 21
}
// Pay-to-script-hash script.
if valid, hash := isScriptHash(scriptPubKey); valid {
target[0] = cstPayToScriptHash
copy(target[1:21], hash)
return 21
}
// Pay-to-pubkey (compressed or uncompressed) script.
if valid, serializedPubKey := isPubKey(scriptPubKey); valid {
pubKeyFormat := serializedPubKey[0]
switch pubKeyFormat {
case 0x02, 0x03:
target[0] = pubKeyFormat
copy(target[1:33], serializedPubKey[1:33])
return 33
case 0x04:
// Encode the oddness of the serialized pubkey into the
// compressed script type.
target[0] = pubKeyFormat | (serializedPubKey[64] & 0x01)
copy(target[1:33], serializedPubKey[1:33])
return 33
}
}
// When none of the above special cases apply, encode the unmodified
// script preceded by the sum of its size and the number of special
// cases encoded as a variable length quantity.
encodedSize := uint64(len(scriptPubKey) + numSpecialScripts)
vlqSizeLen := putVLQ(target, encodedSize)
copy(target[vlqSizeLen:], scriptPubKey)
return vlqSizeLen + len(scriptPubKey)
}
// decompressScript returns the original script obtained by decompressing the
// passed compressed script according to the domain specific compression
// algorithm described above.
//
// NOTE: The script parameter must already have been proven to be long enough
// to contain the number of bytes returned by decodeCompressedScriptSize or it
// will panic. This is acceptable since it is only an internal function.
func decompressScript(compressedScriptPubKey []byte) []byte {
// In practice this function will not be called with a zero-length or
// nil script since the nil script encoding includes the length, however
// the code below assumes the length exists, so just return nil now if
// the function ever ends up being called with a nil script in the
// future.
if len(compressedScriptPubKey) == 0 {
return nil
}
// Decode the script size and examine it for the special cases.
encodedScriptSize, bytesRead := deserializeVLQ(compressedScriptPubKey)
switch encodedScriptSize {
// Pay-to-pubkey-hash script. The resulting script is:
// <OP_DUP><OP_HASH160><20 byte hash><OP_EQUALVERIFY><OP_CHECKSIG>
case cstPayToPubKeyHash:
scriptPubKey := make([]byte, 25)
scriptPubKey[0] = txscript.OpDup
scriptPubKey[1] = txscript.OpHash160
scriptPubKey[2] = txscript.OpData20
copy(scriptPubKey[3:], compressedScriptPubKey[bytesRead:bytesRead+20])
scriptPubKey[23] = txscript.OpEqualVerify
scriptPubKey[24] = txscript.OpCheckSig
return scriptPubKey
// Pay-to-script-hash script. The resulting script is:
// <OP_HASH160><20 byte script hash><OP_EQUAL>
case cstPayToScriptHash:
scriptPubKey := make([]byte, 23)
scriptPubKey[0] = txscript.OpHash160
scriptPubKey[1] = txscript.OpData20
copy(scriptPubKey[2:], compressedScriptPubKey[bytesRead:bytesRead+20])
scriptPubKey[22] = txscript.OpEqual
return scriptPubKey
// Pay-to-compressed-pubkey script. The resulting script is:
// <OP_DATA_33><33 byte compressed pubkey><OP_CHECKSIG>
case cstPayToPubKeyComp2, cstPayToPubKeyComp3:
scriptPubKey := make([]byte, 35)
scriptPubKey[0] = txscript.OpData33
scriptPubKey[1] = byte(encodedScriptSize)
copy(scriptPubKey[2:], compressedScriptPubKey[bytesRead:bytesRead+32])
scriptPubKey[34] = txscript.OpCheckSig
return scriptPubKey
// Pay-to-uncompressed-pubkey script. The resulting script is:
// <OP_DATA_65><65 byte uncompressed pubkey><OP_CHECKSIG>
case cstPayToPubKeyUncomp4, cstPayToPubKeyUncomp5:
// Change the leading byte to the appropriate compressed pubkey
// identifier (0x02 or 0x03) so it can be decoded as a
// compressed pubkey. This really should never fail since the
// encoding ensures it is valid before compressing to this type.
compressedKey := make([]byte, 33)
compressedKey[0] = byte(encodedScriptSize - 2)
copy(compressedKey[1:], compressedScriptPubKey[1:])
key, err := ecc.ParsePubKey(compressedKey, ecc.S256())
if err != nil {
return nil
}
scriptPubKey := make([]byte, 67)
scriptPubKey[0] = txscript.OpData65
copy(scriptPubKey[1:], key.SerializeUncompressed())
scriptPubKey[66] = txscript.OpCheckSig
return scriptPubKey
}
// When none of the special cases apply, the script was encoded using
// the general format, so reduce the script size by the number of
// special cases and return the unmodified script.
scriptSize := int(encodedScriptSize - numSpecialScripts)
scriptPubKey := make([]byte, scriptSize)
copy(scriptPubKey, compressedScriptPubKey[bytesRead:bytesRead+scriptSize])
return scriptPubKey
}
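As a concrete illustration of the script compression described above, the pay-to-pubkey-hash vectors from the test file later in this compare show a 25-byte script shrinking to 21 bytes (one type byte plus the 20-byte hash):

package main

import (
	"bytes"
	"encoding/hex"
	"fmt"
)

func mustHex(s string) []byte {
	b, err := hex.DecodeString(s)
	if err != nil {
		panic(err)
	}
	return b
}

func main() {
	// Standard pay-to-pubkey-hash script:
	// OP_DUP OP_HASH160 <20-byte hash> OP_EQUALVERIFY OP_CHECKSIG (25 bytes).
	uncompressed := mustHex("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac")

	// Compressed form: the type byte 0x00 (cstPayToPubKeyHash) followed by
	// the 20-byte hash, 21 bytes in total.
	compressed := mustHex("001018853670f9f3b0582c5b9ee8ce93764ac32b93")

	fmt.Println(len(uncompressed), "->", len(compressed))        // 25 -> 21
	fmt.Println(bytes.Equal(compressed[1:], uncompressed[3:23])) // true: same hash
}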
// -----------------------------------------------------------------------------
// In order to reduce the size of stored amounts, a domain specific compression
// algorithm is used which relies on there typically being a lot of zeroes at
// the end of the amounts.
//
// While this is simply exchanging one uint64 for another, the resulting value
// for typical amounts has a much smaller magnitude which results in fewer bytes
// when encoded as variable length quantity. For example, consider the amount
// of 0.1 KAS which is 10000000 sompi. Encoding 10000000 as a VLQ would take
// 4 bytes while encoding the compressed value of 8 as a VLQ only takes 1 byte.
//
// Essentially the compression is achieved by splitting the value into an
// exponent in the range [0-9] and a digit in the range [1-9], when possible,
// and encoding them in a way that can be decoded. More specifically, the
// encoding is as follows:
// - 0 is 0
// - Find the exponent, e, as the largest power of 10 that evenly divides the
// value up to a maximum of 9
// - When e < 9, the final digit can't be 0 so store it as d and remove it by
// dividing the value by 10 (call the result n). The encoded value is thus:
// 1 + 10*(9*n + d-1) + e
// - When e==9, the only thing known is the amount is not 0. The encoded value
// is thus:
// 1 + 10*(n-1) + e == 10 + 10*(n-1)
//
// Example encodings:
// (The numbers in parenthesis are the number of bytes when serialized as a VLQ)
// 0 (1) -> 0 (1) * 0.00000000 KAS
// 1000 (2) -> 4 (1) * 0.00001000 KAS
// 10000 (2) -> 5 (1) * 0.00010000 KAS
// 12345678 (4) -> 111111101(4) * 0.12345678 KAS
// 50000000 (4) -> 48 (1) * 0.50000000 KAS
// 100000000 (4) -> 9 (1) * 1.00000000 KAS
// 500000000 (5) -> 49 (1) * 5.00000000 KAS
// 1000000000 (5) -> 10 (1) * 10.00000000 KAS
// -----------------------------------------------------------------------------
// compressTxOutAmount compresses the passed amount according to the domain
// specific compression algorithm described above.
func compressTxOutAmount(amount uint64) uint64 {
// No need to do any work if it's zero.
if amount == 0 {
return 0
}
// Find the largest power of 10 (max of 9) that evenly divides the
// value.
exponent := uint64(0)
for amount%10 == 0 && exponent < 9 {
amount /= 10
exponent++
}
// The compressed result for exponents less than 9 is:
// 1 + 10*(9*n + d-1) + e
if exponent < 9 {
lastDigit := amount % 10
amount /= 10
return 1 + 10*(9*amount+lastDigit-1) + exponent
}
// The compressed result for an exponent of 9 is:
// 1 + 10*(n-1) + e == 10 + 10*(n-1)
return 10 + 10*(amount-1)
}
// decompressTxOutAmount returns the original amount the passed compressed
// amount represents according to the domain specific compression algorithm
// described above.
func decompressTxOutAmount(amount uint64) uint64 {
// No need to do any work if it's zero.
if amount == 0 {
return 0
}
// The decompressed amount is either of the following two equations:
// x = 1 + 10*(9*n + d - 1) + e
// x = 1 + 10*(n - 1) + 9
amount--
// The decompressed amount is now one of the following two equations:
// x = 10*(9*n + d - 1) + e
// x = 10*(n - 1) + 9
exponent := amount % 10
amount /= 10
// The decompressed amount is now one of the following two equations:
// x = 9*n + d - 1 | where e < 9
// x = n - 1 | where e = 9
n := uint64(0)
if exponent < 9 {
lastDigit := amount%9 + 1
amount /= 9
n = amount*10 + lastDigit
} else {
n = amount + 1
}
// Apply the exponent.
for ; exponent > 0; exponent-- {
n *= 10
}
return n
}
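A couple of worked examples for the amount compression above; compressTxOutAmount is a condensed copy of the function just shown so the numbers can be checked directly, and they match the test vectors later in this compare:

package main

import "fmt"

// compressTxOutAmount is copied (without comments) from the removed file
// above, included only so this example is self-contained.
func compressTxOutAmount(amount uint64) uint64 {
	if amount == 0 {
		return 0
	}
	exponent := uint64(0)
	for amount%10 == 0 && exponent < 9 {
		amount /= 10
		exponent++
	}
	if exponent < 9 {
		lastDigit := amount % 10
		amount /= 10
		return 1 + 10*(9*amount+lastDigit-1) + exponent
	}
	return 10 + 10*(amount-1)
}

func main() {
	// 0.5 KAS (50000000 sompi): n=0, d=5, e=7 -> 1 + 10*(9*0 + 5-1) + 7 = 48.
	fmt.Println(compressTxOutAmount(50000000)) // 48

	// 0.12345678 KAS has no trailing zeros, so it stays large.
	fmt.Println(compressTxOutAmount(12345678)) // 111111101
}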
// -----------------------------------------------------------------------------
// Compressed transaction outputs consist of an amount and a public key script
// both compressed using the domain specific compression algorithms previously
// described.
//
// The serialized format is:
//
// <compressed amount><compressed script>
//
// Field Type Size
// compressed amount VLQ variable
// compressed script []byte variable
// -----------------------------------------------------------------------------
// compressedTxOutSize returns the number of bytes the passed transaction output
// fields would take when encoded with the format described above.
func compressedTxOutSize(amount uint64, scriptPubKey []byte) int {
return serializeSizeVLQ(compressTxOutAmount(amount)) +
compressedScriptSize(scriptPubKey)
}
// putCompressedTxOut compresses the passed amount and script according to their
// domain specific compression algorithms and encodes them directly into the
// passed target byte slice with the format described above. The target byte
// slice must be at least large enough to handle the number of bytes returned by
// the compressedTxOutSize function or it will panic.
func putCompressedTxOut(target []byte, amount uint64, scriptPubKey []byte) int {
offset := putVLQ(target, compressTxOutAmount(amount))
offset += putCompressedScript(target[offset:], scriptPubKey)
return offset
}
// decodeCompressedTxOut decodes the passed compressed txout, possibly followed
// by other data, into its uncompressed amount and script and returns them along
// with the number of bytes they occupied prior to decompression.
func decodeCompressedTxOut(serialized []byte) (uint64, []byte, int, error) {
// Deserialize the compressed amount and ensure there are bytes
// remaining for the compressed script.
compressedAmount, bytesRead := deserializeVLQ(serialized)
if bytesRead >= len(serialized) {
return 0, nil, bytesRead, errDeserialize("unexpected end of " +
"data after compressed amount")
}
// Decode the compressed script size and ensure there are enough bytes
// left in the slice for it.
scriptSize := decodeCompressedScriptSize(serialized[bytesRead:])
if len(serialized[bytesRead:]) < scriptSize {
return 0, nil, bytesRead, errDeserialize("unexpected end of " +
"data after script size")
}
// Decompress and return the amount and script.
amount := decompressTxOutAmount(compressedAmount)
script := decompressScript(serialized[bytesRead : bytesRead+scriptSize])
return amount, script, bytesRead + scriptSize, nil
}
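To tie the txout layout together, the "pay-to-pubkey-hash dust" vector from the test file later in this compare splits cleanly into the two fields described above:

package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	// Serialized compressed txout for 546 sompi paying to a pubkey hash.
	compressed, _ := hex.DecodeString("a52f001018853670f9f3b0582c5b9ee8ce93764ac32b93")

	// Layout: <compressed amount as VLQ><compressed script>.
	// 546 compresses to 4911, whose VLQ encoding is the two bytes a5 2f;
	// the remaining 21 bytes are the compressed pay-to-pubkey-hash script.
	fmt.Printf("amount VLQ: % x\n", compressed[:2]) // a5 2f
	fmt.Printf("script:     % x\n", compressed[2:]) // 00 1018 ... 2b93
	fmt.Println("script bytes:", len(compressed[2:])) // 21
}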

View File

@@ -1,436 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"bytes"
"encoding/hex"
"testing"
)
// hexToBytes converts the passed hex string into bytes and will panic if there
// is an error. This is only provided for the hard-coded constants so errors in
// the source code can be detected. It will only (and must only) be called with
// hard-coded values.
func hexToBytes(s string) []byte {
b, err := hex.DecodeString(s)
if err != nil {
panic("invalid hex in source file: " + s)
}
return b
}
// TestVLQ ensures the variable length quantity serialization, deserialization,
// and size calculation works as expected.
func TestVLQ(t *testing.T) {
t.Parallel()
tests := []struct {
val uint64
serialized []byte
}{
{0, hexToBytes("00")},
{1, hexToBytes("01")},
{127, hexToBytes("7f")},
{128, hexToBytes("8000")},
{129, hexToBytes("8001")},
{255, hexToBytes("807f")},
{256, hexToBytes("8100")},
{16383, hexToBytes("fe7f")},
{16384, hexToBytes("ff00")},
{16511, hexToBytes("ff7f")}, // Max 2-byte value
{16512, hexToBytes("808000")},
{16513, hexToBytes("808001")},
{16639, hexToBytes("80807f")},
{32895, hexToBytes("80ff7f")},
{2113663, hexToBytes("ffff7f")}, // Max 3-byte value
{2113664, hexToBytes("80808000")},
{270549119, hexToBytes("ffffff7f")}, // Max 4-byte value
{270549120, hexToBytes("8080808000")},
{2147483647, hexToBytes("86fefefe7f")},
{2147483648, hexToBytes("86fefeff00")},
{4294967295, hexToBytes("8efefefe7f")}, // Max uint32, 5 bytes
// Max uint64, 10 bytes
{18446744073709551615, hexToBytes("80fefefefefefefefe7f")},
}
for _, test := range tests {
// Ensure the function to calculate the serialized size without
// actually serializing the value is calculated properly.
gotSize := serializeSizeVLQ(test.val)
if gotSize != len(test.serialized) {
t.Errorf("serializeSizeVLQ: did not get expected size "+
"for %d - got %d, want %d", test.val, gotSize,
len(test.serialized))
continue
}
// Ensure the value serializes to the expected bytes.
gotBytes := make([]byte, gotSize)
gotBytesWritten := putVLQ(gotBytes, test.val)
if !bytes.Equal(gotBytes, test.serialized) {
t.Errorf("putVLQUnchecked: did not get expected bytes "+
"for %d - got %x, want %x", test.val, gotBytes,
test.serialized)
continue
}
if gotBytesWritten != len(test.serialized) {
t.Errorf("putVLQUnchecked: did not get expected number "+
"of bytes written for %d - got %d, want %d",
test.val, gotBytesWritten, len(test.serialized))
continue
}
// Ensure the serialized bytes deserialize to the expected
// value.
gotVal, gotBytesRead := deserializeVLQ(test.serialized)
if gotVal != test.val {
t.Errorf("deserializeVLQ: did not get expected value "+
"for %x - got %d, want %d", test.serialized,
gotVal, test.val)
continue
}
if gotBytesRead != len(test.serialized) {
t.Errorf("deserializeVLQ: did not get expected number "+
"of bytes read for %d - got %d, want %d",
test.serialized, gotBytesRead,
len(test.serialized))
continue
}
}
}
// TestScriptCompression ensures the domain-specific script compression and
// decompression works as expected.
func TestScriptCompression(t *testing.T) {
t.Parallel()
tests := []struct {
name string
uncompressed []byte
compressed []byte
}{
{
name: "nil",
uncompressed: nil,
compressed: hexToBytes("06"),
},
{
name: "pay-to-pubkey-hash 1",
uncompressed: hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac"),
compressed: hexToBytes("001018853670f9f3b0582c5b9ee8ce93764ac32b93"),
},
{
name: "pay-to-pubkey-hash 2",
uncompressed: hexToBytes("76a914e34cce70c86373273efcc54ce7d2a491bb4a0e8488ac"),
compressed: hexToBytes("00e34cce70c86373273efcc54ce7d2a491bb4a0e84"),
},
{
name: "pay-to-script-hash 1",
uncompressed: hexToBytes("a914da1745e9b549bd0bfa1a569971c77eba30cd5a4b87"),
compressed: hexToBytes("01da1745e9b549bd0bfa1a569971c77eba30cd5a4b"),
},
{
name: "pay-to-script-hash 2",
uncompressed: hexToBytes("a914f815b036d9bbbce5e9f2a00abd1bf3dc91e9551087"),
compressed: hexToBytes("01f815b036d9bbbce5e9f2a00abd1bf3dc91e95510"),
},
{
name: "pay-to-pubkey compressed 0x02",
uncompressed: hexToBytes("2102192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4ac"),
compressed: hexToBytes("02192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
},
{
name: "pay-to-pubkey compressed 0x03",
uncompressed: hexToBytes("2103b0bd634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e65ac"),
compressed: hexToBytes("03b0bd634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e65"),
},
{
name: "pay-to-pubkey uncompressed 0x04 even",
uncompressed: hexToBytes("4104192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b40d45264838c0bd96852662ce6a847b197376830160c6d2eb5e6a4c44d33f453eac"),
compressed: hexToBytes("04192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
},
{
name: "pay-to-pubkey uncompressed 0x04 odd",
uncompressed: hexToBytes("410411db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5cb2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3ac"),
compressed: hexToBytes("0511db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c"),
},
{
name: "pay-to-pubkey invalid pubkey",
uncompressed: hexToBytes("3302aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
compressed: hexToBytes("293302aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
},
{
name: "requires 2 size bytes - data push 200 bytes",
uncompressed: append(hexToBytes("4cc8"), bytes.Repeat([]byte{0x00}, 200)...),
// [0x80, 0x50] = 208 as a variable length quantity
// [0x4c, 0xc8] = OP_PUSHDATA1 200
compressed: append(hexToBytes("80504cc8"), bytes.Repeat([]byte{0x00}, 200)...),
},
}
for _, test := range tests {
// Ensure the function to calculate the serialized size without
// actually serializing the value is calculated properly.
gotSize := compressedScriptSize(test.uncompressed)
if gotSize != len(test.compressed) {
t.Errorf("compressedScriptSize (%s): did not get "+
"expected size - got %d, want %d", test.name,
gotSize, len(test.compressed))
continue
}
// Ensure the script compresses to the expected bytes.
gotCompressed := make([]byte, gotSize)
gotBytesWritten := putCompressedScript(gotCompressed,
test.uncompressed)
if !bytes.Equal(gotCompressed, test.compressed) {
t.Errorf("putCompressedScript (%s): did not get "+
"expected bytes - got %x, want %x", test.name,
gotCompressed, test.compressed)
continue
}
if gotBytesWritten != len(test.compressed) {
t.Errorf("putCompressedScript (%s): did not get "+
"expected number of bytes written - got %d, "+
"want %d", test.name, gotBytesWritten,
len(test.compressed))
continue
}
// Ensure the compressed script size is properly decoded from
// the compressed script.
gotDecodedSize := decodeCompressedScriptSize(test.compressed)
if gotDecodedSize != len(test.compressed) {
t.Errorf("decodeCompressedScriptSize (%s): did not get "+
"expected size - got %d, want %d", test.name,
gotDecodedSize, len(test.compressed))
continue
}
// Ensure the script decompresses to the expected bytes.
gotDecompressed := decompressScript(test.compressed)
if !bytes.Equal(gotDecompressed, test.uncompressed) {
t.Errorf("decompressScript (%s): did not get expected "+
"bytes - got %x, want %x", test.name,
gotDecompressed, test.uncompressed)
continue
}
}
}
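// The special script types implied by the vectors above (a summary inferred from
// the test data, not authoritative documentation): prefix 0x00 is followed by a
// 20-byte pubkey hash (pay-to-pubkey-hash), 0x01 by a 20-byte script hash
// (pay-to-script-hash), 0x02/0x03 by a 32-byte x coordinate (compressed-pubkey
// pay-to-pubkey), and 0x04/0x05 by a 32-byte x coordinate whose y parity is
// encoded in the prefix (uncompressed-pubkey pay-to-pubkey). Anything else is
// stored as VLQ(len(script)+6) followed by the raw script, which is where the
// [0x80, 0x50] = 208 prefix for the 202-byte data-push script comes from.
// vlqEncode below is a hypothetical helper sketching that size encoding; it is
// not part of this package.
func vlqEncode(n uint64) []byte {
	// MSB-first base-128: every byte except the last has its high bit set, and
	// each 7-bit shift subtracts one, so 208 encodes to [0x80, 0x50].
	var encoded []byte
	for {
		b := byte(n & 0x7f)
		if len(encoded) > 0 {
			b |= 0x80
		}
		encoded = append([]byte{b}, encoded...)
		if n <= 0x7f {
			break
		}
		n = (n >> 7) - 1
	}
	return encoded
}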
// TestScriptCompressionErrors ensures calling various functions related to
// script compression with incorrect data returns the expected results.
func TestScriptCompressionErrors(t *testing.T) {
t.Parallel()
// A nil script must result in a decoded size of 0.
if gotSize := decodeCompressedScriptSize(nil); gotSize != 0 {
t.Fatalf("decodeCompressedScriptSize with nil script did not "+
"return 0 - got %d", gotSize)
}
// A nil script must result in a nil decompressed script.
if gotScript := decompressScript(nil); gotScript != nil {
t.Fatalf("decompressScript with nil script did not return nil "+
"decompressed script - got %x", gotScript)
}
// A compressed script for a pay-to-pubkey (uncompressed) that results
// in an invalid pubkey must result in a nil decompressed script.
compressedScript := hexToBytes("04012d74d0cb94344c9569c2e77901573d8d" +
"7903c3ebec3a957724895dca52c6b4")
if gotScript := decompressScript(compressedScript); gotScript != nil {
t.Fatalf("decompressScript with compressed pay-to-"+
"uncompressed-pubkey that is invalid did not return "+
"nil decompressed script - got %x", gotScript)
}
}
// TestAmountCompression ensures the domain-specific transaction output amount
// compression and decompression works as expected.
func TestAmountCompression(t *testing.T) {
t.Parallel()
tests := []struct {
name string
uncompressed uint64
compressed uint64
}{
{
name: "0 KAS",
uncompressed: 0,
compressed: 0,
},
{
name: "546 Sompi (current network dust value)",
uncompressed: 546,
compressed: 4911,
},
{
name: "0.00001 KAS (typical transaction fee)",
uncompressed: 1000,
compressed: 4,
},
{
name: "0.0001 KAS (typical transaction fee)",
uncompressed: 10000,
compressed: 5,
},
{
name: "0.12345678 KAS",
uncompressed: 12345678,
compressed: 111111101,
},
{
name: "0.5 KAS",
uncompressed: 50000000,
compressed: 48,
},
{
name: "1 KAS",
uncompressed: 100000000,
compressed: 9,
},
{
name: "5 KAS",
uncompressed: 500000000,
compressed: 49,
},
{
name: "21000000 KAS (max minted coins)",
uncompressed: 2100000000000000,
compressed: 21000000,
},
}
for _, test := range tests {
// Ensure the amount compresses to the expected value.
gotCompressed := compressTxOutAmount(test.uncompressed)
if gotCompressed != test.compressed {
t.Errorf("compressTxOutAmount (%s): did not get "+
"expected value - got %d, want %d", test.name,
gotCompressed, test.compressed)
continue
}
// Ensure the value decompresses to the expected value.
gotDecompressed := decompressTxOutAmount(test.compressed)
if gotDecompressed != test.uncompressed {
t.Errorf("decompressTxOutAmount (%s): did not get "+
"expected value - got %d, want %d", test.name,
gotDecompressed, test.uncompressed)
continue
}
}
}
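// A sketch of the compression scheme the amount vectors above are consistent
// with (illustrative only, not necessarily the exact implementation in this
// package): strip up to nine trailing decimal zeros into an exponent, then fold
// the last remaining digit in. For 546 sompi: no trailing zeros, last digit 6,
// remaining 54, giving 1 + 10*(9*54 + 6 - 1) + 0 = 4911, which matches the
// vector.
func compressAmountSketch(amount uint64) uint64 {
	if amount == 0 {
		return 0
	}
	exponent := uint64(0)
	for amount%10 == 0 && exponent < 9 {
		amount /= 10
		exponent++
	}
	if exponent < 9 {
		lastDigit := amount % 10
		amount /= 10
		return 1 + 10*(9*amount+lastDigit-1) + exponent
	}
	return 10 + 10*(amount-1)
}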
// TestCompressedTxOut ensures the transaction output serialization and
// deserialization works as expected.
func TestCompressedTxOut(t *testing.T) {
t.Parallel()
tests := []struct {
name string
amount uint64
scriptPubKey []byte
compressed []byte
}{
{
name: "pay-to-pubkey-hash dust",
amount: 546,
scriptPubKey: hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac"),
compressed: hexToBytes("a52f001018853670f9f3b0582c5b9ee8ce93764ac32b93"),
},
{
name: "pay-to-pubkey uncompressed 1 KAS",
amount: 100000000,
scriptPubKey: hexToBytes("4104192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b40d45264838c0bd96852662ce6a847b197376830160c6d2eb5e6a4c44d33f453eac"),
compressed: hexToBytes("0904192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
},
}
for _, test := range tests {
// Ensure the function that calculates the serialized size without
// actually serializing the txout returns the correct size.
gotSize := compressedTxOutSize(test.amount, test.scriptPubKey)
if gotSize != len(test.compressed) {
t.Errorf("compressedTxOutSize (%s): did not get "+
"expected size - got %d, want %d", test.name,
gotSize, len(test.compressed))
continue
}
// Ensure the txout compresses to the expected value.
gotCompressed := make([]byte, gotSize)
gotBytesWritten := putCompressedTxOut(gotCompressed,
test.amount, test.scriptPubKey)
if !bytes.Equal(gotCompressed, test.compressed) {
t.Errorf("compressTxOut (%s): did not get expected "+
"bytes - got %x, want %x", test.name,
gotCompressed, test.compressed)
continue
}
if gotBytesWritten != len(test.compressed) {
t.Errorf("compressTxOut (%s): did not get expected "+
"number of bytes written - got %d, want %d",
test.name, gotBytesWritten,
len(test.compressed))
continue
}
// Ensure the serialized bytes are decoded back to the expected
// uncompressed values.
gotAmount, gotScript, gotBytesRead, err := decodeCompressedTxOut(
test.compressed)
if err != nil {
t.Errorf("decodeCompressedTxOut (%s): unexpected "+
"error: %v", test.name, err)
continue
}
if gotAmount != test.amount {
t.Errorf("decodeCompressedTxOut (%s): did not get "+
"expected amount - got %d, want %d",
test.name, gotAmount, test.amount)
continue
}
if !bytes.Equal(gotScript, test.scriptPubKey) {
t.Errorf("decodeCompressedTxOut (%s): did not get "+
"expected script - got %x, want %x",
test.name, gotScript, test.scriptPubKey)
continue
}
if gotBytesRead != len(test.compressed) {
t.Errorf("decodeCompressedTxOut (%s): did not get "+
"expected number of bytes read - got %d, want %d",
test.name, gotBytesRead, len(test.compressed))
continue
}
}
}
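// Byte layout implied by the "pay-to-pubkey-hash dust" vector above (a reading
// of the test data, not separate documentation): a compressed txout is the
// VLQ-encoded compressed amount immediately followed by the compressed script.
// 546 compresses to 4911, whose VLQ encoding is a5 2f; the script compresses to
// the special type 0x00 plus the 20-byte pubkey hash:
//
//	a5 2f  | 00 1018853670f9f3b0582c5b9ee8ce93764ac32b93
//	amount | compressed scriptPubKey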
// TestTxOutCompressionErrors ensures calling various functions related to
// txout compression with incorrect data returns the expected results.
func TestTxOutCompressionErrors(t *testing.T) {
t.Parallel()
// A compressed txout with missing compressed script must error.
compressedTxOut := hexToBytes("00")
_, _, _, err := decodeCompressedTxOut(compressedTxOut)
if !isDeserializeErr(err) {
t.Fatalf("decodeCompressedTxOut with missing compressed script "+
"did not return expected error type - got %T, want "+
"errDeserialize", err)
}
// A compressed txout with short compressed script must error.
compressedTxOut = hexToBytes("0010")
_, _, _, err = decodeCompressedTxOut(compressedTxOut)
if !isDeserializeErr(err) {
t.Fatalf("decodeCompressedTxOut with short compressed script "+
"did not return expected error type - got %T, want "+
"errDeserialize", err)
}
}

File diff suppressed because it is too large


@@ -6,13 +6,17 @@ package blockdag
import (
"fmt"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/pkg/errors"
"math"
"os"
"path/filepath"
"reflect"
"testing"
"time"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
@@ -39,7 +43,7 @@ func TestBlockCount(t *testing.T) {
}
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestBlockCount", Config{
dag, teardownFunc, err := DAGSetup("TestBlockCount", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
@@ -72,8 +76,8 @@ func TestBlockCount(t *testing.T) {
}
}
// TestHaveBlock tests the HaveBlock API to ensure proper functionality.
func TestHaveBlock(t *testing.T) {
// TestIsKnownBlock tests the IsKnownBlock API to ensure proper functionality.
func TestIsKnownBlock(t *testing.T) {
// Load up blocks such that there is a fork in the DAG.
// (genesis block) -> 1 -> 2 -> 3 -> 4
// \-> 3b
@@ -92,7 +96,7 @@ func TestHaveBlock(t *testing.T) {
}
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("haveblock", Config{
dag, teardownFunc, err := DAGSetup("haveblock", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
@@ -164,12 +168,13 @@ func TestHaveBlock(t *testing.T) {
if err == nil {
t.Fatalf("ProcessBlock for block 3D has no error when expected to have an error\n")
}
rErr, ok := err.(RuleError)
var ruleErr RuleError
ok := errors.As(err, &ruleErr)
if !ok {
t.Fatalf("ProcessBlock for block 3D expected a RuleError, but got %v\n", err)
}
if !ok || rErr.ErrorCode != ErrDuplicateTxInputs {
t.Fatalf("ProcessBlock for block 3D expected error code %s but got %s\n", ErrDuplicateTxInputs, rErr.ErrorCode)
if !ok || ruleErr.ErrorCode != ErrDuplicateTxInputs {
t.Fatalf("ProcessBlock for block 3D expected error code %s but got %s\n", ErrDuplicateTxInputs, ruleErr.ErrorCode)
}
if isDelayed {
t.Fatalf("ProcessBlock: block 3D " +
@@ -202,7 +207,7 @@ func TestHaveBlock(t *testing.T) {
{hash: dagconfig.SimnetParams.GenesisHash.String(), want: true},
// Block 3b should be present (as a second child of Block 2).
{hash: "264176fb6072e2362db18f92d3f4b739cff071a206736df7c407c0bf9a1d7fef", want: true},
{hash: "48a752afbe36ad66357f751f8dee4f75665d24e18f644d83a3409b398405b46b", want: true},
// Block 100000 should be present (as an orphan).
{hash: "65b20b048a074793ebfd1196e49341c8d194dabfc6b44a4fd0c607406e122baf", want: true},
@@ -217,9 +222,9 @@ func TestHaveBlock(t *testing.T) {
t.Fatalf("NewHashFromStr: %v", err)
}
result := dag.HaveBlock(hash)
result := dag.IsKnownBlock(hash)
if result != test.want {
t.Fatalf("HaveBlock #%d got %v want %v", i, result,
t.Fatalf("IsKnownBlock #%d got %v want %v", i, result,
test.want)
}
}
@@ -241,9 +246,9 @@ func TestCalcSequenceLock(t *testing.T) {
numBlocksToGenerate := 5
for i := 0; i < numBlocksToGenerate; i++ {
blockTime = blockTime.Add(time.Second)
node = newTestNode(dag, setFromSlice(node), blockVersion, 0, blockTime)
node = newTestNode(dag, blockSetFromSlice(node), blockVersion, 0, blockTime)
dag.index.AddNode(node)
dag.virtual.SetTips(setFromSlice(node))
dag.virtual.SetTips(blockSetFromSlice(node))
}
// Create a utxo view with a fake utxo for the inputs used in the
@@ -509,7 +514,7 @@ func TestCalcPastMedianTime(t *testing.T) {
blockTime := dag.genesis.Header().Timestamp
for i := uint32(1); i < numBlocks; i++ {
blockTime = blockTime.Add(time.Second)
nodes[i] = newTestNode(dag, setFromSlice(nodes[i-1]), blockVersion, 0, blockTime)
nodes[i] = newTestNode(dag, blockSetFromSlice(nodes[i-1]), blockVersion, 0, blockTime)
dag.index.AddNode(nodes[i])
}
@@ -544,29 +549,21 @@ func TestCalcPastMedianTime(t *testing.T) {
}
func TestNew(t *testing.T) {
// Create the root directory for test databases.
if !FileExists(testDbRoot) {
if err := os.MkdirAll(testDbRoot, 0700); err != nil {
t.Fatalf("unable to create test db "+
"root: %s", err)
}
}
tempDir := os.TempDir()
dbPath := filepath.Join(testDbRoot, "TestNew")
dbPath := filepath.Join(tempDir, "TestNew")
_ = os.RemoveAll(dbPath)
db, err := database.Create(testDbType, dbPath, blockDataNet)
err := dbaccess.Open(dbPath)
if err != nil {
t.Fatalf("error creating db: %s", err)
}
defer func() {
db.Close()
dbaccess.Close()
os.RemoveAll(dbPath)
os.RemoveAll(testDbRoot)
}()
config := &Config{
DAGParams: &dagconfig.SimnetParams,
DB: db,
TimeSource: NewMedianTime(),
TimeSource: NewTimeSource(),
SigCache: txscript.NewSigCache(1000),
}
_, err = New(config)
@@ -590,32 +587,24 @@ func TestNew(t *testing.T) {
// occur when the node shuts down improperly while a block is being
// validated.
func TestAcceptingInInit(t *testing.T) {
// Create the root directory for test databases.
if !FileExists(testDbRoot) {
if err := os.MkdirAll(testDbRoot, 0700); err != nil {
t.Fatalf("unable to create test db "+
"root: %s", err)
}
}
tempDir := os.TempDir()
// Create a test database
dbPath := filepath.Join(testDbRoot, "TestAcceptingInInit")
dbPath := filepath.Join(tempDir, "TestAcceptingInInit")
_ = os.RemoveAll(dbPath)
db, err := database.Create(testDbType, dbPath, blockDataNet)
err := dbaccess.Open(dbPath)
if err != nil {
t.Fatalf("error creating db: %s", err)
}
defer func() {
db.Close()
dbaccess.Close()
os.RemoveAll(dbPath)
os.RemoveAll(testDbRoot)
}()
// Create a DAG to add the test block into
config := &Config{
DAGParams: &dagconfig.SimnetParams,
DB: db,
TimeSource: NewMedianTime(),
TimeSource: NewTimeSource(),
SigCache: txscript.NewSigCache(1000),
}
dag, err := New(config)
@@ -633,20 +622,34 @@ func TestAcceptingInInit(t *testing.T) {
// Create a test blockNode with an unvalidated status
genesisNode := dag.index.LookupNode(genesisBlock.Hash())
testNode, _ := dag.newBlockNode(&testBlock.MsgBlock().Header, setFromSlice(genesisNode))
testNode, _ := dag.newBlockNode(&testBlock.MsgBlock().Header, blockSetFromSlice(genesisNode))
testNode.status = statusDataStored
// Manually add the test block to the database
err = db.Update(func(dbTx database.Tx) error {
err := dbStoreBlock(dbTx, testBlock)
if err != nil {
return err
}
return dbStoreBlockNode(dbTx, testNode)
})
dbTx, err := dbaccess.NewTx()
if err != nil {
t.Fatalf("Failed to open database "+
"transaction: %s", err)
}
defer dbTx.RollbackUnlessClosed()
err = storeBlock(dbTx, testBlock)
if err != nil {
t.Fatalf("Failed to store block: %s", err)
}
dbTestNode, err := serializeBlockNode(testNode)
if err != nil {
t.Fatalf("Failed to serialize blockNode: %s", err)
}
key := blockIndexKey(testNode.hash, testNode.blueScore)
err = dbaccess.StoreIndexBlock(dbTx, key, dbTestNode)
if err != nil {
t.Fatalf("Failed to update block index: %s", err)
}
err = dbTx.Commit()
if err != nil {
t.Fatalf("Failed to commit database "+
"transaction: %s", err)
}
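// The explicit dbaccess transaction above replaces the old db.Update closure.
// A minimal sketch of the pattern as it is used throughout this diff (error
// handling elided):
//
//	dbTx, err := dbaccess.NewTx()
//	if err != nil { /* fail */ }
//	defer dbTx.RollbackUnlessClosed()
//	// ... writes such as storeBlock(dbTx, block) or
//	// dbaccess.StoreIndexBlock(dbTx, key, serializedNode) ...
//	if err := dbTx.Commit(); err != nil { /* fail */ }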
// Create a new DAG. We expect this DAG to process the
// test node
@@ -666,7 +669,7 @@ func TestConfirmations(t *testing.T) {
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimnetParams
params.K = 1
dag, teardownFunc, err := DAGSetup("TestConfirmations", Config{
dag, teardownFunc, err := DAGSetup("TestConfirmations", true, Config{
DAGParams: &params,
})
if err != nil {
@@ -688,7 +691,7 @@ func TestConfirmations(t *testing.T) {
chainBlocks := make([]*wire.MsgBlock, 5)
chainBlocks[0] = dag.dagParams.GenesisBlock
for i := uint32(1); i < 5; i++ {
chainBlocks[i] = prepareAndProcessBlock(t, dag, chainBlocks[i-1])
chainBlocks[i] = prepareAndProcessBlockByParentMsgBlocks(t, dag, chainBlocks[i-1])
}
// Make sure that each one of the chain blocks has the expected confirmations number
@@ -707,8 +710,8 @@ func TestConfirmations(t *testing.T) {
branchingBlocks := make([]*wire.MsgBlock, 2)
// Add two branching blocks
branchingBlocks[0] = prepareAndProcessBlock(t, dag, chainBlocks[1])
branchingBlocks[1] = prepareAndProcessBlock(t, dag, branchingBlocks[0])
branchingBlocks[0] = prepareAndProcessBlockByParentMsgBlocks(t, dag, chainBlocks[1])
branchingBlocks[1] = prepareAndProcessBlockByParentMsgBlocks(t, dag, branchingBlocks[0])
// Check that the genesis has a confirmations number == len(chainBlocks)
genesisConfirmations, err = dag.blockConfirmations(dag.genesis)
@@ -738,7 +741,7 @@ func TestConfirmations(t *testing.T) {
// Generate 100 blocks to force the "main" chain to become red
branchingChainTip := branchingBlocks[1]
for i := uint32(0); i < 100; i++ {
nextBranchingChainTip := prepareAndProcessBlock(t, dag, branchingChainTip)
nextBranchingChainTip := prepareAndProcessBlockByParentMsgBlocks(t, dag, branchingChainTip)
branchingChainTip = nextBranchingChainTip
}
@@ -769,7 +772,7 @@ func TestAcceptingBlock(t *testing.T) {
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimnetParams
params.K = 3
dag, teardownFunc, err := DAGSetup("TestAcceptingBlock", Config{
dag, teardownFunc, err := DAGSetup("TestAcceptingBlock", true, Config{
DAGParams: &params,
})
if err != nil {
@@ -797,7 +800,7 @@ func TestAcceptingBlock(t *testing.T) {
chainBlocks := make([]*wire.MsgBlock, numChainBlocks)
chainBlocks[0] = dag.dagParams.GenesisBlock
for i := uint32(1); i <= numChainBlocks-1; i++ {
chainBlocks[i] = prepareAndProcessBlock(t, dag, chainBlocks[i-1])
chainBlocks[i] = prepareAndProcessBlockByParentMsgBlocks(t, dag, chainBlocks[i-1])
}
// Make sure that each chain block (including the genesis) is accepted by its child
@@ -825,7 +828,7 @@ func TestAcceptingBlock(t *testing.T) {
// Generate a chain tip that will be in the anticone of the selected tip and
// in dag.virtual.blues.
branchingChainTip := prepareAndProcessBlock(t, dag, chainBlocks[len(chainBlocks)-3])
branchingChainTip := prepareAndProcessBlockByParentMsgBlocks(t, dag, chainBlocks[len(chainBlocks)-3])
// Make sure that branchingChainTip is not in the selected parent chain
isBranchingChainTipInSelectedParentChain, err := dag.IsInSelectedParentChain(branchingChainTip.BlockHash())
@@ -863,7 +866,7 @@ func TestAcceptingBlock(t *testing.T) {
intersectionBlock := chainBlocks[1]
sideChainTip := intersectionBlock
for i := 0; i < len(chainBlocks)-3; i++ {
sideChainTip = prepareAndProcessBlock(t, dag, sideChainTip)
sideChainTip = prepareAndProcessBlockByParentMsgBlocks(t, dag, sideChainTip)
}
// Make sure that the accepting block of the parent of the branching block didn't change
@@ -879,7 +882,7 @@ func TestAcceptingBlock(t *testing.T) {
// Make sure that a block that is found in the red set of the selected tip
// doesn't have an accepting block
prepareAndProcessBlock(t, dag, sideChainTip, chainBlocks[len(chainBlocks)-1])
prepareAndProcessBlockByParentMsgBlocks(t, dag, sideChainTip, chainBlocks[len(chainBlocks)-1])
sideChainTipAcceptingBlock, err := acceptingBlockByMsgBlock(sideChainTip)
if err != nil {
@@ -899,7 +902,7 @@ func TestFinalizeNodesBelowFinalityPoint(t *testing.T) {
func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
params := dagconfig.SimnetParams
params.K = 1
dag, teardownFunc, err := DAGSetup("testFinalizeNodesBelowFinalityPoint", Config{
dag, teardownFunc, err := DAGSetup("testFinalizeNodesBelowFinalityPoint", true, Config{
DAGParams: &params,
})
if err != nil {
@@ -911,18 +914,25 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
blockTime := dag.genesis.Header().Timestamp
flushUTXODiffStore := func() {
err := dag.db.Update(func(dbTx database.Tx) error {
return dag.utxoDiffStore.flushToDB(dbTx)
})
dbTx, err := dbaccess.NewTx()
if err != nil {
t.Fatalf("Failed to open database transaction: %s", err)
}
defer dbTx.RollbackUnlessClosed()
err = dag.utxoDiffStore.flushToDB(dbTx)
if err != nil {
t.Fatalf("Error flushing utxoDiffStore data to DB: %s", err)
}
dag.utxoDiffStore.clearDirtyEntries()
err = dbTx.Commit()
if err != nil {
t.Fatalf("Failed to commit database transaction: %s", err)
}
}
addNode := func(parent *blockNode) *blockNode {
blockTime = blockTime.Add(time.Second)
node := newTestNode(dag, setFromSlice(parent), blockVersion, 0, blockTime)
node := newTestNode(dag, blockSetFromSlice(parent), blockVersion, 0, blockTime)
node.updateParentsChildren()
dag.index.AddNode(node)
@@ -938,7 +948,7 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
nodes := make([]*blockNode, 0, finalityInterval)
currentNode := dag.genesis
nodes = append(nodes, currentNode)
for i := 0; i <= finalityInterval*2; i++ {
for i := uint64(0); i <= finalityInterval*2; i++ {
currentNode = addNode(currentNode)
nodes = append(nodes, currentNode)
}
@@ -946,6 +956,11 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
// Manually set the last finality point
dag.lastFinalityPoint = nodes[finalityInterval-1]
// Don't unload diffData
currentDifference := maxBlueScoreDifferenceToKeepLoaded
maxBlueScoreDifferenceToKeepLoaded = math.MaxUint64
defer func() { maxBlueScoreDifferenceToKeepLoaded = currentDifference }()
dag.finalizeNodesBelowFinalityPoint(deleteDiffData)
flushUTXODiffStore()
@@ -953,17 +968,27 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
if !node.isFinalized {
t.Errorf("Node with blue score %d expected to be finalized", node.blueScore)
}
if _, ok := dag.utxoDiffStore.loaded[*node.hash]; deleteDiffData && ok {
if _, ok := dag.utxoDiffStore.loaded[node]; deleteDiffData && ok {
t.Errorf("The diff data of node with blue score %d should have been unloaded if deleteDiffData is %T", node.blueScore, deleteDiffData)
} else if !deleteDiffData && !ok {
t.Errorf("The diff data of node with blue score %d shouldn't have been unloaded if deleteDiffData is %T", node.blueScore, deleteDiffData)
}
if diffData, err := dag.utxoDiffStore.diffDataFromDB(node.hash); err != nil {
_, err := dag.utxoDiffStore.diffDataFromDB(node.hash)
exists := !dbaccess.IsNotFoundError(err)
if exists && err != nil {
t.Errorf("diffDataFromDB: %s", err)
} else if deleteDiffData && diffData != nil {
continue
}
if deleteDiffData && exists {
t.Errorf("The diff data of node with blue score %d should have been deleted from the database if deleteDiffData is %T", node.blueScore, deleteDiffData)
} else if !deleteDiffData && diffData == nil {
continue
}
if !deleteDiffData && !exists {
t.Errorf("The diff data of node with blue score %d shouldn't have been deleted from the database if deleteDiffData is %T", node.blueScore, deleteDiffData)
continue
}
}
@@ -971,7 +996,7 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
if node.isFinalized {
t.Errorf("Node with blue score %d wasn't expected to be finalized", node.blueScore)
}
if _, ok := dag.utxoDiffStore.loaded[*node.hash]; !ok {
if _, ok := dag.utxoDiffStore.loaded[node]; !ok {
t.Errorf("The diff data of node with blue score %d shouldn't have been unloaded", node.blueScore)
}
if diffData, err := dag.utxoDiffStore.diffDataFromDB(node.hash); err != nil {
@@ -984,7 +1009,7 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
func TestDAGIndexFailedStatus(t *testing.T) {
params := dagconfig.SimnetParams
dag, teardownFunc, err := DAGSetup("TestDAGIndexFailedStatus", Config{
dag, teardownFunc, err := DAGSetup("TestDAGIndexFailedStatus", true, Config{
DAGParams: &params,
})
if err != nil {
@@ -1008,7 +1033,7 @@ func TestDAGIndexFailedStatus(t *testing.T) {
invalidBlock := util.NewBlock(invalidMsgBlock)
isOrphan, isDelayed, err := dag.ProcessBlock(invalidBlock, BFNoPoWCheck)
if _, ok := err.(RuleError); !ok {
if !errors.As(err, &RuleError{}) {
t.Fatalf("ProcessBlock: expected a rule error but got %s instead", err)
}
if isDelayed {
@@ -1037,7 +1062,8 @@ func TestDAGIndexFailedStatus(t *testing.T) {
invalidBlockChild := util.NewBlock(invalidMsgBlockChild)
isOrphan, isDelayed, err = dag.ProcessBlock(invalidBlockChild, BFNoPoWCheck)
if rErr, ok := err.(RuleError); !ok || rErr.ErrorCode != ErrInvalidAncestorBlock {
var ruleErr RuleError
if ok := errors.As(err, &ruleErr); !ok || ruleErr.ErrorCode != ErrInvalidAncestorBlock {
t.Fatalf("ProcessBlock: expected a rule error but got %s instead", err)
}
if isDelayed {
@@ -1065,7 +1091,7 @@ func TestDAGIndexFailedStatus(t *testing.T) {
invalidBlockGrandChild := util.NewBlock(invalidMsgBlockGrandChild)
isOrphan, isDelayed, err = dag.ProcessBlock(invalidBlockGrandChild, BFNoPoWCheck)
if rErr, ok := err.(RuleError); !ok || rErr.ErrorCode != ErrInvalidAncestorBlock {
if ok := errors.As(err, &ruleErr); !ok || ruleErr.ErrorCode != ErrInvalidAncestorBlock {
t.Fatalf("ProcessBlock: expected a rule error but got %s instead", err)
}
if isDelayed {
@@ -1084,3 +1110,292 @@ func TestDAGIndexFailedStatus(t *testing.T) {
t.Fatalf("invalidBlockGrandChildNode status to have %b flags raised (got %b)", statusInvalidAncestor, invalidBlockGrandChildNode.status)
}
}
func TestIsDAGCurrentMaxDiff(t *testing.T) {
netParams := []*dagconfig.Params{
&dagconfig.MainnetParams,
&dagconfig.TestnetParams,
&dagconfig.DevnetParams,
&dagconfig.RegressionNetParams,
&dagconfig.SimnetParams,
}
for _, params := range netParams {
if params.TargetTimePerBlock*time.Duration(params.FinalityInterval) < isDAGCurrentMaxDiff {
t.Errorf("in %s, a DAG can be considered current even if it's below the finality point", params.Name)
}
}
}
func testProcessBlockRuleError(t *testing.T, dag *BlockDAG, block *wire.MsgBlock, expectedRuleErr error) {
isOrphan, isDelayed, err := dag.ProcessBlock(util.NewBlock(block), BFNoPoWCheck)
err = checkRuleError(err, expectedRuleErr)
if err != nil {
t.Errorf("checkRuleError: %s", err)
}
if isDelayed {
t.Fatalf("ProcessBlock: block " +
"is too far in the future")
}
if isOrphan {
t.Fatalf("ProcessBlock: block got unexpectedly orphaned")
}
}
func TestDoubleSpends(t *testing.T) {
params := dagconfig.SimnetParams
params.BlockCoinbaseMaturity = 0
// Create a new database and dag instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestDoubleSpends", true, Config{
DAGParams: &params,
})
if err != nil {
t.Fatalf("Failed to setup dag instance: %v", err)
}
defer teardownFunc()
fundingBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{params.GenesisHash}, nil)
cbTx := fundingBlock.Transactions[0]
signatureScript, err := txscript.PayToScriptHashSignatureScript(OpTrueScript, nil)
if err != nil {
t.Fatalf("Failed to build signature script: %s", err)
}
txIn := &wire.TxIn{
PreviousOutpoint: wire.Outpoint{TxID: *cbTx.TxID(), Index: 0},
SignatureScript: signatureScript,
Sequence: wire.MaxTxInSequenceNum,
}
txOut := &wire.TxOut{
ScriptPubKey: OpTrueScript,
Value: uint64(1),
}
tx1 := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut})
doubleSpendTxOut := &wire.TxOut{
ScriptPubKey: OpTrueScript,
Value: uint64(2),
}
doubleSpendTx1 := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{doubleSpendTxOut})
blockWithTx1 := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{fundingBlock.BlockHash()}, []*wire.MsgTx{tx1})
// Check that a block will be rejected if it has a transaction that already exists in its past.
anotherBlockWithTx1, err := PrepareBlockForTest(dag, []*daghash.Hash{blockWithTx1.BlockHash()}, nil)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
// Manually add tx1.
anotherBlockWithTx1.Transactions = append(anotherBlockWithTx1.Transactions, tx1)
anotherBlockWithTx1UtilTxs := make([]*util.Tx, len(anotherBlockWithTx1.Transactions))
for i, tx := range anotherBlockWithTx1.Transactions {
anotherBlockWithTx1UtilTxs[i] = util.NewTx(tx)
}
anotherBlockWithTx1.Header.HashMerkleRoot = BuildHashMerkleTreeStore(anotherBlockWithTx1UtilTxs).Root()
testProcessBlockRuleError(t, dag, anotherBlockWithTx1, ruleError(ErrOverwriteTx, ""))
// Check that a block will be rejected if it has a transaction that double spends
// a transaction from its past.
blockWithDoubleSpendForTx1, err := PrepareBlockForTest(dag, []*daghash.Hash{blockWithTx1.BlockHash()}, nil)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
// Manually add a transaction that double spends an output already spent in the block's past.
blockWithDoubleSpendForTx1.Transactions = append(blockWithDoubleSpendForTx1.Transactions, doubleSpendTx1)
blockWithDoubleSpendForTx1UtilTxs := make([]*util.Tx, len(blockWithDoubleSpendForTx1.Transactions))
for i, tx := range blockWithDoubleSpendForTx1.Transactions {
blockWithDoubleSpendForTx1UtilTxs[i] = util.NewTx(tx)
}
blockWithDoubleSpendForTx1.Header.HashMerkleRoot = BuildHashMerkleTreeStore(blockWithDoubleSpendForTx1UtilTxs).Root()
testProcessBlockRuleError(t, dag, blockWithDoubleSpendForTx1, ruleError(ErrMissingTxOut, ""))
blockInAnticoneOfBlockWithTx1, err := PrepareBlockForTest(dag, []*daghash.Hash{fundingBlock.BlockHash()}, []*wire.MsgTx{doubleSpendTx1})
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
// Check that a block will not get rejected if it has a transaction that double spends
// a transaction from its anticone.
testProcessBlockRuleError(t, dag, blockInAnticoneOfBlockWithTx1, nil)
// Check that a block will be rejected if it has two transactions that spend the same UTXO.
blockWithDoubleSpendWithItself, err := PrepareBlockForTest(dag, []*daghash.Hash{fundingBlock.BlockHash()}, nil)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
// Manually add tx1 and doubleSpendTx1.
blockWithDoubleSpendWithItself.Transactions = append(blockWithDoubleSpendWithItself.Transactions, tx1, doubleSpendTx1)
blockWithDoubleSpendWithItselfUtilTxs := make([]*util.Tx, len(blockWithDoubleSpendWithItself.Transactions))
for i, tx := range blockWithDoubleSpendWithItself.Transactions {
blockWithDoubleSpendWithItselfUtilTxs[i] = util.NewTx(tx)
}
blockWithDoubleSpendWithItself.Header.HashMerkleRoot = BuildHashMerkleTreeStore(blockWithDoubleSpendWithItselfUtilTxs).Root()
testProcessBlockRuleError(t, dag, blockWithDoubleSpendWithItself, ruleError(ErrDoubleSpendInSameBlock, ""))
// Check that a block will be rejected if it has the same transaction twice.
blockWithDuplicateTransaction, err := PrepareBlockForTest(dag, []*daghash.Hash{fundingBlock.BlockHash()}, nil)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
// Manually add tx1 twice.
blockWithDuplicateTransaction.Transactions = append(blockWithDuplicateTransaction.Transactions, tx1, tx1)
blockWithDuplicateTransactionUtilTxs := make([]*util.Tx, len(blockWithDuplicateTransaction.Transactions))
for i, tx := range blockWithDuplicateTransaction.Transactions {
blockWithDuplicateTransactionUtilTxs[i] = util.NewTx(tx)
}
blockWithDuplicateTransaction.Header.HashMerkleRoot = BuildHashMerkleTreeStore(blockWithDuplicateTransactionUtilTxs).Root()
testProcessBlockRuleError(t, dag, blockWithDuplicateTransaction, ruleError(ErrDuplicateTx, ""))
}
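// Each hand-crafted block above repeats the same three steps: append the extra
// transactions, rebuild the util.Tx slice, and recompute the hash merkle root.
// A hypothetical helper capturing that sequence (not part of this package):
func appendTxsAndFixMerkleRoot(block *wire.MsgBlock, txs ...*wire.MsgTx) {
	block.Transactions = append(block.Transactions, txs...)
	utilTxs := make([]*util.Tx, len(block.Transactions))
	for i, tx := range block.Transactions {
		utilTxs[i] = util.NewTx(tx)
	}
	block.Header.HashMerkleRoot = BuildHashMerkleTreeStore(utilTxs).Root()
}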
func TestUTXOCommitment(t *testing.T) {
// Create a new database and dag instance to run tests against.
params := dagconfig.DevnetParams
params.BlockCoinbaseMaturity = 0
dag, teardownFunc, err := DAGSetup("TestUTXOCommitment", true, Config{
DAGParams: &params,
})
if err != nil {
t.Fatalf("TestUTXOCommitment: Failed to setup dag instance: %v", err)
}
defer teardownFunc()
resetExtraNonceForTest()
createTx := func(txToSpend *wire.MsgTx) *wire.MsgTx {
scriptPubKey, err := txscript.PayToScriptHashScript(OpTrueScript)
if err != nil {
t.Fatalf("TestUTXOCommitment: failed to build script pub key: %s", err)
}
signatureScript, err := txscript.PayToScriptHashSignatureScript(OpTrueScript, nil)
if err != nil {
t.Fatalf("TestUTXOCommitment: failed to build signature script: %s", err)
}
txIn := &wire.TxIn{
PreviousOutpoint: wire.Outpoint{TxID: *txToSpend.TxID(), Index: 0},
SignatureScript: signatureScript,
Sequence: wire.MaxTxInSequenceNum,
}
txOut := &wire.TxOut{
ScriptPubKey: scriptPubKey,
Value: uint64(1),
}
return wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut})
}
// Build the following DAG:
// G <- A <- B <- D
// <- C <-
genesis := params.GenesisBlock
// Block A:
blockA := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{genesis.BlockHash()}, nil)
// Block B:
blockB := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockA.BlockHash()}, nil)
// Block C:
txSpendBlockACoinbase := createTx(blockA.Transactions[0])
blockCTxs := []*wire.MsgTx{txSpendBlockACoinbase}
blockC := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockA.BlockHash()}, blockCTxs)
// Block D:
txSpendTxInBlockC := createTx(txSpendBlockACoinbase)
blockDTxs := []*wire.MsgTx{txSpendTxInBlockC}
blockD := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockB.BlockHash(), blockC.BlockHash()}, blockDTxs)
// Get the pastUTXO of blockD
blockNodeD := dag.index.LookupNode(blockD.BlockHash())
if blockNodeD == nil {
t.Fatalf("TestUTXOCommitment: blockNode for block D not found")
}
blockDPastUTXO, _, _, _ := dag.pastUTXO(blockNodeD)
blockDPastDiffUTXOSet := blockDPastUTXO.(*DiffUTXOSet)
// Build a Multiset for block D
multiset := secp256k1.NewMultiset()
for outpoint, entry := range blockDPastDiffUTXOSet.base.utxoCollection {
var err error
multiset, err = addUTXOToMultiset(multiset, entry, &outpoint)
if err != nil {
t.Fatalf("TestUTXOCommitment: addUTXOToMultiset unexpectedly failed")
}
}
for outpoint, entry := range blockDPastDiffUTXOSet.UTXODiff.toAdd {
var err error
multiset, err = addUTXOToMultiset(multiset, entry, &outpoint)
if err != nil {
t.Fatalf("TestUTXOCommitment: addUTXOToMultiset unexpectedly failed")
}
}
for outpoint, entry := range blockDPastDiffUTXOSet.UTXODiff.toRemove {
var err error
multiset, err = removeUTXOFromMultiset(multiset, entry, &outpoint)
if err != nil {
t.Fatalf("TestUTXOCommitment: removeUTXOFromMultiset unexpectedly failed")
}
}
// Turn the multiset into a UTXO commitment
utxoCommitment := daghash.Hash(*multiset.Finalize())
// Make sure that the two commitments are equal
if !utxoCommitment.IsEqual(blockNodeD.utxoCommitment) {
t.Fatalf("TestUTXOCommitment: calculated UTXO commitment and "+
"actual UTXO commitment don't match. Want: %s, got: %s",
utxoCommitment, blockNodeD.utxoCommitment)
}
}
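// A compact restatement of what TestUTXOCommitment verifies (the assertions
// above are authoritative; this is only a summary): the commitment stored on a
// block node is computed entirely from that block's past UTXO - the diff set's
// base collection plus its toAdd entries, minus its toRemove entries - so the
// block's own transactions are not part of it. In outline:
//
//	ms := secp256k1.NewMultiset()
//	// addUTXOToMultiset for every base and toAdd entry,
//	// removeUTXOFromMultiset for every toRemove entry, then
//	commitment := daghash.Hash(*ms.Finalize())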
func TestPastUTXOMultiSet(t *testing.T) {
// Create a new database and dag instance to run tests against.
params := dagconfig.SimnetParams
dag, teardownFunc, err := DAGSetup("TestPastUTXOMultiSet", true, Config{
DAGParams: &params,
})
if err != nil {
t.Fatalf("TestPastUTXOMultiSet: Failed to setup dag instance: %v", err)
}
defer teardownFunc()
// Build a short chain
genesis := params.GenesisBlock
blockA := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{genesis.BlockHash()}, nil)
blockB := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockA.BlockHash()}, nil)
blockC := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockB.BlockHash()}, nil)
// Take blockC's selectedParentMultiset
blockNodeC := dag.index.LookupNode(blockC.BlockHash())
if blockNodeC == nil {
t.Fatalf("TestPastUTXOMultiSet: blockNode for blockC not found")
}
blockCSelectedParentMultiset, err := blockNodeC.selectedParentMultiset(dag)
if err != nil {
t.Fatalf("TestPastUTXOMultiSet: selectedParentMultiset unexpectedly failed: %s", err)
}
// Copy the multiset
blockCSelectedParentMultisetCopy := *blockCSelectedParentMultiset
blockCSelectedParentMultiset = &blockCSelectedParentMultisetCopy
// Add a block on top of blockC
PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockC.BlockHash()}, nil)
// Get blockC's selectedParentMultiset again
blockCSelectedParentMultiSetAfterAnotherBlock, err := blockNodeC.selectedParentMultiset(dag)
if err != nil {
t.Fatalf("TestPastUTXOMultiSet: selectedParentMultiset unexpectedly failed: %s", err)
}
// Make sure that blockC's selectedParentMultiset had not changed
if !reflect.DeepEqual(blockCSelectedParentMultiset, blockCSelectedParentMultiSetAfterAnotherBlock) {
t.Fatalf("TestPastUTXOMultiSet: selectedParentMultiset appears to have changed")
}
}

File diff suppressed because it is too large


@@ -6,11 +6,11 @@ package blockdag
import (
"bytes"
"encoding/hex"
"github.com/pkg/errors"
"reflect"
"testing"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/daghash"
)
@@ -36,9 +36,21 @@ func TestErrNotInDAG(t *testing.T) {
}
}
// TestUtxoSerialization ensures serializing and deserializing unspent
// hexToBytes converts the passed hex string into bytes and will panic if there
// is an error. This is only provided for the hard-coded constants so errors in
// the source code can be detected. It will only (and must only) be called with
// hard-coded values.
func hexToBytes(s string) []byte {
b, err := hex.DecodeString(s)
if err != nil {
panic("invalid hex in source file: " + s)
}
return b
}
// TestUTXOSerialization ensures serializing and deserializing unspent
// transaction output entries works as expected.
func TestUtxoSerialization(t *testing.T) {
func TestUTXOSerialization(t *testing.T) {
t.Parallel()
tests := []struct {
@@ -54,7 +66,7 @@ func TestUtxoSerialization(t *testing.T) {
blockBlueScore: 1,
packedFlags: tfCoinbase,
},
serialized: hexToBytes("03320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52"),
serialized: hexToBytes("01000000000000000100f2052a0100000043410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858eeac"),
},
{
name: "blue score 100001, not coinbase",
@@ -64,13 +76,21 @@ func TestUtxoSerialization(t *testing.T) {
blockBlueScore: 100001,
packedFlags: 0,
},
serialized: hexToBytes("8b99420700ee8bd501094a7d5ca318da2506de35e1cb025ddc"),
serialized: hexToBytes("a1860100000000000040420f00000000001976a914ee8bd501094a7d5ca318da2506de35e1cb025ddc88ac"),
},
}
for i, test := range tests {
// Ensure the utxo entry serializes to the expected value.
gotBytes := serializeUTXOEntry(test.entry)
w := &bytes.Buffer{}
err := serializeUTXOEntry(w, test.entry)
if err != nil {
t.Errorf("serializeUTXOEntry #%d (%s) unexpected "+
"error: %v", i, test.name, err)
continue
}
gotBytes := w.Bytes()
if !bytes.Equal(gotBytes, test.serialized) {
t.Errorf("serializeUTXOEntry #%d (%s): mismatched "+
"bytes - got %x, want %x", i, test.name,
@@ -78,8 +98,8 @@ func TestUtxoSerialization(t *testing.T) {
continue
}
// Deserialize to a utxo entry.
utxoEntry, err := deserializeUTXOEntry(test.serialized)
// Deserialize to a utxo entry.
utxoEntry, err := deserializeUTXOEntry(bytes.NewReader(test.serialized))
if err != nil {
t.Errorf("deserializeUTXOEntry #%d (%s) unexpected "+
"error: %v", i, test.name, err)
@@ -124,28 +144,24 @@ func TestUtxoEntryDeserializeErrors(t *testing.T) {
tests := []struct {
name string
serialized []byte
errType error
}{
{
name: "no data after header code",
serialized: hexToBytes("02"),
errType: errDeserialize(""),
},
{
name: "incomplete compressed txout",
serialized: hexToBytes("0232"),
errType: errDeserialize(""),
},
}
for _, test := range tests {
// Ensure the expected error type is returned and the returned
// entry is nil.
entry, err := deserializeUTXOEntry(test.serialized)
if reflect.TypeOf(err) != reflect.TypeOf(test.errType) {
t.Errorf("deserializeUTXOEntry (%s): expected error "+
"type does not match - got %T, want %T",
test.name, err, test.errType)
entry, err := deserializeUTXOEntry(bytes.NewReader(test.serialized))
if err == nil {
t.Errorf("deserializeUTXOEntry (%s): didn't return an error",
test.name)
continue
}
if entry != nil {
@@ -172,7 +188,7 @@ func TestDAGStateSerialization(t *testing.T) {
TipHashes: []*daghash.Hash{newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f")},
LastFinalityPoint: newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"),
},
serialized: []byte("{\"TipHashes\":[[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]}"),
serialized: []byte("{\"TipHashes\":[[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0],\"LocalSubnetworkID\":null}"),
},
{
name: "block 1",
@@ -180,7 +196,7 @@ func TestDAGStateSerialization(t *testing.T) {
TipHashes: []*daghash.Hash{newHashFromStr("00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048")},
LastFinalityPoint: newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"),
},
serialized: []byte("{\"TipHashes\":[[72,96,235,24,191,27,22,32,227,126,148,144,252,138,66,117,20,65,111,215,81,89,171,134,104,142,154,131,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]}"),
serialized: []byte("{\"TipHashes\":[[72,96,235,24,191,27,22,32,227,126,148,144,252,138,66,117,20,65,111,215,81,89,171,134,104,142,154,131,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0],\"LocalSubnetworkID\":null}"),
},
}
@@ -217,50 +233,6 @@ func TestDAGStateSerialization(t *testing.T) {
}
}
// TestDAGStateDeserializeErrors performs negative tests against
// deserializing the DAG state to ensure error paths work as expected.
func TestDAGStateDeserializeErrors(t *testing.T) {
t.Parallel()
tests := []struct {
name string
serialized []byte
errType error
}{
{
name: "nothing serialized",
serialized: hexToBytes(""),
errType: database.Error{ErrorCode: database.ErrCorruption},
},
{
name: "corrupted data",
serialized: []byte("[[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,7"),
errType: database.Error{ErrorCode: database.ErrCorruption},
},
}
for _, test := range tests {
// Ensure the expected error type and code is returned.
_, err := deserializeDAGState(test.serialized)
if reflect.TypeOf(err) != reflect.TypeOf(test.errType) {
t.Errorf("deserializeDAGState (%s): expected "+
"error type does not match - got %T, want %T",
test.name, err, test.errType)
continue
}
if derr, ok := err.(database.Error); ok {
tderr := test.errType.(database.Error)
if derr.ErrorCode != tderr.ErrorCode {
t.Errorf("deserializeDAGState (%s): "+
"wrong error code got: %v, want: %v",
test.name, derr.ErrorCode,
tderr.ErrorCode)
continue
}
}
}
}
// newHashFromStr converts the passed big-endian hex string into a
// daghash.Hash. It only differs from the one available in daghash in that
// it panics in case of an error since it will only (and must only) be


@@ -5,7 +5,7 @@
package blockdag
import (
"math/big"
"github.com/kaspanet/kaspad/util/bigintpool"
"time"
"github.com/kaspanet/kaspad/util"
@@ -30,11 +30,20 @@ func (dag *BlockDAG) requiredDifficulty(bluestParent *blockNode, newBlockTime ti
// averageWindowTarget * ((windowMaxTimeStamp - windowMinTimestamp) / (targetTimePerBlock * windowSize))
// The result uses integer division which means it will be slightly
// rounded down.
newTarget := targetsWindow.averageTarget()
newTarget := bigintpool.Acquire(0)
defer bigintpool.Release(newTarget)
windowTimeStampDifference := bigintpool.Acquire(windowMaxTimeStamp - windowMinTimestamp)
defer bigintpool.Release(windowTimeStampDifference)
targetTimePerBlock := bigintpool.Acquire(dag.targetTimePerBlock)
defer bigintpool.Release(targetTimePerBlock)
difficultyAdjustmentWindowSize := bigintpool.Acquire(int64(dag.difficultyAdjustmentWindowSize))
defer bigintpool.Release(difficultyAdjustmentWindowSize)
targetsWindow.averageTarget(newTarget)
newTarget.
Mul(newTarget, big.NewInt(windowMaxTimeStamp-windowMinTimestamp)).
Div(newTarget, big.NewInt(dag.targetTimePerBlock)).
Div(newTarget, big.NewInt(int64(dag.difficultyAdjustmentWindowSize)))
Mul(newTarget, windowTimeStampDifference).
Div(newTarget, targetTimePerBlock).
Div(newTarget, difficultyAdjustmentWindowSize)
if newTarget.Cmp(dag.dagParams.PowMax) > 0 {
return dag.powMaxBits
}
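// A sketch of the retargeting arithmetic above using plain big.Ints (the real
// code reuses integers from util/bigintpool to avoid allocations; windowSize
// and the timestamps stand in for the values acquired above):
//
//	newTarget := averageWindowTarget                      // from the blue window
//	newTarget.Mul(newTarget, big.NewInt(windowMaxTimeStamp-windowMinTimestamp))
//	newTarget.Div(newTarget, big.NewInt(targetTimePerBlock))
//	newTarget.Div(newTarget, big.NewInt(int64(windowSize)))
//	// clamp to PowMax, then convert back to compact bits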


@@ -82,7 +82,7 @@ func TestDifficulty(t *testing.T) {
params := dagconfig.SimnetParams
params.K = 1
params.DifficultyAdjustmentWindowSize = 264
dag, teardownFunc, err := DAGSetup("TestDifficulty", Config{
dag, teardownFunc, err := DAGSetup("TestDifficulty", true, Config{
DAGParams: &params,
})
if err != nil {
@@ -94,7 +94,8 @@ func TestDifficulty(t *testing.T) {
addNode := func(parents blockSet, blockTime time.Time) *blockNode {
bluestParent := parents.bluest()
if blockTime == zeroTime {
blockTime = time.Unix(bluestParent.timestamp+1, 0)
blockTime = time.Unix(bluestParent.timestamp, 0)
blockTime = blockTime.Add(params.TargetTimePerBlock)
}
block, err := PrepareBlockForTest(dag, parents.hashes(), nil)
if err != nil {
@@ -117,30 +118,32 @@ func TestDifficulty(t *testing.T) {
}
tip := dag.genesis
for i := uint64(0); i < dag.difficultyAdjustmentWindowSize; i++ {
tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
if tip.bits != dag.genesis.bits {
t.Fatalf("As long as the bluest parent's blue score is less then the difficulty adjustment window size, the difficulty should be the same as genesis'")
t.Fatalf("As long as the bluest parent's blue score is less then the difficulty adjustment " +
"window size, the difficulty should be the same as genesis'")
}
}
for i := uint64(0); i < dag.difficultyAdjustmentWindowSize+100; i++ {
tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
if tip.bits != dag.genesis.bits {
t.Fatalf("As long as the block rate remains the same, the difficulty shouldn't change")
}
}
nodeInThePast := addNode(setFromSlice(tip), tip.PastMedianTime(dag))
nodeInThePast := addNode(blockSetFromSlice(tip), tip.PastMedianTime(dag))
if nodeInThePast.bits != tip.bits {
t.Fatalf("The difficulty should only change when nodeInThePast is in the past of a block bluest parent")
}
tip = nodeInThePast
tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
if tip.bits != nodeInThePast.bits {
t.Fatalf("The difficulty should only change when nodeInThePast is in the past of a block bluest parent")
}
tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
if compareBits(tip.bits, nodeInThePast.bits) >= 0 {
t.Fatalf("tip.bits should be smaller than nodeInThePast.bits because nodeInThePast increased the block rate, so the difficulty should increase as well")
t.Fatalf("tip.bits should be smaller than nodeInThePast.bits because nodeInThePast increased the " +
"block rate, so the difficulty should increase as well")
}
expectedBits := uint32(0x207f83df)
if tip.bits != expectedBits {
@@ -149,7 +152,7 @@ func TestDifficulty(t *testing.T) {
// Increase block rate to increase difficulty
for i := uint64(0); i < dag.difficultyAdjustmentWindowSize; i++ {
tip = addNode(setFromSlice(tip), tip.PastMedianTime(dag))
tip = addNode(blockSetFromSlice(tip), tip.PastMedianTime(dag))
if compareBits(tip.bits, tip.parents.bluest().bits) > 0 {
t.Fatalf("Because we're increasing the block rate, the difficulty can't decrease")
}
@@ -159,7 +162,7 @@ func TestDifficulty(t *testing.T) {
lastBits := tip.bits
sameBitsCount := uint64(0)
for sameBitsCount < dag.difficultyAdjustmentWindowSize+1 {
tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
if tip.bits == lastBits {
sameBitsCount++
} else {
@@ -167,37 +170,41 @@ func TestDifficulty(t *testing.T) {
sameBitsCount = 0
}
}
slowNode := addNode(setFromSlice(tip), time.Unix(tip.timestamp+2, 0))
slowBlockTime := time.Unix(tip.timestamp, 0)
slowBlockTime = slowBlockTime.Add(params.TargetTimePerBlock + time.Second)
slowNode := addNode(blockSetFromSlice(tip), slowBlockTime)
if slowNode.bits != tip.bits {
t.Fatalf("The difficulty should only change when slowNode is in the past of a block bluest parent")
}
tip = slowNode
tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
if tip.bits != slowNode.bits {
t.Fatalf("The difficulty should only change when slowNode is in the past of a block bluest parent")
}
tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
if compareBits(tip.bits, slowNode.bits) <= 0 {
t.Fatalf("tip.bits should be smaller than slowNode.bits because slowNode decreased the block rate, so the difficulty should decrease as well")
t.Fatalf("tip.bits should be smaller than slowNode.bits because slowNode decreased the block" +
" rate, so the difficulty should decrease as well")
}
splitNode := addNode(setFromSlice(tip), zeroTime)
splitNode := addNode(blockSetFromSlice(tip), zeroTime)
tip = splitNode
for i := 0; i < 100; i++ {
tip = addNode(setFromSlice(tip), zeroTime)
tip = addNode(blockSetFromSlice(tip), zeroTime)
}
blueTip := tip
redChainTip := splitNode
for i := 0; i < 10; i++ {
redChainTip = addNode(setFromSlice(redChainTip), redChainTip.PastMedianTime(dag))
redChainTip = addNode(blockSetFromSlice(redChainTip), redChainTip.PastMedianTime(dag))
}
tipWithRedPast := addNode(setFromSlice(redChainTip, blueTip), zeroTime)
tipWithoutRedPast := addNode(setFromSlice(blueTip), zeroTime)
tipWithRedPast := addNode(blockSetFromSlice(redChainTip, blueTip), zeroTime)
tipWithoutRedPast := addNode(blockSetFromSlice(blueTip), zeroTime)
if tipWithoutRedPast.bits != tipWithRedPast.bits {
t.Fatalf("tipWithoutRedPast.bits should be the same as tipWithRedPast.bits because red blocks shouldn't affect the difficulty")
t.Fatalf("tipWithoutRedPast.bits should be the same as tipWithRedPast.bits because red blocks" +
" shouldn't affect the difficulty")
}
}


@@ -6,28 +6,10 @@ package blockdag
import (
"fmt"
"github.com/pkg/errors"
)
// DeploymentError identifies an error that indicates a deployment ID was
// specified that does not exist.
type DeploymentError uint32
// Error returns the assertion error as a human-readable string and satisfies
// the error interface.
func (e DeploymentError) Error() string {
return fmt.Sprintf("deployment ID %d does not exist", uint32(e))
}
// AssertError identifies an error that indicates an internal code consistency
// issue and should be treated as a critical and unrecoverable error.
type AssertError string
// Error returns the assertion error as a human-readable string and satisfies
// the error interface.
func (e AssertError) Error() string {
return "assertion failed: " + string(e)
}
// ErrorCode identifies a kind of error.
type ErrorCode int
@@ -121,6 +103,11 @@ const (
// either does not exist or has already been spent.
ErrMissingTxOut
// ErrDoubleSpendInSameBlock indicates a transaction
// that spends an output that was already spent by another
// transaction in the same block.
ErrDoubleSpendInSameBlock
// ErrUnfinalizedTx indicates a transaction has not been finalized.
// A valid block may only contain finalized transactions.
ErrUnfinalizedTx
@@ -217,6 +204,10 @@ const (
// ErrInvalidParentsRelation indicates that one of the parents of a block
// is also an ancestor of another parent
ErrInvalidParentsRelation
// ErrDelayedBlockIsNotAllowed indicates that a block with a delayed timestamp was
// submitted with BFDisallowDelay flag raised.
ErrDelayedBlockIsNotAllowed
)
// Map of ErrorCode values back to their constant names for pretty printing.
@@ -241,6 +232,7 @@ var errorCodeStrings = map[ErrorCode]string{
ErrDuplicateTxInputs: "ErrDuplicateTxInputs",
ErrBadTxInput: "ErrBadTxInput",
ErrMissingTxOut: "ErrMissingTxOut",
ErrDoubleSpendInSameBlock: "ErrDoubleSpendInSameBlock",
ErrUnfinalizedTx: "ErrUnfinalizedTx",
ErrDuplicateTx: "ErrDuplicateTx",
ErrOverwriteTx: "ErrOverwriteTx",
@@ -264,6 +256,7 @@ var errorCodeStrings = map[ErrorCode]string{
ErrInvalidPayload: "ErrInvalidPayload",
ErrInvalidPayloadHash: "ErrInvalidPayloadHash",
ErrInvalidParentsRelation: "ErrInvalidParentsRelation",
ErrDelayedBlockIsNotAllowed: "ErrDelayedBlockIsNotAllowed",
}
// String returns the ErrorCode as a human-readable name.
@@ -289,7 +282,6 @@ func (e RuleError) Error() string {
return e.Description
}
// ruleError creates an RuleError given a set of arguments.
func ruleError(c ErrorCode, desc string) RuleError {
return RuleError{ErrorCode: c, Description: desc}
func ruleError(c ErrorCode, desc string) error {
return errors.WithStack(RuleError{ErrorCode: c, Description: desc})
}
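// Since ruleError now wraps the RuleError with errors.WithStack, callers can no
// longer rely on a bare type assertion; the rest of this diff switches them to
// errors.As. A minimal usage sketch:
//
//	var ruleErr RuleError
//	if errors.As(err, &ruleErr) && ruleErr.ErrorCode == ErrMissingTxOut {
//		// handle the specific rule violation
//	}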


@@ -5,7 +5,6 @@
package blockdag
import (
"fmt"
"testing"
)
@@ -58,6 +57,7 @@ func TestErrorCodeStringer(t *testing.T) {
{ErrInvalidPayload, "ErrInvalidPayload"},
{ErrInvalidPayloadHash, "ErrInvalidPayloadHash"},
{ErrInvalidParentsRelation, "ErrInvalidParentsRelation"},
{ErrDelayedBlockIsNotAllowed, "ErrDelayedBlockIsNotAllowed"},
{0xffff, "Unknown ErrorCode (65535)"},
}
@@ -98,46 +98,3 @@ func TestRuleError(t *testing.T) {
}
}
}
// TestDeploymentError tests the stringized output for the DeploymentError type.
func TestDeploymentError(t *testing.T) {
t.Parallel()
tests := []struct {
in DeploymentError
want string
}{
{
DeploymentError(0),
"deployment ID 0 does not exist",
},
{
DeploymentError(10),
"deployment ID 10 does not exist",
},
{
DeploymentError(123),
"deployment ID 123 does not exist",
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
result := test.in.Error()
if result != test.want {
t.Errorf("Error #%d\n got: %s want: %s", i, result,
test.want)
continue
}
}
}
func TestAssertError(t *testing.T) {
message := "abc 123"
err := AssertError(message)
expectedMessage := fmt.Sprintf("assertion failed: %s", message)
if expectedMessage != err.Error() {
t.Errorf("Unexpected AssertError message. "+
"Got: %s, want: %s", err.Error(), expectedMessage)
}
}


@@ -4,6 +4,7 @@ import (
"fmt"
"github.com/pkg/errors"
"math"
"strings"
"testing"
"github.com/kaspanet/kaspad/util/subnetworkid"
@@ -40,7 +41,7 @@ func TestFinality(t *testing.T) {
params := dagconfig.SimnetParams
params.K = 1
params.FinalityInterval = 100
dag, teardownFunc, err := blockdag.DAGSetup("TestFinality", blockdag.Config{
dag, teardownFunc, err := blockdag.DAGSetup("TestFinality", true, blockdag.Config{
DAGParams: &params,
})
if err != nil {
@@ -73,7 +74,7 @@ func TestFinality(t *testing.T) {
currentNode := genesis
// First we build a chain of params.FinalityInterval blocks for future use
for i := 0; i < params.FinalityInterval; i++ {
for i := uint64(0); i < params.FinalityInterval; i++ {
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@@ -85,7 +86,7 @@ func TestFinality(t *testing.T) {
// Now we build a new chain of 2 * params.FinalityInterval blocks, pointed to genesis, and
// we expect the block with height 1 * params.FinalityInterval to be the last finality point
currentNode = genesis
for i := 0; i < params.FinalityInterval; i++ {
for i := uint64(0); i < params.FinalityInterval; i++ {
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@@ -94,7 +95,7 @@ func TestFinality(t *testing.T) {
expectedFinalityPoint := currentNode
for i := 0; i < params.FinalityInterval; i++ {
for i := uint64(0); i < params.FinalityInterval; i++ {
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@@ -137,10 +138,10 @@ func TestFinality(t *testing.T) {
if err == nil {
t.Errorf("TestFinality: buildNodeToDag expected an error but got <nil>")
}
rErr, ok := err.(blockdag.RuleError)
if ok {
if rErr.ErrorCode != blockdag.ErrFinality {
t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, rErr.ErrorCode)
var ruleErr blockdag.RuleError
if errors.As(err, &ruleErr) {
if ruleErr.ErrorCode != blockdag.ErrFinality {
t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, ruleErr.ErrorCode)
}
} else {
t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", err)
@@ -152,13 +153,12 @@ func TestFinality(t *testing.T) {
if err == nil {
t.Errorf("TestFinality: buildNodeToDag expected an error but got <nil>")
}
rErr, ok = err.(blockdag.RuleError)
if ok {
if rErr.ErrorCode != blockdag.ErrFinality {
t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, rErr.ErrorCode)
if errors.As(err, &ruleErr) {
if ruleErr.ErrorCode != blockdag.ErrFinality {
t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, ruleErr.ErrorCode)
}
} else {
t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", rErr)
t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", ruleErr)
}
}
@@ -167,9 +167,17 @@ func TestFinality(t *testing.T) {
// a getblocks message it should always be able to send
// all the necessary invs.
func TestFinalityInterval(t *testing.T) {
params := dagconfig.SimnetParams
if params.FinalityInterval > wire.MaxInvPerMsg {
t.Errorf("dagconfig.SimnetParams.FinalityInterval should be lower or equal to wire.MaxInvPerMsg")
netParams := []*dagconfig.Params{
&dagconfig.MainnetParams,
&dagconfig.TestnetParams,
&dagconfig.DevnetParams,
&dagconfig.RegressionNetParams,
&dagconfig.SimnetParams,
}
for _, params := range netParams {
if params.FinalityInterval > wire.MaxInvPerMsg {
t.Errorf("FinalityInterval in %s should be lower or equal to wire.MaxInvPerMsg", params.Name)
}
}
}
@@ -178,7 +186,7 @@ func TestSubnetworkRegistry(t *testing.T) {
params := dagconfig.SimnetParams
params.K = 1
params.BlockCoinbaseMaturity = 0
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", blockdag.Config{
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", true, blockdag.Config{
DAGParams: &params,
})
if err != nil {
@@ -191,7 +199,7 @@ func TestSubnetworkRegistry(t *testing.T) {
if err != nil {
t.Fatalf("could not register network: %s", err)
}
limit, err := dag.SubnetworkStore.GasLimit(subnetworkID)
limit, err := blockdag.GasLimit(subnetworkID)
if err != nil {
t.Fatalf("could not retrieve gas limit: %s", err)
}
@@ -204,7 +212,7 @@ func TestChainedTransactions(t *testing.T) {
params := dagconfig.SimnetParams
params.BlockCoinbaseMaturity = 0
// Create a new database and dag instance to run tests against.
dag, teardownFunc, err := blockdag.DAGSetup("TestChainedTransactions", blockdag.Config{
dag, teardownFunc, err := blockdag.DAGSetup("TestChainedTransactions", true, blockdag.Config{
DAGParams: &params,
})
if err != nil {
@@ -260,21 +268,32 @@ func TestChainedTransactions(t *testing.T) {
}
chainedTx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{chainedTxIn}, []*wire.TxOut{chainedTxOut})
block2, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{tx, chainedTx}, true)
block2, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{tx}, false)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
// Manually add a chained transaction to block2
block2.Transactions = append(block2.Transactions, chainedTx)
block2UtilTxs := make([]*util.Tx, len(block2.Transactions))
for i, tx := range block2.Transactions {
block2UtilTxs[i] = util.NewTx(tx)
}
block2.Header.HashMerkleRoot = blockdag.BuildHashMerkleTreeStore(block2UtilTxs).Root()
// Checks that dag.ProcessBlock fails because we don't allow a transaction to spend another transaction from the same block
isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(block2), blockdag.BFNoPoWCheck)
if err == nil {
t.Errorf("ProcessBlock expected an error")
} else if rErr, ok := err.(blockdag.RuleError); ok {
if rErr.ErrorCode != blockdag.ErrMissingTxOut {
t.Errorf("ProcessBlock expected an %v error code but got %v", blockdag.ErrMissingTxOut, rErr.ErrorCode)
}
} else {
t.Errorf("ProcessBlock expected a blockdag.RuleError but got %v", err)
var ruleErr blockdag.RuleError
if ok := errors.As(err, &ruleErr); ok {
if ruleErr.ErrorCode != blockdag.ErrMissingTxOut {
t.Errorf("ProcessBlock expected an %v error code but got %v", blockdag.ErrMissingTxOut, ruleErr.ErrorCode)
}
} else {
t.Errorf("ProcessBlock expected a blockdag.RuleError but got %v", err)
}
}
if isDelayed {
t.Fatalf("ProcessBlock: block2 " +
@@ -321,7 +340,7 @@ func TestOrderInDiffFromAcceptanceData(t *testing.T) {
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimnetParams
params.K = math.MaxUint8
dag, teardownFunc, err := blockdag.DAGSetup("TestOrderInDiffFromAcceptanceData", blockdag.Config{
dag, teardownFunc, err := blockdag.DAGSetup("TestOrderInDiffFromAcceptanceData", true, blockdag.Config{
DAGParams: &params,
})
if err != nil {
@@ -391,7 +410,7 @@ func TestGasLimit(t *testing.T) {
params := dagconfig.SimnetParams
params.K = 1
params.BlockCoinbaseMaturity = 0
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", blockdag.Config{
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", true, blockdag.Config{
DAGParams: &params,
})
if err != nil {
@@ -468,11 +487,11 @@ func TestGasLimit(t *testing.T) {
if err == nil {
t.Fatalf("ProcessBlock expected to have an error in block that exceeds gas limit")
}
rErr, ok := err.(blockdag.RuleError)
if !ok {
var ruleErr blockdag.RuleError
if !errors.As(err, &ruleErr) {
t.Fatalf("ProcessBlock expected a RuleError, but got %v", err)
} else if rErr.ErrorCode != blockdag.ErrInvalidGas {
t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, rErr.ErrorCode)
} else if ruleErr.ErrorCode != blockdag.ErrInvalidGas {
t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, ruleErr.ErrorCode)
}
if isDelayed {
t.Fatalf("ProcessBlock: overLimitBlock " +
@@ -503,15 +522,18 @@ func TestGasLimit(t *testing.T) {
if err == nil {
t.Fatalf("ProcessBlock expected to have an error")
}
rErr, ok = err.(blockdag.RuleError)
if !ok {
if !errors.As(err, &ruleErr) {
t.Fatalf("ProcessBlock expected a RuleError, but got %v", err)
} else if rErr.ErrorCode != blockdag.ErrInvalidGas {
t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, rErr.ErrorCode)
} else if ruleErr.ErrorCode != blockdag.ErrInvalidGas {
t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, ruleErr.ErrorCode)
}
if isOrphan {
t.Fatalf("ProcessBlock: overLimitBlock got unexpectedly orphan")
}
if isDelayed {
t.Fatalf("ProcessBlock: overflowGasBlock " +
"is too far in the future")
}
nonExistentSubnetwork := &subnetworkid.SubnetworkID{123}
nonExistentSubnetworkTxIn := &wire.TxIn{
@@ -535,9 +557,16 @@ func TestGasLimit(t *testing.T) {
isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(nonExistentSubnetworkBlock), blockdag.BFNoPoWCheck)
expectedErrStr := fmt.Sprintf("Error getting gas limit for subnetworkID '%s': subnetwork '%s' not found",
nonExistentSubnetwork, nonExistentSubnetwork)
if err.Error() != expectedErrStr {
if !strings.Contains(err.Error(), expectedErrStr) {
t.Fatalf("ProcessBlock expected error \"%v\" but got \"%v\"", expectedErrStr, err)
}
if isDelayed {
t.Fatalf("ProcessBlock: nonExistentSubnetworkBlock " +
"is too far in the future")
}
if isOrphan {
t.Fatalf("ProcessBlock: nonExistentSubnetworkBlock got unexpectedly orphan")
}
// Here we check that we can process a block with a transaction that doesn't exceed the gas limit
validBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1}, true)
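For orientation, the tests in this file repeat one result-handling pattern around dag.ProcessBlock. This is a minimal sketch only, with the meaning of the two flags summarized as the tests above use them (block stands in for a *util.Block already built for the test):
// Sketch only: how the tests above interpret ProcessBlock's results.
isOrphan, isDelayed, err := dag.ProcessBlock(block, blockdag.BFNoPoWCheck)
if err != nil {
	// a rule violation or processing failure; the tests assert on its RuleError code
}
if isDelayed {
	// the block's timestamp is too far in the future, so it was queued rather than added
}
if isOrphan {
	// at least one parent is unknown, so the block was held as an orphan
}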

View File

@@ -27,7 +27,7 @@ import (
// For further details see the article https://eprint.iacr.org/2018/104.pdf
func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blockNode, err error) {
newNode.selectedParent = newNode.parents.bluest()
newNode.bluesAnticoneSizes[*newNode.selectedParent.hash] = 0
newNode.bluesAnticoneSizes[newNode.selectedParent] = 0
newNode.blues = []*blockNode{newNode.selectedParent}
selectedParentAnticone, err = dag.selectedParentAnticone(newNode)
if err != nil {
@@ -102,9 +102,9 @@ func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blo
if possiblyBlue {
// No k-cluster violation found, we can now set the candidate block as blue
newNode.blues = append(newNode.blues, blueCandidate)
newNode.bluesAnticoneSizes[*blueCandidate.hash] = candidateAnticoneSize
newNode.bluesAnticoneSizes[blueCandidate] = candidateAnticoneSize
for blue, blueAnticoneSize := range candidateBluesAnticoneSizes {
newNode.bluesAnticoneSizes[*blue.hash] = blueAnticoneSize + 1
newNode.bluesAnticoneSizes[blue] = blueAnticoneSize + 1
}
// The maximum length of node.blues can be K+1 because
@@ -126,9 +126,9 @@ func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blo
// we check whether it is in the past of the selected parent.
// If not, we add the node to the resulting anticone-set and queue it for processing.
func (dag *BlockDAG) selectedParentAnticone(node *blockNode) ([]*blockNode, error) {
anticoneSet := newSet()
anticoneSet := newBlockSet()
var anticoneSlice []*blockNode
selectedParentPast := newSet()
selectedParentPast := newBlockSet()
var queue []*blockNode
// Queueing all parents (other than the selected parent itself) for processing.
for parent := range node.parents {
@@ -168,7 +168,7 @@ func (dag *BlockDAG) selectedParentAnticone(node *blockNode) ([]*blockNode, erro
// Expects 'block' to be in the blue set of 'context'
func (dag *BlockDAG) blueAnticoneSize(block, context *blockNode) (dagconfig.KType, error) {
for current := context; current != nil; current = current.selectedParent {
if blueAnticoneSize, ok := current.bluesAnticoneSizes[*block.hash]; ok {
if blueAnticoneSize, ok := current.bluesAnticoneSizes[block]; ok {
return blueAnticoneSize, nil
}
}
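The change in this file re-keys bluesAnticoneSizes by *blockNode pointers instead of dereferenced block hashes. A minimal sketch of what that looks like, with the blockNode struct abbreviated to just the fields referenced in this diff and dagconfig.KType used as the anticone-size type, as elsewhere above:
// Sketch only: abbreviated blockNode with the re-keyed map.
type blockNode struct {
	selectedParent     *blockNode
	blues              []*blockNode
	bluesAnticoneSizes map[*blockNode]dagconfig.KType // was map[daghash.Hash]dagconfig.KType
}

// Lookup by pointer rather than by *block.hash, mirroring blueAnticoneSize above.
func blueAnticoneSizeOf(block, context *blockNode) (dagconfig.KType, bool) {
	for current := context; current != nil; current = current.selectedParent {
		if size, ok := current.bluesAnticoneSizes[block]; ok {
			return size, true
		}
	}
	return 0, false
}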

View File

@@ -3,9 +3,12 @@ package blockdag
import (
"fmt"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"reflect"
"sort"
"strings"
"testing"
)
@@ -30,7 +33,7 @@ func TestGHOSTDAG(t *testing.T) {
}{
{
k: 3,
expectedReds: []string{"F", "G", "H", "I", "O", "P"},
expectedReds: []string{"F", "G", "H", "I", "N", "O"},
dagData: []*testBlockData{
{
parents: []string{"A"},
@@ -163,7 +166,7 @@ func TestGHOSTDAG(t *testing.T) {
id: "T",
expectedScore: 13,
expectedSelectedParent: "S",
expectedBlues: []string{"S", "N", "Q"},
expectedBlues: []string{"S", "P", "Q"},
},
},
},
@@ -173,7 +176,7 @@ func TestGHOSTDAG(t *testing.T) {
func() {
resetExtraNonceForTest()
dagParams.K = test.k
dag, teardownFunc, err := DAGSetup(fmt.Sprintf("TestGHOSTDAG%d", i), Config{
dag, teardownFunc, err := DAGSetup(fmt.Sprintf("TestGHOSTDAG%d", i), true, Config{
DAGParams: &dagParams,
})
if err != nil {
@@ -276,3 +279,94 @@ func checkReds(expectedReds []string, reds map[string]bool) bool {
}
return true
}
func TestBlueAnticoneSizeErrors(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizeErrors", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
t.Fatalf("TestBlueAnticoneSizeErrors: Failed to setup DAG instance: %s", err)
}
defer teardownFunc()
// Prepare a block chain with size K beginning with the genesis block
currentBlockA := dag.dagParams.GenesisBlock
for i := dagconfig.KType(0); i < dag.dagParams.K; i++ {
newBlock := prepareAndProcessBlockByParentMsgBlocks(t, dag, currentBlockA)
currentBlockA = newBlock
}
// Prepare another block chain with size K beginning with the genesis block
currentBlockB := dag.dagParams.GenesisBlock
for i := dagconfig.KType(0); i < dag.dagParams.K; i++ {
newBlock := prepareAndProcessBlockByParentMsgBlocks(t, dag, currentBlockB)
currentBlockB = newBlock
}
// Get references to the tips of the two chains
blockNodeA := dag.index.LookupNode(currentBlockA.BlockHash())
blockNodeB := dag.index.LookupNode(currentBlockB.BlockHash())
// Try getting the blueAnticoneSize between them. Since the two
// blocks are not in the anticones of each other, this should fail.
_, err = dag.blueAnticoneSize(blockNodeA, blockNodeB)
if err == nil {
t.Fatalf("TestBlueAnticoneSizeErrors: blueAnticoneSize unexpectedly succeeded")
}
expectedErrSubstring := "is not in blue set of"
if !strings.Contains(err.Error(), expectedErrSubstring) {
t.Fatalf("TestBlueAnticoneSizeErrors: blueAnticoneSize returned wrong error. "+
"Want: %s, got: %s", expectedErrSubstring, err)
}
}
func TestGHOSTDAGErrors(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestGHOSTDAGErrors", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
t.Fatalf("TestGHOSTDAGErrors: Failed to setup DAG instance: %s", err)
}
defer teardownFunc()
// Add two child blocks to the genesis
block1 := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.dagParams.GenesisBlock)
block2 := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.dagParams.GenesisBlock)
// Add a child block to the previous two blocks
block3 := prepareAndProcessBlockByParentMsgBlocks(t, dag, block1, block2)
// Clear the reachability store
dag.reachabilityStore.loaded = map[daghash.Hash]*reachabilityData{}
dbTx, err := dbaccess.NewTx()
if err != nil {
t.Fatalf("NewTx: %s", err)
}
defer dbTx.RollbackUnlessClosed()
err = dbaccess.ClearReachabilityData(dbTx)
if err != nil {
t.Fatalf("ClearReachabilityData: %s", err)
}
err = dbTx.Commit()
if err != nil {
t.Fatalf("Commit: %s", err)
}
// Try to rerun GHOSTDAG on the last block. GHOSTDAG uses
// reachability data, so we expect it to fail.
blockNode3 := dag.index.LookupNode(block3.BlockHash())
_, err = dag.ghostdag(blockNode3)
if err == nil {
t.Fatalf("TestGHOSTDAGErrors: ghostdag unexpectedly succeeded")
}
expectedErrSubstring := "Couldn't find reachability data"
if !strings.Contains(err.Error(), expectedErrSubstring) {
t.Fatalf("TestGHOSTDAGErrors: ghostdag returned wrong error. "+
"Want: %s, got: %s", expectedErrSubstring, err)
}
}

View File

@@ -1,7 +1,7 @@
indexers
========
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
[![GoDoc](https://godoc.org/github.com/kaspanet/kaspad/blockdag/indexers?status.png)](http://godoc.org/github.com/kaspanet/kaspad/blockdag/indexers)
Package indexers implements optional block chain indexes.

View File

@@ -4,29 +4,16 @@ import (
"bytes"
"encoding/gob"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
)
const (
// acceptanceIndexName is the human-readable name for the index.
acceptanceIndexName = "acceptance index"
)
var (
// acceptanceIndexKey is the key of the acceptance index and the db bucket used
// to house it.
acceptanceIndexKey = []byte("acceptanceidx")
)
// AcceptanceIndex implements a txAcceptanceData by block hash index. That is to say,
// it stores a mapping between a block's hash and the set of transactions that the
// block accepts among its blue blocks.
type AcceptanceIndex struct {
db database.DB
dag *blockdag.BlockDAG
}
@@ -43,122 +30,82 @@ func NewAcceptanceIndex() *AcceptanceIndex {
return &AcceptanceIndex{}
}
// DropAcceptanceIndex drops the acceptance index from the provided database if it
// exists.
func DropAcceptanceIndex(db database.DB, interrupt <-chan struct{}) error {
return dropIndex(db, acceptanceIndexKey, acceptanceIndexName, interrupt)
}
// DropAcceptanceIndex drops the acceptance index.
func DropAcceptanceIndex() error {
dbTx, err := dbaccess.NewTx()
if err != nil {
return err
}
defer dbTx.RollbackUnlessClosed()
// Key returns the database key to use for the index as a byte slice.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Key() []byte {
return acceptanceIndexKey
}
err = dbaccess.DropAcceptanceIndex(dbTx)
if err != nil {
return err
}
// Name returns the human-readable name of the index.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Name() string {
return acceptanceIndexName
}
// Create is invoked when the indexer manager determines the index needs
// to be created for the first time. It creates the bucket for the
// acceptance index.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Create(dbTx database.Tx) error {
_, err := dbTx.Metadata().CreateBucket(acceptanceIndexKey)
return err
return dbTx.Commit()
}
// Init initializes the hash-based acceptance index.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Init(db database.DB, dag *blockdag.BlockDAG) error {
idx.db = db
func (idx *AcceptanceIndex) Init(dag *blockdag.BlockDAG) error {
idx.dag = dag
return nil
return idx.recover()
}
// recover attempts to insert any data that's missing from the
// acceptance index.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) recover() error {
dbTx, err := dbaccess.NewTx()
if err != nil {
return err
}
defer dbTx.RollbackUnlessClosed()
err = idx.dag.ForEachHash(func(hash daghash.Hash) error {
exists, err := dbaccess.HasAcceptanceData(dbTx, &hash)
if err != nil {
return err
}
if exists {
return nil
}
txAcceptanceData, err := idx.dag.TxsAcceptedByBlockHash(&hash)
if err != nil {
return err
}
return idx.ConnectBlock(dbTx, &hash, txAcceptanceData)
})
if err != nil {
return err
}
return dbTx.Commit()
}
// ConnectBlock is invoked by the index manager when a new block has been
// connected to the DAG.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) ConnectBlock(dbTx database.Tx, _ *util.Block, blockID uint64, _ *blockdag.BlockDAG,
txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData, _ blockdag.MultiBlockTxsAcceptanceData) error {
return dbPutTxsAcceptanceData(dbTx, blockID, txsAcceptanceData)
}
// TxsAcceptanceData returns the acceptance data of all the transactions that
// were accepted by the block with hash blockHash.
func (idx *AcceptanceIndex) TxsAcceptanceData(blockHash *daghash.Hash) (blockdag.MultiBlockTxsAcceptanceData, error) {
var txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData
err := idx.db.View(func(dbTx database.Tx) error {
var err error
txsAcceptanceData, err = dbFetchTxsAcceptanceDataByHash(dbTx, blockHash)
return err
})
if err != nil {
return nil, err
}
return txsAcceptanceData, nil
}
// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
for blockID := currentBlockID + 1; blockID <= lastKnownBlockID; blockID++ {
hash, err := blockdag.DBFetchBlockHashByID(dbTx, currentBlockID)
if err != nil {
return err
}
txAcceptanceData, err := idx.dag.TxsAcceptedByBlockHash(hash)
if err != nil {
return err
}
err = idx.ConnectBlock(dbTx, nil, blockID, nil, txAcceptanceData, nil)
if err != nil {
return err
}
}
return nil
}
func dbPutTxsAcceptanceData(dbTx database.Tx, blockID uint64,
func (idx *AcceptanceIndex) ConnectBlock(dbContext *dbaccess.TxContext, blockHash *daghash.Hash,
txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
serializedTxsAcceptanceData, err := serializeMultiBlockTxsAcceptanceData(txsAcceptanceData)
if err != nil {
return err
}
bucket := dbTx.Metadata().Bucket(acceptanceIndexKey)
return bucket.Put(blockdag.SerializeBlockID(blockID), serializedTxsAcceptanceData)
return dbaccess.StoreAcceptanceData(dbContext, blockHash, serializedTxsAcceptanceData)
}
func dbFetchTxsAcceptanceDataByHash(dbTx database.Tx,
hash *daghash.Hash) (blockdag.MultiBlockTxsAcceptanceData, error) {
blockID, err := blockdag.DBFetchBlockIDByHash(dbTx, hash)
// TxsAcceptanceData returns the acceptance data of all the transactions that
// were accepted by the block with hash blockHash.
func (idx *AcceptanceIndex) TxsAcceptanceData(blockHash *daghash.Hash) (blockdag.MultiBlockTxsAcceptanceData, error) {
serializedTxsAcceptanceData, err := dbaccess.FetchAcceptanceData(dbaccess.NoTx(), blockHash)
if err != nil {
return nil, err
}
return dbFetchTxsAcceptanceDataByID(dbTx, blockID)
}
func dbFetchTxsAcceptanceDataByID(dbTx database.Tx,
blockID uint64) (blockdag.MultiBlockTxsAcceptanceData, error) {
serializedBlockID := blockdag.SerializeBlockID(blockID)
bucket := dbTx.Metadata().Bucket(acceptanceIndexKey)
serializedTxsAcceptanceData := bucket.Get(serializedBlockID)
if serializedTxsAcceptanceData == nil {
return nil, errors.Errorf("no entry in the accpetance index for block id %d", blockID)
}
return deserializeMultiBlockTxsAcceptanceData(serializedTxsAcceptanceData)
}
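The dbaccess-based writes in this file all follow one shape: open a managed transaction, write, and commit, with a deferred rollback that becomes a no-op once the commit succeeds. A minimal sketch of that pattern using only calls that appear above; blockHash and serializedData stand in for real values (serializedData would come from serializeMultiBlockTxsAcceptanceData):
// Sketch only: the transaction pattern used by DropAcceptanceIndex, recover
// and ConnectBlock above.
dbTx, err := dbaccess.NewTx()
if err != nil {
	return err
}
defer dbTx.RollbackUnlessClosed()

err = dbaccess.StoreAcceptanceData(dbTx, blockHash, serializedData)
if err != nil {
	return err
}
return dbTx.Commit()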

View File

@@ -3,7 +3,7 @@ package indexers
import (
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
@@ -96,7 +96,7 @@ func TestAcceptanceIndexRecover(t *testing.T) {
}
defer os.RemoveAll(db1Path)
db1, err := database.Create("ffldb", db1Path, params.Net)
err = dbaccess.Open(db1Path)
if err != nil {
t.Fatalf("error creating db: %s", err)
}
@@ -104,10 +104,9 @@ func TestAcceptanceIndexRecover(t *testing.T) {
db1Config := blockdag.Config{
IndexManager: db1IndexManager,
DAGParams: params,
DB: db1,
}
db1DAG, teardown, err := blockdag.DAGSetup("", db1Config)
db1DAG, teardown, err := blockdag.DAGSetup("", false, db1Config)
if err != nil {
t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
}
@@ -130,11 +129,6 @@ func TestAcceptanceIndexRecover(t *testing.T) {
}
}
err = db1.FlushCache()
if err != nil {
t.Fatalf("Error flushing database to disk: %s", err)
}
db2Path, err := ioutil.TempDir("", "TestAcceptanceIndexRecover2")
if err != nil {
t.Fatalf("Error creating temporary directory: %s", err)
@@ -166,17 +160,20 @@ func TestAcceptanceIndexRecover(t *testing.T) {
t.Fatalf("Error fetching acceptance data: %s", err)
}
db2, err := database.Open("ffldb", db2Path, params.Net)
err = dbaccess.Close()
if err != nil {
t.Fatalf("Error opening database: %s", err)
t.Fatalf("Error closing the database: %s", err)
}
err = dbaccess.Open(db2Path)
if err != nil {
t.Fatalf("error creating db: %s", err)
}
db2Config := blockdag.Config{
DAGParams: params,
DB: db2,
}
db2DAG, teardown, err := blockdag.DAGSetup("", db2Config)
db2DAG, teardown, err := blockdag.DAGSetup("", false, db2Config)
if err != nil {
t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
}
@@ -199,10 +196,6 @@ func TestAcceptanceIndexRecover(t *testing.T) {
}
}
err = db2.FlushCache()
if err != nil {
t.Fatalf("Error flushing database to disk: %s", err)
}
db3Path, err := ioutil.TempDir("", "TestAcceptanceIndexRecover3")
if err != nil {
t.Fatalf("Error creating temporary directory: %s", err)
@@ -213,9 +206,13 @@ func TestAcceptanceIndexRecover(t *testing.T) {
t.Fatalf("copyDirectory: %s", err)
}
db3, err := database.Open("ffldb", db3Path, params.Net)
err = dbaccess.Close()
if err != nil {
t.Fatalf("Error opening database: %s", err)
t.Fatalf("Error closing the database: %s", err)
}
err = dbaccess.Open(db3Path)
if err != nil {
t.Fatalf("error creating db: %s", err)
}
db3AcceptanceIndex := NewAcceptanceIndex()
@@ -223,10 +220,9 @@ func TestAcceptanceIndexRecover(t *testing.T) {
db3Config := blockdag.Config{
IndexManager: db3IndexManager,
DAGParams: params,
DB: db3,
}
_, teardown, err = blockdag.DAGSetup("", db3Config)
_, teardown, err = blockdag.DAGSetup("", false, db3Config)
if err != nil {
t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
}
@@ -298,16 +294,16 @@ func copyDirectory(scrDir, dest string) error {
// This function is copied and modified from this stackoverflow answer: https://stackoverflow.com/a/56314145/2413761
func copyFile(srcFile, dstFile string) error {
out, err := os.Create(dstFile)
defer out.Close()
if err != nil {
return err
}
defer out.Close()
in, err := os.Open(srcFile)
defer in.Close()
if err != nil {
return err
}
defer in.Close()
_, err = io.Copy(out, in)
if err != nil {
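The one-line move in copyFile above fixes the ordering of the deferred Close calls: deferring before the error check registers a Close on a *os.File that may be nil (harmless, since os.File.Close tolerates nil, but misleading), and it implies the file was opened when it may not have been. A minimal sketch of the corrected shape, mirroring the code in this diff:
// Sketch only: check the error before deferring Close, so Close is only
// registered for files that were actually opened.
out, err := os.Create(dstFile)
if err != nil {
	return err
}
defer out.Close()

in, err := os.Open(srcFile)
if err != nil {
	return err
}
defer in.Close()

_, err = io.Copy(out, in)
return err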

View File

@@ -1,902 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package indexers
import (
"fmt"
"github.com/pkg/errors"
"sync"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
const (
// addrIndexName is the human-readable name for the index.
addrIndexName = "address index"
// level0MaxEntries is the maximum number of transactions that are
// stored in level 0 of an address index entry. Subsequent levels store
// 2^n * level0MaxEntries entries, or in words, double the maximum of
// the previous level.
level0MaxEntries = 8
// addrKeySize is the number of bytes an address key consumes in the
// index. It consists of 1 byte address type + 20 bytes hash160.
addrKeySize = 1 + 20
// levelKeySize is the number of bytes a level key in the address index
// consumes. It consists of the address key + 1 byte for the level.
levelKeySize = addrKeySize + 1
// levelOffset is the offset in the level key which identifies the level.
levelOffset = levelKeySize - 1
// addrKeyTypePubKeyHash is the address type in an address key which
// represents both a pay-to-pubkey-hash and a pay-to-pubkey address.
// This is done because both are identical for the purposes of the
// address index.
addrKeyTypePubKeyHash = 0
// addrKeyTypeScriptHash is the address type in an address key which
// represents a pay-to-script-hash address. This is necessary because
// the hash of a pubkey address might be the same as that of a script
// hash.
addrKeyTypeScriptHash = 1
// Size of a transaction entry. It consists of 8 bytes block id + 4
// bytes offset + 4 bytes length.
txEntrySize = 8 + 4 + 4
)
var (
// addrIndexKey is the key of the address index and the db bucket used
// to house it.
addrIndexKey = []byte("txbyaddridx")
// errUnsupportedAddressType is an error that is used to signal an
// unsupported address type has been used.
errUnsupportedAddressType = errors.New("address type is not supported " +
"by the address index")
)
// -----------------------------------------------------------------------------
// The address index maps addresses referenced in the blockDAG to a list of
// all the transactions involving that address. Transactions are stored
// according to their order of appearance in the blockDAG. That is to say
// first by block height and then by offset inside the block. It is also
// important to note that this implementation requires the transaction index
// since it is needed in order to catch up old blocks, as their spent
// outputs will already be pruned from the utxo set.
//
// The approach used to store the index is similar to a log-structured merge
// tree (LSM tree) and is thus similar to how leveldb works internally.
//
// Every address consists of one or more entries identified by a level starting
// from 0 where each level holds a maximum number of entries such that each
// subsequent level holds double the maximum of the previous one. In equation
// form, the number of entries each level holds is 2^n * level0MaxEntries.
//
// New transactions are appended to level 0 until it becomes full at which point
// the entire level 0 entry is appended to the level 1 entry and level 0 is
// cleared. This process continues until level 1 becomes full at which point it
// will be appended to level 2 and cleared and so on.
//
// The result of this is the lower levels contain newer transactions and the
// transactions within each level are ordered from oldest to newest.
//
// The intent of this approach is to provide a balance between space efficiency
// and indexing cost. Storing one entry per transaction would have the lowest
// indexing cost, but would waste a lot of space because the same address hash
// would be duplicated for every transaction key. On the other hand, storing a
// single entry with all transactions would be the most space efficient, but
// would cause indexing cost to grow quadratically with the number of
// transactions involving the same address. The approach used here provides
// logarithmic insertion and retrieval.
//
// The serialized key format is:
//
// <addr type><addr hash><level>
//
// Field Type Size
// addr type uint8 1 byte
// addr hash hash160 20 bytes
// level uint8 1 byte
// -----
// Total: 22 bytes
//
// The serialized value format is:
//
// [<block id><start offset><tx length>,...]
//
// Field Type Size
// block id uint64 8 bytes
// start offset uint32 4 bytes
// tx length uint32 4 bytes
// -----
// Total: 16 bytes per indexed tx
// -----------------------------------------------------------------------------
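To make the numbers above concrete: with level0MaxEntries = 8, level n holds at most 2^n * 8 entries, so levels 0 through 3 hold 8, 16, 32 and 64 entries, and at least 1 + 8 + 16 = 25 entries must have been indexed before level 2 can hold data. A tiny illustrative sketch of that capacity rule (maxEntriesForLevel and minEntriesToReachLevel later in this file implement the same formulas):
// Sketch only: level capacities under the scheme described above.
const level0Max = 8 // matches level0MaxEntries

func capacityOfLevel(level uint8) int {
	return level0Max << level // 2^level * level0Max
}
// capacityOfLevel(0) == 8, capacityOfLevel(1) == 16, capacityOfLevel(2) == 32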
// fetchBlockHashFunc defines a callback function to use in order to convert a
// serialized block ID to an associated block hash.
type fetchBlockHashFunc func(serializedID []byte) (*daghash.Hash, error)
// serializeAddrIndexEntry serializes the provided block id and transaction
// location according to the format described in detail above.
func serializeAddrIndexEntry(blockID uint64, txLoc wire.TxLoc) []byte {
// Serialize the entry.
serialized := make([]byte, 16)
byteOrder.PutUint64(serialized, blockID)
byteOrder.PutUint32(serialized[8:], uint32(txLoc.TxStart))
byteOrder.PutUint32(serialized[12:], uint32(txLoc.TxLen))
return serialized
}
// deserializeAddrIndexEntry decodes the passed serialized byte slice into the
// provided region struct according to the format described in detail above and
// uses the passed block hash fetching function in order to convert the block ID
// to the associated block hash.
func deserializeAddrIndexEntry(serialized []byte, region *database.BlockRegion, fetchBlockHash fetchBlockHashFunc) error {
// Ensure there are enough bytes to decode.
if len(serialized) < txEntrySize {
return errDeserialize("unexpected end of data")
}
hash, err := fetchBlockHash(serialized[0:8])
if err != nil {
return err
}
region.Hash = hash
region.Offset = byteOrder.Uint32(serialized[8:12])
region.Len = byteOrder.Uint32(serialized[12:16])
return nil
}
// keyForLevel returns the key for a specific address and level in the address
// index entry.
func keyForLevel(addrKey [addrKeySize]byte, level uint8) [levelKeySize]byte {
var key [levelKeySize]byte
copy(key[:], addrKey[:])
key[levelOffset] = level
return key
}
// dbPutAddrIndexEntry updates the address index to include the provided entry
// according to the level-based scheme described in detail above.
func dbPutAddrIndexEntry(bucket internalBucket, addrKey [addrKeySize]byte, blockID uint64, txLoc wire.TxLoc) error {
// Start with level 0 and its initial max number of entries.
curLevel := uint8(0)
maxLevelBytes := level0MaxEntries * txEntrySize
// Simply append the new entry to level 0 and return now when it will
// fit. This is the most common path.
newData := serializeAddrIndexEntry(blockID, txLoc)
level0Key := keyForLevel(addrKey, 0)
level0Data := bucket.Get(level0Key[:])
if len(level0Data)+len(newData) <= maxLevelBytes {
mergedData := newData
if len(level0Data) > 0 {
mergedData = make([]byte, len(level0Data)+len(newData))
copy(mergedData, level0Data)
copy(mergedData[len(level0Data):], newData)
}
return bucket.Put(level0Key[:], mergedData)
}
// At this point, level 0 is full, so merge each level into higher
// levels as many times as needed to free up level 0.
prevLevelData := level0Data
for {
// Each new level holds twice as much as the previous one.
curLevel++
maxLevelBytes *= 2
// Move to the next level as long as the current level is full.
curLevelKey := keyForLevel(addrKey, curLevel)
curLevelData := bucket.Get(curLevelKey[:])
if len(curLevelData) == maxLevelBytes {
prevLevelData = curLevelData
continue
}
// The current level has room for the data in the previous one,
// so merge the data from previous level into it.
mergedData := prevLevelData
if len(curLevelData) > 0 {
mergedData = make([]byte, len(curLevelData)+
len(prevLevelData))
copy(mergedData, curLevelData)
copy(mergedData[len(curLevelData):], prevLevelData)
}
err := bucket.Put(curLevelKey[:], mergedData)
if err != nil {
return err
}
// Move all of the levels before the previous one up a level.
for mergeLevel := curLevel - 1; mergeLevel > 0; mergeLevel-- {
mergeLevelKey := keyForLevel(addrKey, mergeLevel)
prevLevelKey := keyForLevel(addrKey, mergeLevel-1)
prevData := bucket.Get(prevLevelKey[:])
err := bucket.Put(mergeLevelKey[:], prevData)
if err != nil {
return err
}
}
break
}
// Finally, insert the new entry into level 0 now that it is empty.
return bucket.Put(level0Key[:], newData)
}
// dbFetchAddrIndexEntries returns block regions for transactions referenced by
// the given address key and the number of entries skipped, since that number
// could be smaller when there are fewer total entries than the requested
// number of entries to skip.
func dbFetchAddrIndexEntries(bucket internalBucket, addrKey [addrKeySize]byte, numToSkip, numRequested uint32, reverse bool, fetchBlockHash fetchBlockHashFunc) ([]database.BlockRegion, uint32, error) {
// When the reverse flag is not set, all levels need to be fetched
// because numToSkip and numRequested are counted from the oldest
// transactions (highest level) and thus the total count is needed.
// However, when the reverse flag is set, only enough records to satisfy
// the requested amount are needed.
var level uint8
var serialized []byte
for !reverse || len(serialized) < int(numToSkip+numRequested)*txEntrySize {
curLevelKey := keyForLevel(addrKey, level)
levelData := bucket.Get(curLevelKey[:])
if levelData == nil {
// Stop when there are no more levels.
break
}
// Higher levels contain older transactions, so prepend them.
prepended := make([]byte, len(serialized)+len(levelData))
copy(prepended, levelData)
copy(prepended[len(levelData):], serialized)
serialized = prepended
level++
}
// When the requested number of entries to skip is larger than the
// number available, skip them all and return now with the actual number
// skipped.
numEntries := uint32(len(serialized) / txEntrySize)
if numToSkip >= numEntries {
return nil, numEntries, nil
}
// Nothing more to do when there are no requested entries.
if numRequested == 0 {
return nil, numToSkip, nil
}
// Limit the number to load based on the number of available entries,
// the number to skip, and the number requested.
numToLoad := numEntries - numToSkip
if numToLoad > numRequested {
numToLoad = numRequested
}
// Start the offset after all skipped entries and load the calculated
// number.
results := make([]database.BlockRegion, numToLoad)
for i := uint32(0); i < numToLoad; i++ {
// Calculate the read offset according to the reverse flag.
var offset uint32
if reverse {
offset = (numEntries - numToSkip - i - 1) * txEntrySize
} else {
offset = (numToSkip + i) * txEntrySize
}
// Deserialize and populate the result.
err := deserializeAddrIndexEntry(serialized[offset:],
&results[i], fetchBlockHash)
if err != nil {
// Ensure any deserialization errors are returned as
// database corruption errors.
if isDeserializeErr(err) {
err = database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("failed to "+
"deserialized address index "+
"for key %x: %s", addrKey, err),
}
}
return nil, 0, err
}
}
return results, numToSkip, nil
}
// minEntriesToReachLevel returns the minimum number of entries that are
// required to reach the given address index level.
func minEntriesToReachLevel(level uint8) int {
maxEntriesForLevel := level0MaxEntries
minRequired := 1
for l := uint8(1); l <= level; l++ {
minRequired += maxEntriesForLevel
maxEntriesForLevel *= 2
}
return minRequired
}
// maxEntriesForLevel returns the maximum number of entries allowed for the
// given address index level.
func maxEntriesForLevel(level uint8) int {
numEntries := level0MaxEntries
for l := level; l > 0; l-- {
numEntries *= 2
}
return numEntries
}
// dbRemoveAddrIndexEntries removes the specified number of entries from
// the address index for the provided key. An assertion error will be returned
// if the count exceeds the total number of entries in the index.
func dbRemoveAddrIndexEntries(bucket internalBucket, addrKey [addrKeySize]byte, count int) error {
// Nothing to do if no entries are being deleted.
if count <= 0 {
return nil
}
// Make use of a local map to track pending updates and define a closure
// to apply it to the database. This is done in order to reduce the
// number of database reads and because there is more than one exit
// path that needs to apply the updates.
pendingUpdates := make(map[uint8][]byte)
applyPending := func() error {
for level, data := range pendingUpdates {
curLevelKey := keyForLevel(addrKey, level)
if len(data) == 0 {
err := bucket.Delete(curLevelKey[:])
if err != nil {
return err
}
continue
}
err := bucket.Put(curLevelKey[:], data)
if err != nil {
return err
}
}
return nil
}
// Loop forwards through the levels while removing entries until the
// specified number has been removed. This will potentially result in
// entirely empty lower levels which will be backfilled below.
var highestLoadedLevel uint8
numRemaining := count
for level := uint8(0); numRemaining > 0; level++ {
// Load the data for the level from the database.
curLevelKey := keyForLevel(addrKey, level)
curLevelData := bucket.Get(curLevelKey[:])
if len(curLevelData) == 0 && numRemaining > 0 {
return AssertError(fmt.Sprintf("dbRemoveAddrIndexEntries "+
"not enough entries for address key %x to "+
"delete %d entries", addrKey, count))
}
pendingUpdates[level] = curLevelData
highestLoadedLevel = level
// Delete the entire level as needed.
numEntries := len(curLevelData) / txEntrySize
if numRemaining >= numEntries {
pendingUpdates[level] = nil
numRemaining -= numEntries
continue
}
// Remove remaining entries to delete from the level.
offsetEnd := len(curLevelData) - (numRemaining * txEntrySize)
pendingUpdates[level] = curLevelData[:offsetEnd]
break
}
// When not all elements in level 0 were removed, there is nothing left
// to do other than updating the database.
if len(pendingUpdates[0]) != 0 {
return applyPending()
}
// At this point there are one or more empty levels before the current
// level which need to be backfilled and the current level might have
// had some entries deleted from it as well. Since all levels after
// level 0 are required to either be empty, half full, or completely
// full, the current level must be adjusted accordingly by backfilling
// each previous level in a way which satisfies the requirements. Any
// entries that are left are assigned to level 0 after the loop as they
// are guaranteed to fit by the logic in the loop. In other words, this
// effectively squashes all remaining entries in the current level into
// the lowest possible levels while following the level rules.
//
// Note that the level after the current level might also have entries
// and gaps are not allowed, so this also keeps track of the lowest
// empty level so the code below knows how far to backfill in case it is
// required.
lowestEmptyLevel := uint8(255)
curLevelData := pendingUpdates[highestLoadedLevel]
curLevelMaxEntries := maxEntriesForLevel(highestLoadedLevel)
for level := highestLoadedLevel; level > 0; level-- {
// When there are not enough entries left in the current level
// for the number that would be required to reach it, clear the
// current level, which effectively moves them all up to the
// previous level on the next iteration. Otherwise, there are
// sufficient entries, so update the current level to
// contain as many entries as possible while still leaving
// enough remaining entries required to reach the level.
numEntries := len(curLevelData) / txEntrySize
prevLevelMaxEntries := curLevelMaxEntries / 2
minPrevRequired := minEntriesToReachLevel(level - 1)
if numEntries < prevLevelMaxEntries+minPrevRequired {
lowestEmptyLevel = level
pendingUpdates[level] = nil
} else {
// This level can only be completely full or half full,
// so choose the appropriate offset to ensure enough
// entries remain to reach the level.
var offset int
if numEntries-curLevelMaxEntries >= minPrevRequired {
offset = curLevelMaxEntries * txEntrySize
} else {
offset = prevLevelMaxEntries * txEntrySize
}
pendingUpdates[level] = curLevelData[:offset]
curLevelData = curLevelData[offset:]
}
curLevelMaxEntries = prevLevelMaxEntries
}
pendingUpdates[0] = curLevelData
if len(curLevelData) == 0 {
lowestEmptyLevel = 0
}
// When the highest loaded level is empty, it's possible the level after
// it still has data and thus that data needs to be backfilled as well.
for len(pendingUpdates[highestLoadedLevel]) == 0 {
// When the next level is empty too, there is no data left to
// continue backfilling, so there is nothing left to do.
// Otherwise, populate the pending updates map with the newly
// loaded data and update the highest loaded level accordingly.
level := highestLoadedLevel + 1
curLevelKey := keyForLevel(addrKey, level)
levelData := bucket.Get(curLevelKey[:])
if len(levelData) == 0 {
break
}
pendingUpdates[level] = levelData
highestLoadedLevel = level
// At this point the highest level is not empty, but it might
// be half full. When that is the case, move it up a level to
// simplify the code below which backfills all lower levels that
// are still empty. This also means the current level will be
// empty, so the loop will perform another iteration to
// potentially backfill this level with data from the next one.
curLevelMaxEntries := maxEntriesForLevel(level)
if len(levelData)/txEntrySize != curLevelMaxEntries {
pendingUpdates[level] = nil
pendingUpdates[level-1] = levelData
level--
curLevelMaxEntries /= 2
}
// Backfill all lower levels that are still empty by iteratively
// halving the data until the lowest empty level is filled.
for level > lowestEmptyLevel {
offset := (curLevelMaxEntries / 2) * txEntrySize
pendingUpdates[level] = levelData[:offset]
levelData = levelData[offset:]
pendingUpdates[level-1] = levelData
level--
curLevelMaxEntries /= 2
}
// The lowest possible empty level is now the highest loaded
// level.
lowestEmptyLevel = highestLoadedLevel
}
// Apply the pending updates.
return applyPending()
}
// addrToKey converts known address types to an addrindex key. An error is
// returned for unsupported types.
func addrToKey(addr util.Address) ([addrKeySize]byte, error) {
switch addr := addr.(type) {
case *util.AddressPubKeyHash:
var result [addrKeySize]byte
result[0] = addrKeyTypePubKeyHash
copy(result[1:], addr.Hash160()[:])
return result, nil
case *util.AddressScriptHash:
var result [addrKeySize]byte
result[0] = addrKeyTypeScriptHash
copy(result[1:], addr.Hash160()[:])
return result, nil
}
return [addrKeySize]byte{}, errUnsupportedAddressType
}
// AddrIndex implements a transaction by address index. That is to say, it
// supports querying all transactions that reference a given address because
// they are either crediting or debiting the address. The returned transactions
// are ordered according to their order of appearance in the blockDAG. In
// other words, first by block height and then by offset inside the block.
//
// In addition, support is provided for a memory-only index of unconfirmed
// transactions such as those which are kept in the memory pool before inclusion
// in a block.
type AddrIndex struct {
// The following fields are set when the instance is created and can't
// be changed afterwards, so there is no need to protect them with a
// separate mutex.
db database.DB
dagParams *dagconfig.Params
// The following fields are used to quickly link transactions and
// addresses that have not been included into a block yet when an
// address index is being maintained. They are protected by the
// unconfirmedLock field.
//
// The txnsByAddr field is used to keep an index of all transactions
// which either create an output to a given address or spend from a
// previous output to it keyed by the address.
//
// The addrsByTx field is essentially the reverse and is used to
// keep an index of all addresses which a given transaction involves.
// This allows fairly efficient updates when transactions are removed
// once they are included into a block.
unconfirmedLock sync.RWMutex
txnsByAddr map[[addrKeySize]byte]map[daghash.TxID]*util.Tx
addrsByTx map[daghash.TxID]map[[addrKeySize]byte]struct{}
}
// Ensure the AddrIndex type implements the Indexer interface.
var _ Indexer = (*AddrIndex)(nil)
// Ensure the AddrIndex type implements the NeedsInputser interface.
var _ NeedsInputser = (*AddrIndex)(nil)
// NeedsInputs signals that the index requires the referenced inputs in order
// to properly create the index.
//
// This implements the NeedsInputser interface.
func (idx *AddrIndex) NeedsInputs() bool {
return true
}
// Init is only provided to satisfy the Indexer interface as there is nothing to
// initialize for this index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Init(db database.DB, _ *blockdag.BlockDAG) error {
idx.db = db
return nil
}
// Key returns the database key to use for the index as a byte slice.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Key() []byte {
return addrIndexKey
}
// Name returns the human-readable name of the index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Name() string {
return addrIndexName
}
// Create is invoked when the indexer manager determines the index needs
// to be created for the first time. It creates the bucket for the address
// index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Create(dbTx database.Tx) error {
_, err := dbTx.Metadata().CreateBucket(addrIndexKey)
return err
}
// writeIndexData represents the address index data to be written for one block.
// It consists of the address mapped to an ordered list of the transactions
// that involve the address in the block. It is ordered so the transactions can be
// stored in the order they appear in the block.
type writeIndexData map[[addrKeySize]byte][]int
// indexScriptPubKey extracts all standard addresses from the passed public key
// script and maps each of them to the associated transaction using the passed
// map.
func (idx *AddrIndex) indexScriptPubKey(data writeIndexData, scriptPubKey []byte, txIdx int) {
// Nothing to index if the script is non-standard or otherwise doesn't
// contain any addresses.
_, addr, err := txscript.ExtractScriptPubKeyAddress(scriptPubKey,
idx.dagParams)
if err != nil || addr == nil {
return
}
addrKey, err := addrToKey(addr)
if err != nil {
// Ignore unsupported address types.
return
}
// Avoid inserting the transaction more than once. Since the
// transactions are indexed serially any duplicates will be
// indexed in a row, so checking the most recent entry for the
// address is enough to detect duplicates.
indexedTxns := data[addrKey]
numTxns := len(indexedTxns)
if numTxns > 0 && indexedTxns[numTxns-1] == txIdx {
return
}
indexedTxns = append(indexedTxns, txIdx)
data[addrKey] = indexedTxns
}
// indexBlock extracts all of the standard addresses from all of the transactions
// in the passed block and maps each of them to the associated transaction using
// the passed map.
func (idx *AddrIndex) indexBlock(data writeIndexData, block *util.Block, dag *blockdag.BlockDAG) {
for txIdx, tx := range block.Transactions() {
// Coinbases do not reference any inputs. Since the block is
// required to have already gone through full validation, it has
// already been proven that the first transaction in the block is
// a coinbase.
if txIdx > util.CoinbaseTransactionIndex {
for _, txIn := range tx.MsgTx().TxIn {
// The UTXO should always have the input since
// the index contract requires it, however, be
// safe and simply ignore any missing entries.
entry, ok := dag.GetUTXOEntry(txIn.PreviousOutpoint)
if !ok {
continue
}
idx.indexScriptPubKey(data, entry.ScriptPubKey(), txIdx)
}
}
for _, txOut := range tx.MsgTx().TxOut {
idx.indexScriptPubKey(data, txOut.ScriptPubKey, txIdx)
}
}
}
// ConnectBlock is invoked by the index manager when a new block has been
// connected to the DAG. This indexer adds a mapping for each address
// the transactions in the block involve.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
_ blockdag.MultiBlockTxsAcceptanceData, _ blockdag.MultiBlockTxsAcceptanceData) error {
// The offset and length of the transactions within the serialized
// block.
txLocs, err := block.TxLoc()
if err != nil {
return err
}
// Build all of the address to transaction mappings in a local map.
addrsToTxns := make(writeIndexData)
idx.indexBlock(addrsToTxns, block, dag)
// Add all of the index entries for each address.
addrIdxBucket := dbTx.Metadata().Bucket(addrIndexKey)
for addrKey, txIdxs := range addrsToTxns {
for _, txIdx := range txIdxs {
err := dbPutAddrIndexEntry(addrIdxBucket, addrKey,
blockID, txLocs[txIdx])
if err != nil {
return err
}
}
}
return nil
}
// TxRegionsForAddress returns a slice of block regions which identify each
// transaction that involves the passed address according to the specified
// number to skip, number requested, and whether or not the results should be
// reversed. It also returns the number actually skipped since it could be less
// in the case where there are not enough entries.
//
// NOTE: These results only include transactions confirmed in blocks. See the
// UnconfirmedTxnsForAddress method for obtaining unconfirmed transactions
// that involve a given address.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) TxRegionsForAddress(dbTx database.Tx, addr util.Address, numToSkip, numRequested uint32, reverse bool) ([]database.BlockRegion, uint32, error) {
addrKey, err := addrToKey(addr)
if err != nil {
return nil, 0, err
}
var regions []database.BlockRegion
var skipped uint32
err = idx.db.View(func(dbTx database.Tx) error {
// Create closure to lookup the block hash given the ID using
// the database transaction.
fetchBlockHash := func(id []byte) (*daghash.Hash, error) {
// Deserialize and populate the result.
return blockdag.DBFetchBlockHashBySerializedID(dbTx, id)
}
var err error
addrIdxBucket := dbTx.Metadata().Bucket(addrIndexKey)
regions, skipped, err = dbFetchAddrIndexEntries(addrIdxBucket,
addrKey, numToSkip, numRequested, reverse,
fetchBlockHash)
return err
})
return regions, skipped, err
}
// indexUnconfirmedAddresses modifies the unconfirmed (memory-only) address
// index to include mappings for the addresses encoded by the passed public key
// script to the transaction.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) indexUnconfirmedAddresses(scriptPubKey []byte, tx *util.Tx) {
// The error is ignored here since the only reason it can fail is if the
// script fails to parse and it was already validated before being
// admitted to the mempool.
_, addr, _ := txscript.ExtractScriptPubKeyAddress(scriptPubKey,
idx.dagParams)
// Ignore unsupported address types.
addrKey, err := addrToKey(addr)
if err != nil {
return
}
// Add a mapping from the address to the transaction.
idx.unconfirmedLock.Lock()
addrIndexEntry := idx.txnsByAddr[addrKey]
if addrIndexEntry == nil {
addrIndexEntry = make(map[daghash.TxID]*util.Tx)
idx.txnsByAddr[addrKey] = addrIndexEntry
}
addrIndexEntry[*tx.ID()] = tx
// Add a mapping from the transaction to the address.
addrsByTxEntry := idx.addrsByTx[*tx.ID()]
if addrsByTxEntry == nil {
addrsByTxEntry = make(map[[addrKeySize]byte]struct{})
idx.addrsByTx[*tx.ID()] = addrsByTxEntry
}
addrsByTxEntry[addrKey] = struct{}{}
idx.unconfirmedLock.Unlock()
}
// AddUnconfirmedTx adds all addresses related to the transaction to the
// unconfirmed (memory-only) address index.
//
// NOTE: This transaction MUST have already been validated by the memory pool
// before calling this function with it and have all of the inputs available in
// the provided utxo view. Failure to do so could result in some or all
// addresses not being indexed.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) AddUnconfirmedTx(tx *util.Tx, utxoSet blockdag.UTXOSet) {
// Index addresses of all referenced previous transaction outputs.
//
// The existence checks are elided since this is only called after the
// transaction has already been validated and thus all inputs are
// already known to exist.
for _, txIn := range tx.MsgTx().TxIn {
entry, ok := utxoSet.Get(txIn.PreviousOutpoint)
if !ok {
// Ignore missing entries. This should never happen
// in practice since the function comments specifically
// call out all inputs must be available.
continue
}
idx.indexUnconfirmedAddresses(entry.ScriptPubKey(), tx)
}
// Index addresses of all created outputs.
for _, txOut := range tx.MsgTx().TxOut {
idx.indexUnconfirmedAddresses(txOut.ScriptPubKey, tx)
}
}
// RemoveUnconfirmedTx removes the passed transaction from the unconfirmed
// (memory-only) address index.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) RemoveUnconfirmedTx(txID *daghash.TxID) {
idx.unconfirmedLock.Lock()
defer idx.unconfirmedLock.Unlock()
// Remove all address references to the transaction from the address
// index and remove the entry for the address altogether if it no longer
// references any transactions.
for addrKey := range idx.addrsByTx[*txID] {
delete(idx.txnsByAddr[addrKey], *txID)
if len(idx.txnsByAddr[addrKey]) == 0 {
delete(idx.txnsByAddr, addrKey)
}
}
// Remove the entry from the transaction to address lookup map as well.
delete(idx.addrsByTx, *txID)
}
// UnconfirmedTxnsForAddress returns all transactions currently in the
// unconfirmed (memory-only) address index that involve the passed address.
// Unsupported address types are ignored and will result in no results.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) UnconfirmedTxnsForAddress(addr util.Address) []*util.Tx {
// Ignore unsupported address types.
addrKey, err := addrToKey(addr)
if err != nil {
return nil
}
// Protect concurrent access.
idx.unconfirmedLock.RLock()
defer idx.unconfirmedLock.RUnlock()
// Return a new slice with the results if there are any. This ensures
// safe concurrency.
if txns, exists := idx.txnsByAddr[addrKey]; exists {
addressTxns := make([]*util.Tx, 0, len(txns))
for _, tx := range txns {
addressTxns = append(addressTxns, tx)
}
return addressTxns
}
return nil
}
// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
return errors.Errorf("addrindex was turned off for %d blocks and can't be recovered."+
" To resume working drop the addrindex with --dropaddrindex", lastKnownBlockID-currentBlockID)
}
// NewAddrIndex returns a new instance of an indexer that is used to create a
// mapping of all addresses in the blockDAG to the respective transactions
// that involve them.
//
// It implements the Indexer interface which plugs into the IndexManager that in
// turn is used by the blockDAG package. This allows the index to be
// seamlessly maintained along with the DAG.
func NewAddrIndex(dagParams *dagconfig.Params) *AddrIndex {
return &AddrIndex{
dagParams: dagParams,
txnsByAddr: make(map[[addrKeySize]byte]map[daghash.TxID]*util.Tx),
addrsByTx: make(map[daghash.TxID]map[[addrKeySize]byte]struct{}),
}
}
// DropAddrIndex drops the address index from the provided database if it
// exists.
func DropAddrIndex(db database.DB, interrupt <-chan struct{}) error {
return dropIndex(db, addrIndexKey, addrIndexName, interrupt)
}

View File

@@ -1,277 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package indexers
import (
"bytes"
"fmt"
"github.com/pkg/errors"
"testing"
"github.com/kaspanet/kaspad/wire"
)
// addrIndexBucket provides a mock address index database bucket by implementing
// the internalBucket interface.
type addrIndexBucket struct {
levels map[[levelKeySize]byte][]byte
}
// Clone returns a deep copy of the mock address index bucket.
func (b *addrIndexBucket) Clone() *addrIndexBucket {
levels := make(map[[levelKeySize]byte][]byte)
for k, v := range b.levels {
vCopy := make([]byte, len(v))
copy(vCopy, v)
levels[k] = vCopy
}
return &addrIndexBucket{levels: levels}
}
// Get returns the value associated with the key from the mock address index
// bucket.
//
// This is part of the internalBucket interface.
func (b *addrIndexBucket) Get(key []byte) []byte {
var levelKey [levelKeySize]byte
copy(levelKey[:], key)
return b.levels[levelKey]
}
// Put stores the provided key/value pair to the mock address index bucket.
//
// This is part of the internalBucket interface.
func (b *addrIndexBucket) Put(key []byte, value []byte) error {
var levelKey [levelKeySize]byte
copy(levelKey[:], key)
b.levels[levelKey] = value
return nil
}
// Delete removes the provided key from the mock address index bucket.
//
// This is part of the internalBucket interface.
func (b *addrIndexBucket) Delete(key []byte) error {
var levelKey [levelKeySize]byte
copy(levelKey[:], key)
delete(b.levels, levelKey)
return nil
}
// printLevels returns a string with a visual representation of the provided
// address key taking into account the max size of each level. It is useful
// when creating and debugging test cases.
func (b *addrIndexBucket) printLevels(addrKey [addrKeySize]byte) string {
highestLevel := uint8(0)
for k := range b.levels {
if !bytes.Equal(k[:levelOffset], addrKey[:]) {
continue
}
level := uint8(k[levelOffset])
if level > highestLevel {
highestLevel = level
}
}
var levelBuf bytes.Buffer
_, _ = levelBuf.WriteString("\n")
maxEntries := level0MaxEntries
for level := uint8(0); level <= highestLevel; level++ {
data := b.levels[keyForLevel(addrKey, level)]
numEntries := len(data) / txEntrySize
for i := 0; i < numEntries; i++ {
start := i * txEntrySize
num := byteOrder.Uint32(data[start:])
_, _ = levelBuf.WriteString(fmt.Sprintf("%02d ", num))
}
for i := numEntries; i < maxEntries; i++ {
_, _ = levelBuf.WriteString("_ ")
}
_, _ = levelBuf.WriteString("\n")
maxEntries *= 2
}
return levelBuf.String()
}
// sanityCheck ensures that all data stored in the bucket for the given address
// adheres to the level-based rules described by the address index
// documentation.
func (b *addrIndexBucket) sanityCheck(addrKey [addrKeySize]byte, expectedTotal int) error {
// Find the highest level for the key.
highestLevel := uint8(0)
for k := range b.levels {
if !bytes.Equal(k[:levelOffset], addrKey[:]) {
continue
}
level := uint8(k[levelOffset])
if level > highestLevel {
highestLevel = level
}
}
// Ensure the expected total number of entries are present and that
// all levels adhere to the rules described in the address index
// documentation.
var totalEntries int
maxEntries := level0MaxEntries
for level := uint8(0); level <= highestLevel; level++ {
// Level 0 can't have more entries than the max allowed if the
// levels after it have data and it can't be empty. All other
// levels must either be half full or full.
data := b.levels[keyForLevel(addrKey, level)]
numEntries := len(data) / txEntrySize
totalEntries += numEntries
if level == 0 {
if (highestLevel != 0 && numEntries == 0) ||
numEntries > maxEntries {
return errors.Errorf("level %d has %d entries",
level, numEntries)
}
} else if numEntries != maxEntries && numEntries != maxEntries/2 {
return errors.Errorf("level %d has %d entries", level,
numEntries)
}
maxEntries *= 2
}
if totalEntries != expectedTotal {
return errors.Errorf("expected %d entries - got %d", expectedTotal,
totalEntries)
}
// Ensure all of the numbers are in order starting from the highest
// level moving to the lowest level.
expectedNum := uint32(0)
for level := highestLevel + 1; level > 0; level-- {
data := b.levels[keyForLevel(addrKey, level)]
numEntries := len(data) / txEntrySize
for i := 0; i < numEntries; i++ {
start := i * txEntrySize
num := byteOrder.Uint32(data[start:])
if num != expectedNum {
return errors.Errorf("level %d offset %d does "+
"not contain the expected number of "+
"%d - got %d", level, i, num,
expectedNum)
}
expectedNum++
}
}
return nil
}
// TestAddrIndexLevels ensures that adding and deleting entries to the address
// index creates multiple levels as described by the address index
// documentation.
func TestAddrIndexLevels(t *testing.T) {
t.Parallel()
tests := []struct {
name string
key [addrKeySize]byte
numInsert int
printLevels bool // Set to help debug a specific test.
}{
{
name: "level 0 not full",
numInsert: level0MaxEntries - 1,
},
{
name: "level 1 half",
numInsert: level0MaxEntries + 1,
},
{
name: "level 1 full",
numInsert: level0MaxEntries*2 + 1,
},
{
name: "level 2 half, level 1 half",
numInsert: level0MaxEntries*3 + 1,
},
{
name: "level 2 half, level 1 full",
numInsert: level0MaxEntries*4 + 1,
},
{
name: "level 2 full, level 1 half",
numInsert: level0MaxEntries*5 + 1,
},
{
name: "level 2 full, level 1 full",
numInsert: level0MaxEntries*6 + 1,
},
{
name: "level 3 half, level 2 half, level 1 half",
numInsert: level0MaxEntries*7 + 1,
},
{
name: "level 3 full, level 2 half, level 1 full",
numInsert: level0MaxEntries*12 + 1,
},
}
nextTest:
for testNum, test := range tests {
// Insert entries in order.
populatedBucket := &addrIndexBucket{
levels: make(map[[levelKeySize]byte][]byte),
}
for i := 0; i < test.numInsert; i++ {
txLoc := wire.TxLoc{TxStart: i * 2}
err := dbPutAddrIndexEntry(populatedBucket, test.key,
uint64(i), txLoc)
if err != nil {
t.Errorf("dbPutAddrIndexEntry #%d (%s) - "+
"unexpected error: %v", testNum,
test.name, err)
continue nextTest
}
}
if test.printLevels {
t.Log(populatedBucket.printLevels(test.key))
}
// Delete entries from the populated bucket until all entries
// have been deleted. The bucket is reset to the fully
// populated bucket on each iteration so every combination is
// tested. Notice the upper limit purposely exceeds the number
// of entries to ensure that attempting to delete more entries than
// there are works correctly.
for numDelete := 0; numDelete <= test.numInsert+1; numDelete++ {
// Clone populated bucket to run each delete against.
bucket := populatedBucket.Clone()
// Remove the number of entries for this iteration.
err := dbRemoveAddrIndexEntries(bucket, test.key,
numDelete)
if err != nil {
if numDelete <= test.numInsert {
t.Errorf("dbRemoveAddrIndexEntries (%s) "+
" delete %d - unexpected error: "+
"%v", test.name, numDelete, err)
continue nextTest
}
}
if test.printLevels {
t.Log(bucket.printLevels(test.key))
}
// Sanity check the levels to ensure they adhere to all
// rules.
numExpected := test.numInsert
if numDelete <= test.numInsert {
numExpected -= numDelete
}
err = bucket.sanityCheck(test.key, numExpected)
if err != nil {
t.Errorf("sanity check fail (%s) delete %d: %v",
test.name, numDelete, err)
continue nextTest
}
}
}
}
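
To make the level rules that sanityCheck enforces easier to follow, here is a small self-contained sketch (not part of the index code): it computes each level's capacity under the same doubling rule and checks the "half full or full" requirement for levels above zero. level0Max is a stand-in for level0MaxEntries, whose real value is defined elsewhere in the package.

package main

import "fmt"

// maxEntriesForLevel returns the capacity of a level when level 0 holds
// level0Max entries and every higher level doubles the previous capacity.
func maxEntriesForLevel(level uint8, level0Max int) int {
	capacity := level0Max
	for i := uint8(0); i < level; i++ {
		capacity *= 2
	}
	return capacity
}

// levelIsValid mirrors the rule from sanityCheck: level 0 may hold anything
// up to its capacity, while every higher level must be exactly half full or
// exactly full.
func levelIsValid(level uint8, numEntries, level0Max int) bool {
	capacity := maxEntriesForLevel(level, level0Max)
	if level == 0 {
		return numEntries <= capacity
	}
	return numEntries == capacity || numEntries == capacity/2
}

func main() {
	const level0Max = 8 // hypothetical value, for illustration only
	fmt.Println(levelIsValid(1, 8, level0Max))  // half full -> true
	fmt.Println(levelIsValid(1, 16, level0Max)) // full      -> true
	fmt.Println(levelIsValid(2, 12, level0Max)) // neither   -> false
}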

View File

@@ -1,112 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
/*
Package indexers implements optional block DAG indexes.
*/
package indexers
import (
"encoding/binary"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
)
var (
// byteOrder is the preferred byte order used for serializing numeric
// fields for storage in the database.
byteOrder = binary.LittleEndian
// errInterruptRequested indicates that an operation was cancelled due
// to a user-requested interrupt.
errInterruptRequested = errors.New("interrupt requested")
)
// NeedsInputser provides a generic interface for an indexer to specify that it
// requires the ability to look up inputs for a transaction.
type NeedsInputser interface {
NeedsInputs() bool
}
// Indexer provides a generic interface for an indexer that is managed by an
// index manager such as the Manager type provided by this package.
type Indexer interface {
// Key returns the key of the index as a byte slice.
Key() []byte
// Name returns the human-readable name of the index.
Name() string
// Create is invoked when the indexer manager determines the index needs
// to be created for the first time.
Create(dbTx database.Tx) error
// Init is invoked when the index manager is first initializing the
// index. This differs from the Create method in that it is called on
// every load, including the case the index was just created.
Init(db database.DB, dag *blockdag.BlockDAG) error
// ConnectBlock is invoked when the index manager is notified that a new
// block has been connected to the DAG.
ConnectBlock(dbTx database.Tx,
block *util.Block,
blockID uint64,
dag *blockdag.BlockDAG,
acceptedTxsData blockdag.MultiBlockTxsAcceptanceData,
virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error
// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error
}
// AssertError identifies an error that indicates an internal code consistency
// issue and should be treated as a critical and unrecoverable error.
type AssertError string
// Error returns the assertion error as a human-readable string and satisfies
// the error interface.
func (e AssertError) Error() string {
return "assertion failed: " + string(e)
}
// errDeserialize signifies that a problem was encountered when deserializing
// data.
type errDeserialize string
// Error implements the error interface.
func (e errDeserialize) Error() string {
return string(e)
}
// isDeserializeErr returns whether or not the passed error is an errDeserialize
// error.
func isDeserializeErr(err error) bool {
_, ok := err.(errDeserialize)
return ok
}
// internalBucket is an abstraction over a database bucket. It is used to make
// the code easier to test since it allows mock objects in the tests to only
// implement these functions instead of everything a database.Bucket supports.
type internalBucket interface {
Get(key []byte) []byte
Put(key []byte, value []byte) error
Delete(key []byte) error
}
// interruptRequested returns true when the provided channel has been closed.
// This simplifies early shutdown slightly since the caller can just use an if
// statement instead of a select.
func interruptRequested(interrupted <-chan struct{}) bool {
select {
case <-interrupted:
return true
default:
}
return false
}
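
As a usage note, a hedged sketch of the pattern interruptRequested is meant for: a long-running loop checks the channel between units of work and bails out early on shutdown. The processBatches helper and its batch slice are hypothetical.

package indexers // hypothetical example, for illustration only

// processBatches is a sketch of the intended usage: between batches of work,
// check the interrupt channel and abort with errInterruptRequested so the
// caller can shut down promptly.
func processBatches(batches [][]byte, interrupt <-chan struct{}) error {
	for _, batch := range batches {
		if interruptRequested(interrupt) {
			return errInterruptRequested
		}
		_ = batch // a real caller would process the batch here
	}
	return nil
}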

View File

@@ -0,0 +1,28 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
/*
Package indexers implements optional block DAG indexes.
*/
package indexers
import (
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util/daghash"
)
// Indexer provides a generic interface for an indexer that is managed by an
// index manager such as the Manager type provided by this package.
type Indexer interface {
// Init is invoked when the index manager is first initializing the
// index.
Init(dag *blockdag.BlockDAG) error
// ConnectBlock is invoked when the index manager is notified that a new
// block has been connected to the DAG.
ConnectBlock(dbContext *dbaccess.TxContext,
blockHash *daghash.Hash,
acceptedTxsData blockdag.MultiBlockTxsAcceptanceData) error
}
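
For orientation, a hypothetical minimal implementation of the new, slimmed-down interface; noopIndexer and its method bodies are illustrations only, showing that an indexer now needs just Init and ConnectBlock and receives its database access through a dbaccess.TxContext.

package indexers // hypothetical example, for illustration only

import (
	"github.com/kaspanet/kaspad/blockdag"
	"github.com/kaspanet/kaspad/dbaccess"
	"github.com/kaspanet/kaspad/util/daghash"
)

// noopIndexer satisfies the new Indexer interface without doing any work.
type noopIndexer struct{}

var _ Indexer = (*noopIndexer)(nil)

// Init would normally warm up any in-memory state from the DAG.
func (idx *noopIndexer) Init(dag *blockdag.BlockDAG) error {
	return nil
}

// ConnectBlock would normally persist per-block index data via dbContext.
func (idx *noopIndexer) ConnectBlock(dbContext *dbaccess.TxContext,
	blockHash *daghash.Hash,
	acceptedTxsData blockdag.MultiBlockTxsAcceptanceData) error {
	return nil
}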

View File

@@ -6,8 +6,6 @@ package indexers
import (
"github.com/kaspanet/kaspad/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log, _ = logger.Get(logger.SubsystemTags.INDX)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -6,190 +6,30 @@ package indexers
import (
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util/daghash"
)
var (
// indexTipsBucketName is the name of the db bucket used to house the
// current tip of each index.
indexTipsBucketName = []byte("idxtips")
indexCurrentBlockIDBucketName = []byte("idxcurrentblockid")
)
// Manager defines an index manager that manages multiple optional indexes and
// implements the blockdag.IndexManager interface so it can be seamlessly
// plugged into normal DAG processing.
type Manager struct {
db database.DB
enabledIndexes []Indexer
}
// Ensure the Manager type implements the blockdag.IndexManager interface.
var _ blockdag.IndexManager = (*Manager)(nil)
// indexDropKey returns the key for an index which indicates it is in the
// process of being dropped.
func indexDropKey(idxKey []byte) []byte {
dropKey := make([]byte, len(idxKey)+1)
dropKey[0] = 'd'
copy(dropKey[1:], idxKey)
return dropKey
}
// maybeFinishDrops determines if each of the enabled indexes is in the middle
// of being dropped and finishes dropping it when it is. This is necessary
// because dropping an index has to be done in several atomic steps rather than
// one big atomic step due to the massive number of entries.
func (m *Manager) maybeFinishDrops(interrupt <-chan struct{}) error {
indexNeedsDrop := make([]bool, len(m.enabledIndexes))
err := m.db.View(func(dbTx database.Tx) error {
// None of the indexes needs to be dropped if the index tips
// bucket hasn't been created yet.
indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
if indexesBucket == nil {
return nil
}
// Mark the indexer as requiring a drop if one is already in
// progress.
for i, indexer := range m.enabledIndexes {
dropKey := indexDropKey(indexer.Key())
if indexesBucket.Get(dropKey) != nil {
indexNeedsDrop[i] = true
}
}
return nil
})
if err != nil {
return err
}
if interruptRequested(interrupt) {
return errInterruptRequested
}
// Finish dropping any of the enabled indexes that are already in the
// middle of being dropped.
for i, indexer := range m.enabledIndexes {
if !indexNeedsDrop[i] {
continue
}
log.Infof("Resuming %s drop", indexer.Name())
err := dropIndex(m.db, indexer.Key(), indexer.Name(), interrupt)
if err != nil {
return err
}
}
return nil
}
// maybeCreateIndexes determines if each of the enabled indexes has already
// been created and creates it if not.
func (m *Manager) maybeCreateIndexes(dbTx database.Tx) error {
indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
for _, indexer := range m.enabledIndexes {
// Nothing to do if the index tip already exists.
idxKey := indexer.Key()
if indexesBucket.Get(idxKey) != nil {
continue
}
// The tip for the index does not exist, so create it and
// invoke the create callback for the index so it can perform
// any one-time initialization it requires.
if err := indexer.Create(dbTx); err != nil {
return err
}
// TODO (Mike): this is a temporary solution to prevent the node from failing to start
// because it thinks the indexers are not initialized.
// Indexers, however, do not currently work properly, and a general solution for their operation is required.
indexesBucket.Put(idxKey, []byte{0})
}
return nil
}
// Init initializes the enabled indexes. This is called during DAG
// initialization and primarily consists of catching up all indexes to the
// current tips. This is necessary since each index can be disabled
// and re-enabled at any time and attempting to catch up indexes at the same
// time new blocks are being downloaded would lead to an overall longer time to
// catch up due to the I/O contention.
//
// Init initializes the enabled indexes.
// This is part of the blockdag.IndexManager interface.
func (m *Manager) Init(db database.DB, blockDAG *blockdag.BlockDAG, interrupt <-chan struct{}) error {
// Nothing to do when no indexes are enabled.
if len(m.enabledIndexes) == 0 {
return nil
}
if interruptRequested(interrupt) {
return errInterruptRequested
}
m.db = db
// Finish any drops that were previously interrupted.
if err := m.maybeFinishDrops(interrupt); err != nil {
return err
}
// Create the initial state for the indexes as needed.
err := m.db.Update(func(dbTx database.Tx) error {
// Create the bucket for the current tips as needed.
meta := dbTx.Metadata()
_, err := meta.CreateBucketIfNotExists(indexTipsBucketName)
if err != nil {
return err
}
if _, err := meta.CreateBucketIfNotExists(indexCurrentBlockIDBucketName); err != nil {
return err
}
return m.maybeCreateIndexes(dbTx)
})
if err != nil {
return err
}
// Initialize each of the enabled indexes.
func (m *Manager) Init(dag *blockdag.BlockDAG) error {
for _, indexer := range m.enabledIndexes {
if err := indexer.Init(db, blockDAG); err != nil {
if err := indexer.Init(dag); err != nil {
return err
}
}
return m.recoverIfNeeded()
}
// recoverIfNeeded checks if the node worked for some time
// without one of the current enabled indexes, and if it's
// the case, recovers the missing blocks from the index.
func (m *Manager) recoverIfNeeded() error {
return m.db.Update(func(dbTx database.Tx) error {
lastKnownBlockID := blockdag.DBFetchCurrentBlockID(dbTx)
for _, indexer := range m.enabledIndexes {
serializedCurrentIdxBlockID := dbTx.Metadata().Bucket(indexCurrentBlockIDBucketName).Get(indexer.Key())
currentIdxBlockID := uint64(0)
if serializedCurrentIdxBlockID != nil {
currentIdxBlockID = blockdag.DeserializeBlockID(serializedCurrentIdxBlockID)
}
if lastKnownBlockID > currentIdxBlockID {
err := indexer.Recover(dbTx, currentIdxBlockID, lastKnownBlockID)
if err != nil {
return err
}
}
}
return nil
})
return nil
}
// ConnectBlock must be invoked when a block is added to the DAG. It
@@ -197,32 +37,13 @@ func (m *Manager) recoverIfNeeded() error {
// checks, and invokes each indexer.
//
// This is part of the blockdag.IndexManager interface.
func (m *Manager) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
func (m *Manager) ConnectBlock(dbContext *dbaccess.TxContext, blockHash *daghash.Hash, txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
// Call each of the currently active optional indexes with the block
// being connected so they can update accordingly.
for _, index := range m.enabledIndexes {
// Notify the indexer with the connected block so it can index it.
if err := index.ConnectBlock(dbTx, block, blockID, dag, txsAcceptanceData, virtualTxsAcceptanceData); err != nil {
return err
}
}
// Add the new block ID index entry for the block being connected and
// update the current internal block ID accordingly.
err := m.updateIndexersWithCurrentBlockID(dbTx, block.Hash(), blockID)
if err != nil {
return err
}
return nil
}
func (m *Manager) updateIndexersWithCurrentBlockID(dbTx database.Tx, blockHash *daghash.Hash, blockID uint64) error {
serializedBlockID := blockdag.SerializeBlockID(blockID)
for _, index := range m.enabledIndexes {
err := dbTx.Metadata().Bucket(indexCurrentBlockIDBucketName).Put(index.Key(), serializedBlockID)
if err != nil {
if err := index.ConnectBlock(dbContext, blockHash, txsAcceptanceData); err != nil {
return err
}
}
@@ -238,152 +59,3 @@ func NewManager(enabledIndexes []Indexer) *Manager {
enabledIndexes: enabledIndexes,
}
}
// dropIndex drops the passed index from the database. Since indexes can be
// massive, it deletes the index in multiple database transactions in order to
// keep memory usage to reasonable levels. It also marks the drop in progress
// so the drop can be resumed if it is stopped before it is done and before the
// index can be used again.
func dropIndex(db database.DB, idxKey []byte, idxName string, interrupt <-chan struct{}) error {
// Nothing to do if the index doesn't already exist.
var needsDelete bool
err := db.View(func(dbTx database.Tx) error {
indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
if indexesBucket != nil && indexesBucket.Get(idxKey) != nil {
needsDelete = true
}
return nil
})
if err != nil {
return err
}
if !needsDelete {
log.Infof("Not dropping %s because it does not exist", idxName)
return nil
}
// Mark that the index is in the process of being dropped so that it
// can be resumed on the next start if interrupted before the process is
// complete.
log.Infof("Dropping all %s entries. This might take a while...",
idxName)
err = db.Update(func(dbTx database.Tx) error {
indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
return indexesBucket.Put(indexDropKey(idxKey), idxKey)
})
if err != nil {
return err
}
// Since the indexes can be so large, attempting to simply delete
// the bucket in a single database transaction would result in massive
// memory usage and likely crash many systems due to ulimits. In order
// to avoid this, use a cursor to delete a maximum number of entries out
// of the bucket at a time. Recurse buckets depth-first to delete any
// sub-buckets.
const maxDeletions = 2000000
var totalDeleted uint64
// Recurse through all buckets in the index, cataloging each for
// later deletion.
var subBuckets [][][]byte
var subBucketClosure func(database.Tx, []byte, [][]byte) error
subBucketClosure = func(dbTx database.Tx,
subBucket []byte, tlBucket [][]byte) error {
// Get full bucket name and append to subBuckets for later
// deletion.
var bucketName [][]byte
if (tlBucket == nil) || (len(tlBucket) == 0) {
bucketName = append(bucketName, subBucket)
} else {
bucketName = append(tlBucket, subBucket)
}
subBuckets = append(subBuckets, bucketName)
// Recurse sub-buckets to append to subBuckets slice.
bucket := dbTx.Metadata()
for _, subBucketName := range bucketName {
bucket = bucket.Bucket(subBucketName)
}
return bucket.ForEachBucket(func(k []byte) error {
return subBucketClosure(dbTx, k, bucketName)
})
}
// Call subBucketClosure with top-level bucket.
err = db.View(func(dbTx database.Tx) error {
return subBucketClosure(dbTx, idxKey, nil)
})
if err != nil {
return err
}
// Iterate through each sub-bucket in reverse, deepest-first, deleting
// all keys inside them and then dropping the buckets themselves.
for i := range subBuckets {
bucketName := subBuckets[len(subBuckets)-1-i]
// Delete maxDeletions key/value pairs at a time.
for numDeleted := maxDeletions; numDeleted == maxDeletions; {
numDeleted = 0
err := db.Update(func(dbTx database.Tx) error {
subBucket := dbTx.Metadata()
for _, subBucketName := range bucketName {
subBucket = subBucket.Bucket(subBucketName)
}
cursor := subBucket.Cursor()
for ok := cursor.First(); ok; ok = cursor.Next() &&
numDeleted < maxDeletions {
if err := cursor.Delete(); err != nil {
return err
}
numDeleted++
}
return nil
})
if err != nil {
return err
}
if numDeleted > 0 {
totalDeleted += uint64(numDeleted)
log.Infof("Deleted %d keys (%d total) from %s",
numDeleted, totalDeleted, idxName)
}
}
if interruptRequested(interrupt) {
return errInterruptRequested
}
// Drop the bucket itself.
err = db.Update(func(dbTx database.Tx) error {
bucket := dbTx.Metadata()
for j := 0; j < len(bucketName)-1; j++ {
bucket = bucket.Bucket(bucketName[j])
}
return bucket.DeleteBucket(bucketName[len(bucketName)-1])
})
}
// Remove the index tip, index bucket, and in-progress drop flag now
// that all index entries have been removed.
err = db.Update(func(dbTx database.Tx) error {
meta := dbTx.Metadata()
indexesBucket := meta.Bucket(indexTipsBucketName)
if err := indexesBucket.Delete(idxKey); err != nil {
return err
}
if err := meta.Bucket(indexCurrentBlockIDBucketName).Delete(idxKey); err != nil {
return err
}
return indexesBucket.Delete(indexDropKey(idxKey))
})
if err != nil {
return err
}
log.Infof("Dropped %s", idxName)
return nil
}
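
To illustrate the bounded-batch deletion described in the comments above, here is a small self-contained sketch (not kaspad code) that mimics the numDeleted == maxDeletions loop: keep starting new transactions while the previous batch was full, and stop as soon as a batch comes back short. The entry counts are made up.

package main

import "fmt"

func main() {
	// Pretend the index bucket holds 5,300 entries and each database
	// transaction may delete at most 2,000 of them (mirroring maxDeletions).
	remaining := 5300
	const maxDeletions = 2000

	totalDeleted := 0
	// Keep opening new "transactions" while the previous one was full,
	// exactly like the numDeleted == maxDeletions loop in dropIndex.
	for numDeleted := maxDeletions; numDeleted == maxDeletions; {
		numDeleted = remaining
		if numDeleted > maxDeletions {
			numDeleted = maxDeletions
		}
		remaining -= numDeleted
		totalDeleted += numDeleted
		fmt.Printf("Deleted %d keys (%d total)\n", numDeleted, totalDeleted)
	}
	// Prints three batches: 2000, 2000 and 1300 keys.
}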

View File

@@ -1,438 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package indexers
import (
"fmt"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
)
const (
// txIndexName is the human-readable name for the index.
txIndexName = "transaction index"
includingBlocksIndexKeyEntrySize = 8 // 4 bytes for offset + 4 bytes for transaction length
)
var (
includingBlocksIndexKey = []byte("includingblocksidx")
acceptingBlocksIndexKey = []byte("acceptingblocksidx")
)
// txsAcceptedByVirtual is the in-memory index of txIDs that were accepted
// by the current virtual
var txsAcceptedByVirtual map[daghash.TxID]bool
// -----------------------------------------------------------------------------
// The transaction index consists of an entry for every transaction in the DAG.
//
// There are two buckets used in total. The first bucket maps the hash of
// each transaction to its location in each block it's included in. The second bucket
// contains, for every block from whose viewpoint the transaction has been
// accepted (i.e. the transaction is found in its blue set without double spends),
// the blue block (or the block itself) that included the transaction.
//
// NOTE: Although it is technically possible for multiple transactions to have
// the same hash as long as the previous transaction with the same hash is fully
// spent, this code only stores the most recent one because doing otherwise
// would add a non-trivial amount of space and overhead for something that will
// realistically never happen per the probability and even if it did, the old
// one must be fully spent and so the most likely transaction a caller would
// want for a given hash is the most recent one anyways.
//
// The including blocks index contains a sub-bucket for each transaction hash (32 bytes each), whose serialized format is:
//
// <block id> = <start offset><tx length>
//
// Field Type Size
// block id uint64 8 bytes
// start offset uint32 4 bytes
// tx length uint32 4 bytes
// -----
// Total: 16 bytes
//
// The accepting blocks index contains a sub-bucket for each transaction hash (32 bytes each), whose serialized format is:
//
// <accepting block id> = <including block id>
//
// Field Type Size
// accepting block id uint64 8 bytes
// including block id uint64 8 bytes
// -----
// Total: 16 bytes
//
// -----------------------------------------------------------------------------
func putIncludingBlocksEntry(target []byte, txLoc wire.TxLoc) {
byteOrder.PutUint32(target, uint32(txLoc.TxStart))
byteOrder.PutUint32(target[4:], uint32(txLoc.TxLen))
}
func dbPutIncludingBlocksEntry(dbTx database.Tx, txID *daghash.TxID, blockID uint64, serializedData []byte) error {
bucket, err := dbTx.Metadata().Bucket(includingBlocksIndexKey).CreateBucketIfNotExists(txID[:])
if err != nil {
return err
}
return bucket.Put(blockdag.SerializeBlockID(blockID), serializedData)
}
func dbPutAcceptingBlocksEntry(dbTx database.Tx, txID *daghash.TxID, blockID uint64, serializedData []byte) error {
bucket, err := dbTx.Metadata().Bucket(acceptingBlocksIndexKey).CreateBucketIfNotExists(txID[:])
if err != nil {
return err
}
return bucket.Put(blockdag.SerializeBlockID(blockID), serializedData)
}
// dbFetchFirstTxRegion uses an existing database transaction to fetch the block
// region for the provided transaction hash from the transaction index. When
// there is no entry for the provided hash, nil will be returned for both
// the region and the error.
//
// Note: because the transaction can be found in multiple blocks, this function arbitrarily
// returns the first block region that is stored in the txindex.
func dbFetchFirstTxRegion(dbTx database.Tx, txID *daghash.TxID) (*database.BlockRegion, error) {
// Load the record from the database and return now if it doesn't exist.
txBucket := dbTx.Metadata().Bucket(includingBlocksIndexKey).Bucket(txID[:])
if txBucket == nil {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("No block region "+
"was found for %s", txID),
}
}
cursor := txBucket.Cursor()
if ok := cursor.First(); !ok {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("No block region "+
"was found for %s", txID),
}
}
serializedBlockID := cursor.Key()
serializedData := cursor.Value()
if len(serializedData) == 0 {
return nil, nil
}
// Ensure the serialized data has enough bytes to properly deserialize.
if len(serializedData) < includingBlocksIndexKeyEntrySize {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("corrupt transaction index "+
"entry for %s", txID),
}
}
// Load the block hash associated with the block ID.
hash, err := blockdag.DBFetchBlockHashBySerializedID(dbTx, serializedBlockID)
if err != nil {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("corrupt transaction index "+
"entry for %s: %s", txID, err),
}
}
// Deserialize the final entry.
region := database.BlockRegion{Hash: &daghash.Hash{}}
copy(region.Hash[:], hash[:])
region.Offset = byteOrder.Uint32(serializedData[:4])
region.Len = byteOrder.Uint32(serializedData[4:])
return &region, nil
}
// dbAddTxIndexEntries uses an existing database transaction to add a
// transaction index entry for every transaction in the passed block.
func dbAddTxIndexEntries(dbTx database.Tx, block *util.Block, blockID uint64, multiBlockTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
// The offset and length of the transactions within the serialized
// block.
txLocs, err := block.TxLoc()
if err != nil {
return err
}
// As an optimization, allocate a single slice big enough to hold all
// of the serialized transaction index entries for the block and
// serialize them directly into the slice. Then, pass the appropriate
// subslice to the database to be written. This approach significantly
// cuts down on the number of required allocations.
includingBlocksOffset := 0
serializedIncludingBlocksValues := make([]byte, len(block.Transactions())*includingBlocksIndexKeyEntrySize)
for i, tx := range block.Transactions() {
putIncludingBlocksEntry(serializedIncludingBlocksValues[includingBlocksOffset:], txLocs[i])
endOffset := includingBlocksOffset + includingBlocksIndexKeyEntrySize
err := dbPutIncludingBlocksEntry(dbTx, tx.ID(), blockID,
serializedIncludingBlocksValues[includingBlocksOffset:endOffset:endOffset])
if err != nil {
return err
}
includingBlocksOffset += includingBlocksIndexKeyEntrySize
}
for _, blockTxsAcceptanceData := range multiBlockTxsAcceptanceData {
var includingBlockID uint64
if blockTxsAcceptanceData.BlockHash.IsEqual(block.Hash()) {
includingBlockID = blockID
} else {
includingBlockID, err = blockdag.DBFetchBlockIDByHash(dbTx, &blockTxsAcceptanceData.BlockHash)
if err != nil {
return err
}
}
serializedIncludingBlockID := blockdag.SerializeBlockID(includingBlockID)
for _, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
if !txAcceptanceData.IsAccepted {
continue
}
err = dbPutAcceptingBlocksEntry(dbTx, txAcceptanceData.Tx.ID(), blockID, serializedIncludingBlockID)
if err != nil {
return err
}
}
}
return nil
}
func updateTxsAcceptedByVirtual(virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
// Initialize a new txsAcceptedByVirtual
entries := 0
for _, blockTxsAcceptanceData := range virtualTxsAcceptanceData {
entries += len(blockTxsAcceptanceData.TxAcceptanceData)
}
txsAcceptedByVirtual = make(map[daghash.TxID]bool, entries)
// Copy virtualTxsAcceptanceData to txsAcceptedByVirtual
for _, blockTxsAcceptanceData := range virtualTxsAcceptanceData {
for _, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
txsAcceptedByVirtual[*txAcceptanceData.Tx.ID()] = true
}
}
return nil
}
// TxIndex implements a transaction by hash index. That is to say, it supports
// querying all transactions by their hash.
type TxIndex struct {
db database.DB
}
// Ensure the TxIndex type implements the Indexer interface.
var _ Indexer = (*TxIndex)(nil)
// Init initializes the hash-based transaction index. In particular, it finds
// the highest used block ID and stores it for later use when connecting or
// disconnecting blocks.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Init(db database.DB, dag *blockdag.BlockDAG) error {
idx.db = db
// Initialize the txsAcceptedByVirtual index
virtualTxsAcceptanceData, err := dag.TxsAcceptedByVirtual()
if err != nil {
return err
}
err = updateTxsAcceptedByVirtual(virtualTxsAcceptanceData)
if err != nil {
return err
}
return nil
}
// Key returns the database key to use for the index as a byte slice.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Key() []byte {
return includingBlocksIndexKey
}
// Name returns the human-readable name of the index.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Name() string {
return txIndexName
}
// Create is invoked when the indexer manager determines the index needs
// to be created for the first time. It creates the buckets for the hash-based
// transaction index and the internal block ID indexes.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Create(dbTx database.Tx) error {
meta := dbTx.Metadata()
if _, err := meta.CreateBucket(includingBlocksIndexKey); err != nil {
return err
}
_, err := meta.CreateBucket(acceptingBlocksIndexKey)
return err
}
// ConnectBlock is invoked by the index manager when a new block has been
// connected to the DAG. This indexer adds a hash-to-transaction mapping
// for every transaction in the passed block.
//
// This is part of the Indexer interface.
func (idx *TxIndex) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
acceptedTxsData blockdag.MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
if err := dbAddTxIndexEntries(dbTx, block, blockID, acceptedTxsData); err != nil {
return err
}
err := updateTxsAcceptedByVirtual(virtualTxsAcceptanceData)
if err != nil {
return err
}
return nil
}
// TxFirstBlockRegion returns the first block region for the provided transaction hash
// from the transaction index. The block region can in turn be used to load the
// raw transaction bytes. When there is no entry for the provided hash, nil
// will be returned for both the entry and the error.
//
// This function is safe for concurrent access.
func (idx *TxIndex) TxFirstBlockRegion(txID *daghash.TxID) (*database.BlockRegion, error) {
var region *database.BlockRegion
err := idx.db.View(func(dbTx database.Tx) error {
var err error
region, err = dbFetchFirstTxRegion(dbTx, txID)
return err
})
return region, err
}
// TxBlocks returns the hashes of the blocks where the transaction exists
func (idx *TxIndex) TxBlocks(txHash *daghash.Hash) ([]*daghash.Hash, error) {
blockHashes := make([]*daghash.Hash, 0)
err := idx.db.View(func(dbTx database.Tx) error {
var err error
blockHashes, err = dbFetchTxBlocks(dbTx, txHash)
if err != nil {
return err
}
return nil
})
return blockHashes, err
}
func dbFetchTxBlocks(dbTx database.Tx, txHash *daghash.Hash) ([]*daghash.Hash, error) {
blockHashes := make([]*daghash.Hash, 0)
bucket := dbTx.Metadata().Bucket(includingBlocksIndexKey).Bucket(txHash[:])
if bucket == nil {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("No including blocks "+
"were found for %s", txHash),
}
}
err := bucket.ForEach(func(serializedBlockID, _ []byte) error {
blockHash, err := blockdag.DBFetchBlockHashBySerializedID(dbTx, serializedBlockID)
if err != nil {
return err
}
blockHashes = append(blockHashes, blockHash)
return nil
})
if err != nil {
return nil, err
}
return blockHashes, nil
}
// BlockThatAcceptedTx returns the hash of the block where the transaction got accepted (from the virtual block point of view)
func (idx *TxIndex) BlockThatAcceptedTx(dag *blockdag.BlockDAG, txID *daghash.TxID) (*daghash.Hash, error) {
var acceptingBlock *daghash.Hash
err := idx.db.View(func(dbTx database.Tx) error {
var err error
acceptingBlock, err = dbFetchTxAcceptingBlock(dbTx, txID, dag)
return err
})
return acceptingBlock, err
}
func dbFetchTxAcceptingBlock(dbTx database.Tx, txID *daghash.TxID, dag *blockdag.BlockDAG) (*daghash.Hash, error) {
// If the transaction was accepted by the current virtual,
// return the zeroHash immediately
if _, ok := txsAcceptedByVirtual[*txID]; ok {
return &daghash.ZeroHash, nil
}
bucket := dbTx.Metadata().Bucket(acceptingBlocksIndexKey).Bucket(txID[:])
if bucket == nil {
return nil, nil
}
cursor := bucket.Cursor()
if !cursor.First() {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("Accepting blocks bucket is "+
"empty for %s", txID),
}
}
for ; cursor.Key() != nil; cursor.Next() {
blockHash, err := blockdag.DBFetchBlockHashBySerializedID(dbTx, cursor.Key())
if err != nil {
return nil, err
}
isBlockInSelectedParentChain, err := dag.IsInSelectedParentChain(blockHash)
if err != nil {
return nil, err
}
if isBlockInSelectedParentChain {
return blockHash, nil
}
}
return nil, nil
}
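// blockThatAcceptedTxExample is a hypothetical illustration (not part of the
// original file) of how a caller interprets BlockThatAcceptedTx: ZeroHash
// means the transaction is currently accepted only by the virtual block, nil
// means no accepting block is known, and any other hash is the selected
// parent chain block that accepted it.
func blockThatAcceptedTxExample(idx *TxIndex, dag *blockdag.BlockDAG, txID *daghash.TxID) (string, error) {
	acceptingBlock, err := idx.BlockThatAcceptedTx(dag, txID)
	if err != nil {
		return "", err
	}
	switch {
	case acceptingBlock == nil:
		return "not accepted by any known block", nil
	case acceptingBlock.IsEqual(&daghash.ZeroHash):
		return "accepted by the current virtual block", nil
	default:
		return "accepted by block " + acceptingBlock.String(), nil
	}
}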
// NewTxIndex returns a new instance of an indexer that is used to create a
// mapping of the hashes of all transactions in the blockDAG to the respective
// block, location within the block, and size of the transaction.
//
// It implements the Indexer interface which plugs into the IndexManager that in
// turn is used by the blockdag package. This allows the index to be
// seamlessly maintained along with the DAG.
func NewTxIndex() *TxIndex {
return &TxIndex{}
}
// DropTxIndex drops the transaction index from the provided database if it
// exists. Since the address index relies on it, the address index will also be
// dropped when it exists.
func DropTxIndex(db database.DB, interrupt <-chan struct{}) error {
err := dropIndex(db, addrIndexKey, addrIndexName, interrupt)
if err != nil {
return err
}
err = dropIndex(db, includingBlocksIndexKey, txIndexName, interrupt)
if err != nil {
return err
}
return dropIndex(db, acceptingBlocksIndexKey, txIndexName, interrupt)
}
// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
return errors.Errorf("txindex was turned off for %d blocks and can't be recovered."+
" To resume working drop the txindex with --droptxindex", lastKnownBlockID-currentBlockID)
}
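
A worked example of the including-blocks entry layout documented at the top of this file, using made-up numbers; the sub-bucket key is the 8-byte serialized block ID and the value packs the 4-byte offset and 4-byte transaction length, all little-endian to match byteOrder.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const blockID = uint64(42)  // hypothetical serialized block ID
	const txStart = uint32(285) // hypothetical offset within the serialized block
	const txLen = uint32(192)   // hypothetical serialized transaction length

	key := make([]byte, 8)
	binary.LittleEndian.PutUint64(key, blockID)

	value := make([]byte, 8)
	binary.LittleEndian.PutUint32(value[0:], txStart)
	binary.LittleEndian.PutUint32(value[4:], txLen)

	fmt.Printf("key   %x\n", key)   // 2a00000000000000
	fmt.Printf("value %x\n", value) // 1d010000c0000000
}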

View File

@@ -1,144 +0,0 @@
package indexers
import (
"bytes"
"reflect"
"testing"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/mining"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
func createTransaction(t *testing.T, value uint64, originTx *wire.MsgTx, outputIndex uint32) *wire.MsgTx {
signatureScript, err := txscript.PayToScriptHashSignatureScript(blockdag.OpTrueScript, nil)
if err != nil {
t.Fatalf("Error creating signature script: %s", err)
}
txIn := &wire.TxIn{
PreviousOutpoint: wire.Outpoint{
TxID: *originTx.TxID(),
Index: outputIndex,
},
Sequence: wire.MaxTxInSequenceNum,
SignatureScript: signatureScript,
}
txOut := wire.NewTxOut(value, blockdag.OpTrueScript)
tx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut})
return tx
}
func TestTxIndexConnectBlock(t *testing.T) {
blocks := make(map[daghash.Hash]*util.Block)
txIndex := NewTxIndex()
indexManager := NewManager([]Indexer{txIndex})
params := dagconfig.SimnetParams
params.BlockCoinbaseMaturity = 0
params.K = 1
config := blockdag.Config{
IndexManager: indexManager,
DAGParams: &params,
}
dag, teardown, err := blockdag.DAGSetup("TestTxIndexConnectBlock", config)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: Failed to setup DAG instance: %v", err)
}
if teardown != nil {
defer teardown()
}
prepareAndProcessBlock := func(parentHashes []*daghash.Hash, transactions []*wire.MsgTx, blockName string) *wire.MsgBlock {
block, err := mining.PrepareBlockForTest(dag, &params, parentHashes, transactions, false)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: block %v got unexpected error from PrepareBlockForTest: %v", blockName, err)
}
utilBlock := util.NewBlock(block)
blocks[*block.BlockHash()] = utilBlock
isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, blockdag.BFNoPoWCheck)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: dag.ProcessBlock got unexpected error for block %v: %v", blockName, err)
}
if isDelayed {
t.Fatalf("TestTxIndexConnectBlock: block %s "+
"is too far in the future", blockName)
}
if isOrphan {
t.Fatalf("TestTxIndexConnectBlock: block %v was unexpectedly orphan", blockName)
}
return block
}
block1 := prepareAndProcessBlock([]*daghash.Hash{params.GenesisHash}, nil, "1")
block2Tx := createTransaction(t, block1.Transactions[0].TxOut[0].Value, block1.Transactions[0], 0)
block2 := prepareAndProcessBlock([]*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{block2Tx}, "2")
block3Tx := createTransaction(t, block2.Transactions[0].TxOut[0].Value, block2.Transactions[0], 0)
block3 := prepareAndProcessBlock([]*daghash.Hash{block2.BlockHash()}, []*wire.MsgTx{block3Tx}, "3")
block2TxID := block2Tx.TxID()
block2TxNewAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, block2TxID)
if err != nil {
t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
}
block3Hash := block3.BlockHash()
if !block2TxNewAcceptedBlock.IsEqual(block3Hash) {
t.Errorf("TestTxIndexConnectBlock: block2Tx should've "+
"been accepted in block %v but instead got accepted in block %v", block3Hash, block2TxNewAcceptedBlock)
}
block3TxID := block3Tx.TxID()
block3TxNewAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, block3TxID)
if err != nil {
t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
}
if !block3TxNewAcceptedBlock.IsEqual(&daghash.ZeroHash) {
t.Errorf("TestTxIndexConnectBlock: block3Tx should've "+
"been accepted by the virtual block but instead got accepted in block %v", block3TxNewAcceptedBlock)
}
block3A := prepareAndProcessBlock([]*daghash.Hash{block2.BlockHash()}, []*wire.MsgTx{block3Tx}, "3A")
block4 := prepareAndProcessBlock([]*daghash.Hash{block3.BlockHash()}, nil, "4")
prepareAndProcessBlock([]*daghash.Hash{block3A.BlockHash(), block4.BlockHash()}, nil, "5")
block2TxAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, block2TxID)
if err != nil {
t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
}
if !block2TxAcceptedBlock.IsEqual(block3Hash) {
t.Errorf("TestTxIndexConnectBlock: block2Tx should've "+
"been accepted in block %v but instead got accepted in block %v", block3Hash, block2TxAcceptedBlock)
}
region, err := txIndex.TxFirstBlockRegion(block3TxID)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: no block region was found for block3Tx")
}
regionBlock, ok := blocks[*region.Hash]
if !ok {
t.Fatalf("TestTxIndexConnectBlock: couldn't find block with hash %v", region.Hash)
}
regionBlockBytes, err := regionBlock.Bytes()
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: Couldn't serialize block to bytes")
}
block3TxInBlock := regionBlockBytes[region.Offset : region.Offset+region.Len]
block3TxBuf := bytes.NewBuffer(make([]byte, 0, block3Tx.SerializeSize()))
block3Tx.KaspaEncode(block3TxBuf, 0)
blockTxBytes := block3TxBuf.Bytes()
if !reflect.DeepEqual(blockTxBytes, block3TxInBlock) {
t.Errorf("TestTxIndexConnectBlock: the block region that was in the bucket doesn't match block3Tx")
}
}

View File

@@ -1,206 +0,0 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"math"
"sort"
"sync"
"time"
)
const (
// maxAllowedOffsetSecs is the maximum number of seconds in either
// direction that the local clock will be adjusted. When the median time
// of the network is outside of this range, no offset will be applied.
maxAllowedOffsetSecs = 70 * 60 // 1 hour 10 minutes
// similarTimeSecs is the number of seconds in either direction from the
// local clock that is used to determine that it is likely wrong and
// hence to show a warning.
similarTimeSecs = 5 * 60 // 5 minutes
)
var (
// maxMedianTimeEntries is the maximum number of entries allowed in the
// median time data. This is a variable as opposed to a constant so the
// test code can modify it.
maxMedianTimeEntries = 200
)
// MedianTimeSource provides a mechanism to add several time samples which are
// used to determine a median time which is then used as an offset to the local
// clock.
type MedianTimeSource interface {
// AdjustedTime returns the current time adjusted by the median time
// offset as calculated from the time samples added by AddTimeSample.
AdjustedTime() time.Time
// AddTimeSample adds a time sample that is used when determining the
// median time of the added samples.
AddTimeSample(id string, timeVal time.Time)
// Offset returns the number of seconds to adjust the local clock based
// upon the median of the time samples added by AddTimeSample.
Offset() time.Duration
}
// int64Sorter implements sort.Interface to allow a slice of 64-bit integers to
// be sorted.
type int64Sorter []int64
// Len returns the number of 64-bit integers in the slice. It is part of the
// sort.Interface implementation.
func (s int64Sorter) Len() int {
return len(s)
}
// Swap swaps the 64-bit integers at the passed indices. It is part of the
// sort.Interface implementation.
func (s int64Sorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
// Less returns whether the 64-bit integer with index i should sort before the
// 64-bit integer with index j. It is part of the sort.Interface
// implementation.
func (s int64Sorter) Less(i, j int) bool {
return s[i] < s[j]
}
// medianTime provides an implementation of the MedianTimeSource interface.
type medianTime struct {
mtx sync.Mutex
knownIDs map[string]struct{}
offsets []int64
offsetSecs int64
invalidTimeChecked bool
}
// Ensure the medianTime type implements the MedianTimeSource interface.
var _ MedianTimeSource = (*medianTime)(nil)
// AdjustedTime returns the current time adjusted by the median time offset as
// calculated from the time samples added by AddTimeSample.
//
// This function is safe for concurrent access and is part of the
// MedianTimeSource interface implementation.
func (m *medianTime) AdjustedTime() time.Time {
m.mtx.Lock()
defer m.mtx.Unlock()
// Limit the adjusted time to 1 second precision.
now := time.Unix(time.Now().Unix(), 0)
return now.Add(time.Duration(m.offsetSecs) * time.Second)
}
// AddTimeSample adds a time sample that is used when determining the median
// time of the added samples.
//
// This function is safe for concurrent access and is part of the
// MedianTimeSource interface implementation.
func (m *medianTime) AddTimeSample(sourceID string, timeVal time.Time) {
m.mtx.Lock()
defer m.mtx.Unlock()
// Don't add time data from the same source.
if _, exists := m.knownIDs[sourceID]; exists {
return
}
m.knownIDs[sourceID] = struct{}{}
// Truncate the provided offset to seconds and append it to the slice
// of offsets while respecting the maximum number of allowed entries by
// replacing the oldest entry with the new entry once the maximum number
// of entries is reached.
now := time.Unix(time.Now().Unix(), 0)
offsetSecs := int64(timeVal.Sub(now).Seconds())
numOffsets := len(m.offsets)
if numOffsets == maxMedianTimeEntries && maxMedianTimeEntries > 0 {
m.offsets = m.offsets[1:]
numOffsets--
}
m.offsets = append(m.offsets, offsetSecs)
numOffsets++
// Sort the offsets so the median can be obtained as needed later.
sortedOffsets := make([]int64, numOffsets)
copy(sortedOffsets, m.offsets)
sort.Sort(int64Sorter(sortedOffsets))
offsetDuration := time.Duration(offsetSecs) * time.Second
log.Debugf("Added time sample of %s (total: %d)", offsetDuration,
numOffsets)
// The median offset is only updated when there are enough offsets and
// the number of offsets is odd so the middle value is the true median.
// Thus, there is nothing to do when those conditions are not met.
if numOffsets < 5 || numOffsets&0x01 != 1 {
return
}
// At this point the number of offsets in the list is odd, so the
// middle value of the sorted offsets is the median.
median := sortedOffsets[numOffsets/2]
// Set the new offset when the median offset is within the allowed
// offset range.
if math.Abs(float64(median)) < maxAllowedOffsetSecs {
m.offsetSecs = median
} else {
// The median offset of all added time data is larger than the
// maximum allowed offset, so don't use an offset. This
// effectively limits how far the local clock can be skewed.
m.offsetSecs = 0
if !m.invalidTimeChecked {
m.invalidTimeChecked = true
// Find if any time samples have a time that is close
// to the local time.
var remoteHasCloseTime bool
for _, offset := range sortedOffsets {
if math.Abs(float64(offset)) < similarTimeSecs {
remoteHasCloseTime = true
break
}
}
// Warn if none of the time samples are close.
if !remoteHasCloseTime {
log.Warnf("Please check your date and time " +
"are correct! kaspad will not work " +
"properly with an invalid time")
}
}
}
medianDuration := time.Duration(m.offsetSecs) * time.Second
log.Debugf("New time offset: %d", medianDuration)
}
// Offset returns the number of seconds to adjust the local clock based upon the
// median of the time samples added by AddTimeSample.
//
// This function is safe for concurrent access and is part of the
// MedianTimeSource interface implementation.
func (m *medianTime) Offset() time.Duration {
m.mtx.Lock()
defer m.mtx.Unlock()
return time.Duration(m.offsetSecs) * time.Second
}
// NewMedianTime returns a new instance of a concurrency-safe implementation of
// the MedianTimeSource interface. The returned implementation contains the
// rules necessary for proper time handling in the DAG consensus rules and
// expects the time samples to be added from the timestamp field of the version
// message received from remote peers that successfully connect and negotiate.
func NewMedianTime() MedianTimeSource {
return &medianTime{
knownIDs: make(map[string]struct{}),
offsets: make([]int64, 0, maxMedianTimeEntries),
}
}
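
A small self-contained sketch of the median selection AddTimeSample performs once at least five samples are present and their count is odd: sort the offsets, take the middle element, and discard it if it falls outside the allowed adjustment window. The sample values are taken from the first multi-sample case in TestMedianTime below.

package main

import (
	"fmt"
	"sort"
)

func main() {
	offsets := []int64{-13, 57, -4, -23, -12} // peer clock offsets in seconds
	const maxAllowedOffsetSecs = 70 * 60      // same window as mediantime.go

	sorted := make([]int64, len(offsets))
	copy(sorted, offsets)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })

	// With an odd number of (at least five) samples, the middle element of
	// the sorted slice is the median offset.
	median := sorted[len(sorted)/2]
	if median > maxAllowedOffsetSecs || median < -maxAllowedOffsetSecs {
		median = 0 // too far from the local clock: apply no offset
	}
	fmt.Println(median) // -12
}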

View File

@@ -1,102 +0,0 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"strconv"
"testing"
"time"
)
// TestMedianTime tests the medianTime implementation.
func TestMedianTime(t *testing.T) {
tests := []struct {
in []int64
wantOffset int64
useDupID bool
}{
// Not enough samples must result in an offset of 0.
{in: []int64{1}, wantOffset: 0},
{in: []int64{1, 2}, wantOffset: 0},
{in: []int64{1, 2, 3}, wantOffset: 0},
{in: []int64{1, 2, 3, 4}, wantOffset: 0},
// Various number of entries. The expected offset is only
// updated on odd number of elements.
{in: []int64{-13, 57, -4, -23, -12}, wantOffset: -12},
{in: []int64{55, -13, 61, -52, 39, 55}, wantOffset: 39},
{in: []int64{-62, -58, -30, -62, 51, -30, 15}, wantOffset: -30},
{in: []int64{29, -47, 39, 54, 42, 41, 8, -33}, wantOffset: 39},
{in: []int64{37, 54, 9, -21, -56, -36, 5, -11, -39}, wantOffset: -11},
{in: []int64{57, -28, 25, -39, 9, 63, -16, 19, -60, 25}, wantOffset: 9},
{in: []int64{-5, -4, -3, -2, -1}, wantOffset: -3, useDupID: true},
// The offset stops being updated once the max number of entries
// has been reached.
{in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52}, wantOffset: 17},
{in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52, 45}, wantOffset: 17},
{in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52, 45, 4}, wantOffset: 17},
// Offsets that are too far away from the local time should
// be ignored.
{in: []int64{-4201, 4202, -4203, 4204, -4205}, wantOffset: 0},
// Exercise the condition where the median offset is greater
// than the max allowed adjustment, but there is at least one
// sample that is close enough to the current time to avoid
// triggering a warning about an invalid local clock.
{in: []int64{4201, 4202, 4203, 4204, -299}, wantOffset: 0},
}
// Modify the max number of allowed median time entries for these tests.
maxMedianTimeEntries = 10
defer func() { maxMedianTimeEntries = 200 }()
for i, test := range tests {
filter := NewMedianTime()
for j, offset := range test.in {
id := strconv.Itoa(j)
now := time.Unix(time.Now().Unix(), 0)
tOffset := now.Add(time.Duration(offset) * time.Second)
filter.AddTimeSample(id, tOffset)
// Ensure the duplicate IDs are ignored.
if test.useDupID {
// Modify the offsets to ensure the final median
// would be different if the duplicate is added.
tOffset = tOffset.Add(time.Duration(offset) *
time.Second)
filter.AddTimeSample(id, tOffset)
}
}
// Since it is possible that the time.Now call in AddTimeSample
// and the time.Now calls here in the tests will be off by one
// second, allow a fudge factor to compensate.
gotOffset := filter.Offset()
wantOffset := time.Duration(test.wantOffset) * time.Second
wantOffset2 := time.Duration(test.wantOffset-1) * time.Second
if gotOffset != wantOffset && gotOffset != wantOffset2 {
t.Errorf("Offset #%d: unexpected offset -- got %v, "+
"want %v or %v", i, gotOffset, wantOffset,
wantOffset2)
continue
}
// Since it is possible that the time.Now call in AdjustedTime
// and the time.Now call here in the tests will be off by one
// second, allow a fudge factor to compensate.
adjustedTime := filter.AdjustedTime()
now := time.Unix(time.Now().Unix(), 0)
wantTime := now.Add(filter.Offset())
wantTime2 := now.Add(filter.Offset() - time.Second)
if !adjustedTime.Equal(wantTime) && !adjustedTime.Equal(wantTime2) {
t.Errorf("AdjustedTime #%d: unexpected result -- got %v, "+
"want %v or %v", i, adjustedTime, wantTime,
wantTime2)
continue
}
}
}

View File

@@ -3,17 +3,19 @@ package blockdag
import (
"bytes"
"encoding/binary"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"sort"
"time"
)
// BlockForMining returns a block with the given transactions
// that points to the current DAG tips, that is valid from
// all aspects except proof of work.
//
// This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*wire.MsgBlock, error) {
blockTimestamp := dag.NextBlockTime()
requiredDifficulty := dag.NextRequiredDifficulty(blockTimestamp)
@@ -25,17 +27,6 @@ func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*wire.MsgBlock, er
return nil, err
}
// Sort transactions by subnetwork ID before building Merkle tree
sort.Slice(transactions, func(i, j int) bool {
if transactions[i].MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase) {
return true
}
if transactions[j].MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase) {
return false
}
return subnetworkid.Less(&transactions[i].MsgTx().SubnetworkID, &transactions[j].MsgTx().SubnetworkID)
})
// Create a new block ready to be solved.
hashMerkleTree := BuildHashMerkleTreeStore(transactions)
acceptedIDMerkleRoot, err := dag.NextAcceptedIDMerkleRootNoLock()
@@ -47,18 +38,17 @@ func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*wire.MsgBlock, er
msgBlock.AddTransaction(tx.MsgTx())
}
utxoWithTransactions, err := dag.UTXOSet().WithTransactions(msgBlock.Transactions, UnacceptedBlueScore, false)
multiset, err := dag.NextBlockMultiset()
if err != nil {
return nil, err
}
utxoCommitment := utxoWithTransactions.Multiset().Hash()
msgBlock.Header = wire.BlockHeader{
Version: nextBlockVersion,
ParentHashes: dag.TipHashes(),
HashMerkleRoot: hashMerkleTree.Root(),
AcceptedIDMerkleRoot: acceptedIDMerkleRoot,
UTXOCommitment: utxoCommitment,
UTXOCommitment: (*daghash.Hash)(multiset.Finalize()),
Timestamp: blockTimestamp,
Bits: requiredDifficulty,
}
@@ -66,6 +56,19 @@ func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*wire.MsgBlock, er
return &msgBlock, nil
}
// NextBlockMultiset returns the multiset of an assumed next block
// built on top of the current tips.
//
// This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) NextBlockMultiset() (*secp256k1.MultiSet, error) {
_, selectedParentPastUTXO, txsAcceptanceData, err := dag.pastUTXO(&dag.virtual.blockNode)
if err != nil {
return nil, err
}
return dag.virtual.blockNode.calcMultiset(dag, txsAcceptanceData, selectedParentPastUTXO)
}
// CoinbasePayloadExtraData returns the coinbase payload extra data parameter,
// which is built from the extra nonce and the coinbase flags.
func CoinbasePayloadExtraData(extraNonce uint64, coinbaseFlags string) ([]byte, error) {
@@ -114,7 +117,7 @@ func (dag *BlockDAG) NextBlockTime() time.Time {
// timestamp is truncated to a second boundary before comparison since a
// block timestamp does not support a precision greater than one
// second.
newTimestamp := dag.AdjustedTime()
newTimestamp := dag.Now()
minTimestamp := dag.NextBlockMinimumTime()
if newTimestamp.Before(minTimestamp) {
newTimestamp = minTimestamp

blockdag/multisetio.go Normal file
View File

@@ -0,0 +1,29 @@
package blockdag
import (
"encoding/binary"
"github.com/kaspanet/go-secp256k1"
"io"
)
const multisetPointSize = 32
// serializeMultiset serializes an ECMH multiset.
func serializeMultiset(w io.Writer, ms *secp256k1.MultiSet) error {
serialized := ms.Serialize()
err := binary.Write(w, byteOrder, serialized)
if err != nil {
return err
}
return nil
}
// deserializeMultiset deserializes an ECMH multiset.
func deserializeMultiset(r io.Reader) (*secp256k1.MultiSet, error) {
serialized := &secp256k1.SerializedMultiSet{}
err := binary.Read(r, byteOrder, serialized[:])
if err != nil {
return nil, err
}
return secp256k1.DeserializeMultiSet(serialized)
}
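
A hedged sketch of how the two helpers above compose into a round trip; how the multiset itself is obtained (for example from calcMultiset or the multiset store) is outside the scope of this sketch, and roundTripMultiset is a hypothetical helper.

package blockdag // hypothetical example, for illustration only

import (
	"bytes"

	"github.com/kaspanet/go-secp256k1"
)

// roundTripMultiset serializes ms with serializeMultiset and immediately
// reads it back with deserializeMultiset, returning the reconstructed copy.
func roundTripMultiset(ms *secp256k1.MultiSet) (*secp256k1.MultiSet, error) {
	buf := &bytes.Buffer{}
	if err := serializeMultiset(buf, ms); err != nil {
		return nil, err
	}
	return deserializeMultiset(bytes.NewReader(buf.Bytes()))
}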

blockdag/multisetstore.go Normal file
View File

@@ -0,0 +1,131 @@
package blockdag
import (
"bytes"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/locks"
"github.com/pkg/errors"
)
type multisetStore struct {
dag *BlockDAG
new map[daghash.Hash]struct{}
loaded map[daghash.Hash]secp256k1.MultiSet
mtx *locks.PriorityMutex
}
func newMultisetStore(dag *BlockDAG) *multisetStore {
return &multisetStore{
dag: dag,
new: make(map[daghash.Hash]struct{}),
loaded: make(map[daghash.Hash]secp256k1.MultiSet),
}
}
func (store *multisetStore) setMultiset(node *blockNode, ms *secp256k1.MultiSet) {
store.loaded[*node.hash] = *ms
store.addToNewBlocks(node.hash)
}
func (store *multisetStore) addToNewBlocks(blockHash *daghash.Hash) {
store.new[*blockHash] = struct{}{}
}
func multisetNotFoundError(blockHash *daghash.Hash) error {
return errors.Errorf("Couldn't find multiset data for block %s", blockHash)
}
func (store *multisetStore) multisetByBlockNode(node *blockNode) (*secp256k1.MultiSet, error) {
ms, exists := store.multisetByBlockHash(node.hash)
if !exists {
return nil, multisetNotFoundError(node.hash)
}
return ms, nil
}
func (store *multisetStore) multisetByBlockHash(hash *daghash.Hash) (*secp256k1.MultiSet, bool) {
ms, ok := store.loaded[*hash]
return &ms, ok
}
// flushToDB writes all new multiset data to the database.
func (store *multisetStore) flushToDB(dbContext *dbaccess.TxContext) error {
if len(store.new) == 0 {
return nil
}
w := &bytes.Buffer{}
for hash := range store.new {
hash := hash // Copy hash to a new variable to avoid passing the same pointer
w.Reset()
ms, exists := store.loaded[hash]
if !exists {
return multisetNotFoundError(&hash)
}
err := serializeMultiset(w, &ms)
if err != nil {
return err
}
err = store.storeMultiset(dbContext, &hash, w.Bytes())
if err != nil {
return err
}
}
return nil
}
func (store *multisetStore) clearNewEntries() {
store.new = make(map[daghash.Hash]struct{})
}
func (store *multisetStore) init(dbContext dbaccess.Context) error {
cursor, err := dbaccess.MultisetCursor(dbContext)
if err != nil {
return err
}
defer cursor.Close()
for ok := cursor.First(); ok; ok = cursor.Next() {
key, err := cursor.Key()
if err != nil {
return err
}
hash, err := daghash.NewHash(key.Suffix())
if err != nil {
return err
}
serializedMS, err := cursor.Value()
if err != nil {
return err
}
ms, err := deserializeMultiset(bytes.NewReader(serializedMS))
if err != nil {
return err
}
store.loaded[*hash] = *ms
}
return nil
}
// storeMultiset stores the multiset data to the database.
func (store *multisetStore) storeMultiset(dbContext dbaccess.Context, blockHash *daghash.Hash, serializedMS []byte) error {
exists, err := dbaccess.HasMultiset(dbContext, blockHash)
if err != nil {
return err
}
if exists {
return errors.Errorf("Can't override an existing multiset database entry for block %s", blockHash)
}
return dbaccess.StoreMultiset(dbContext, blockHash, serializedMS)
}
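A hedged sketch of how the store is meant to be driven end to end. It assumes the BlockDAG exposes the store through a multisetStore field (not shown in this diff) and uses the dbaccess transaction API that appears elsewhere in this change set; the helper name is hypothetical.
package blockdag

import (
	"github.com/kaspanet/go-secp256k1"
	"github.com/kaspanet/kaspad/dbaccess"
)

// persistBlockMultiset is a hypothetical in-package helper: it caches the
// block's multiset in memory, flushes every new entry inside a single
// database transaction, and clears the new-entry set after a successful
// commit. The dag.multisetStore field is an assumption.
func persistBlockMultiset(dag *BlockDAG, node *blockNode, ms *secp256k1.MultiSet) error {
	dag.multisetStore.setMultiset(node, ms)

	dbTx, err := dbaccess.NewTx()
	if err != nil {
		return err
	}
	defer dbTx.RollbackUnlessClosed()

	if err := dag.multisetStore.flushToDB(dbTx); err != nil {
		return err
	}
	if err := dbTx.Commit(); err != nil {
		return err
	}

	dag.multisetStore.clearNewEntries()
	return nil
}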

View File

@@ -57,8 +57,8 @@ type Notification struct {
// NotificationType for details on the types and contents of notifications.
func (dag *BlockDAG) Subscribe(callback NotificationCallback) {
dag.notificationsLock.Lock()
defer dag.notificationsLock.Unlock()
dag.notifications = append(dag.notifications, callback)
dag.notificationsLock.Unlock()
}
// sendNotification sends a notification with the passed type and data if the
@@ -68,10 +68,10 @@ func (dag *BlockDAG) sendNotification(typ NotificationType, data interface{}) {
// Generate and send the notification.
n := Notification{Type: typ, Data: data}
dag.notificationsLock.RLock()
defer dag.notificationsLock.RUnlock()
for _, callback := range dag.notifications {
callback(&n)
}
dag.notificationsLock.RUnlock()
}
// BlockAddedNotificationData defines data to be sent along with a BlockAdded
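For context, a hedged sketch of a consumer of Subscribe. NTBlockAdded and the Block field of BlockAddedNotificationData are assumed from the surrounding package rather than defined in this hunk.
package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/blockdag"
)

// subscribeToBlockAdded is a hypothetical consumer of the notification API:
// it registers a callback and reacts only to BlockAdded notifications.
func subscribeToBlockAdded(dag *blockdag.BlockDAG) {
	dag.Subscribe(func(notification *blockdag.Notification) {
		if notification.Type != blockdag.NTBlockAdded {
			return
		}
		data, ok := notification.Data.(*blockdag.BlockAddedNotificationData)
		if !ok {
			return
		}
		fmt.Printf("block %s was added to the DAG\n", data.Block.Hash())
	})
}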

View File

@@ -19,7 +19,7 @@ func TestNotifications(t *testing.T) {
}
// Create a new database and dag instance to run tests against.
dag, teardownFunc, err := DAGSetup("notifications", Config{
dag, teardownFunc, err := DAGSetup("notifications", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {

View File

@@ -7,6 +7,7 @@ package blockdag
import (
"fmt"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/pkg/errors"
"time"
"github.com/kaspanet/kaspad/util"
@@ -44,15 +45,19 @@ const (
// in the block index but was never fully processed
BFWasStored
// BFDisallowDelay is set to indicate that a delayed block should be rejected.
// This is used for the case where a block is submitted through RPC.
BFDisallowDelay
// BFNone is a convenience value to specifically indicate no flags.
BFNone BehaviorFlags = 0
)
// BlockExists determines whether a block with the given hash exists in
// IsInDAG determines whether a block with the given hash exists in
// the DAG.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) BlockExists(hash *daghash.Hash) bool {
func (dag *BlockDAG) IsInDAG(hash *daghash.Hash) bool {
return dag.index.HaveBlock(hash)
}
@@ -95,7 +100,8 @@ func (dag *BlockDAG) processOrphans(hash *daghash.Hash, flags BehaviorFlags) err
// still missing.
_, err := lookupParentNodes(orphan.block, dag)
if err != nil {
if ruleErr, ok := err.(RuleError); ok && ruleErr.ErrorCode == ErrParentBlockUnknown {
var ruleErr RuleError
if ok := errors.As(err, &ruleErr); ok && ruleErr.ErrorCode == ErrParentBlockUnknown {
continue
}
return err
@@ -111,7 +117,7 @@ func (dag *BlockDAG) processOrphans(hash *daghash.Hash, flags BehaviorFlags) err
if err != nil {
// Since we don't want to reject the original block because of
// a bad unorphaned child, only return an error if it's not a RuleError.
if _, ok := err.(RuleError); !ok {
if !errors.As(err, &RuleError{}) {
return err
}
log.Warnf("Verification failed for orphan block %s: %s", orphanHash, err)
@@ -144,12 +150,13 @@ func (dag *BlockDAG) ProcessBlock(block *util.Block, flags BehaviorFlags) (isOrp
func (dag *BlockDAG) processBlockNoLock(block *util.Block, flags BehaviorFlags) (isOrphan bool, isDelayed bool, err error) {
isAfterDelay := flags&BFAfterDelay == BFAfterDelay
wasBlockStored := flags&BFWasStored == BFWasStored
disallowDelay := flags&BFDisallowDelay == BFDisallowDelay
blockHash := block.Hash()
log.Tracef("Processing block %s", blockHash)
// The block must not already exist in the DAG.
if dag.BlockExists(blockHash) && !wasBlockStored {
if dag.IsInDAG(blockHash) && !wasBlockStored {
str := fmt.Sprintf("already have block %s", blockHash)
return false, false, ruleError(ErrDuplicateBlock, str)
}
@@ -172,6 +179,11 @@ func (dag *BlockDAG) processBlockNoLock(block *util.Block, flags BehaviorFlags)
return false, false, err
}
if delay != 0 && disallowDelay {
str := fmt.Sprintf("Cannot process blocks beyond the allowed time offset while the BFDisallowDelay flag is raised %s", blockHash)
return false, true, ruleError(ErrDelayedBlockIsNotAllowed, str)
}
if delay != 0 {
err = dag.addDelayedBlock(block, delay)
if err != nil {
@@ -183,7 +195,7 @@ func (dag *BlockDAG) processBlockNoLock(block *util.Block, flags BehaviorFlags)
var missingParents []*daghash.Hash
for _, parentHash := range block.MsgBlock().Header.ParentHashes {
if !dag.BlockExists(parentHash) {
if !dag.IsInDAG(parentHash) {
missingParents = append(missingParents, parentHash)
}
}
@@ -241,6 +253,8 @@ func (dag *BlockDAG) processBlockNoLock(block *util.Block, flags BehaviorFlags)
}
}
dag.addBlockProcessingTimestamp()
log.Debugf("Accepted block %s", blockHash)
return false, false, nil
@@ -252,7 +266,7 @@ func (dag *BlockDAG) maxDelayOfParents(parentHashes []*daghash.Hash) (delay time
for _, parentHash := range parentHashes {
if delayedParent, exists := dag.delayedBlocks[*parentHash]; exists {
isDelayed = true
parentDelay := delayedParent.processTime.Sub(dag.AdjustedTime())
parentDelay := delayedParent.processTime.Sub(dag.Now())
if parentDelay > delay {
delay = parentDelay
}
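The hunks above replace RuleError type assertions with errors.As; below is a minimal sketch of the same pattern from a caller's point of view, with a hypothetical helper name.
package main

import (
	"github.com/kaspanet/kaspad/blockdag"
	"github.com/kaspanet/kaspad/util"
	"github.com/pkg/errors"
)

// processAndClassify is a hypothetical sketch of the errors.As pattern this
// change moves to: rule violations are recognized and swallowed, while any
// other error is propagated to the caller.
func processAndClassify(dag *blockdag.BlockDAG, block *util.Block) error {
	_, _, err := dag.ProcessBlock(block, blockdag.BFNone)
	if err == nil {
		return nil
	}
	var ruleErr blockdag.RuleError
	if errors.As(err, &ruleErr) {
		// The block violates a consensus rule; treat it as rejected, not failed.
		return nil
	}
	return err
}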

View File

@@ -1,15 +1,17 @@
package blockdag
import (
"github.com/kaspanet/kaspad/util"
"path/filepath"
"testing"
"time"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util/daghash"
)
func TestProcessOrphans(t *testing.T) {
dag, teardownFunc, err := DAGSetup("TestProcessOrphans", Config{
dag, teardownFunc, err := DAGSetup("TestProcessOrphans", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
@@ -69,3 +71,164 @@ func TestProcessOrphans(t *testing.T) {
t.Fatalf("TestProcessOrphans: child block erroneously not marked as invalid")
}
}
func TestProcessDelayedBlocks(t *testing.T) {
// We use dag1 so we can build the test blocks with the proper
// block header (UTXO commitment, acceptedIDMerkleroot, etc), and
// then we use dag2 for the actual test.
dag1, teardownFunc, err := DAGSetup("TestProcessDelayedBlocks1", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
t.Fatalf("Failed to setup DAG instance: %v", err)
}
isDAG1Open := true
defer func() {
if isDAG1Open {
teardownFunc()
}
}()
initialTime := dag1.dagParams.GenesisBlock.Header.Timestamp
// Here we use a fake time source that returns a timestamp
// one hour into the future to make delayedBlock artificially
// valid.
dag1.timeSource = newFakeTimeSource(initialTime.Add(time.Hour))
delayedBlock, err := PrepareBlockForTest(dag1, []*daghash.Hash{dag1.dagParams.GenesisBlock.BlockHash()}, nil)
if err != nil {
t.Fatalf("error in PrepareBlockForTest: %s", err)
}
blockDelay := time.Duration(dag1.dagParams.TimestampDeviationTolerance*uint64(dag1.targetTimePerBlock)+5) * time.Second
delayedBlock.Header.Timestamp = initialTime.Add(blockDelay)
isOrphan, isDelayed, err := dag1.ProcessBlock(util.NewBlock(delayedBlock), BFNoPoWCheck)
if err != nil {
t.Fatalf("ProcessBlock returned unexpected error: %s\n", err)
}
if isOrphan {
t.Fatalf("ProcessBlock incorrectly returned delayedBlock " +
"is an orphan\n")
}
if isDelayed {
t.Fatalf("ProcessBlock incorrectly returned delayedBlock " +
"is delayed\n")
}
delayedBlockChild, err := PrepareBlockForTest(dag1, []*daghash.Hash{delayedBlock.BlockHash()}, nil)
if err != nil {
t.Fatalf("error in PrepareBlockForTest: %s", err)
}
teardownFunc()
isDAG1Open = false
// Here the actual test begins. We add a delayed block and
// its child and check that they are not added to the DAG,
// and check that they're added only if we add a new block
// after the delayed block timestamp is valid.
dag2, teardownFunc2, err := DAGSetup("TestProcessDelayedBlocks2", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
t.Fatalf("Failed to setup DAG instance: %v", err)
}
defer teardownFunc2()
dag2.timeSource = newFakeTimeSource(initialTime)
isOrphan, isDelayed, err = dag2.ProcessBlock(util.NewBlock(delayedBlock), BFNoPoWCheck)
if err != nil {
t.Fatalf("ProcessBlock returned unexpected error: %s\n", err)
}
if isOrphan {
t.Fatalf("ProcessBlock incorrectly returned delayedBlock " +
"is an orphan\n")
}
if !isDelayed {
t.Fatalf("ProcessBlock incorrectly returned delayedBlock " +
"is not delayed\n")
}
if dag2.IsInDAG(delayedBlock.BlockHash()) {
t.Errorf("dag.IsInDAG should return false for a delayed block")
}
if !dag2.IsKnownBlock(delayedBlock.BlockHash()) {
t.Errorf("dag.IsKnownBlock should return true for a a delayed block")
}
isOrphan, isDelayed, err = dag2.ProcessBlock(util.NewBlock(delayedBlockChild), BFNoPoWCheck)
if err != nil {
t.Fatalf("ProcessBlock returned unexpected error: %s\n", err)
}
if isOrphan {
t.Fatalf("ProcessBlock incorrectly returned delayedBlockChild " +
"is an orphan\n")
}
if !isDelayed {
t.Fatalf("ProcessBlock incorrectly returned delayedBlockChild " +
"is not delayed\n")
}
if dag2.IsInDAG(delayedBlockChild.BlockHash()) {
t.Errorf("dag.IsInDAG should return false for a child of a delayed block")
}
if !dag2.IsKnownBlock(delayedBlockChild.BlockHash()) {
t.Errorf("dag.IsKnownBlock should return true for a child of a delayed block")
}
blockBeforeDelay, err := PrepareBlockForTest(dag2, []*daghash.Hash{dag2.dagParams.GenesisBlock.BlockHash()}, nil)
if err != nil {
t.Fatalf("error in PrepareBlockForTest: %s", err)
}
isOrphan, isDelayed, err = dag2.ProcessBlock(util.NewBlock(blockBeforeDelay), BFNoPoWCheck)
if err != nil {
t.Fatalf("ProcessBlock returned unexpected error: %s\n", err)
}
if isOrphan {
t.Fatalf("ProcessBlock incorrectly returned blockBeforeDelay " +
"is an orphan\n")
}
if isDelayed {
t.Fatalf("ProcessBlock incorrectly returned blockBeforeDelay " +
"is delayed\n")
}
if dag2.IsInDAG(delayedBlock.BlockHash()) {
t.Errorf("delayedBlock shouldn't be added to the DAG because its time hasn't reached yet")
}
if dag2.IsInDAG(delayedBlockChild.BlockHash()) {
t.Errorf("delayedBlockChild shouldn't be added to the DAG because its parent is not in the DAG")
}
// We advance the clock to the point where delayedBlock timestamp is valid.
deviationTolerance := int64(dag2.TimestampDeviationTolerance) * dag2.targetTimePerBlock
secondsUntilDelayedBlockIsValid := delayedBlock.Header.Timestamp.Unix() - deviationTolerance - dag2.Now().Unix() + 1
dag2.timeSource = newFakeTimeSource(initialTime.Add(time.Duration(secondsUntilDelayedBlockIsValid) * time.Second))
blockAfterDelay, err := PrepareBlockForTest(dag2,
[]*daghash.Hash{dag2.dagParams.GenesisBlock.BlockHash()},
nil)
if err != nil {
t.Fatalf("error in PrepareBlockForTest: %s", err)
}
isOrphan, isDelayed, err = dag2.ProcessBlock(util.NewBlock(blockAfterDelay), BFNoPoWCheck)
if err != nil {
t.Fatalf("ProcessBlock returned unexpected error: %s\n", err)
}
if isOrphan {
t.Fatalf("ProcessBlock incorrectly returned blockBeforeDelay " +
"is an orphan\n")
}
if isDelayed {
t.Fatalf("ProcessBlock incorrectly returned blockBeforeDelay " +
"is not delayed\n")
}
if !dag2.IsInDAG(delayedBlock.BlockHash()) {
t.Fatalf("delayedBlock should be added to the DAG because its time has been reached")
}
if !dag2.IsInDAG(delayedBlockChild.BlockHash()) {
t.Errorf("delayedBlockChild shouldn't be added to the DAG because its parent has been added to the DAG")
}
}
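Judging from the quantities the test computes, a block counts as delayed when its timestamp exceeds the node's current time by more than TimestampDeviationTolerance * targetTimePerBlock. delayedBlock is therefore stamped 5 seconds past that bound relative to dag2's fake clock, and secondsUntilDelayedBlockIsValid later advances that clock to one second after the earliest moment at which the timestamp falls back within the allowed offset, which is what finally lets the delayed block and its child enter the DAG.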

View File

@@ -5,6 +5,7 @@ import (
"github.com/pkg/errors"
"math"
"strings"
"time"
)
// reachabilityInterval represents an interval to be used within the
@@ -190,11 +191,6 @@ type reachabilityTreeNode struct {
// remainingInterval is the not-yet allocated interval (within
// this node's interval) awaiting new children
remainingInterval *reachabilityInterval
// subtreeSize is a helper field used only during reindexing
// (expected to be 0 any other time).
// See countSubtrees for further details.
subtreeSize uint64
}
func newReachabilityTreeNode(blockNode *blockNode) *reachabilityTreeNode {
@@ -218,7 +214,16 @@ func (rtn *reachabilityTreeNode) addChild(child *reachabilityTreeNode) ([]*reach
// No allocation space left -- reindex
if rtn.remainingInterval.size() == 0 {
return rtn.reindexIntervals()
reindexStartTime := time.Now()
modifiedNodes, err := rtn.reindexIntervals()
if err != nil {
return nil, err
}
reindexTimeElapsed := time.Since(reindexStartTime)
log.Debugf("Reachability reindex triggered for "+
"block %s. Modified %d tree nodes and took %dms.",
rtn.blockNode.hash, len(modifiedNodes), reindexTimeElapsed.Milliseconds())
return modifiedNodes, nil
}
// Allocate from the remaining space
@@ -253,10 +258,12 @@ func (rtn *reachabilityTreeNode) reindexIntervals() ([]*reachabilityTreeNode, er
// Initial interval and subtree sizes
intervalSize := current.interval.size()
subtreeSize := current.countSubtrees()
subTreeSizeMap := make(map[*reachabilityTreeNode]uint64)
current.countSubtrees(subTreeSizeMap)
currentSubtreeSize := subTreeSizeMap[current]
// Find the first ancestor that has sufficient interval space
for intervalSize < subtreeSize {
for intervalSize < currentSubtreeSize {
if current.parent == nil {
// If we ended up here it means that there are more
// than 2^64 blocks, which shouldn't ever happen.
@@ -267,14 +274,16 @@ func (rtn *reachabilityTreeNode) reindexIntervals() ([]*reachabilityTreeNode, er
}
current = current.parent
intervalSize = current.interval.size()
subtreeSize = current.countSubtrees()
current.countSubtrees(subTreeSizeMap)
currentSubtreeSize = subTreeSizeMap[current]
}
// Propagate the interval to the subtree
return current.propagateInterval()
return current.propagateInterval(subTreeSizeMap)
}
// countSubtrees counts the size of each subtree under this node.
// countSubtrees counts the size of each subtree under this node,
// and populates the provided subTreeSizeMap with the results.
// It is equivalent to the following recursive implementation:
//
// func (rtn *reachabilityTreeNode) countSubtrees() uint64 {
@@ -293,36 +302,20 @@ func (rtn *reachabilityTreeNode) reindexIntervals() ([]*reachabilityTreeNode, er
// intermediate updates from leaves via parent chains until all
// size information is gathered at the root of the operation
// (i.e. at rtn).
//
// Note the role of the subtreeSize field in the algorithm.
// For each node rtn this field is initialized to 0. The field
// has two possible states:
// * rtn.subtreeSize > |rtn.children|:
// indicates that rtn's subtree size is already known and
// calculated.
// * rtn.subtreeSize <= |rtn.children|:
// indicates that we are still in the counting stage of
// tracking which of rtn's children has already calculated
// its subtree size.
// This way, once rtn.subtree_size = |rtn.children| we know we
// can pull subtree sizes from children and continue pushing
// the readiness signal further up.
func (rtn *reachabilityTreeNode) countSubtrees() uint64 {
func (rtn *reachabilityTreeNode) countSubtrees(subTreeSizeMap map[*reachabilityTreeNode]uint64) {
queue := []*reachabilityTreeNode{rtn}
calculatedChildrenCount := make(map[*reachabilityTreeNode]uint64)
for len(queue) > 0 {
var current *reachabilityTreeNode
current, queue = queue[0], queue[1:]
if len(current.children) == 0 {
// We reached a leaf
current.subtreeSize = 1
}
if current.subtreeSize <= uint64(len(current.children)) {
subTreeSizeMap[current] = 1
} else if _, ok := subTreeSizeMap[current]; !ok {
// We haven't yet calculated the subtree size of
// the current node. Add all its children to the
// queue
for _, child := range current.children {
queue = append(queue, child)
}
queue = append(queue, current.children...)
continue
}
@@ -330,39 +323,39 @@ func (rtn *reachabilityTreeNode) countSubtrees() uint64 {
// Push information up
for current != rtn {
current = current.parent
current.subtreeSize++
if current.subtreeSize != uint64(len(current.children)) {
calculatedChildrenCount[current]++
if calculatedChildrenCount[current] != uint64(len(current.children)) {
// Not all subtrees of the current node are ready
break
}
// All subtrees of current have reported readiness.
// Count actual subtree size and continue pushing up.
// All children of `current` have calculated their subtree size.
// Sum them all together and add 1 to get the subtree size of
// `current`.
childSubtreeSizeSum := uint64(0)
for _, child := range current.children {
childSubtreeSizeSum += child.subtreeSize
childSubtreeSizeSum += subTreeSizeMap[child]
}
current.subtreeSize = childSubtreeSizeSum + 1
subTreeSizeMap[current] = childSubtreeSizeSum + 1
}
}
return rtn.subtreeSize
}
// propagateInterval propagates the new interval using a BFS traversal.
// Subtree intervals are recursively allocated according to subtree sizes and
// the allocation rule in splitWithExponentialBias. This method returns
// a list of reachabilityTreeNodes modified by it.
func (rtn *reachabilityTreeNode) propagateInterval() ([]*reachabilityTreeNode, error) {
func (rtn *reachabilityTreeNode) propagateInterval(subTreeSizeMap map[*reachabilityTreeNode]uint64) ([]*reachabilityTreeNode, error) {
// We set the interval to reset its remainingInterval, so we could reallocate it while reindexing.
rtn.setInterval(rtn.interval)
modifiedNodes := []*reachabilityTreeNode{rtn}
queue := []*reachabilityTreeNode{rtn}
var modifiedNodes []*reachabilityTreeNode
for len(queue) > 0 {
var current *reachabilityTreeNode
current, queue = queue[0], queue[1:]
if len(current.children) > 0 {
sizes := make([]uint64, len(current.children))
for i, child := range current.children {
sizes[i] = child.subtreeSize
sizes[i] = subTreeSizeMap[child]
}
intervals, err := current.remainingInterval.splitWithExponentialBias(sizes)
if err != nil {
@@ -379,9 +372,6 @@ func (rtn *reachabilityTreeNode) propagateInterval() ([]*reachabilityTreeNode, e
}
modifiedNodes = append(modifiedNodes, current)
// Cleanup temp info for future reindexing
current.subtreeSize = 0
}
return modifiedNodes, nil
}
@@ -401,7 +391,7 @@ func (rtn *reachabilityTreeNode) String() string {
var current *reachabilityTreeNode
current, queue = queue[0], queue[1:]
if len(current.children) == 0 {
break
continue
}
line := ""
@@ -527,7 +517,8 @@ func (dag *BlockDAG) updateReachability(node *blockNode, selectedParentAnticone
// If this is the genesis node, simply initialize it and return
if node.isGenesis() {
return dag.reachabilityStore.setTreeNode(newTreeNode)
dag.reachabilityStore.setTreeNode(newTreeNode)
return nil
}
// Insert the node into the selected parent's reachability tree
@@ -540,10 +531,7 @@ func (dag *BlockDAG) updateReachability(node *blockNode, selectedParentAnticone
return err
}
for _, modifiedTreeNode := range modifiedTreeNodes {
err = dag.reachabilityStore.setTreeNode(modifiedTreeNode)
if err != nil {
return err
}
dag.reachabilityStore.setTreeNode(modifiedTreeNode)
}
// Add the block to the futureCoveringSets of all the blocks
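A tiny worked example of the new map-based countSubtrees signature introduced above; this is an illustrative in-package fragment, assuming nothing beyond the constructor and fields already used in this file.
package blockdag

// countSubtreesExample is an illustrative fragment, not part of the change.
// It builds the tree
//
//	root
//	/  \
//	a    b
//	     |
//	     c
//
// and returns the computed subtree sizes. The expected contents are
// sizes[a] == 1, sizes[c] == 1, sizes[b] == 2 and sizes[root] == 4.
func countSubtreesExample() map[*reachabilityTreeNode]uint64 {
	root := newReachabilityTreeNode(&blockNode{})
	a := newReachabilityTreeNode(&blockNode{})
	b := newReachabilityTreeNode(&blockNode{})
	c := newReachabilityTreeNode(&blockNode{})

	root.children = []*reachabilityTreeNode{a, b}
	a.parent, b.parent = root, root
	b.children = []*reachabilityTreeNode{c}
	c.parent = b

	sizes := make(map[*reachabilityTreeNode]uint64)
	root.countSubtrees(sizes)
	return sizes
}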

View File

@@ -2,6 +2,7 @@ package blockdag
import (
"reflect"
"strings"
"testing"
)
@@ -322,6 +323,15 @@ func TestSplitWithExponentialBias(t *testing.T) {
newReachabilityInterval(41, 10_000),
},
},
{
interval: newReachabilityInterval(1, 100_000),
sizes: []uint64{31_000, 31_000, 30_001},
expectedIntervals: []*reachabilityInterval{
newReachabilityInterval(1, 35_000),
newReachabilityInterval(35_001, 69_999),
newReachabilityInterval(70_000, 100_000),
},
},
}
for i, test := range tests {
@@ -473,3 +483,206 @@ func TestInsertBlock(t *testing.T) {
}
}
}
func TestSplitFractionErrors(t *testing.T) {
interval := newReachabilityInterval(100, 200)
// Negative fraction
_, _, err := interval.splitFraction(-0.5)
if err == nil {
t.Fatalf("TestSplitFractionErrors: splitFraction unexpectedly " +
"didn't return an error for a negative fraction")
}
expectedErrSubstring := "fraction must be between 0 and 1"
if !strings.Contains(err.Error(), expectedErrSubstring) {
t.Fatalf("TestSplitFractionErrors: splitFraction returned wrong error "+
"for a negative fraction. "+
"Want: %s, got: %s", expectedErrSubstring, err)
}
// Fraction > 1
_, _, err = interval.splitFraction(1.5)
if err == nil {
t.Fatalf("TestSplitFractionErrors: splitFraction unexpectedly " +
"didn't return an error for a fraction greater than 1")
}
expectedErrSubstring = "fraction must be between 0 and 1"
if !strings.Contains(err.Error(), expectedErrSubstring) {
t.Fatalf("TestSplitFractionErrors: splitFraction returned wrong error "+
"for a fraction greater than 1. "+
"Want: %s, got: %s", expectedErrSubstring, err)
}
// Splitting an empty interval
emptyInterval := newReachabilityInterval(1, 0)
_, _, err = emptyInterval.splitFraction(0.5)
if err == nil {
t.Fatalf("TestSplitFractionErrors: splitFraction unexpectedly " +
"didn't return an error for an empty interval")
}
expectedErrSubstring = "cannot split an empty interval"
if !strings.Contains(err.Error(), expectedErrSubstring) {
t.Fatalf("TestSplitFractionErrors: splitFraction returned wrong error "+
"for an empty interval. "+
"Want: %s, got: %s", expectedErrSubstring, err)
}
}
func TestSplitExactErrors(t *testing.T) {
interval := newReachabilityInterval(100, 199)
// Sum of sizes greater than the size of the interval
sizes := []uint64{50, 51}
_, err := interval.splitExact(sizes)
if err == nil {
t.Fatalf("TestSplitExactErrors: splitExact unexpectedly " +
"didn't return an error for (sum of sizes) > (size of interval)")
}
expectedErrSubstring := "sum of sizes must be equal to the interval's size"
if !strings.Contains(err.Error(), expectedErrSubstring) {
t.Fatalf("TestSplitExactErrors: splitExact returned wrong error "+
"for (sum of sizes) > (size of interval). "+
"Want: %s, got: %s", expectedErrSubstring, err)
}
// Sum of sizes smaller than the size of the interval
sizes = []uint64{50, 49}
_, err = interval.splitExact(sizes)
if err == nil {
t.Fatalf("TestSplitExactErrors: splitExact unexpectedly " +
"didn't return an error for (sum of sizes) < (size of interval)")
}
expectedErrSubstring = "sum of sizes must be equal to the interval's size"
if !strings.Contains(err.Error(), expectedErrSubstring) {
t.Fatalf("TestSplitExactErrors: splitExact returned wrong error "+
"for (sum of sizes) < (size of interval). "+
"Want: %s, got: %s", expectedErrSubstring, err)
}
}
func TestSplitWithExponentialBiasErrors(t *testing.T) {
interval := newReachabilityInterval(100, 199)
// Sum of sizes greater than the size of the interval
sizes := []uint64{50, 51}
_, err := interval.splitWithExponentialBias(sizes)
if err == nil {
t.Fatalf("TestSplitWithExponentialBiasErrors: splitWithExponentialBias " +
"unexpectedly didn't return an error")
}
expectedErrSubstring := "sum of sizes must be less than or equal to the interval's size"
if !strings.Contains(err.Error(), expectedErrSubstring) {
t.Fatalf("TestSplitWithExponentialBiasErrors: splitWithExponentialBias "+
"returned wrong error. Want: %s, got: %s", expectedErrSubstring, err)
}
}
func TestReindexIntervalErrors(t *testing.T) {
// Create a treeNode and give it size = 100
treeNode := newReachabilityTreeNode(&blockNode{})
treeNode.setInterval(newReachabilityInterval(0, 99))
// Add a chain of 100 child treeNodes to treeNode
var err error
currentTreeNode := treeNode
for i := 0; i < 100; i++ {
childTreeNode := newReachabilityTreeNode(&blockNode{})
_, err = currentTreeNode.addChild(childTreeNode)
if err != nil {
break
}
currentTreeNode = childTreeNode
}
// At the 100th addChild we expect a reindex. This reindex should
// fail because our initial treeNode only has size = 100, and the
// reindex requires size > 100.
// This simulates the case when (somehow) there's more than 2^64
// blocks in the DAG, since the genesis block has size = 2^64.
if err == nil {
t.Fatalf("TestReindexIntervalErrors: reindexIntervals " +
"unexpectedly didn't return an error")
}
if !strings.Contains(err.Error(), "missing tree parent during reindexing") {
t.Fatalf("TestReindexIntervalErrors: reindexIntervals "+
"returned an expected error: %s", err)
}
}
func BenchmarkReindexInterval(b *testing.B) {
for i := 0; i < b.N; i++ {
b.StopTimer()
root := newReachabilityTreeNode(&blockNode{})
const subTreeSize = 70000
// We set the interval of the root to subTreeSize*2 because
// its first child gets half of the interval, so a reindex
// from the root should happen after adding subTreeSize
// nodes.
root.setInterval(newReachabilityInterval(0, subTreeSize*2))
currentTreeNode := root
for i := 0; i < subTreeSize; i++ {
childTreeNode := newReachabilityTreeNode(&blockNode{})
_, err := currentTreeNode.addChild(childTreeNode)
if err != nil {
b.Fatalf("addChild: %s", err)
}
currentTreeNode = childTreeNode
}
remainingIntervalBefore := *root.remainingInterval
// After we added subTreeSize nodes, adding the next
// node should lead to a reindex from root.
fullReindexTriggeringNode := newReachabilityTreeNode(&blockNode{})
b.StartTimer()
_, err := currentTreeNode.addChild(fullReindexTriggeringNode)
b.StopTimer()
if err != nil {
b.Fatalf("addChild: %s", err)
}
if *root.remainingInterval == remainingIntervalBefore {
b.Fatal("Expected a reindex from root, but it didn't happen")
}
}
}
func TestFutureCoveringBlockSetString(t *testing.T) {
treeNodeA := newReachabilityTreeNode(&blockNode{})
treeNodeA.setInterval(newReachabilityInterval(123, 456))
treeNodeB := newReachabilityTreeNode(&blockNode{})
treeNodeB.setInterval(newReachabilityInterval(457, 789))
futureCoveringSet := futureCoveringBlockSet{
&futureCoveringBlock{treeNode: treeNodeA},
&futureCoveringBlock{treeNode: treeNodeB},
}
str := futureCoveringSet.String()
expectedStr := "[123,456][457,789]"
if str != expectedStr {
t.Fatalf("TestFutureCoveringBlockSetString: unexpected "+
"string. Want: %s, got: %s", expectedStr, str)
}
}
func TestReachabilityTreeNodeString(t *testing.T) {
treeNodeA := newReachabilityTreeNode(&blockNode{})
treeNodeA.setInterval(newReachabilityInterval(100, 199))
treeNodeB1 := newReachabilityTreeNode(&blockNode{})
treeNodeB1.setInterval(newReachabilityInterval(100, 150))
treeNodeB2 := newReachabilityTreeNode(&blockNode{})
treeNodeB2.setInterval(newReachabilityInterval(150, 199))
treeNodeC := newReachabilityTreeNode(&blockNode{})
treeNodeC.setInterval(newReachabilityInterval(100, 149))
treeNodeA.children = []*reachabilityTreeNode{treeNodeB1, treeNodeB2}
treeNodeB2.children = []*reachabilityTreeNode{treeNodeC}
str := treeNodeA.String()
expectedStr := "[100,149]\n[100,150][150,199]\n[100,199]"
if str != expectedStr {
t.Fatalf("TestReachabilityTreeNodeString: unexpected "+
"string. Want: %s, got: %s", expectedStr, str)
}
}

View File

@@ -3,6 +3,7 @@ package blockdag
import (
"bytes"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
@@ -28,7 +29,7 @@ func newReachabilityStore(dag *BlockDAG) *reachabilityStore {
}
}
func (store *reachabilityStore) setTreeNode(treeNode *reachabilityTreeNode) error {
func (store *reachabilityStore) setTreeNode(treeNode *reachabilityTreeNode) {
// load the reachability data from DB to store.loaded
node := treeNode.blockNode
_, exists := store.reachabilityDataByHash(node.hash)
@@ -38,7 +39,6 @@ func (store *reachabilityStore) setTreeNode(treeNode *reachabilityTreeNode) erro
store.loaded[*node.hash].treeNode = treeNode
store.setBlockAsDirty(node.hash)
return nil
}
func (store *reachabilityStore) setFutureCoveringSet(node *blockNode, futureCoveringSet futureCoveringBlockSet) error {
@@ -83,14 +83,15 @@ func (store *reachabilityStore) reachabilityDataByHash(hash *daghash.Hash) (*rea
}
// flushToDB writes all dirty reachability data to the database.
func (store *reachabilityStore) flushToDB(dbTx database.Tx) error {
func (store *reachabilityStore) flushToDB(dbContext *dbaccess.TxContext) error {
if len(store.dirty) == 0 {
return nil
}
for hash := range store.dirty {
hash := hash // Copy hash to a new variable to avoid passing the same pointer
reachabilityData := store.loaded[hash]
err := store.dbStoreReachabilityData(dbTx, &hash, reachabilityData)
err := store.storeReachabilityData(dbContext, &hash, reachabilityData)
if err != nil {
return err
}
@@ -102,22 +103,25 @@ func (store *reachabilityStore) clearDirtyEntries() {
store.dirty = make(map[daghash.Hash]struct{})
}
func (store *reachabilityStore) init(dbTx database.Tx) error {
bucket := dbTx.Metadata().Bucket(reachabilityDataBucketName)
func (store *reachabilityStore) init(dbContext dbaccess.Context) error {
// TODO: (Stas) This is a quick and dirty hack.
// We iterate over the entire bucket twice:
// * First, populate the loaded set with all entries
// * Second, connect the parent/children pointers in each entry
// with other nodes, which are now guaranteed to exist
cursor := bucket.Cursor()
cursor, err := dbaccess.ReachabilityDataCursor(dbContext)
if err != nil {
return err
}
defer cursor.Close()
for ok := cursor.First(); ok; ok = cursor.Next() {
err := store.initReachabilityData(cursor)
if err != nil {
return err
}
}
cursor = bucket.Cursor()
for ok := cursor.First(); ok; ok = cursor.Next() {
err := store.loadReachabilityDataFromCursor(cursor)
if err != nil {
@@ -128,7 +132,12 @@ func (store *reachabilityStore) init(dbTx database.Tx) error {
}
func (store *reachabilityStore) initReachabilityData(cursor database.Cursor) error {
hash, err := daghash.NewHash(cursor.Key())
key, err := cursor.Key()
if err != nil {
return err
}
hash, err := daghash.NewHash(key.Suffix())
if err != nil {
return err
}
@@ -141,7 +150,12 @@ func (store *reachabilityStore) initReachabilityData(cursor database.Cursor) err
}
func (store *reachabilityStore) loadReachabilityDataFromCursor(cursor database.Cursor) error {
hash, err := daghash.NewHash(cursor.Key())
key, err := cursor.Key()
if err != nil {
return err
}
hash, err := daghash.NewHash(key.Suffix())
if err != nil {
return err
}
@@ -151,7 +165,12 @@ func (store *reachabilityStore) loadReachabilityDataFromCursor(cursor database.C
return errors.Errorf("cannot find reachability data for block hash: %s", hash)
}
err = store.deserializeReachabilityData(cursor.Value(), reachabilityData)
serializedReachabilityData, err := cursor.Value()
if err != nil {
return err
}
err = store.deserializeReachabilityData(serializedReachabilityData, reachabilityData)
if err != nil {
return err
}
@@ -162,15 +181,15 @@ func (store *reachabilityStore) loadReachabilityDataFromCursor(cursor database.C
return nil
}
// dbStoreReachabilityData stores the reachability data to the database.
// storeReachabilityData stores the reachability data to the database.
// This overwrites the current entry if there exists one.
func (store *reachabilityStore) dbStoreReachabilityData(dbTx database.Tx, hash *daghash.Hash, reachabilityData *reachabilityData) error {
func (store *reachabilityStore) storeReachabilityData(dbContext dbaccess.Context, hash *daghash.Hash, reachabilityData *reachabilityData) error {
serializedReachabilyData, err := store.serializeReachabilityData(reachabilityData)
if err != nil {
return err
}
return dbTx.Metadata().Bucket(reachabilityDataBucketName).Put(hash[:], serializedReachabilyData)
return dbaccess.StoreReachabilityData(dbContext, hash, serializedReachabilyData)
}
func (store *reachabilityStore) serializeReachabilityData(reachabilityData *reachabilityData) ([]byte, error) {

View File

@@ -4,31 +4,20 @@ import (
"bytes"
"encoding/binary"
"fmt"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/wire"
)
// SubnetworkStore stores the subnetworks data
type SubnetworkStore struct {
db database.DB
}
func newSubnetworkStore(db database.DB) *SubnetworkStore {
return &SubnetworkStore{
db: db,
}
}
// registerSubnetworks scans a list of transactions, singles out
// subnetwork registry transactions, validates them, and registers a new
// subnetwork based on it.
// This function returns an error if one or more transactions are invalid
func registerSubnetworks(dbTx database.Tx, txs []*util.Tx) error {
func registerSubnetworks(dbContext dbaccess.Context, txs []*util.Tx) error {
subnetworkRegistryTxs := make([]*wire.MsgTx, 0)
for _, tx := range txs {
msgTx := tx.MsgTx()
@@ -50,13 +39,13 @@ func registerSubnetworks(dbTx database.Tx, txs []*util.Tx) error {
if err != nil {
return err
}
sNet, err := dbGetSubnetwork(dbTx, subnetworkID)
exists, err := dbaccess.HasSubnetwork(dbContext, subnetworkID)
if err != nil {
return err
}
if sNet == nil {
if !exists {
createdSubnetwork := newSubnetwork(registryTx)
err := dbRegisterSubnetwork(dbTx, subnetworkID, createdSubnetwork)
err := registerSubnetwork(dbContext, subnetworkID, createdSubnetwork)
if err != nil {
return errors.Errorf("failed registering subnetwork"+
"for tx '%s': %s", registryTx.TxHash(), err)
@@ -85,66 +74,39 @@ func TxToSubnetworkID(tx *wire.MsgTx) (*subnetworkid.SubnetworkID, error) {
return subnetworkid.New(util.Hash160(txHash[:]))
}
// subnetwork returns a registered subnetwork. If the subnetwork does not exist
// this method returns an error.
func (s *SubnetworkStore) subnetwork(subnetworkID *subnetworkid.SubnetworkID) (*subnetwork, error) {
var sNet *subnetwork
var err error
dbErr := s.db.View(func(dbTx database.Tx) error {
sNet, err = dbGetSubnetwork(dbTx, subnetworkID)
return nil
})
if dbErr != nil {
return nil, errors.Errorf("could not retrieve subnetwork '%d': %s", subnetworkID, dbErr)
}
// fetchSubnetwork returns a registered subnetwork.
func fetchSubnetwork(subnetworkID *subnetworkid.SubnetworkID) (*subnetwork, error) {
serializedSubnetwork, err := dbaccess.FetchSubnetworkData(dbaccess.NoTx(), subnetworkID)
if err != nil {
return nil, errors.Errorf("could not retrieve subnetwork '%d': %s", subnetworkID, err)
return nil, err
}
return sNet, nil
subnet, err := deserializeSubnetwork(serializedSubnetwork)
if err != nil {
return nil, err
}
return subnet, nil
}
// GasLimit returns the gas limit of a registered subnetwork. If the subnetwork does not
// exist this method returns an error.
func (s *SubnetworkStore) GasLimit(subnetworkID *subnetworkid.SubnetworkID) (uint64, error) {
sNet, err := s.subnetwork(subnetworkID)
func GasLimit(subnetworkID *subnetworkid.SubnetworkID) (uint64, error) {
sNet, err := fetchSubnetwork(subnetworkID)
if err != nil {
return 0, err
}
if sNet == nil {
return 0, errors.Errorf("subnetwork '%s' not found", subnetworkID)
}
return sNet.gasLimit, nil
}
// dbRegisterSubnetwork stores mappings from ID of the subnetwork to the subnetwork data.
func dbRegisterSubnetwork(dbTx database.Tx, subnetworkID *subnetworkid.SubnetworkID, network *subnetwork) error {
// Serialize the subnetwork
func registerSubnetwork(dbContext dbaccess.Context, subnetworkID *subnetworkid.SubnetworkID, network *subnetwork) error {
serializedSubnetwork, err := serializeSubnetwork(network)
if err != nil {
return errors.Errorf("failed to serialize sub-netowrk '%s': %s", subnetworkID, err)
}
// Store the subnetwork
subnetworksBucket := dbTx.Metadata().Bucket(subnetworksBucketName)
err = subnetworksBucket.Put(subnetworkID[:], serializedSubnetwork)
if err != nil {
return errors.Errorf("failed to write sub-netowrk '%s': %s", subnetworkID, err)
}
return nil
}
// dbGetSubnetwork returns the subnetwork associated with subnetworkID or nil if the subnetwork was not found.
func dbGetSubnetwork(dbTx database.Tx, subnetworkID *subnetworkid.SubnetworkID) (*subnetwork, error) {
bucket := dbTx.Metadata().Bucket(subnetworksBucketName)
serializedSubnetwork := bucket.Get(subnetworkID[:])
if serializedSubnetwork == nil {
return nil, nil
}
return deserializeSubnetwork(serializedSubnetwork)
return dbaccess.StoreSubnetwork(dbContext, subnetworkID, serializedSubnetwork)
}
type subnetwork struct {

57
blockdag/sync_rate.go Normal file
View File

@@ -0,0 +1,57 @@
package blockdag
import "time"
const syncRateWindowDuration = 15 * time.Minute
// addBlockProcessingTimestamp records the current time as a block processing timestamp in order to measure the recent sync rate.
//
// This function MUST be called with the DAG state lock held (for writes).
func (dag *BlockDAG) addBlockProcessingTimestamp() {
now := time.Now()
dag.recentBlockProcessingTimestamps = append(dag.recentBlockProcessingTimestamps, now)
dag.removeNonRecentTimestampsFromRecentBlockProcessingTimestamps()
}
// removeNonRecentTimestampsFromRecentBlockProcessingTimestamps removes timestamps older than syncRateWindowDuration
// from dag.recentBlockProcessingTimestamps
//
// This function MUST be called with the DAG state lock held (for writes).
func (dag *BlockDAG) removeNonRecentTimestampsFromRecentBlockProcessingTimestamps() {
dag.recentBlockProcessingTimestamps = dag.recentBlockProcessingTimestampsRelevantWindow()
}
func (dag *BlockDAG) recentBlockProcessingTimestampsRelevantWindow() []time.Time {
minTime := time.Now().Add(-syncRateWindowDuration)
windowStartIndex := len(dag.recentBlockProcessingTimestamps)
for i, processTime := range dag.recentBlockProcessingTimestamps {
if processTime.After(minTime) {
windowStartIndex = i
break
}
}
return dag.recentBlockProcessingTimestamps[windowStartIndex:]
}
// syncRate returns the rate of processed
// blocks in the last syncRateWindowDuration
// duration.
func (dag *BlockDAG) syncRate() float64 {
dag.RLock()
defer dag.RUnlock()
return float64(len(dag.recentBlockProcessingTimestampsRelevantWindow())) / syncRateWindowDuration.Seconds()
}
// IsSyncRateBelowThreshold checks whether the sync rate
// is below the expected threshold.
func (dag *BlockDAG) IsSyncRateBelowThreshold(maxDeviation float64) bool {
if dag.uptime() < syncRateWindowDuration {
return false
}
return dag.syncRate() < 1/dag.dagParams.TargetTimePerBlock.Seconds()*maxDeviation
}
func (dag *BlockDAG) uptime() time.Duration {
return time.Now().Sub(dag.startTime)
}
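For a purely illustrative sense of the threshold: syncRate divides the number of blocks processed in the last 15 minutes by 900 seconds, and IsSyncRateBelowThreshold compares it against maxDeviation / TargetTimePerBlock. If, say, the target block time were 1 second and a maxDeviation of 0.05 were passed in, the threshold would be 0.05 blocks per second, so the node would be reported as under-synced once fewer than 45 blocks were processed within the window, and never during the first 15 minutes of uptime.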

View File

@@ -5,47 +5,25 @@ package blockdag
import (
"compress/bzip2"
"encoding/binary"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"testing"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/database"
_ "github.com/kaspanet/kaspad/database/ffldb" // blank import ffldb so that its init() function runs before tests
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
const (
// testDbType is the database backend type to use for the tests.
testDbType = "ffldb"
// testDbRoot is the root directory used to create all test databases.
testDbRoot = "testdbs"
// blockDataNet is the expected network in the test block data.
blockDataNet = wire.Mainnet
)
// isSupportedDbType returns whether or not the passed database type is
// currently supported.
func isSupportedDbType(dbType string) bool {
supportedDrivers := database.SupportedDrivers()
for _, driver := range supportedDrivers {
if dbType == driver {
return true
}
}
return false
}
// FileExists returns whether or not the named file or directory exists.
func FileExists(name string) bool {
if _, err := os.Stat(name); err != nil {
@@ -59,11 +37,10 @@ func FileExists(name string) bool {
// DAGSetup is used to create a new db and DAG instance with the genesis
// block already inserted. In addition to the new DAG instance, it returns
// a teardown function the caller should invoke when done testing to clean up.
func DAGSetup(dbName string, config Config) (*BlockDAG, func(), error) {
if !isSupportedDbType(testDbType) {
return nil, nil, errors.Errorf("unsupported db type %s", testDbType)
}
// The openDB parameter instructs DAGSetup whether or not to also open the
// database. Setting it to false is useful in tests that handle database
// opening/closing by themselves.
func DAGSetup(dbName string, openDb bool, config Config) (*BlockDAG, func(), error) {
var teardown func()
// To make sure that the teardown function is not called before any goroutines finished to run -
@@ -78,20 +55,16 @@ func DAGSetup(dbName string, config Config) (*BlockDAG, func(), error) {
})
}
if config.DB == nil {
// Create the root directory for test databases.
if !FileExists(testDbRoot) {
if err := os.MkdirAll(testDbRoot, 0700); err != nil {
err := errors.Errorf("unable to create test db "+
"root: %s", err)
return nil, nil, err
}
if openDb {
var err error
tmpDir, err := ioutil.TempDir("", "DAGSetup")
if err != nil {
return nil, nil, errors.Errorf("error creating temp dir: %s", err)
}
dbPath := filepath.Join(testDbRoot, dbName)
dbPath := filepath.Join(tmpDir, dbName)
_ = os.RemoveAll(dbPath)
var err error
config.DB, err = database.Create(testDbType, dbPath, blockDataNet)
err = dbaccess.Open(dbPath)
if err != nil {
return nil, nil, errors.Errorf("error creating db: %s", err)
}
@@ -101,19 +74,17 @@ func DAGSetup(dbName string, config Config) (*BlockDAG, func(), error) {
teardown = func() {
spawnWaitGroup.Wait()
spawn = realSpawn
config.DB.Close()
dbaccess.Close()
os.RemoveAll(dbPath)
os.RemoveAll(testDbRoot)
}
} else {
teardown = func() {
spawnWaitGroup.Wait()
spawn = realSpawn
config.DB.Close()
}
}
config.TimeSource = NewMedianTime()
config.TimeSource = NewTimeSource()
config.SigCache = txscript.NewSigCache(1000)
// Create the DAG instance.
@@ -173,7 +144,7 @@ func SetVirtualForTest(dag *BlockDAG, virtual VirtualForTest) VirtualForTest {
// GetVirtualFromParentsForTest generates a virtual block with the given parents.
func GetVirtualFromParentsForTest(dag *BlockDAG, parentHashes []*daghash.Hash) (VirtualForTest, error) {
parents := newSet()
parents := newBlockSet()
for _, hash := range parentHashes {
parent := dag.index.LookupNode(hash)
if parent == nil {
@@ -183,7 +154,7 @@ func GetVirtualFromParentsForTest(dag *BlockDAG, parentHashes []*daghash.Hash) (
}
virtual := newVirtualBlock(dag, parents)
pastUTXO, _, err := dag.pastUTXO(&virtual.blockNode)
pastUTXO, _, _, err := dag.pastUTXO(&virtual.blockNode)
if err != nil {
return nil, err
}
@@ -288,6 +259,17 @@ func PrepareBlockForTest(dag *BlockDAG, parentHashes []*daghash.Hash, transactio
blockTransactions[i+1] = util.NewTx(tx)
}
// Sort transactions by subnetwork ID
sort.Slice(blockTransactions, func(i, j int) bool {
if blockTransactions[i].MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase) {
return true
}
if blockTransactions[j].MsgTx().SubnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase) {
return false
}
return subnetworkid.Less(&blockTransactions[i].MsgTx().SubnetworkID, &blockTransactions[j].MsgTx().SubnetworkID)
})
block, err := dag.BlockForMining(blockTransactions)
if err != nil {
return nil, err
@@ -298,6 +280,28 @@ func PrepareBlockForTest(dag *BlockDAG, parentHashes []*daghash.Hash, transactio
return block, nil
}
// PrepareAndProcessBlockForTest prepares a block that points to the given parent
// hashes and processes it.
func PrepareAndProcessBlockForTest(t *testing.T, dag *BlockDAG, parentHashes []*daghash.Hash, transactions []*wire.MsgTx) *wire.MsgBlock {
daghash.Sort(parentHashes)
block, err := PrepareBlockForTest(dag, parentHashes, transactions)
if err != nil {
t.Fatalf("error in PrepareBlockForTest: %s", err)
}
utilBlock := util.NewBlock(block)
isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, BFNoPoWCheck)
if err != nil {
t.Fatalf("unexpected error in ProcessBlock: %s", err)
}
if isDelayed {
t.Fatalf("block is too far in the future")
}
if isOrphan {
t.Fatalf("block was unexpectedly orphan")
}
return block
}
// generateDeterministicExtraNonceForTest returns a unique deterministic extra nonce for coinbase data, in order to create unique coinbase transactions.
func generateDeterministicExtraNonceForTest() uint64 {
extraNonceForTest++

View File

@@ -1,14 +0,0 @@
package blockdag
import (
"testing"
)
func TestIsSupportedDbType(t *testing.T) {
if !isSupportedDbType("ffldb") {
t.Errorf("ffldb should be a supported DB driver")
}
if isSupportedDbType("madeUpDb") {
t.Errorf("madeUpDb should not be a supported DB driver")
}
}

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -8,6 +8,7 @@ import (
"fmt"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/pkg/errors"
)
// ThresholdState define the various threshold states used when voting on
@@ -177,9 +178,9 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit
var ok bool
state, ok = cache.Lookup(prevNode.hash)
if !ok {
return ThresholdFailed, AssertError(fmt.Sprintf(
return ThresholdFailed, errors.Errorf(
"thresholdState: cache lookup failed for %s",
prevNode.hash))
prevNode.hash)
}
}
@@ -230,9 +231,6 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit
if condition {
count++
}
// Get the previous block node.
current = current.selectedParent
}
// The state is locked in if the number of blocks in the
@@ -267,8 +265,8 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit
// This function is safe for concurrent access.
func (dag *BlockDAG) ThresholdState(deploymentID uint32) (ThresholdState, error) {
dag.dagLock.Lock()
defer dag.dagLock.Unlock()
state, err := dag.deploymentState(dag.selectedTip(), deploymentID)
dag.dagLock.Unlock()
return state, err
}
@@ -279,8 +277,8 @@ func (dag *BlockDAG) ThresholdState(deploymentID uint32) (ThresholdState, error)
// This function is safe for concurrent access.
func (dag *BlockDAG) IsDeploymentActive(deploymentID uint32) (bool, error) {
dag.dagLock.Lock()
defer dag.dagLock.Unlock()
state, err := dag.deploymentState(dag.selectedTip(), deploymentID)
dag.dagLock.Unlock()
if err != nil {
return false, err
}
@@ -300,7 +298,7 @@ func (dag *BlockDAG) IsDeploymentActive(deploymentID uint32) (bool, error) {
// This function MUST be called with the DAG state lock held (for writes).
func (dag *BlockDAG) deploymentState(prevNode *blockNode, deploymentID uint32) (ThresholdState, error) {
if deploymentID > uint32(len(dag.dagParams.Deployments)) {
return ThresholdFailed, DeploymentError(deploymentID)
return ThresholdFailed, errors.Errorf("deployment ID %d does not exist", deploymentID)
}
deployment := &dag.dagParams.Deployments[deploymentID]

25
blockdag/timesource.go Normal file
View File

@@ -0,0 +1,25 @@
package blockdag
import (
"time"
)
// TimeSource is the interface to access time.
type TimeSource interface {
// Now returns the current time.
Now() time.Time
}
// timeSource provides an implementation of the TimeSource interface
// that simply returns the current local time.
type timeSource struct{}
// Now returns the current local time, with one second precision.
func (m *timeSource) Now() time.Time {
return time.Unix(time.Now().Unix(), 0)
}
// NewTimeSource returns a new instance of a TimeSource
func NewTimeSource() TimeSource {
return &timeSource{}
}
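The tests earlier in this change rely on a newFakeTimeSource helper whose implementation is not part of this diff; here is a hedged sketch of what such a fixed-clock TimeSource might look like (name and shape are assumptions).
package blockdag

import "time"

// fixedTimeSource is a hypothetical TimeSource for tests: it always reports
// the same instant, truncated to one-second precision like the real one.
type fixedTimeSource struct {
	fixedTime time.Time
}

func (f *fixedTimeSource) Now() time.Time {
	return time.Unix(f.fixedTime.Unix(), 0)
}

// newFixedTimeSource returns a TimeSource frozen at fixedTime.
func newFixedTimeSource(fixedTime time.Time) TimeSource {
	return &fixedTimeSource{fixedTime: fixedTime}
}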

View File

@@ -2,31 +2,26 @@ package blockdag
import (
"bytes"
"github.com/golang/groupcache/lru"
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/wire"
)
const ecmhCacheSize = 4_000_000
var (
utxoToECMHCache = lru.New(ecmhCacheSize)
)
func utxoMultiset(entry *UTXOEntry, outpoint *wire.Outpoint) (*ecc.Multiset, error) {
func addUTXOToMultiset(ms *secp256k1.MultiSet, entry *UTXOEntry, outpoint *wire.Outpoint) (*secp256k1.MultiSet, error) {
w := &bytes.Buffer{}
err := serializeUTXO(w, entry, outpoint)
if err != nil {
return nil, err
}
serializedUTXO := w.Bytes()
utxoHash := daghash.DoubleHashH(serializedUTXO)
if cachedMSPoint, ok := utxoToECMHCache.Get(utxoHash); ok {
return cachedMSPoint.(*ecc.Multiset), nil
}
msPoint := ecc.NewMultiset(ecc.S256()).Add(serializedUTXO)
utxoToECMHCache.Add(utxoHash, msPoint)
return msPoint, nil
ms.Add(w.Bytes())
return ms, nil
}
func removeUTXOFromMultiset(ms *secp256k1.MultiSet, entry *UTXOEntry, outpoint *wire.Outpoint) (*secp256k1.MultiSet, error) {
w := &bytes.Buffer{}
err := serializeUTXO(w, entry, outpoint)
if err != nil {
return nil, err
}
ms.Remove(w.Bytes())
return ms, nil
}
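A short hedged sketch of how the two helpers compose when a transaction's effect is applied to a block multiset; the entries and outpoints are taken as parameters so that only the functions defined above are assumed, and the helper name is hypothetical.
package blockdag

import (
	"github.com/kaspanet/go-secp256k1"
	"github.com/kaspanet/kaspad/wire"
)

// applyUTXOChangeToMultiset is a hypothetical helper: it removes the spent
// UTXO from the multiset and adds the newly created one, returning the
// updated multiset.
func applyUTXOChangeToMultiset(ms *secp256k1.MultiSet,
	spentEntry *UTXOEntry, spentOutpoint *wire.Outpoint,
	newEntry *UTXOEntry, newOutpoint *wire.Outpoint) (*secp256k1.MultiSet, error) {

	ms, err := removeUTXOFromMultiset(ms, spentEntry, spentOutpoint)
	if err != nil {
		return nil, err
	}
	return addUTXOToMultiset(ms, newEntry, newOutpoint)
}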

View File

@@ -2,14 +2,11 @@ package blockdag
import (
"bytes"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/locks"
"github.com/pkg/errors"
)
var multisetPointSize = 32
type blockUTXODiffData struct {
diff *UTXODiff
diffChild *blockNode
@@ -17,16 +14,16 @@ type blockUTXODiffData struct {
type utxoDiffStore struct {
dag *BlockDAG
dirty map[daghash.Hash]struct{}
loaded map[daghash.Hash]*blockUTXODiffData
dirty map[*blockNode]struct{}
loaded map[*blockNode]*blockUTXODiffData
mtx *locks.PriorityMutex
}
func newUTXODiffStore(dag *BlockDAG) *utxoDiffStore {
return &utxoDiffStore{
dag: dag,
dirty: make(map[daghash.Hash]struct{}),
loaded: make(map[daghash.Hash]*blockUTXODiffData),
dirty: make(map[*blockNode]struct{}),
loaded: make(map[*blockNode]*blockUTXODiffData),
mtx: locks.NewPriorityMutex(),
}
}
@@ -35,16 +32,15 @@ func (diffStore *utxoDiffStore) setBlockDiff(node *blockNode, diff *UTXODiff) er
diffStore.mtx.HighPriorityWriteLock()
defer diffStore.mtx.HighPriorityWriteUnlock()
// load the diff data from DB to diffStore.loaded
_, exists, err := diffStore.diffDataByHash(node.hash)
if err != nil {
_, err := diffStore.diffDataByBlockNode(node)
if dbaccess.IsNotFoundError(err) {
diffStore.loaded[node] = &blockUTXODiffData{}
} else if err != nil {
return err
}
if !exists {
diffStore.loaded[*node.hash] = &blockUTXODiffData{}
}
diffStore.loaded[*node.hash].diff = diff
diffStore.setBlockAsDirty(node.hash)
diffStore.loaded[node].diff = diff
diffStore.setBlockAsDirty(node)
return nil
}
@@ -52,22 +48,19 @@ func (diffStore *utxoDiffStore) setBlockDiffChild(node *blockNode, diffChild *bl
diffStore.mtx.HighPriorityWriteLock()
defer diffStore.mtx.HighPriorityWriteUnlock()
// load the diff data from DB to diffStore.loaded
_, exists, err := diffStore.diffDataByHash(node.hash)
_, err := diffStore.diffDataByBlockNode(node)
if err != nil {
return err
}
if !exists {
return diffNotFoundError(node)
}
diffStore.loaded[*node.hash].diffChild = diffChild
diffStore.setBlockAsDirty(node.hash)
diffStore.loaded[node].diffChild = diffChild
diffStore.setBlockAsDirty(node)
return nil
}
func (diffStore *utxoDiffStore) removeBlocksDiffData(dbTx database.Tx, blockHashes []*daghash.Hash) error {
for _, hash := range blockHashes {
err := diffStore.removeBlockDiffData(dbTx, hash)
func (diffStore *utxoDiffStore) removeBlocksDiffData(dbContext dbaccess.Context, nodes []*blockNode) error {
for _, node := range nodes {
err := diffStore.removeBlockDiffData(dbContext, node)
if err != nil {
return err
}
@@ -75,87 +68,64 @@ func (diffStore *utxoDiffStore) removeBlocksDiffData(dbTx database.Tx, blockHash
return nil
}
func (diffStore *utxoDiffStore) removeBlockDiffData(dbTx database.Tx, blockHash *daghash.Hash) error {
func (diffStore *utxoDiffStore) removeBlockDiffData(dbContext dbaccess.Context, node *blockNode) error {
diffStore.mtx.LowPriorityWriteLock()
defer diffStore.mtx.LowPriorityWriteUnlock()
delete(diffStore.loaded, *blockHash)
err := dbRemoveDiffData(dbTx, blockHash)
delete(diffStore.loaded, node)
err := dbaccess.RemoveDiffData(dbContext, node.hash)
if err != nil {
return err
}
return nil
}
func (diffStore *utxoDiffStore) setBlockAsDirty(blockHash *daghash.Hash) {
diffStore.dirty[*blockHash] = struct{}{}
func (diffStore *utxoDiffStore) setBlockAsDirty(node *blockNode) {
diffStore.dirty[node] = struct{}{}
}
func (diffStore *utxoDiffStore) diffDataByHash(hash *daghash.Hash) (*blockUTXODiffData, bool, error) {
if diffData, ok := diffStore.loaded[*hash]; ok {
return diffData, true, nil
func (diffStore *utxoDiffStore) diffDataByBlockNode(node *blockNode) (*blockUTXODiffData, error) {
if diffData, ok := diffStore.loaded[node]; ok {
return diffData, nil
}
diffData, err := diffStore.diffDataFromDB(hash)
diffData, err := diffStore.diffDataFromDB(node.hash)
if err != nil {
return nil, false, err
return nil, err
}
exists := diffData != nil
if exists {
diffStore.loaded[*hash] = diffData
}
return diffData, exists, nil
}
func diffNotFoundError(node *blockNode) error {
return errors.Errorf("Couldn't find diff data for block %s", node.hash)
diffStore.loaded[node] = diffData
return diffData, nil
}
func (diffStore *utxoDiffStore) diffByNode(node *blockNode) (*UTXODiff, error) {
diffStore.mtx.HighPriorityReadLock()
defer diffStore.mtx.HighPriorityReadUnlock()
diffData, exists, err := diffStore.diffDataByHash(node.hash)
diffData, err := diffStore.diffDataByBlockNode(node)
if err != nil {
return nil, err
}
if !exists {
return nil, diffNotFoundError(node)
}
return diffData.diff, nil
}
func (diffStore *utxoDiffStore) diffChildByNode(node *blockNode) (*blockNode, error) {
diffStore.mtx.HighPriorityReadLock()
defer diffStore.mtx.HighPriorityReadUnlock()
diffData, exists, err := diffStore.diffDataByHash(node.hash)
diffData, err := diffStore.diffDataByBlockNode(node)
if err != nil {
return nil, err
}
if !exists {
return nil, diffNotFoundError(node)
}
return diffData.diffChild, nil
}
func (diffStore *utxoDiffStore) diffDataFromDB(hash *daghash.Hash) (*blockUTXODiffData, error) {
var diffData *blockUTXODiffData
err := diffStore.dag.db.View(func(dbTx database.Tx) error {
bucket := dbTx.Metadata().Bucket(utxoDiffsBucketName)
serializedBlockDiffData := bucket.Get(hash[:])
if serializedBlockDiffData != nil {
var err error
diffData, err = diffStore.deserializeBlockUTXODiffData(serializedBlockDiffData)
return err
}
return nil
})
serializedBlockDiffData, err := dbaccess.FetchUTXODiffData(dbaccess.NoTx(), hash)
if err != nil {
return nil, err
}
return diffData, nil
return diffStore.deserializeBlockUTXODiffData(serializedBlockDiffData)
}
// flushToDB writes all dirty diff data to the database. If all writes
// succeed, this clears the dirty set.
func (diffStore *utxoDiffStore) flushToDB(dbTx database.Tx) error {
// flushToDB writes all dirty diff data to the database.
func (diffStore *utxoDiffStore) flushToDB(dbContext *dbaccess.TxContext) error {
diffStore.mtx.HighPriorityWriteLock()
defer diffStore.mtx.HighPriorityWriteUnlock()
if len(diffStore.dirty) == 0 {
@@ -165,10 +135,10 @@ func (diffStore *utxoDiffStore) flushToDB(dbTx database.Tx) error {
// Allocate a buffer here to avoid needless allocations/grows
// while writing each entry.
buffer := &bytes.Buffer{}
for hash := range diffStore.dirty {
for node := range diffStore.dirty {
buffer.Reset()
diffData := diffStore.loaded[hash]
err := dbStoreDiffData(dbTx, buffer, &hash, diffData)
diffData := diffStore.loaded[node]
err := storeDiffData(dbContext, buffer, node.hash, diffData)
if err != nil {
return err
}
@@ -177,31 +147,46 @@ func (diffStore *utxoDiffStore) flushToDB(dbTx database.Tx) error {
}
func (diffStore *utxoDiffStore) clearDirtyEntries() {
diffStore.dirty = make(map[daghash.Hash]struct{})
diffStore.dirty = make(map[*blockNode]struct{})
}
// dbStoreDiffData stores the UTXO diff data to the database.
// maxBlueScoreDifferenceToKeepLoaded is the maximum difference
// between the virtual's blueScore and a blockNode's blueScore
// under which to keep diff data loaded in memory.
var maxBlueScoreDifferenceToKeepLoaded uint64 = 100
// clearOldEntries removes entries whose blue score is lower than
// virtual.blueScore - maxBlueScoreDifferenceToKeepLoaded.
func (diffStore *utxoDiffStore) clearOldEntries() {
virtualBlueScore := diffStore.dag.VirtualBlueScore()
minBlueScore := virtualBlueScore - maxBlueScoreDifferenceToKeepLoaded
if maxBlueScoreDifferenceToKeepLoaded > virtualBlueScore {
minBlueScore = 0
}
toRemove := make(map[*blockNode]struct{})
for node := range diffStore.loaded {
if node.blueScore < minBlueScore {
toRemove[node] = struct{}{}
}
}
for node := range toRemove {
delete(diffStore.loaded, node)
}
}
// storeDiffData stores the UTXO diff data to the database.
// This overwrites the current entry if there exists one.
func dbStoreDiffData(dbTx database.Tx, writeBuffer *bytes.Buffer, hash *daghash.Hash, diffData *blockUTXODiffData) error {
// To avoid a ton of allocs, use the given writeBuffer
func storeDiffData(dbContext dbaccess.Context, w *bytes.Buffer, hash *daghash.Hash, diffData *blockUTXODiffData) error {
// To avoid a ton of allocs, use the io.Writer
// instead of allocating one. We expect the buffer to
// already be initalized and, in most cases, to already
// already be initialized and, in most cases, to already
// be large enough to accommodate the serialized data
// without growing.
err := serializeBlockUTXODiffData(writeBuffer, diffData)
err := serializeBlockUTXODiffData(w, diffData)
if err != nil {
return err
}
// Bucket.Put doesn't copy on its own, so we manually
// copy here. We do so because we expect the buffer
// to be reused once we're done with it.
serializedDiffData := make([]byte, writeBuffer.Len())
copy(serializedDiffData, writeBuffer.Bytes())
return dbTx.Metadata().Bucket(utxoDiffsBucketName).Put(hash[:], serializedDiffData)
}
func dbRemoveDiffData(dbTx database.Tx, hash *daghash.Hash) error {
return dbTx.Metadata().Bucket(utxoDiffsBucketName).Delete(hash[:])
return dbaccess.StoreUTXODiffData(dbContext, hash, w.Bytes())
}

View File

@@ -1,9 +1,8 @@
package blockdag
import (
"fmt"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"reflect"
@@ -12,7 +11,7 @@ import (
func TestUTXODiffStore(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestUTXODiffStore", Config{
dag, teardownFunc, err := DAGSetup("TestUTXODiffStore", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
@@ -31,9 +30,12 @@ func TestUTXODiffStore(t *testing.T) {
// Check that an error is returned when asking for a non-existing node
nonExistingNode := createNode()
_, err = dag.utxoDiffStore.diffByNode(nonExistingNode)
expectedErrString := fmt.Sprintf("Couldn't find diff data for block %s", nonExistingNode.hash)
if err == nil || err.Error() != expectedErrString {
t.Errorf("diffByNode: expected error %s but got %s", expectedErrString, err)
if !dbaccess.IsNotFoundError(err) {
if err != nil {
t.Errorf("diffByNode: %s", err)
} else {
t.Errorf("diffByNode: unexpectedly found diff data")
}
}
// Add the node's diff data to the utxoDiffStore and check that it's stored correctly.
@@ -63,13 +65,20 @@ func TestUTXODiffStore(t *testing.T) {
// Flush changes to db, delete them from the dag.utxoDiffStore.loaded
// map, and check if the diff data is re-fetched from the database.
err = dag.db.Update(func(dbTx database.Tx) error {
return dag.utxoDiffStore.flushToDB(dbTx)
})
dbTx, err := dbaccess.NewTx()
if err != nil {
t.Fatalf("Failed to open database transaction: %s", err)
}
defer dbTx.RollbackUnlessClosed()
err = dag.utxoDiffStore.flushToDB(dbTx)
if err != nil {
t.Fatalf("Error flushing utxoDiffStore data to DB: %s", err)
}
delete(dag.utxoDiffStore.loaded, *node.hash)
err = dbTx.Commit()
if err != nil {
t.Fatalf("Failed to commit database transaction: %s", err)
}
delete(dag.utxoDiffStore.loaded, node)
if storeDiff, err := dag.utxoDiffStore.diffByNode(node); err != nil {
t.Fatalf("diffByNode: unexpected error: %s", err)
@@ -78,9 +87,79 @@ func TestUTXODiffStore(t *testing.T) {
}
// Check if getBlockDiff caches the result in dag.utxoDiffStore.loaded
if loadedDiffData, ok := dag.utxoDiffStore.loaded[*node.hash]; !ok {
if loadedDiffData, ok := dag.utxoDiffStore.loaded[node]; !ok {
t.Errorf("the diff data wasn't added to loaded map after requesting it")
} else if !reflect.DeepEqual(loadedDiffData.diff, diff) {
t.Errorf("Expected diff and loadedDiff to be equal")
}
}
func TestClearOldEntries(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestClearOldEntries", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
t.Fatalf("TestClearOldEntries: Failed to setup DAG instance: %v", err)
}
defer teardownFunc()
// Set maxBlueScoreDifferenceToKeepLoaded to 10 to make this test fast to run
currentDifference := maxBlueScoreDifferenceToKeepLoaded
maxBlueScoreDifferenceToKeepLoaded = 10
defer func() { maxBlueScoreDifferenceToKeepLoaded = currentDifference }()
// Add 10 blocks
blockNodes := make([]*blockNode, 10)
for i := 0; i < 10; i++ {
processedBlock := PrepareAndProcessBlockForTest(t, dag, dag.TipHashes(), nil)
node := dag.index.LookupNode(processedBlock.BlockHash())
if node == nil {
t.Fatalf("TestClearOldEntries: missing blockNode for hash %s", processedBlock.BlockHash())
}
blockNodes[i] = node
}
// Make sure that all of them exist in the loaded set
for _, node := range blockNodes {
_, ok := dag.utxoDiffStore.loaded[node]
if !ok {
t.Fatalf("TestClearOldEntries: diffData for node %s is not in the loaded set", node.hash)
}
}
// Add 10 more blocks on top of the others
for i := 0; i < 10; i++ {
PrepareAndProcessBlockForTest(t, dag, dag.TipHashes(), nil)
}
// Make sure that all the old nodes no longer exist in the loaded set
for _, node := range blockNodes {
_, ok := dag.utxoDiffStore.loaded[node]
if ok {
t.Fatalf("TestClearOldEntries: diffData for node %s is in the loaded set", node.hash)
}
}
// Add a block on top of the genesis to force the retrieval of all diffData
processedBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
node := dag.index.LookupNode(processedBlock.BlockHash())
if node == nil {
t.Fatalf("TestClearOldEntries: missing blockNode for hash %s", processedBlock.BlockHash())
}
// Make sure that the child-of-genesis node isn't in the loaded set
_, ok := dag.utxoDiffStore.loaded[node]
if ok {
t.Fatalf("TestClearOldEntries: diffData for node %s is in the loaded set", node.hash)
}
// Make sure that all the old nodes still do not exist in the loaded set
for _, node := range blockNodes {
_, ok := dag.utxoDiffStore.loaded[node]
if ok {
t.Fatalf("TestClearOldEntries: diffData for node %s is in the loaded set", node.hash)
}
}
}

View File

@@ -2,15 +2,11 @@ package blockdag
import (
"bytes"
"encoding/binary"
"fmt"
"github.com/pkg/errors"
"io"
"math/big"
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/kaspad/util/binaryserializer"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
"io"
)
// serializeBlockUTXODiffData serializes diff data in the following format:
@@ -40,54 +36,26 @@ func serializeBlockUTXODiffData(w io.Writer, diffData *blockUTXODiffData) error
return nil
}
// utxoEntryHeaderCode returns the calculated header code to be used when
// serializing the provided utxo entry.
func utxoEntryHeaderCode(entry *UTXOEntry) uint64 {
// As described in the serialization format comments, the header code
// encodes the blue score shifted over one bit and the block reward flag
// in the lowest bit.
headerCode := uint64(entry.BlockBlueScore()) << 1
if entry.IsCoinbase() {
headerCode |= 0x01
}
return headerCode
}
func (diffStore *utxoDiffStore) deserializeBlockUTXODiffData(serializedDiffDataBytes []byte) (*blockUTXODiffData, error) {
func (diffStore *utxoDiffStore) deserializeBlockUTXODiffData(serializedDiffData []byte) (*blockUTXODiffData, error) {
diffData := &blockUTXODiffData{}
serializedDiffData := bytes.NewBuffer(serializedDiffDataBytes)
r := bytes.NewBuffer(serializedDiffData)
var hasDiffChild bool
err := wire.ReadElement(serializedDiffData, &hasDiffChild)
err := wire.ReadElement(r, &hasDiffChild)
if err != nil {
return nil, err
}
if hasDiffChild {
hash := &daghash.Hash{}
err := wire.ReadElement(serializedDiffData, hash)
err := wire.ReadElement(r, hash)
if err != nil {
return nil, err
}
diffData.diffChild = diffStore.dag.index.LookupNode(hash)
}
diffData.diff = &UTXODiff{
useMultiset: true,
}
diffData.diff.toAdd, err = deserializeDiffEntries(serializedDiffData)
if err != nil {
return nil, err
}
diffData.diff.toRemove, err = deserializeDiffEntries(serializedDiffData)
if err != nil {
return nil, err
}
diffData.diff.diffMultiset, err = deserializeMultiset(serializedDiffData)
diffData.diff, err = deserializeUTXODiff(r)
if err != nil {
return nil, err
}
@@ -95,38 +63,31 @@ func (diffStore *utxoDiffStore) deserializeBlockUTXODiffData(serializedDiffDataB
return diffData, nil
}
func deserializeDiffEntries(r io.Reader) (utxoCollection, error) {
func deserializeUTXODiff(r io.Reader) (*UTXODiff, error) {
diff := &UTXODiff{}
var err error
diff.toAdd, err = deserializeUTXOCollection(r)
if err != nil {
return nil, err
}
diff.toRemove, err = deserializeUTXOCollection(r)
if err != nil {
return nil, err
}
return diff, nil
}
func deserializeUTXOCollection(r io.Reader) (utxoCollection, error) {
count, err := wire.ReadVarInt(r)
if err != nil {
return nil, err
}
collection := utxoCollection{}
for i := uint64(0); i < count; i++ {
outpointSize, err := wire.ReadVarInt(r)
if err != nil {
return nil, err
}
serializedOutpoint := make([]byte, outpointSize)
err = binary.Read(r, byteOrder, serializedOutpoint)
if err != nil {
return nil, err
}
outpoint, err := deserializeOutpoint(serializedOutpoint)
if err != nil {
return nil, err
}
utxoEntrySize, err := wire.ReadVarInt(r)
if err != nil {
return nil, err
}
serializedEntry := make([]byte, utxoEntrySize)
err = binary.Read(r, byteOrder, serializedEntry)
if err != nil {
return nil, err
}
utxoEntry, err := deserializeUTXOEntry(serializedEntry)
utxoEntry, outpoint, err := deserializeUTXO(r)
if err != nil {
return nil, err
}
@@ -135,31 +96,22 @@ func deserializeDiffEntries(r io.Reader) (utxoCollection, error) {
return collection, nil
}
// deserializeMultiset deserializes an ECMH multiset.
// See serializeMultiset for more details.
func deserializeMultiset(r io.Reader) (*ecc.Multiset, error) {
xBytes := make([]byte, multisetPointSize)
yBytes := make([]byte, multisetPointSize)
err := binary.Read(r, byteOrder, xBytes)
func deserializeUTXO(r io.Reader) (*UTXOEntry, *wire.Outpoint, error) {
outpoint, err := deserializeOutpoint(r)
if err != nil {
return nil, err
return nil, nil, err
}
err = binary.Read(r, byteOrder, yBytes)
utxoEntry, err := deserializeUTXOEntry(r)
if err != nil {
return nil, err
return nil, nil, err
}
var x, y big.Int
x.SetBytes(xBytes)
y.SetBytes(yBytes)
return ecc.NewMultisetFromPoint(ecc.S256(), &x, &y), nil
return utxoEntry, outpoint, nil
}
// serializeUTXODiff serializes UTXODiff by serializing
// UTXODiff.toAdd, UTXODiff.toRemove and UTXODiff.Multiset one after the other.
func serializeUTXODiff(w io.Writer, diff *UTXODiff) error {
if !diff.useMultiset {
return errors.New("Cannot serialize a UTXO diff without a multiset")
}
err := serializeUTXOCollection(w, diff.toAdd)
if err != nil {
return err
@@ -169,10 +121,7 @@ func serializeUTXODiff(w io.Writer, diff *UTXODiff) error {
if err != nil {
return err
}
err = serializeMultiset(w, diff.diffMultiset)
if err != nil {
return err
}
return nil
}
@@ -193,120 +142,93 @@ func serializeUTXOCollection(w io.Writer, collection utxoCollection) error {
return nil
}
// serializeMultiset serializes an ECMH multiset. The serialization
// is done by taking the (x,y) coordinates of the multiset point and
// padding each one of them to 32 bytes (it'll be 32 bytes in most
// cases anyway, except when one of the coordinates is zero) and writing
// them one after the other.
func serializeMultiset(w io.Writer, ms *ecc.Multiset) error {
x, y := ms.Point()
xBytes := make([]byte, multisetPointSize)
copy(xBytes, x.Bytes())
yBytes := make([]byte, multisetPointSize)
copy(yBytes, y.Bytes())
err := binary.Write(w, byteOrder, xBytes)
if err != nil {
return err
}
err = binary.Write(w, byteOrder, yBytes)
if err != nil {
return err
}
return nil
}
// serializeUTXO serializes a utxo entry-outpoint pair
func serializeUTXO(w io.Writer, entry *UTXOEntry, outpoint *wire.Outpoint) error {
serializedOutpoint := *outpointKey(*outpoint)
err := wire.WriteVarInt(w, uint64(len(serializedOutpoint)))
err := serializeOutpoint(w, outpoint)
if err != nil {
return err
}
err = binary.Write(w, byteOrder, serializedOutpoint)
if err != nil {
return err
}
serializedUTXOEntry := serializeUTXOEntry(entry)
err = wire.WriteVarInt(w, uint64(len(serializedUTXOEntry)))
if err != nil {
return err
}
err = binary.Write(w, byteOrder, serializedUTXOEntry)
err = serializeUTXOEntry(w, entry)
if err != nil {
return err
}
return nil
}
// serializeUTXOEntry returns the entry serialized to a format that is suitable
// for long-term storage. The format is described in detail above.
func serializeUTXOEntry(entry *UTXOEntry) []byte {
// Encode the header code.
headerCode := utxoEntryHeaderCode(entry)
// p2pkhUTXOEntrySerializeSize is the serialized size for a P2PKH UTXO entry.
// 8 bytes (header code) + 8 bytes (amount) + varint for script pub key length of 25 (for P2PKH) + 25 bytes for P2PKH script.
var p2pkhUTXOEntrySerializeSize = 8 + 8 + wire.VarIntSerializeSize(25) + 25
// Calculate the size needed to serialize the entry.
size := serializeSizeVLQ(headerCode) +
compressedTxOutSize(uint64(entry.Amount()), entry.ScriptPubKey())
// Serialize the header code followed by the compressed unspent
// transaction output.
serialized := make([]byte, size)
offset := putVLQ(serialized, headerCode)
offset += putCompressedTxOut(serialized[offset:], uint64(entry.Amount()),
entry.ScriptPubKey())
return serialized
}
// deserializeOutpoint decodes an outpoint from the passed serialized byte
// slice into a new wire.Outpoint using a format that is suitable for long-
// term storage. This format is described in detail above.
func deserializeOutpoint(serialized []byte) (*wire.Outpoint, error) {
if len(serialized) <= daghash.HashSize {
return nil, errDeserialize("unexpected end of data")
}
txID := daghash.TxID{}
txID.SetBytes(serialized[:daghash.HashSize])
index, _ := deserializeVLQ(serialized[daghash.HashSize:])
return wire.NewOutpoint(&txID, uint32(index)), nil
}
// deserializeUTXOEntry decodes a UTXO entry from the passed serialized byte
// slice into a new UTXOEntry using a format that is suitable for long-term
// storage. The format is described in detail above.
func deserializeUTXOEntry(serialized []byte) (*UTXOEntry, error) {
// Deserialize the header code.
code, offset := deserializeVLQ(serialized)
if offset >= len(serialized) {
return nil, errDeserialize("unexpected end of data after header")
}
// Decode the header code.
//
// Bit 0 indicates whether the containing transaction is a coinbase.
// Bits 1-x encode blue score of the containing transaction.
isCoinbase := code&0x01 != 0
blockBlueScore := code >> 1
// Decode the compressed unspent transaction output.
amount, scriptPubKey, _, err := decodeCompressedTxOut(serialized[offset:])
// serializeUTXOEntry encodes the entry to the given io.Writer and uses compression if useCompression is true.
// The compression format is described in detail above.
func serializeUTXOEntry(w io.Writer, entry *UTXOEntry) error {
// Encode the blueScore.
err := binaryserializer.PutUint64(w, byteOrder, entry.blockBlueScore)
if err != nil {
return nil, errDeserialize(fmt.Sprintf("unable to decode "+
"UTXO: %s", err))
return err
}
// Encode the packedFlags.
err = binaryserializer.PutUint8(w, uint8(entry.packedFlags))
if err != nil {
return err
}
err = binaryserializer.PutUint64(w, byteOrder, entry.Amount())
if err != nil {
return err
}
err = wire.WriteVarInt(w, uint64(len(entry.ScriptPubKey())))
if err != nil {
return err
}
_, err = w.Write(entry.ScriptPubKey())
if err != nil {
return errors.WithStack(err)
}
return nil
}
// deserializeUTXOEntry decodes a UTXO entry from the passed reader
// into a new UTXOEntry. If isCompressed is used it will decompress
// the entry according to the format that is described in detail
// above.
func deserializeUTXOEntry(r io.Reader) (*UTXOEntry, error) {
// Deserialize the blueScore.
blockBlueScore, err := binaryserializer.Uint64(r, byteOrder)
if err != nil {
return nil, err
}
// Decode the packedFlags.
packedFlags, err := binaryserializer.Uint8(r)
if err != nil {
return nil, err
}
entry := &UTXOEntry{
amount: amount,
scriptPubKey: scriptPubKey,
blockBlueScore: blockBlueScore,
packedFlags: 0,
packedFlags: txoFlags(packedFlags),
}
if isCoinbase {
entry.packedFlags |= tfCoinbase
entry.amount, err = binaryserializer.Uint64(r, byteOrder)
if err != nil {
return nil, err
}
scriptPubKeyLen, err := wire.ReadVarInt(r)
if err != nil {
return nil, err
}
entry.scriptPubKey = make([]byte, scriptPubKeyLen)
_, err = r.Read(entry.scriptPubKey)
if err != nil {
return nil, errors.WithStack(err)
}
return entry, nil
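The new entry format is fixed-width blueScore, flags and amount fields followed by a length-prefixed script. A rough standalone sketch of that layout - assuming the package-level byteOrder is little-endian and substituting encoding/binary for the binaryserializer helpers - mirrors the field order serializeUTXOEntry writes:
package main
import (
	"bytes"
	"encoding/binary"
	"fmt"
	"github.com/kaspanet/kaspad/wire"
)
// serializeEntrySketch writes the same field order as serializeUTXOEntry:
// blueScore (8 bytes), packedFlags (1 byte), amount (8 bytes),
// a varint script length, then the script bytes themselves.
func serializeEntrySketch(blueScore uint64, packedFlags uint8, amount uint64, scriptPubKey []byte) ([]byte, error) {
	w := &bytes.Buffer{}
	if err := binary.Write(w, binary.LittleEndian, blueScore); err != nil {
		return nil, err
	}
	if err := w.WriteByte(packedFlags); err != nil {
		return nil, err
	}
	if err := binary.Write(w, binary.LittleEndian, amount); err != nil {
		return nil, err
	}
	if err := wire.WriteVarInt(w, uint64(len(scriptPubKey))); err != nil {
		return nil, err
	}
	if _, err := w.Write(scriptPubKey); err != nil {
		return nil, err
	}
	return w.Bytes(), nil
}
func main() {
	// A P2PKH-sized script: 8 + 1 + 8 + 1 + 25 bytes in total.
	serialized, err := serializeEntrySketch(5, 1, 1000, make([]byte, 25))
	if err != nil {
		panic(err)
	}
	fmt.Println(len(serialized))
}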

View File

@@ -2,12 +2,13 @@ package blockdag
import (
"fmt"
"github.com/pkg/errors"
"math"
"sort"
"strings"
"github.com/kaspanet/kaspad/ecc"
"github.com/pkg/errors"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/wire"
)
@@ -152,29 +153,16 @@ func (uc utxoCollection) clone() utxoCollection {
// UTXODiff represents a diff between two UTXO Sets.
type UTXODiff struct {
toAdd utxoCollection
toRemove utxoCollection
diffMultiset *ecc.Multiset
useMultiset bool
toAdd utxoCollection
toRemove utxoCollection
}
// NewUTXODiffWithoutMultiset creates a new, empty utxoDiff
// NewUTXODiff creates a new, empty utxoDiff
// without a multiset.
func NewUTXODiffWithoutMultiset() *UTXODiff {
return &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
useMultiset: false,
}
}
// NewUTXODiff creates a new, empty utxoDiff.
func NewUTXODiff() *UTXODiff {
return &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
useMultiset: true,
diffMultiset: ecc.NewMultiset(ecc.S256()),
toAdd: utxoCollection{},
toRemove: utxoCollection{},
}
}
@@ -208,9 +196,8 @@ func NewUTXODiff() *UTXODiff {
// diffFrom results in the UTXO being added to toAdd
func (d *UTXODiff) diffFrom(other *UTXODiff) (*UTXODiff, error) {
result := UTXODiff{
toAdd: make(utxoCollection, len(d.toRemove)+len(other.toAdd)),
toRemove: make(utxoCollection, len(d.toAdd)+len(other.toRemove)),
useMultiset: d.useMultiset,
toAdd: make(utxoCollection, len(d.toRemove)+len(other.toAdd)),
toRemove: make(utxoCollection, len(d.toAdd)+len(other.toRemove)),
}
// Note that the following cases are not accounted for, as they are impossible
@@ -224,6 +211,10 @@ func (d *UTXODiff) diffFrom(other *UTXODiff) (*UTXODiff, error) {
for outpoint, utxoEntry := range d.toAdd {
if !other.toAdd.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
result.toRemove.add(outpoint, utxoEntry)
} else if (d.toRemove.contains(outpoint) && !other.toRemove.contains(outpoint)) ||
(!d.toRemove.contains(outpoint) && other.toRemove.contains(outpoint)) {
return nil, errors.New(
"diffFrom: outpoint both in d.toAdd, other.toAdd, and only one of d.toRemove and other.toRemove")
}
if diffEntry, ok := other.toRemove.get(outpoint); ok {
// An exception is made for entries with unequal blue scores
@@ -243,6 +234,18 @@ func (d *UTXODiff) diffFrom(other *UTXODiff) (*UTXODiff, error) {
// If they are not in other.toRemove - should be added in result.toAdd
// If they are in other.toAdd - base utxoSet is not the same
for outpoint, utxoEntry := range d.toRemove {
diffEntry, ok := other.toRemove.get(outpoint)
if ok {
// If other.toRemove has the same entry as d.toRemove - simply don't copy it,
// unless the existing entry has a different blue score, in which case this is an error
if utxoEntry.blockBlueScore != diffEntry.blockBlueScore {
return nil, errors.New("diffFrom: outpoint both in d.toRemove and other.toRemove with different " +
"blue scores, with no corresponding entry in d.toAdd")
}
} else { // if no existing entry - add to result.toAdd
result.toAdd.add(outpoint, utxoEntry)
}
if !other.toRemove.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
result.toAdd.add(outpoint, utxoEntry)
}
@@ -256,7 +259,7 @@ func (d *UTXODiff) diffFrom(other *UTXODiff) (*UTXODiff, error) {
other.toRemove.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore)) {
continue
}
return nil, errors.New("diffFrom: transaction both in d.toRemove and in other.toAdd")
return nil, errors.New("diffFrom: outpoint both in d.toRemove and in other.toAdd")
}
}
@@ -276,124 +279,72 @@ func (d *UTXODiff) diffFrom(other *UTXODiff) (*UTXODiff, error) {
}
}
if d.useMultiset {
// Create a new diffMultiset as the subtraction of the two diffs.
result.diffMultiset = other.diffMultiset.Subtract(d.diffMultiset)
}
return &result, nil
}
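As a concrete illustration of what diffFrom computes, here is a minimal test-style sketch. It assumes it sits inside package blockdag, since diffFrom and the toAdd/toRemove fields are unexported; the test name is illustrative only.
package blockdag
import (
	"testing"
	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/kaspanet/kaspad/wire"
)
func TestDiffFromSketch(t *testing.T) {
	txID, err := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
	if err != nil {
		t.Fatalf("NewTxIDFromStr: %s", err)
	}
	outpoint := *wire.NewOutpoint(txID, 0)
	entry := NewUTXOEntry(&wire.TxOut{ScriptPubKey: []byte{}, Value: 10}, false, 0)
	// this adds the UTXO; other is empty.
	this := NewUTXODiff()
	if err := this.AddEntry(outpoint, entry); err != nil {
		t.Fatalf("AddEntry: %s", err)
	}
	other := NewUTXODiff()
	// diffFrom answers "what has to change to get from this to other?".
	// Since other no longer adds the UTXO, the result removes it.
	result, err := this.diffFrom(other)
	if err != nil {
		t.Fatalf("diffFrom: %s", err)
	}
	if len(result.toAdd) != 0 || !result.toRemove.containsWithBlueScore(outpoint, entry.blockBlueScore) {
		t.Errorf("unexpected diffFrom result: %s", result)
	}
}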
// WithDiff applies provided diff to this diff, creating a new utxoDiff, that would be the result if
// withDiffInPlace applies provided diff to this diff in-place, that would be the result if
// first d, and then diff were applied to the same base
//
// WithDiff follows a set of rules represented by the following 3 by 3 table:
//
// | | this | |
// ---------+-----------+-----------+-----------+-----------
// | | toAdd | toRemove | None
// ---------+-----------+-----------+-----------+-----------
// other | toAdd | X | - | toAdd
// ---------+-----------+-----------+-----------+-----------
// | toRemove | - | X | toRemove
// ---------+-----------+-----------+-----------+-----------
// | None | toAdd | toRemove | -
//
// Key:
// - Don't add anything to the result
// X Return an error
// toAdd Add the UTXO into the toAdd collection of the result
// toRemove Add the UTXO into the toRemove collection of the result
//
// Examples:
// 1. This diff contains a UTXO in toAdd, and the other diff contains it in toRemove
// WithDiff results in nothing being added
// 2. This diff contains a UTXO in toRemove, and the other diff does not contain it
// WithDiff results in the UTXO being added to toRemove
func (d *UTXODiff) withDiffInPlace(diff *UTXODiff) error {
for outpoint, entryToRemove := range diff.toRemove {
if d.toAdd.containsWithBlueScore(outpoint, entryToRemove.blockBlueScore) {
// If already exists in toAdd with the same blueScore - remove from toAdd
d.toAdd.remove(outpoint)
continue
}
if d.toRemove.contains(outpoint) {
// If already exists - this is an error
return errors.Errorf(
"withDiffInPlace: outpoint %s both in d.toRemove and in diff.toRemove", outpoint)
}
// If it exists in neither toAdd nor toRemove - add to toRemove
d.toRemove.add(outpoint, entryToRemove)
}
for outpoint, entryToAdd := range diff.toAdd {
if d.toRemove.containsWithBlueScore(outpoint, entryToAdd.blockBlueScore) {
// If already exists in toRemove with the same blueScore - remove from toRemove
if d.toAdd.contains(outpoint) && !diff.toRemove.contains(outpoint) {
return errors.Errorf(
"withDiffInPlace: outpoint %s both in d.toAdd and in diff.toAdd with no "+
"corresponding entry in diff.toRemove", outpoint)
}
d.toRemove.remove(outpoint)
continue
}
if existingEntry, ok := d.toAdd.get(outpoint); ok &&
(existingEntry.blockBlueScore == entryToAdd.blockBlueScore ||
!diff.toRemove.containsWithBlueScore(outpoint, existingEntry.blockBlueScore)) {
// If already exists - this is an error
return errors.Errorf(
"withDiffInPlace: outpoint %s both in d.toAdd and in diff.toAdd", outpoint)
}
// If it exists in neither toAdd nor toRemove, or exists in toRemove with a different blueScore - add to toAdd
d.toAdd.add(outpoint, entryToAdd)
}
return nil
}
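A minimal test-style sketch of the toAdd/toRemove cancellation from the table above; it assumes it sits inside package blockdag, since withDiffInPlace is unexported, and the test name is illustrative only.
package blockdag
import (
	"testing"
	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/kaspanet/kaspad/wire"
)
func TestWithDiffInPlaceSketch(t *testing.T) {
	txID, err := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
	if err != nil {
		t.Fatalf("NewTxIDFromStr: %s", err)
	}
	outpoint := *wire.NewOutpoint(txID, 0)
	entry := NewUTXOEntry(&wire.TxOut{ScriptPubKey: []byte{}, Value: 10}, false, 0)
	d := NewUTXODiff()
	if err := d.AddEntry(outpoint, entry); err != nil {
		t.Fatalf("AddEntry: %s", err)
	}
	diff := NewUTXODiff()
	if err := diff.RemoveEntry(outpoint, entry); err != nil {
		t.Fatalf("RemoveEntry: %s", err)
	}
	// The entry sits in d.toAdd and in diff.toRemove with the same blue score,
	// so it cancels out and d is mutated into an empty diff.
	if err := d.withDiffInPlace(diff); err != nil {
		t.Fatalf("withDiffInPlace: %s", err)
	}
	if len(d.toAdd) != 0 || len(d.toRemove) != 0 {
		t.Errorf("expected an empty diff, got %s", d)
	}
}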
// WithDiff applies provided diff to this diff, creating a new utxoDiff, that would be the result if
// first d, and then diff were applied to some base
func (d *UTXODiff) WithDiff(diff *UTXODiff) (*UTXODiff, error) {
result := UTXODiff{
toAdd: make(utxoCollection, len(d.toAdd)+len(diff.toAdd)),
toRemove: make(utxoCollection, len(d.toRemove)+len(diff.toRemove)),
useMultiset: d.useMultiset,
clone := d.clone()
err := clone.withDiffInPlace(diff)
if err != nil {
return nil, err
}
// All transactions in d.toAdd:
// If they are not in diff.toRemove - should be added in result.toAdd
// If they are in diff.toAdd - should throw an error
// Otherwise - should be ignored
for outpoint, utxoEntry := range d.toAdd {
if !diff.toRemove.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
result.toAdd.add(outpoint, utxoEntry)
}
if diffEntry, ok := diff.toAdd.get(outpoint); ok {
// An exception is made for entries with unequal blue scores
// as long as the appropriate entry exists in either d.toRemove
// or diff.toRemove.
// These are just "updates" to accepted blue score
if diffEntry.blockBlueScore != utxoEntry.blockBlueScore &&
(d.toRemove.containsWithBlueScore(outpoint, diffEntry.blockBlueScore) ||
diff.toRemove.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore)) {
continue
}
return nil, ruleError(ErrWithDiff, fmt.Sprintf("WithDiff: outpoint %s both in d.toAdd and in other.toAdd", outpoint))
}
}
// All transactions in d.toRemove:
// If they are not in diff.toAdd - should be added in result.toRemove
// If they are in diff.toRemove - should throw an error
// Otherwise - should be ignored
for outpoint, utxoEntry := range d.toRemove {
if !diff.toAdd.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
result.toRemove.add(outpoint, utxoEntry)
}
if diffEntry, ok := diff.toRemove.get(outpoint); ok {
// An exception is made for entries with unequal blue scores
// as long as the appropriate entry exists in either d.toAdd
// or diff.toAdd.
// These are just "updates" to accepted blue score
if diffEntry.blockBlueScore != utxoEntry.blockBlueScore &&
(d.toAdd.containsWithBlueScore(outpoint, diffEntry.blockBlueScore) ||
diff.toAdd.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore)) {
continue
}
return nil, ruleError(ErrWithDiff, "WithDiff: transaction both in d.toRemove and in other.toRemove")
}
}
// All transactions in diff.toAdd:
// If they are not in d.toRemove - should be added in result.toAdd
for outpoint, utxoEntry := range diff.toAdd {
if !d.toRemove.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
result.toAdd.add(outpoint, utxoEntry)
}
}
// All transactions in diff.toRemove:
// If they are not in d.toAdd - should be added in result.toRemove
for outpoint, utxoEntry := range diff.toRemove {
if !d.toAdd.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
result.toRemove.add(outpoint, utxoEntry)
}
}
// Apply diff.diffMultiset to d.diffMultiset
if d.useMultiset {
result.diffMultiset = d.diffMultiset.Union(diff.diffMultiset)
}
return &result, nil
return clone, nil
}
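For code outside the package, composition goes through the exported WithDiff, which clones the receiver and then applies withDiffInPlace. A rough standalone sketch, assuming the kaspad import paths at this revision; per the rule table, toAdd entries with no counterpart in the other diff are carried over unchanged.
package main
import (
	"fmt"
	"github.com/kaspanet/kaspad/blockdag"
	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/kaspanet/kaspad/wire"
)
func main() {
	txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
	txID1, _ := daghash.NewTxIDFromStr("1111111111111111111111111111111111111111111111111111111111111111")
	outpoint0 := *wire.NewOutpoint(txID0, 0)
	outpoint1 := *wire.NewOutpoint(txID1, 0)
	entry0 := blockdag.NewUTXOEntry(&wire.TxOut{ScriptPubKey: []byte{}, Value: 10}, true, 0)
	entry1 := blockdag.NewUTXOEntry(&wire.TxOut{ScriptPubKey: []byte{}, Value: 20}, false, 1)
	// first adds outpoint0, second adds outpoint1.
	first := blockdag.NewUTXODiff()
	if err := first.AddEntry(outpoint0, entry0); err != nil {
		panic(err)
	}
	second := blockdag.NewUTXODiff()
	if err := second.AddEntry(outpoint1, entry1); err != nil {
		panic(err)
	}
	// WithDiff leaves both inputs untouched and returns their composition,
	// which here adds both outpoints.
	combined, err := first.WithDiff(second)
	if err != nil {
		panic(err)
	}
	fmt.Println(combined)
}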
// clone returns a clone of this utxoDiff
func (d *UTXODiff) clone() *UTXODiff {
clone := &UTXODiff{
toAdd: d.toAdd.clone(),
toRemove: d.toRemove.clone(),
useMultiset: d.useMultiset,
}
if d.useMultiset {
clone.diffMultiset = d.diffMultiset.Clone()
toAdd: d.toAdd.clone(),
toRemove: d.toRemove.clone(),
}
return clone
}
@@ -410,14 +361,6 @@ func (d *UTXODiff) AddEntry(outpoint wire.Outpoint, entry *UTXOEntry) error {
} else {
d.toAdd.add(outpoint, entry)
}
if d.useMultiset {
newMs, err := addUTXOToMultiset(d.diffMultiset, entry, &outpoint)
if err != nil {
return err
}
d.diffMultiset = newMs
}
return nil
}
@@ -433,21 +376,10 @@ func (d *UTXODiff) RemoveEntry(outpoint wire.Outpoint, entry *UTXOEntry) error {
} else {
d.toRemove.add(outpoint, entry)
}
if d.useMultiset {
newMs, err := removeUTXOFromMultiset(d.diffMultiset, entry, &outpoint)
if err != nil {
return err
}
d.diffMultiset = newMs
}
return nil
}
func (d UTXODiff) String() string {
if d.useMultiset {
return fmt.Sprintf("toAdd: %s; toRemove: %s, Multiset-Hash: %s", d.toAdd, d.toRemove, d.diffMultiset.Hash())
}
return fmt.Sprintf("toAdd: %s; toRemove: %s", d.toAdd, d.toRemove)
}
@@ -467,97 +399,27 @@ type UTXOSet interface {
fmt.Stringer
diffFrom(other UTXOSet) (*UTXODiff, error)
WithDiff(utxoDiff *UTXODiff) (UTXOSet, error)
diffFromTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error)
diffFromAcceptedTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error)
AddTx(tx *wire.MsgTx, blockBlueScore uint64) (ok bool, err error)
clone() UTXOSet
Get(outpoint wire.Outpoint) (*UTXOEntry, bool)
Multiset() *ecc.Multiset
WithTransactions(transactions []*wire.MsgTx, blockBlueScore uint64, ignoreDoubleSpends bool) (UTXOSet, error)
}
// diffFromTx is a common implementation for diffFromTx, that works
// for both diff-based and full UTXO sets
// Returns a diff that is equivalent to provided transaction,
// or an error if provided transaction is not valid in the context of this UTXOSet
func diffFromTx(u UTXOSet, tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
diff := NewUTXODiff()
isCoinbase := tx.IsCoinBase()
if !isCoinbase {
for _, txIn := range tx.TxIn {
if entry, ok := u.Get(txIn.PreviousOutpoint); ok {
err := diff.RemoveEntry(txIn.PreviousOutpoint, entry)
if err != nil {
return nil, err
}
} else {
return nil, ruleError(ErrMissingTxOut, fmt.Sprintf(
"Transaction %s is invalid because spends outpoint %s that is not in utxo set",
tx.TxID(), txIn.PreviousOutpoint))
}
}
}
for i, txOut := range tx.TxOut {
entry := NewUTXOEntry(txOut, isCoinbase, acceptingBlueScore)
outpoint := *wire.NewOutpoint(tx.TxID(), uint32(i))
err := diff.AddEntry(outpoint, entry)
if err != nil {
return nil, err
}
}
return diff, nil
}
// diffFromAcceptedTx is a common implementation for diffFromAcceptedTx, that works
// for both diff-based and full UTXO sets.
// Returns a diff that replaces an entry's blockBlueScore with the given acceptingBlueScore.
// Returns an error if the provided transaction's entry is not valid in the context
// of this UTXOSet.
func diffFromAcceptedTx(u UTXOSet, tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
diff := NewUTXODiff()
isCoinbase := tx.IsCoinBase()
for i, txOut := range tx.TxOut {
// Fetch any unaccepted transaction
existingOutpoint := *wire.NewOutpoint(tx.TxID(), uint32(i))
existingEntry, ok := u.Get(existingOutpoint)
if !ok {
return nil, errors.Errorf("cannot accept outpoint %s because it doesn't exist in the given UTXO", existingOutpoint)
}
// Remove unaccepted entries
err := diff.RemoveEntry(existingOutpoint, existingEntry)
if err != nil {
return nil, err
}
// Add new entries with their accepting blue score
newEntry := NewUTXOEntry(txOut, isCoinbase, acceptingBlueScore)
err = diff.AddEntry(existingOutpoint, newEntry)
if err != nil {
return nil, err
}
}
return diff, nil
}
// FullUTXOSet represents a full list of transaction outputs and their values
type FullUTXOSet struct {
utxoCollection
UTXOMultiset *ecc.Multiset
}
// NewFullUTXOSet creates a new utxoSet with full list of transaction outputs and their values
func NewFullUTXOSet() *FullUTXOSet {
return &FullUTXOSet{
utxoCollection: utxoCollection{},
UTXOMultiset: ecc.NewMultiset(ecc.S256()),
}
}
// newFullUTXOSetFromUTXOCollection converts a utxoCollection to a FullUTXOSet
func newFullUTXOSetFromUTXOCollection(collection utxoCollection) (*FullUTXOSet, error) {
var err error
multiset := ecc.NewMultiset(ecc.S256())
multiset := secp256k1.NewMultiset()
for outpoint, utxoEntry := range collection {
multiset, err = addUTXOToMultiset(multiset, utxoEntry, &outpoint)
if err != nil {
@@ -566,7 +428,6 @@ func newFullUTXOSetFromUTXOCollection(collection utxoCollection) (*FullUTXOSet,
}
return &FullUTXOSet{
utxoCollection: collection,
UTXOMultiset: multiset,
}, nil
}
@@ -603,33 +464,19 @@ func (fus *FullUTXOSet) AddTx(tx *wire.MsgTx, blueScore uint64) (isAccepted bool
}
for _, txIn := range tx.TxIn {
outpoint := *wire.NewOutpoint(&txIn.PreviousOutpoint.TxID, txIn.PreviousOutpoint.Index)
err := fus.removeAndUpdateMultiset(outpoint)
if err != nil {
return false, err
}
fus.remove(txIn.PreviousOutpoint)
}
}
for i, txOut := range tx.TxOut {
outpoint := *wire.NewOutpoint(tx.TxID(), uint32(i))
entry := NewUTXOEntry(txOut, isCoinbase, blueScore)
err := fus.addAndUpdateMultiset(outpoint, entry)
if err != nil {
return false, err
}
fus.add(outpoint, entry)
}
return true, nil
}
// diffFromTx returns a diff that is equivalent to provided transaction,
// or an error if provided transaction is not valid in the context of this UTXOSet
func (fus *FullUTXOSet) diffFromTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
return diffFromTx(fus, tx, acceptingBlueScore)
}
func (fus *FullUTXOSet) containsInputs(tx *wire.MsgTx) bool {
for _, txIn := range tx.TxIn {
outpoint := *wire.NewOutpoint(&txIn.PreviousOutpoint.TxID, txIn.PreviousOutpoint.Index)
@@ -641,13 +488,9 @@ func (fus *FullUTXOSet) containsInputs(tx *wire.MsgTx) bool {
return true
}
func (fus *FullUTXOSet) diffFromAcceptedTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
return diffFromAcceptedTx(fus, tx, acceptingBlueScore)
}
// clone returns a clone of this utxoSet
func (fus *FullUTXOSet) clone() UTXOSet {
return &FullUTXOSet{utxoCollection: fus.utxoCollection.clone(), UTXOMultiset: fus.UTXOMultiset.Clone()}
return &FullUTXOSet{utxoCollection: fus.utxoCollection.clone()}
}
// Get returns the UTXOEntry associated with the given Outpoint, and a boolean indicating if such entry was found
@@ -656,55 +499,6 @@ func (fus *FullUTXOSet) Get(outpoint wire.Outpoint) (*UTXOEntry, bool) {
return utxoEntry, ok
}
// Multiset returns the ecmh-Multiset of this utxoSet
func (fus *FullUTXOSet) Multiset() *ecc.Multiset {
return fus.UTXOMultiset
}
// addAndUpdateMultiset adds a UTXOEntry to this utxoSet and updates its multiset accordingly
func (fus *FullUTXOSet) addAndUpdateMultiset(outpoint wire.Outpoint, entry *UTXOEntry) error {
fus.add(outpoint, entry)
newMs, err := addUTXOToMultiset(fus.UTXOMultiset, entry, &outpoint)
if err != nil {
return err
}
fus.UTXOMultiset = newMs
return nil
}
// removeAndUpdateMultiset removes a UTXOEntry from this utxoSet and updates its multiset accordingly
func (fus *FullUTXOSet) removeAndUpdateMultiset(outpoint wire.Outpoint) error {
entry, ok := fus.Get(outpoint)
if !ok {
return errors.Errorf("Couldn't find outpoint %s", outpoint)
}
fus.remove(outpoint)
var err error
newMs, err := removeUTXOFromMultiset(fus.UTXOMultiset, entry, &outpoint)
if err != nil {
return err
}
fus.UTXOMultiset = newMs
return nil
}
// WithTransactions returns a new UTXO Set with the added transactions.
//
// This function MUST be called with the DAG lock held.
func (fus *FullUTXOSet) WithTransactions(transactions []*wire.MsgTx, blockBlueScore uint64, ignoreDoubleSpends bool) (UTXOSet, error) {
diffSet := NewDiffUTXOSet(fus, NewUTXODiff())
for _, tx := range transactions {
isAccepted, err := diffSet.AddTx(tx, blockBlueScore)
if err != nil {
return nil, err
}
if !ignoreDoubleSpends && !isAccepted {
return nil, errors.Errorf("Transaction %s is not valid with the current UTXO set", tx.TxID())
}
}
return UTXOSet(diffSet), nil
}
// DiffUTXOSet represents a utxoSet with a base fullUTXOSet and a UTXODiff
type DiffUTXOSet struct {
base *FullUTXOSet
@@ -765,12 +559,11 @@ func (dus *DiffUTXOSet) AddTx(tx *wire.MsgTx, blockBlueScore uint64) (bool, erro
func (dus *DiffUTXOSet) appendTx(tx *wire.MsgTx, blockBlueScore uint64, isCoinbase bool) error {
if !isCoinbase {
for _, txIn := range tx.TxIn {
outpoint := *wire.NewOutpoint(&txIn.PreviousOutpoint.TxID, txIn.PreviousOutpoint.Index)
entry, ok := dus.Get(outpoint)
entry, ok := dus.Get(txIn.PreviousOutpoint)
if !ok {
return errors.Errorf("Couldn't find entry for outpoint %s", outpoint)
return errors.Errorf("Couldn't find entry for outpoint %s", txIn.PreviousOutpoint)
}
err := dus.UTXODiff.RemoveEntry(outpoint, entry)
err := dus.UTXODiff.RemoveEntry(txIn.PreviousOutpoint, entry)
if err != nil {
return err
}
@@ -816,31 +609,12 @@ func (dus *DiffUTXOSet) meldToBase() error {
for outpoint, utxoEntry := range dus.UTXODiff.toAdd {
dus.base.add(outpoint, utxoEntry)
}
if dus.UTXODiff.useMultiset {
dus.base.UTXOMultiset = dus.base.UTXOMultiset.Union(dus.UTXODiff.diffMultiset)
}
if dus.UTXODiff.useMultiset {
dus.UTXODiff = NewUTXODiff()
} else {
dus.UTXODiff = NewUTXODiffWithoutMultiset()
}
dus.UTXODiff = NewUTXODiff()
return nil
}
// diffFromTx returns a diff that is equivalent to provided transaction,
// or an error if provided transaction is not valid in the context of this UTXOSet
func (dus *DiffUTXOSet) diffFromTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
return diffFromTx(dus, tx, acceptingBlueScore)
}
func (dus *DiffUTXOSet) diffFromAcceptedTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
return diffFromAcceptedTx(dus, tx, acceptingBlueScore)
}
func (dus *DiffUTXOSet) String() string {
return fmt.Sprintf("{Base: %s, To Add: %s, To Remove: %s, Multiset-Hash:%s}", dus.base, dus.UTXODiff.toAdd, dus.UTXODiff.toRemove, dus.Multiset().Hash())
return fmt.Sprintf("{Base: %s, To Add: %s, To Remove: %s}", dus.base, dus.UTXODiff.toAdd, dus.UTXODiff.toRemove)
}
// clone returns a clone of this UTXO Set
@@ -848,6 +622,12 @@ func (dus *DiffUTXOSet) clone() UTXOSet {
return NewDiffUTXOSet(dus.base.clone().(*FullUTXOSet), dus.UTXODiff.clone())
}
// cloneWithoutBase returns a *DiffUTXOSet with the same
// base as this *DiffUTXOSet and a cloned diff.
func (dus *DiffUTXOSet) cloneWithoutBase() UTXOSet {
return NewDiffUTXOSet(dus.base, dus.UTXODiff.clone())
}
// Get returns the UTXOEntry associated with provided outpoint in this UTXOSet.
// Returns false in second output if this UTXOEntry was not found
func (dus *DiffUTXOSet) Get(outpoint wire.Outpoint) (*UTXOEntry, bool) {
@@ -865,42 +645,3 @@ func (dus *DiffUTXOSet) Get(outpoint wire.Outpoint) (*UTXOEntry, bool) {
txOut, ok := dus.UTXODiff.toAdd.get(outpoint)
return txOut, ok
}
// Multiset returns the ecmh-Multiset of this utxoSet
func (dus *DiffUTXOSet) Multiset() *ecc.Multiset {
return dus.base.UTXOMultiset.Union(dus.UTXODiff.diffMultiset)
}
// WithTransactions returns a new UTXO Set with the added transactions.
//
// If dus.UTXODiff.useMultiset is true, this function MUST be
// called with the DAG lock held.
func (dus *DiffUTXOSet) WithTransactions(transactions []*wire.MsgTx, blockBlueScore uint64, ignoreDoubleSpends bool) (UTXOSet, error) {
diffSet := NewDiffUTXOSet(dus.base, dus.UTXODiff.clone())
for _, tx := range transactions {
isAccepted, err := diffSet.AddTx(tx, blockBlueScore)
if err != nil {
return nil, err
}
if !ignoreDoubleSpends && !isAccepted {
return nil, errors.Errorf("Transaction %s is not valid with the current UTXO set", tx.TxID())
}
}
return UTXOSet(diffSet), nil
}
func addUTXOToMultiset(ms *ecc.Multiset, entry *UTXOEntry, outpoint *wire.Outpoint) (*ecc.Multiset, error) {
utxoMS, err := utxoMultiset(entry, outpoint)
if err != nil {
return nil, err
}
return ms.Union(utxoMS), nil
}
func removeUTXOFromMultiset(ms *ecc.Multiset, entry *UTXOEntry, outpoint *wire.Outpoint) (*ecc.Multiset, error) {
utxoMS, err := utxoMultiset(entry, outpoint)
if err != nil {
return nil, err
}
return ms.Subtract(utxoMS), nil
}

View File

@@ -1,12 +1,12 @@
package blockdag
import (
"github.com/kaspanet/kaspad/util/subnetworkid"
"math"
"reflect"
"testing"
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
@@ -79,49 +79,40 @@ func TestUTXODiff(t *testing.T) {
utxoEntry0 := NewUTXOEntry(&wire.TxOut{ScriptPubKey: []byte{}, Value: 10}, true, 0)
utxoEntry1 := NewUTXOEntry(&wire.TxOut{ScriptPubKey: []byte{}, Value: 20}, false, 1)
for i := 0; i < 2; i++ {
withMultiset := i == 0
// Test utxoDiff creation
var diff *UTXODiff
if withMultiset {
diff = NewUTXODiff()
} else {
diff = NewUTXODiffWithoutMultiset()
}
if len(diff.toAdd) != 0 || len(diff.toRemove) != 0 {
t.Errorf("new diff is not empty")
}
// Test utxoDiff creation
err := diff.AddEntry(outpoint0, utxoEntry0)
if err != nil {
t.Fatalf("error adding entry to utxo diff: %s", err)
}
diff := NewUTXODiff()
err = diff.RemoveEntry(outpoint1, utxoEntry1)
if err != nil {
t.Fatalf("error adding entry to utxo diff: %s", err)
}
if len(diff.toAdd) != 0 || len(diff.toRemove) != 0 {
t.Errorf("new diff is not empty")
}
// Test utxoDiff cloning
clonedDiff := diff.clone()
if clonedDiff == diff {
t.Errorf("cloned diff is reference-equal to the original")
}
if !reflect.DeepEqual(clonedDiff, diff) {
t.Errorf("cloned diff not equal to the original"+
"Original: \"%v\", cloned: \"%v\".", diff, clonedDiff)
}
err := diff.AddEntry(outpoint0, utxoEntry0)
if err != nil {
t.Fatalf("error adding entry to utxo diff: %s", err)
}
// Test utxoDiff string representation
expectedDiffString := "toAdd: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ]; toRemove: [ (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20, blueScore: 1 ]"
if withMultiset {
expectedDiffString = "toAdd: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ]; toRemove: [ (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20, blueScore: 1 ], Multiset-Hash: 7cb61e48005b0c817211d04589d719bff87d86a6a6ce2454515f57265382ded7"
}
diffString := clonedDiff.String()
if diffString != expectedDiffString {
t.Errorf("unexpected diff string. "+
"Expected: \"%s\", got: \"%s\".", expectedDiffString, diffString)
}
err = diff.RemoveEntry(outpoint1, utxoEntry1)
if err != nil {
t.Fatalf("error adding entry to utxo diff: %s", err)
}
// Test utxoDiff cloning
clonedDiff := diff.clone()
if clonedDiff == diff {
t.Errorf("cloned diff is reference-equal to the original")
}
if !reflect.DeepEqual(clonedDiff, diff) {
t.Errorf("cloned diff not equal to the original"+
"Original: \"%v\", cloned: \"%v\".", diff, clonedDiff)
}
// Test utxoDiff string representation
expectedDiffString := "toAdd: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ]; toRemove: [ (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20, blueScore: 1 ]"
diffString := clonedDiff.String()
if diffString != expectedDiffString {
t.Errorf("unexpected diff string. "+
"Expected: \"%s\", got: \"%s\".", expectedDiffString, diffString)
}
}
@@ -136,8 +127,11 @@ func TestUTXODiffRules(t *testing.T) {
// For each of the following test cases, we will:
// this.diffFrom(other) and compare it to expectedDiffFromResult
// this.WithDiff(other) and compare it to expectedWithDiffResult
// this.withDiffInPlace(other) and compare it to expectedWithDiffResult
//
// Note: an expected nil result means that we expect the respective operation to fail
// See the following spreadsheet for a summary of all test-cases:
// https://docs.google.com/spreadsheets/d/1E8G3mp5y1-yifouwLLXRLueSRfXdDRwRKFieYE07buY/edit?usp=sharing
tests := []struct {
name string
this *UTXODiff
@@ -146,7 +140,7 @@ func TestUTXODiffRules(t *testing.T) {
expectedWithDiffResult *UTXODiff
}{
{
name: "one toAdd in this, one toAdd in other",
name: "first toAdd in this, first toAdd in other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{},
@@ -162,7 +156,23 @@ func TestUTXODiffRules(t *testing.T) {
expectedWithDiffResult: nil,
},
{
name: "one toAdd in this, one toRemove in other",
name: "first in toAdd in this, second in toAdd in other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{},
},
expectedDiffFromResult: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
expectedWithDiffResult: nil,
},
{
name: "first in toAdd in this, second in toRemove in other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{},
@@ -178,7 +188,36 @@ func TestUTXODiffRules(t *testing.T) {
},
},
{
name: "one toAdd in this, empty other",
name: "first in toAdd in this and other, second in toRemove in other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
expectedDiffFromResult: nil,
expectedWithDiffResult: nil,
},
{
name: "first in toAdd in this and toRemove in other, second in toAdd in other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
expectedDiffFromResult: nil,
expectedWithDiffResult: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{},
},
},
{
name: "first in toAdd in this, empty other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{},
@@ -197,7 +236,7 @@ func TestUTXODiffRules(t *testing.T) {
},
},
{
name: "one toRemove in this, one toAdd in other",
name: "first in toRemove in this and in toAdd in other",
this: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{outpoint0: utxoEntry1},
@@ -213,7 +252,23 @@ func TestUTXODiffRules(t *testing.T) {
},
},
{
name: "one toRemove in this, one toRemove in other",
name: "first in toRemove in this, second in toAdd in other",
this: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{},
},
expectedDiffFromResult: nil,
expectedWithDiffResult: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
},
{
name: "first in toRemove in this and other",
this: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{outpoint0: utxoEntry1},
@@ -229,7 +284,49 @@ func TestUTXODiffRules(t *testing.T) {
expectedWithDiffResult: nil,
},
{
name: "one toRemove in this, empty other",
name: "first in toRemove in this, second in toRemove in other",
this: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
other: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
expectedDiffFromResult: nil,
expectedWithDiffResult: nil,
},
{
name: "first in toRemove in this and toAdd in other, second in toRemove in other",
this: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
expectedDiffFromResult: nil,
expectedWithDiffResult: nil,
},
{
name: "first in toRemove in this and other, second in toAdd in other",
this: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
expectedDiffFromResult: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{},
},
expectedWithDiffResult: nil,
},
{
name: "first in toRemove in this, empty other",
this: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{outpoint0: utxoEntry1},
@@ -248,7 +345,116 @@ func TestUTXODiffRules(t *testing.T) {
},
},
{
name: "empty this, one toAdd in other",
name: "first in toAdd in this and other, second in toRemove in this",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{},
},
expectedDiffFromResult: nil,
expectedWithDiffResult: nil,
},
{
name: "first in toAdd in this, second in toRemove in this and toAdd in other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{},
},
expectedDiffFromResult: nil,
expectedWithDiffResult: nil,
},
{
name: "first in toAdd in this and toRemove in other, second in toRemove in this",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
other: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
expectedDiffFromResult: nil,
expectedWithDiffResult: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
},
{
name: "first in toAdd in this, second in toRemove in this and in other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
other: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
expectedDiffFromResult: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
expectedWithDiffResult: nil,
},
{
name: "first in toAdd and second in toRemove in both this and other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
expectedDiffFromResult: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
},
expectedWithDiffResult: nil,
},
{
name: "first in toAdd in this and toRemove in other, second in toRemove in this and toAdd in other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
expectedDiffFromResult: nil,
expectedWithDiffResult: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
},
},
{
name: "first in toAdd and second in toRemove in this, empty other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
other: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
},
expectedDiffFromResult: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
expectedWithDiffResult: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
},
{
name: "empty this, first in toAdd in other",
this: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
@@ -267,7 +473,7 @@ func TestUTXODiffRules(t *testing.T) {
},
},
{
name: "empty this, one toRemove in other",
name: "empty this, first in toRemove in other",
this: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
@@ -285,6 +491,25 @@ func TestUTXODiffRules(t *testing.T) {
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
},
{
name: "empty this, first in toAdd and second in toRemove in other",
this: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
expectedDiffFromResult: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
expectedWithDiffResult: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
},
{
name: "empty this, empty other",
this: &UTXODiff{
@@ -304,244 +529,104 @@ func TestUTXODiffRules(t *testing.T) {
toRemove: utxoCollection{},
},
},
{
name: "equal outpoints different blue scores: first in toAdd in this, second in toAdd in other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{},
},
expectedDiffFromResult: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
expectedWithDiffResult: nil,
},
{
name: "equal outpoints different blue scores: first in toRemove in this, second in toRemove in other",
this: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
other: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
expectedDiffFromResult: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
expectedWithDiffResult: nil,
},
{
name: "equal outpoints different blue scores: first in toAdd and second in toRemove in this, empty other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
other: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
},
expectedDiffFromResult: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
expectedWithDiffResult: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
},
{
name: "equal outpoints different blue scores: empty this, first in toAdd and second in toRemove in other",
this: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
expectedDiffFromResult: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
expectedWithDiffResult: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
},
{
name: "equal outpoints different blue scores: first in toAdd and second in toRemove in both this and other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
expectedDiffFromResult: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
},
expectedWithDiffResult: nil,
},
{
name: "equal outpoints different blue scores: first in toAdd in this and toRemove in other, second in toRemove in this and toAdd in other",
this: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry1},
toRemove: utxoCollection{outpoint0: utxoEntry2},
},
other: &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry2},
toRemove: utxoCollection{outpoint0: utxoEntry1},
},
expectedDiffFromResult: nil,
expectedWithDiffResult: &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
},
},
}
for _, test := range tests {
this := addMultisetToDiff(t, test.this)
other := addMultisetToDiff(t, test.other)
expectedDiffFromResult := addMultisetToDiff(t, test.expectedDiffFromResult)
expectedWithDiffResult := addMultisetToDiff(t, test.expectedWithDiffResult)
// diffFrom from this to other
diffResult, err := this.diffFrom(other)
// diffFrom from test.this to test.other
diffResult, err := test.this.diffFrom(test.other)
// Test whether diffFrom returned an error
isDiffFromOk := err == nil
expectedIsDiffFromOk := expectedDiffFromResult != nil
expectedIsDiffFromOk := test.expectedDiffFromResult != nil
if isDiffFromOk != expectedIsDiffFromOk {
t.Errorf("unexpected diffFrom error in test \"%s\". "+
"Expected: \"%t\", got: \"%t\".", test.name, expectedIsDiffFromOk, isDiffFromOk)
}
// If not error, test the diffFrom result
if isDiffFromOk && !expectedDiffFromResult.equal(diffResult) {
if isDiffFromOk && !test.expectedDiffFromResult.equal(diffResult) {
t.Errorf("unexpected diffFrom result in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, expectedDiffFromResult, diffResult)
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedDiffFromResult, diffResult)
}
// Make sure that WithDiff after diffFrom results in the original other
// Make sure that WithDiff after diffFrom results in the original test.other
if isDiffFromOk {
otherResult, err := this.WithDiff(diffResult)
otherResult, err := test.this.WithDiff(diffResult)
if err != nil {
t.Errorf("WithDiff unexpectedly failed in test \"%s\": %s", test.name, err)
}
if !other.equal(otherResult) {
if !test.other.equal(otherResult) {
t.Errorf("unexpected WithDiff result in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, other, otherResult)
"Expected: \"%v\", got: \"%v\".", test.name, test.other, otherResult)
}
}
// WithDiff from this to other
withDiffResult, err := this.WithDiff(other)
// WithDiff from test.this to test.other
withDiffResult, err := test.this.WithDiff(test.other)
// Test whether WithDiff returned an error
isWithDiffOk := err == nil
expectedIsWithDiffOk := expectedWithDiffResult != nil
expectedIsWithDiffOk := test.expectedWithDiffResult != nil
if isWithDiffOk != expectedIsWithDiffOk {
t.Errorf("unexpected WithDiff error in test \"%s\". "+
"Expected: \"%t\", got: \"%t\".", test.name, expectedIsWithDiffOk, isWithDiffOk)
}
// If not error, test the WithDiff result
if isWithDiffOk && !withDiffResult.equal(expectedWithDiffResult) {
if isWithDiffOk && !withDiffResult.equal(test.expectedWithDiffResult) {
t.Errorf("unexpected WithDiff result in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, expectedWithDiffResult, withDiffResult)
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedWithDiffResult, withDiffResult)
}
// Make sure that diffFrom after WithDiff results in the original other
// Repeat the WithDiff check, this time using withDiffInPlace
thisClone := test.this.clone()
err = thisClone.withDiffInPlace(test.other)
// Test whether withDiffInPlace returned an error
isWithDiffInPlaceOk := err == nil
expectedIsWithDiffInPlaceOk := test.expectedWithDiffResult != nil
if isWithDiffInPlaceOk != expectedIsWithDiffInPlaceOk {
t.Errorf("unexpected withDiffInPlace error in test \"%s\". "+
"Expected: \"%t\", got: \"%t\".", test.name, expectedIsWithDiffInPlaceOk, isWithDiffInPlaceOk)
}
// If not error, test the withDiffInPlace result
if isWithDiffInPlaceOk && !thisClone.equal(test.expectedWithDiffResult) {
t.Errorf("unexpected withDiffInPlace result in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedWithDiffResult, thisClone)
}
// Make sure that diffFrom after WithDiff results in the original test.other
if isWithDiffOk {
otherResult, err := this.diffFrom(withDiffResult)
otherResult, err := test.this.diffFrom(withDiffResult)
if err != nil {
t.Errorf("diffFrom unexpectedly failed in test \"%s\": %s", test.name, err)
}
if !other.equal(otherResult) {
if !test.other.equal(otherResult) {
t.Errorf("unexpected diffFrom result in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, other, otherResult)
"Expected: \"%v\", got: \"%v\".", test.name, test.other, otherResult)
}
}
}
}
func areMultisetsEqual(a *ecc.Multiset, b *ecc.Multiset) bool {
aX, aY := a.Point()
bX, bY := b.Point()
return aX.Cmp(bX) == 0 && aY.Cmp(bY) == 0
}
func (d *UTXODiff) equal(other *UTXODiff) bool {
if d == nil || other == nil {
return d == other
}
return reflect.DeepEqual(d.toAdd, other.toAdd) &&
reflect.DeepEqual(d.toRemove, other.toRemove) &&
areMultisetsEqual(d.diffMultiset, other.diffMultiset)
reflect.DeepEqual(d.toRemove, other.toRemove)
}
func (fus *FullUTXOSet) equal(other *FullUTXOSet) bool {
return reflect.DeepEqual(fus.utxoCollection, other.utxoCollection) &&
areMultisetsEqual(fus.UTXOMultiset, other.UTXOMultiset)
return reflect.DeepEqual(fus.utxoCollection, other.utxoCollection)
}
func (dus *DiffUTXOSet) equal(other *DiffUTXOSet) bool {
return dus.base.equal(other.base) && dus.UTXODiff.equal(other.UTXODiff)
}
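The nil checks at the top of these equal helpers follow a common Go idiom: two nil values are considered equal, a nil/non-nil pair is not, and only then are the contents compared. A minimal, self-contained sketch of the same idiom (the diff type below is a hypothetical stand-in, not the real UTXODiff):
package main
import (
	"fmt"
	"reflect"
)
// diff is a simplified stand-in for UTXODiff, used only to illustrate the idiom.
type diff struct {
	toAdd    map[string]int
	toRemove map[string]int
}
// equal treats two nil receivers as equal, a nil/non-nil pair as unequal,
// and otherwise deep-compares the contents.
func (d *diff) equal(other *diff) bool {
	if d == nil || other == nil {
		return d == other
	}
	return reflect.DeepEqual(d.toAdd, other.toAdd) &&
		reflect.DeepEqual(d.toRemove, other.toRemove)
}
func main() {
	var a, b *diff
	fmt.Println(a.equal(b))               // true: both nil
	fmt.Println(a.equal(&diff{}))         // false: nil vs. non-nil
	fmt.Println((&diff{}).equal(&diff{})) // true: contents are deeply equal
}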
func addMultisetToDiff(t *testing.T, diff *UTXODiff) *UTXODiff {
if diff == nil {
return nil
}
diffWithMs := NewUTXODiff()
for outpoint, entry := range diff.toAdd {
err := diffWithMs.AddEntry(outpoint, entry)
if err != nil {
t.Fatalf("Error with diffWithMs.AddEntry: %s", err)
}
}
for outpoint, entry := range diff.toRemove {
err := diffWithMs.RemoveEntry(outpoint, entry)
if err != nil {
t.Fatalf("Error with diffWithMs.removeEntry: %s", err)
}
}
return diffWithMs
}
func addMultisetToFullUTXOSet(t *testing.T, fus *FullUTXOSet) *FullUTXOSet {
if fus == nil {
return nil
}
fusWithMs := NewFullUTXOSet()
for outpoint, entry := range fus.utxoCollection {
err := fusWithMs.addAndUpdateMultiset(outpoint, entry)
if err != nil {
t.Fatalf("Error with diffWithMs.AddEntry: %s", err)
}
}
return fusWithMs
}
func addMultisetToDiffUTXOSet(t *testing.T, diffSet *DiffUTXOSet) *DiffUTXOSet {
if diffSet == nil {
return nil
}
diffWithMs := addMultisetToDiff(t, diffSet.UTXODiff)
baseWithMs := addMultisetToFullUTXOSet(t, diffSet.base)
return NewDiffUTXOSet(baseWithMs, diffWithMs)
}
// TestFullUTXOSet makes sure that fullUTXOSet is working as expected.
func TestFullUTXOSet(t *testing.T) {
txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
@@ -552,10 +637,10 @@ func TestFullUTXOSet(t *testing.T) {
txOut1 := &wire.TxOut{ScriptPubKey: []byte{}, Value: 20}
utxoEntry0 := NewUTXOEntry(txOut0, true, 0)
utxoEntry1 := NewUTXOEntry(txOut1, false, 1)
diff := addMultisetToDiff(t, &UTXODiff{
diff := &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry0},
toRemove: utxoCollection{outpoint1: utxoEntry1},
})
}
// Test fullUTXOSet creation
emptySet := NewFullUTXOSet()
@@ -584,7 +669,7 @@ func TestFullUTXOSet(t *testing.T) {
} else if isAccepted {
t.Errorf("addTx unexpectedly succeeded")
}
emptySet = addMultisetToFullUTXOSet(t, &FullUTXOSet{utxoCollection: utxoCollection{outpoint0: utxoEntry0}})
emptySet = &FullUTXOSet{utxoCollection: utxoCollection{outpoint0: utxoEntry0}}
if isAccepted, err := emptySet.AddTx(transaction0, 0); err != nil {
t.Errorf("addTx unexpectedly failed. Error: %s", err)
} else if !isAccepted {
@@ -616,10 +701,10 @@ func TestDiffUTXOSet(t *testing.T) {
txOut1 := &wire.TxOut{ScriptPubKey: []byte{}, Value: 20}
utxoEntry0 := NewUTXOEntry(txOut0, true, 0)
utxoEntry1 := NewUTXOEntry(txOut1, false, 1)
diff := addMultisetToDiff(t, &UTXODiff{
diff := &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry0},
toRemove: utxoCollection{outpoint1: utxoEntry1},
})
}
// Test diffUTXOSet creation
emptySet := NewDiffUTXOSet(NewFullUTXOSet(), NewUTXODiff())
@@ -677,7 +762,7 @@ func TestDiffUTXOSet(t *testing.T) {
toRemove: utxoCollection{},
},
},
expectedString: "{Base: [ ], To Add: [ ], To Remove: [ ], Multiset-Hash:0000000000000000000000000000000000000000000000000000000000000000}",
expectedString: "{Base: [ ], To Add: [ ], To Remove: [ ]}",
expectedCollection: utxoCollection{},
},
{
@@ -696,7 +781,7 @@ func TestDiffUTXOSet(t *testing.T) {
toRemove: utxoCollection{},
},
},
expectedString: "{Base: [ ], To Add: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Remove: [ ], Multiset-Hash:da4768bd0359c3426268d6707c1fc17a68c45ef1ea734331b07568418234487f}",
expectedString: "{Base: [ ], To Add: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Remove: [ ]}",
expectedCollection: utxoCollection{outpoint0: utxoEntry0},
},
{
@@ -709,7 +794,7 @@ func TestDiffUTXOSet(t *testing.T) {
},
},
expectedMeldSet: nil,
expectedString: "{Base: [ ], To Add: [ ], To Remove: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], Multiset-Hash:046242cb1bb1e6d3fd91d0f181e1b2d4a597ac57fa2584fc3c2eb0e0f46c9369}",
expectedString: "{Base: [ ], To Add: [ ], To Remove: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ]}",
expectedCollection: utxoCollection{},
expectedMeldToBaseError: "Couldn't remove outpoint 0000000000000000000000000000000000000000000000000000000000000000:0 because it doesn't exist in the DiffUTXOSet base",
},
@@ -734,7 +819,7 @@ func TestDiffUTXOSet(t *testing.T) {
toRemove: utxoCollection{},
},
},
expectedString: "{Base: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Add: [ (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20, blueScore: 1 ], To Remove: [ ], Multiset-Hash:556cc61fd4d7e74d7807ca2298c5320375a6a20310a18920e54667220924baff}",
expectedString: "{Base: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Add: [ (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20, blueScore: 1 ], To Remove: [ ]}",
expectedCollection: utxoCollection{
outpoint0: utxoEntry0,
outpoint1: utxoEntry1,
@@ -758,24 +843,21 @@ func TestDiffUTXOSet(t *testing.T) {
toRemove: utxoCollection{},
},
},
expectedString: "{Base: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Add: [ ], To Remove: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], Multiset-Hash:0000000000000000000000000000000000000000000000000000000000000000}",
expectedString: "{Base: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Add: [ ], To Remove: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ]}",
expectedCollection: utxoCollection{},
},
}
for _, test := range tests {
diffSet := addMultisetToDiffUTXOSet(t, test.diffSet)
expectedMeldSet := addMultisetToDiffUTXOSet(t, test.expectedMeldSet)
// Test string representation
setString := diffSet.String()
setString := test.diffSet.String()
if setString != test.expectedString {
t.Errorf("unexpected string in test \"%s\". "+
"Expected: \"%s\", got: \"%s\".", test.name, test.expectedString, setString)
}
// Test meldToBase
meldSet := diffSet.clone().(*DiffUTXOSet)
meldSet := test.diffSet.clone().(*DiffUTXOSet)
err := meldSet.meldToBase()
errString := ""
if err != nil {
@@ -787,27 +869,27 @@ func TestDiffUTXOSet(t *testing.T) {
if err != nil {
continue
}
if !meldSet.equal(expectedMeldSet) {
if !meldSet.equal(test.expectedMeldSet) {
t.Errorf("unexpected melded set in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, expectedMeldSet, meldSet)
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedMeldSet, meldSet)
}
// Test collection
setCollection, err := diffSet.collection()
setCollection, err := test.diffSet.collection()
if err != nil {
t.Errorf("Error getting diffSet collection: %s", err)
t.Errorf("Error getting test.diffSet collection: %s", err)
} else if !reflect.DeepEqual(setCollection, test.expectedCollection) {
t.Errorf("unexpected set collection in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedCollection, setCollection)
}
// Test cloning
clonedSet := diffSet.clone().(*DiffUTXOSet)
if !reflect.DeepEqual(clonedSet, diffSet) {
clonedSet := test.diffSet.clone().(*DiffUTXOSet)
if !reflect.DeepEqual(clonedSet, test.diffSet) {
t.Errorf("unexpected set clone in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, diffSet, clonedSet)
"Expected: \"%v\", got: \"%v\".", test.name, test.diffSet, clonedSet)
}
if clonedSet == diffSet {
if clonedSet == test.diffSet {
t.Errorf("cloned set is reference-equal to the original")
}
}
@@ -1008,10 +1090,7 @@ func TestDiffUTXOSet_addTx(t *testing.T) {
testLoop:
for _, test := range tests {
startSet := addMultisetToDiffUTXOSet(t, test.startSet)
expectedSet := addMultisetToDiffUTXOSet(t, test.expectedSet)
diffSet := startSet.clone()
diffSet := test.startSet.clone()
// Apply all transactions to diffSet, in order, with the initial block height startHeight
for i, transaction := range test.toAdd {
@@ -1023,89 +1102,14 @@ testLoop:
}
}
// Make sure that the result diffSet equals the expectedSet
if !diffSet.(*DiffUTXOSet).equal(expectedSet) {
// Make sure that the result diffSet equals test.expectedSet
if !diffSet.(*DiffUTXOSet).equal(test.expectedSet) {
t.Errorf("unexpected diffSet in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, expectedSet, diffSet)
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedSet, diffSet)
}
}
}
func TestDiffFromTx(t *testing.T) {
fus := addMultisetToFullUTXOSet(t, &FullUTXOSet{
utxoCollection: utxoCollection{},
})
txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
txIn0 := &wire.TxIn{SignatureScript: []byte{}, PreviousOutpoint: wire.Outpoint{TxID: *txID0, Index: math.MaxUint32}, Sequence: 0}
txOut0 := &wire.TxOut{ScriptPubKey: []byte{0}, Value: 10}
cbTx := wire.NewSubnetworkMsgTx(1, []*wire.TxIn{txIn0}, []*wire.TxOut{txOut0}, subnetworkid.SubnetworkIDCoinbase, 0, nil)
if isAccepted, err := fus.AddTx(cbTx, 1); err != nil {
t.Fatalf("AddTx unexpectedly failed. Error: %s", err)
} else if !isAccepted {
t.Fatalf("AddTx unexpectedly didn't add tx %s", cbTx.TxID())
}
acceptingBlueScore := uint64(2)
cbOutpoint := wire.Outpoint{TxID: *cbTx.TxID(), Index: 0}
txIns := []*wire.TxIn{{
PreviousOutpoint: cbOutpoint,
SignatureScript: nil,
Sequence: wire.MaxTxInSequenceNum,
}}
txOuts := []*wire.TxOut{{
ScriptPubKey: OpTrueScript,
Value: uint64(1),
}}
tx := wire.NewNativeMsgTx(wire.TxVersion, txIns, txOuts)
diff, err := fus.diffFromTx(tx, acceptingBlueScore)
if err != nil {
t.Errorf("diffFromTx: %v", err)
}
if !reflect.DeepEqual(diff.toAdd, utxoCollection{
wire.Outpoint{TxID: *tx.TxID(), Index: 0}: NewUTXOEntry(tx.TxOut[0], false, 2),
}) {
t.Errorf("diff.toAdd doesn't have the expected values")
}
if !reflect.DeepEqual(diff.toRemove, utxoCollection{
wire.Outpoint{TxID: *cbTx.TxID(), Index: 0}: NewUTXOEntry(cbTx.TxOut[0], true, 1),
}) {
t.Errorf("diff.toRemove doesn't have the expected values")
}
// Test that we get an error if we don't have the outpoint inside the utxo set
invalidTxIns := []*wire.TxIn{{
PreviousOutpoint: wire.Outpoint{TxID: daghash.TxID{}, Index: 0},
SignatureScript: nil,
Sequence: wire.MaxTxInSequenceNum,
}}
invalidTxOuts := []*wire.TxOut{{
ScriptPubKey: OpTrueScript,
Value: uint64(1),
}}
invalidTx := wire.NewNativeMsgTx(wire.TxVersion, invalidTxIns, invalidTxOuts)
_, err = fus.diffFromTx(invalidTx, acceptingBlueScore)
if err == nil {
t.Errorf("diffFromTx: expected an error but got <nil>")
}
// Test that we get an error if the outpoint is inside diffUTXOSet's toRemove
diff2 := addMultisetToDiff(t, &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
})
dus := NewDiffUTXOSet(fus, diff2)
if isAccepted, err := dus.AddTx(tx, 2); err != nil {
t.Fatalf("AddTx unexpectedly failed. Error: %s", err)
} else if !isAccepted {
t.Fatalf("AddTx unexpectedly didn't add tx %s", tx.TxID())
}
_, err = dus.diffFromTx(tx, acceptingBlueScore)
if err == nil {
t.Errorf("diffFromTx: expected an error but got <nil>")
}
}
// collection returns a collection of all UTXOs in this set
func (fus *FullUTXOSet) collection() utxoCollection {
return fus.utxoCollection.clone()
@@ -1170,7 +1174,6 @@ func TestUTXOSetAddEntry(t *testing.T) {
}
for _, test := range tests {
expectedUTXODiff := addMultisetToDiff(t, test.expectedUTXODiff)
err := utxoDiff.AddEntry(*test.outpointToAdd, test.utxoEntryToAdd)
errString := ""
if err != nil {
@@ -1179,9 +1182,9 @@ func TestUTXOSetAddEntry(t *testing.T) {
if errString != test.expectedError {
t.Fatalf("utxoDiff.AddEntry: unexpected err in test \"%s\". Expected: %s but got: %s", test.name, test.expectedError, err)
}
if err == nil && !utxoDiff.equal(expectedUTXODiff) {
if err == nil && !utxoDiff.equal(test.expectedUTXODiff) {
t.Fatalf("utxoDiff.AddEntry: unexpected utxoDiff in test \"%s\". "+
"Expected: %v, got: %v", test.name, expectedUTXODiff, utxoDiff)
"Expected: %v, got: %v", test.name, test.expectedUTXODiff, utxoDiff)
}
}
}

View File

@@ -435,7 +435,7 @@ func (dag *BlockDAG) checkBlockHeaderSanity(header *wire.BlockHeader, flags Beha
// the duration of time that should be waited before the block becomes valid.
// This check needs to be last as it does not return an error but rather marks the
// header as delayed (and valid).
maxTimestamp := dag.AdjustedTime().Add(time.Second *
maxTimestamp := dag.Now().Add(time.Second *
time.Duration(int64(dag.TimestampDeviationTolerance)*dag.targetTimePerBlock))
if header.Timestamp.After(maxTimestamp) {
return header.Timestamp.Sub(maxTimestamp), nil
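The window computed above is simply the adjusted time plus TimestampDeviationTolerance blocks' worth of target block time; anything later is returned as the delay to wait before the header becomes valid. A small standalone sketch of the same arithmetic (the function and values below are illustrative stand-ins, not the real BlockDAG fields):
package main
import (
	"fmt"
	"time"
)
// allowedDelay mirrors the check above: it returns how long a header with the
// given timestamp must be delayed, or zero if it is already within the window.
func allowedDelay(now, headerTimestamp time.Time, deviationTolerance, targetTimePerBlock int64) time.Duration {
	maxTimestamp := now.Add(time.Second * time.Duration(deviationTolerance*targetTimePerBlock))
	if headerTimestamp.After(maxTimestamp) {
		return headerTimestamp.Sub(maxTimestamp)
	}
	return 0
}
func main() {
	now := time.Now()
	// Illustrative values: a tolerance of 132 blocks at one second per block
	// accepts headers up to 132 seconds into the future.
	fmt.Println(allowedDelay(now, now.Add(100*time.Second), 132, 1)) // 0s
	fmt.Println(allowedDelay(now, now.Add(142*time.Second), 132, 1)) // 10s
}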
@@ -446,9 +446,9 @@ func (dag *BlockDAG) checkBlockHeaderSanity(header *wire.BlockHeader, flags Beha
// checkBlockParentsOrder ensures that the block's parents are ordered by hash
func checkBlockParentsOrder(header *wire.BlockHeader) error {
sortedHashes := make([]*daghash.Hash, 0, header.NumParentBlocks())
for _, hash := range header.ParentHashes {
sortedHashes = append(sortedHashes, hash)
sortedHashes := make([]*daghash.Hash, header.NumParentBlocks())
for i, hash := range header.ParentHashes {
sortedHashes[i] = hash
}
sort.Slice(sortedHashes, func(i, j int) bool {
return daghash.Less(sortedHashes[i], sortedHashes[j])
@@ -545,6 +545,20 @@ func (dag *BlockDAG) checkBlockSanity(block *util.Block, flags BehaviorFlags) (t
existingTxIDs[*id] = struct{}{}
}
// Check for double spends with transactions in the same block.
usedOutpoints := make(map[wire.Outpoint]*daghash.TxID)
for _, tx := range transactions {
for _, txIn := range tx.MsgTx().TxIn {
if spendingTxID, exists := usedOutpoints[txIn.PreviousOutpoint]; exists {
str := fmt.Sprintf("transaction %s spends "+
"outpoint %s that was already spent by "+
"transaction %s in this block", tx.ID(), txIn.PreviousOutpoint, spendingTxID)
return 0, ruleError(ErrDoubleSpendInSameBlock, str)
}
usedOutpoints[txIn.PreviousOutpoint] = tx.ID()
}
}
return delay, nil
}
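The check added above is a single pass over the block's transactions with a map from each spent outpoint to the transaction that spent it; the first outpoint seen twice triggers ErrDoubleSpendInSameBlock. The same pattern in isolation, with simplified stand-in types instead of wire.Outpoint and util.Tx:
package main
import "fmt"
// outpoint and tx are simplified stand-ins for wire.Outpoint and util.Tx.
type outpoint struct {
	txID  string
	index uint32
}
type tx struct {
	id     string
	inputs []outpoint
}
// findDoubleSpend returns the IDs of two transactions that spend the same
// outpoint, or ok=false if every outpoint is spent at most once.
func findDoubleSpend(transactions []tx) (first, second string, ok bool) {
	usedOutpoints := make(map[outpoint]string)
	for _, t := range transactions {
		for _, in := range t.inputs {
			if spender, exists := usedOutpoints[in]; exists {
				return spender, t.id, true
			}
			usedOutpoints[in] = t.id
		}
	}
	return "", "", false
}
func main() {
	txs := []tx{
		{id: "a", inputs: []outpoint{{"prev", 0}}},
		{id: "b", inputs: []outpoint{{"prev", 0}}}, // spends the same outpoint as "a"
	}
	if first, second, ok := findDoubleSpend(txs); ok {
		fmt.Printf("%s and %s spend the same outpoint\n", first, second)
	}
}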
@@ -598,41 +612,32 @@ func (dag *BlockDAG) validateDifficulty(header *wire.BlockHeader, bluestParent *
}
// validateParents validates that no parent is an ancestor of another parent, and no parent is finalized
func validateParents(blockHeader *wire.BlockHeader, parents blockSet) error {
minBlueScore := uint64(math.MaxUint64)
queue := newDownHeap()
visited := newSet()
for parent := range parents {
func (dag *BlockDAG) validateParents(blockHeader *wire.BlockHeader, parents blockSet) error {
for parentA := range parents {
// isFinalized might be false-negative because node finality status is
// updated in a separate goroutine. This is why later the block is
// checked more thoroughly on the finality rules in dag.checkFinalityRules.
if parent.isFinalized {
return ruleError(ErrFinality, fmt.Sprintf("block %s is a finalized parent of block %s", parent.hash, blockHeader.BlockHash()))
if parentA.isFinalized {
return ruleError(ErrFinality, fmt.Sprintf("block %s is a finalized "+
"parent of block %s", parentA.hash, blockHeader.BlockHash()))
}
if parent.blueScore < minBlueScore {
minBlueScore = parent.blueScore
}
for grandParent := range parent.parents {
if !visited.contains(grandParent) {
queue.Push(grandParent)
visited.add(grandParent)
for parentB := range parents {
if parentA == parentB {
continue
}
}
}
for queue.Len() > 0 {
current := queue.pop()
if parents.contains(current) {
return ruleError(ErrInvalidParentsRelation, fmt.Sprintf("block %s is both a parent of %s and an"+
" ancestor of another parent",
current.hash,
blockHeader.BlockHash()))
}
if current.blueScore > minBlueScore {
for parent := range current.parents {
if !visited.contains(parent) {
queue.Push(parent)
visited.add(parent)
}
isAncestorOf, err := dag.isAncestorOf(parentA, parentB)
if err != nil {
return err
}
if isAncestorOf {
return ruleError(ErrInvalidParentsRelation, fmt.Sprintf("block %s is both a parent of %s and an"+
" ancestor of another parent %s",
parentA.hash,
blockHeader.BlockHash(),
parentB.hash,
))
}
}
}
@@ -654,7 +659,7 @@ func (dag *BlockDAG) checkBlockContext(block *util.Block, parents blockSet, flag
bluestParent := parents.bluest()
fastAdd := flags&BFFastAdd == BFFastAdd
err := validateParents(&block.MsgBlock().Header, parents)
err := dag.validateParents(&block.MsgBlock().Header, parents)
if err != nil {
return err
}
@@ -847,6 +852,11 @@ func (dag *BlockDAG) checkConnectToPastUTXO(block *blockNode, pastUTXO UTXOSet,
return nil, err
}
err = checkDoubleSpendsWithBlockPast(pastUTXO, transactions)
if err != nil {
return nil, err
}
if err := validateBlockMass(pastUTXO, transactions); err != nil {
return nil, err
}
@@ -922,7 +932,7 @@ func (dag *BlockDAG) checkConnectToPastUTXO(block *blockNode, pastUTXO UTXOSet,
// Now that the inexpensive checks are done and have passed, verify the
// transactions are actually allowed to spend the coins by running the
// expensive ECDSA signature check scripts. Doing this last helps
// expensive SCHNORR signature check scripts. Doing this last helps
// prevent CPU exhaustion attacks.
err := checkBlockScripts(block, pastUTXO, transactions, scriptFlags, dag.sigCache)
if err != nil {

View File

@@ -10,6 +10,8 @@ import (
"testing"
"time"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
@@ -68,7 +70,7 @@ func TestSequenceLocksActive(t *testing.T) {
// ensure it fails.
func TestCheckConnectBlockTemplate(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("checkconnectblocktemplate", Config{
dag, teardownFunc, err := DAGSetup("checkconnectblocktemplate", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
@@ -160,7 +162,7 @@ func TestCheckConnectBlockTemplate(t *testing.T) {
// as expected.
func TestCheckBlockSanity(t *testing.T) {
// Create a new database and dag instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestCheckBlockSanity", Config{
dag, teardownFunc, err := DAGSetup("TestCheckBlockSanity", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
@@ -168,6 +170,7 @@ func TestCheckBlockSanity(t *testing.T) {
return
}
defer teardownFunc()
dag.timeSource = newFakeTimeSource(time.Now())
block := util.NewBlock(&Block100000)
if len(block.Transactions()) < 3 {
@@ -186,11 +189,12 @@ func TestCheckBlockSanity(t *testing.T) {
if err == nil {
t.Errorf("CheckBlockSanity: transactions disorder is not detected")
}
ruleErr, ok := err.(RuleError)
if !ok {
var ruleErr RuleError
if !errors.As(err, &ruleErr) {
t.Errorf("CheckBlockSanity: wrong error returned, expect RuleError, got %T", err)
} else if ruleErr.ErrorCode != ErrTransactionsNotSorted {
t.Errorf("CheckBlockSanity: wrong error returned, expect ErrTransactionsNotSorted, got %v, err %s", ruleErr.ErrorCode, err)
t.Errorf("CheckBlockSanity: wrong error returned, expect ErrTransactionsNotSorted, got"+
" %v, err %s", ruleErr.ErrorCode, err)
}
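Replacing the direct type assertion with errors.As is a change made throughout this PR: errors.As also matches errors that have been wrapped, for example with github.com/pkg/errors, whereas err.(RuleError) only inspects the outermost value. A minimal sketch of the difference, using a hypothetical ruleError type:
package main
import (
	"fmt"
	"github.com/pkg/errors"
)
// ruleError is a hypothetical error type standing in for RuleError.
type ruleError struct{ code int }
func (e ruleError) Error() string { return fmt.Sprintf("rule error %d", e.code) }
func main() {
	wrapped := errors.Wrap(ruleError{code: 7}, "while validating block")
	// A direct type assertion only sees the outermost error, so it fails here.
	if _, ok := wrapped.(ruleError); !ok {
		fmt.Println("type assertion: not a ruleError")
	}
	// errors.As unwraps the chain and finds the underlying ruleError.
	var rErr ruleError
	if errors.As(wrapped, &rErr) {
		fmt.Println("errors.As: found code", rErr.code)
	}
}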
if delay != 0 {
t.Errorf("CheckBlockSanity: unexpected return %s delay", delay)
@@ -479,8 +483,10 @@ func TestCheckBlockSanity(t *testing.T) {
if err == nil {
t.Errorf("CheckBlockSanity: error is nil when it shouldn't be")
}
rError := err.(RuleError)
if rError.ErrorCode != ErrWrongParentsOrder {
var rError RuleError
if !errors.As(err, &rError) {
t.Fatalf("CheckBlockSanity: expected a RuleError, but got %s", err)
} else if rError.ErrorCode != ErrWrongParentsOrder {
t.Errorf("CheckBlockSanity: Expected error was ErrWrongParentsOrder but got %v", err)
}
if delay != 0 {
@@ -489,8 +495,8 @@ func TestCheckBlockSanity(t *testing.T) {
blockInTheFuture := Block100000
expectedDelay := 10 * time.Second
now := time.Unix(time.Now().Unix(), 0)
blockInTheFuture.Header.Timestamp = now.Add(time.Duration(dag.TimestampDeviationTolerance)*time.Second + expectedDelay)
deviationTolerance := time.Duration(dag.TimestampDeviationTolerance*uint64(dag.targetTimePerBlock)) * time.Second
blockInTheFuture.Header.Timestamp = dag.Now().Add(deviationTolerance + expectedDelay)
delay, err = dag.checkBlockSanity(util.NewBlock(&blockInTheFuture), BFNoPoWCheck)
if err != nil {
t.Errorf("CheckBlockSanity: %v", err)
@@ -509,14 +515,14 @@ func TestPastMedianTime(t *testing.T) {
for i := 0; i < 100; i++ {
blockTime = blockTime.Add(time.Second)
tip = newTestNode(dag, setFromSlice(tip),
tip = newTestNode(dag, blockSetFromSlice(tip),
blockVersion,
0,
blockTime)
}
// Checks that a block is valid if it has timestamp equals to past median time
node := newTestNode(dag, setFromSlice(tip),
node := newTestNode(dag, blockSetFromSlice(tip),
blockVersion,
dag.powMaxBits,
tip.PastMedianTime(dag))
@@ -529,7 +535,7 @@ func TestPastMedianTime(t *testing.T) {
}
// Checks that a block is valid if its timestamp is after past median time
node = newTestNode(dag, setFromSlice(tip),
node = newTestNode(dag, blockSetFromSlice(tip),
blockVersion,
dag.powMaxBits,
tip.PastMedianTime(dag).Add(time.Second))
@@ -542,7 +548,7 @@ func TestPastMedianTime(t *testing.T) {
}
// Checks that a block is invalid if its timestamp is before past median time
node = newTestNode(dag, setFromSlice(tip),
node = newTestNode(dag, blockSetFromSlice(tip),
blockVersion,
0,
tip.PastMedianTime(dag).Add(-time.Second))
@@ -555,24 +561,23 @@ func TestPastMedianTime(t *testing.T) {
}
func TestValidateParents(t *testing.T) {
dag := newTestDAG(&dagconfig.SimnetParams)
genesisNode := dag.genesis
blockVersion := int32(0x10000000)
blockTime := genesisNode.Header().Timestamp
generateNode := func(parents ...*blockNode) *blockNode {
// The timestamp of each block is changed to prevent a situation where two blocks share the same hash
blockTime = blockTime.Add(time.Second)
return newTestNode(dag, setFromSlice(parents...),
blockVersion,
0,
blockTime)
// Create a new database and dag instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestCheckBlockSanity", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
t.Errorf("Failed to setup dag instance: %v", err)
return
}
defer teardownFunc()
a := generateNode(genesisNode)
b := generateNode(a)
c := generateNode(genesisNode)
a := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.dagParams.GenesisBlock)
b := prepareAndProcessBlockByParentMsgBlocks(t, dag, a)
c := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.dagParams.GenesisBlock)
aNode := nodeByMsgBlock(t, dag, a)
bNode := nodeByMsgBlock(t, dag, b)
cNode := nodeByMsgBlock(t, dag, c)
fakeBlockHeader := &wire.BlockHeader{
HashMerkleRoot: &daghash.ZeroHash,
@@ -581,19 +586,19 @@ func TestValidateParents(t *testing.T) {
}
// Check direct parents relation
err := validateParents(fakeBlockHeader, setFromSlice(a, b))
err = dag.validateParents(fakeBlockHeader, blockSetFromSlice(aNode, bNode))
if err == nil {
t.Errorf("validateParents: `a` is a parent of `b`, so an error is expected")
}
// Check indirect parents relation
err = validateParents(fakeBlockHeader, setFromSlice(genesisNode, b))
err = dag.validateParents(fakeBlockHeader, blockSetFromSlice(dag.genesis, bNode))
if err == nil {
t.Errorf("validateParents: `genesis` and `b` are indirectly related, so an error is expected")
}
// Check parents with no relation
err = validateParents(fakeBlockHeader, setFromSlice(b, c))
err = dag.validateParents(fakeBlockHeader, blockSetFromSlice(bNode, cNode))
if err != nil {
t.Errorf("validateParents: unexpected error: %v", err)
}

View File

@@ -32,22 +32,13 @@ func newVirtualBlock(dag *BlockDAG, tips blockSet) *virtualBlock {
var virtual virtualBlock
virtual.dag = dag
virtual.utxoSet = NewFullUTXOSet()
virtual.selectedParentChainSet = newSet()
virtual.selectedParentChainSet = newBlockSet()
virtual.selectedParentChainSlice = nil
virtual.setTips(tips)
return &virtual
}
// clone creates and returns a clone of the virtual block.
func (v *virtualBlock) clone() *virtualBlock {
return &virtualBlock{
utxoSet: v.utxoSet,
blockNode: v.blockNode,
selectedParentChainSet: v.selectedParentChainSet,
}
}
// setTips replaces the tips of the virtual block with the blocks in the
// given blockSet. This only differs from the exported version in that it
// is up to the caller to ensure the lock is held.
@@ -122,8 +113,8 @@ func (v *virtualBlock) updateSelectedParentSet(oldSelectedParent *blockNode) *ch
// This function is safe for concurrent access.
func (v *virtualBlock) SetTips(tips blockSet) {
v.mtx.Lock()
defer v.mtx.Unlock()
v.setTips(tips)
v.mtx.Unlock()
}
// addTip adds the given tip to the set of tips in the virtual block.

View File

@@ -35,7 +35,7 @@ func TestVirtualBlock(t *testing.T) {
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimnetParams
params.K = 1
dag, teardownFunc, err := DAGSetup("TestVirtualBlock", Config{
dag, teardownFunc, err := DAGSetup("TestVirtualBlock", true, Config{
DAGParams: &params,
})
if err != nil {
@@ -52,12 +52,12 @@ func TestVirtualBlock(t *testing.T) {
// \ X
// <- 4 <- 6
node0 := dag.genesis
node1 := buildNode(t, dag, setFromSlice(node0))
node2 := buildNode(t, dag, setFromSlice(node1))
node3 := buildNode(t, dag, setFromSlice(node0))
node4 := buildNode(t, dag, setFromSlice(node0))
node5 := buildNode(t, dag, setFromSlice(node3, node4))
node6 := buildNode(t, dag, setFromSlice(node3, node4))
node1 := buildNode(t, dag, blockSetFromSlice(node0))
node2 := buildNode(t, dag, blockSetFromSlice(node1))
node3 := buildNode(t, dag, blockSetFromSlice(node0))
node4 := buildNode(t, dag, blockSetFromSlice(node0))
node5 := buildNode(t, dag, blockSetFromSlice(node3, node4))
node6 := buildNode(t, dag, blockSetFromSlice(node3, node4))
// Given an empty VirtualBlock, each of the following test cases will:
// Set its tips to tipsToSet
@@ -75,29 +75,29 @@ func TestVirtualBlock(t *testing.T) {
name: "empty virtual",
tipsToSet: []*blockNode{},
tipsToAdd: []*blockNode{},
expectedTips: newSet(),
expectedTips: newBlockSet(),
expectedSelectedParent: nil,
},
{
name: "virtual with genesis tip",
tipsToSet: []*blockNode{node0},
tipsToAdd: []*blockNode{},
expectedTips: setFromSlice(node0),
expectedTips: blockSetFromSlice(node0),
expectedSelectedParent: node0,
},
{
name: "virtual with genesis tip, add child of genesis",
tipsToSet: []*blockNode{node0},
tipsToAdd: []*blockNode{node1},
expectedTips: setFromSlice(node1),
expectedTips: blockSetFromSlice(node1),
expectedSelectedParent: node1,
},
{
name: "empty virtual, add a full DAG",
tipsToSet: []*blockNode{},
tipsToAdd: []*blockNode{node0, node1, node2, node3, node4, node5, node6},
expectedTips: setFromSlice(node2, node5, node6),
expectedSelectedParent: node5,
expectedTips: blockSetFromSlice(node2, node5, node6),
expectedSelectedParent: node6,
},
}
@@ -106,7 +106,7 @@ func TestVirtualBlock(t *testing.T) {
virtual := newVirtualBlock(dag, nil)
// Set the tips. This will be the initial state
virtual.SetTips(setFromSlice(test.tipsToSet...))
virtual.SetTips(blockSetFromSlice(test.tipsToSet...))
// Add all blockNodes in tipsToAdd in order
for _, tipToAdd := range test.tipsToAdd {
@@ -134,7 +134,7 @@ func TestSelectedPath(t *testing.T) {
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimnetParams
params.K = 1
dag, teardownFunc, err := DAGSetup("TestSelectedPath", Config{
dag, teardownFunc, err := DAGSetup("TestSelectedPath", true, Config{
DAGParams: &params,
})
if err != nil {
@@ -147,9 +147,9 @@ func TestSelectedPath(t *testing.T) {
tip := dag.genesis
virtual.AddTip(tip)
initialPath := setFromSlice(tip)
initialPath := blockSetFromSlice(tip)
for i := 0; i < 5; i++ {
tip = buildNode(t, dag, setFromSlice(tip))
tip = buildNode(t, dag, blockSetFromSlice(tip))
initialPath.add(tip)
virtual.AddTip(tip)
}
@@ -157,7 +157,7 @@ func TestSelectedPath(t *testing.T) {
firstPath := initialPath.clone()
for i := 0; i < 5; i++ {
tip = buildNode(t, dag, setFromSlice(tip))
tip = buildNode(t, dag, blockSetFromSlice(tip))
firstPath.add(tip)
virtual.AddTip(tip)
}
@@ -175,7 +175,7 @@ func TestSelectedPath(t *testing.T) {
secondPath := initialPath.clone()
tip = initialTip
for i := 0; i < 100; i++ {
tip = buildNode(t, dag, setFromSlice(tip))
tip = buildNode(t, dag, blockSetFromSlice(tip))
secondPath.add(tip)
virtual.AddTip(tip)
}
@@ -193,7 +193,7 @@ func TestSelectedPath(t *testing.T) {
tip = initialTip
for i := 0; i < 3; i++ {
tip = buildNode(t, dag, setFromSlice(tip))
tip = buildNode(t, dag, blockSetFromSlice(tip))
virtual.AddTip(tip)
}
// Because we added a very short chain, the selected path should not be affected.
@@ -215,14 +215,14 @@ func TestSelectedPath(t *testing.T) {
t.Fatalf("updateSelectedParentSet didn't panic")
}
}()
virtual2.updateSelectedParentSet(buildNode(t, dag, setFromSlice()))
virtual2.updateSelectedParentSet(buildNode(t, dag, blockSetFromSlice()))
}
func TestChainUpdates(t *testing.T) {
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimnetParams
params.K = 1
dag, teardownFunc, err := DAGSetup("TestChainUpdates", Config{
dag, teardownFunc, err := DAGSetup("TestChainUpdates", true, Config{
DAGParams: &params,
})
if err != nil {
@@ -236,23 +236,23 @@ func TestChainUpdates(t *testing.T) {
var toBeRemovedNodes []*blockNode
toBeRemovedTip := genesis
for i := 0; i < 5; i++ {
toBeRemovedTip = buildNode(t, dag, setFromSlice(toBeRemovedTip))
toBeRemovedTip = buildNode(t, dag, blockSetFromSlice(toBeRemovedTip))
toBeRemovedNodes = append(toBeRemovedNodes, toBeRemovedTip)
}
// Create a VirtualBlock with the toBeRemoved chain
virtual := newVirtualBlock(dag, setFromSlice(toBeRemovedNodes...))
virtual := newVirtualBlock(dag, blockSetFromSlice(toBeRemovedNodes...))
// Create a chain to be added
var toBeAddedNodes []*blockNode
toBeAddedTip := genesis
for i := 0; i < 8; i++ {
toBeAddedTip = buildNode(t, dag, setFromSlice(toBeAddedTip))
toBeAddedTip = buildNode(t, dag, blockSetFromSlice(toBeAddedTip))
toBeAddedNodes = append(toBeAddedNodes, toBeAddedTip)
}
// Set the virtual tip to be the tip of the toBeAdded chain
chainUpdates := virtual.setTips(setFromSlice(toBeAddedTip))
chainUpdates := virtual.setTips(blockSetFromSlice(toBeAddedTip))
// Make sure that the removed blocks are as expected (in reverse order)
if len(chainUpdates.removedChainBlockHashes) != len(toBeRemovedNodes) {

View File

@@ -6,58 +6,24 @@ package main
import (
"os"
"path/filepath"
"runtime"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/limits"
"github.com/kaspanet/kaspad/logs"
"github.com/kaspanet/kaspad/util/panics"
)
const (
// blockDbNamePrefix is the prefix for the kaspad block database.
blockDbNamePrefix = "blocks"
// blockDBNamePrefix is the prefix for the kaspad block database.
blockDBNamePrefix = "blocks"
)
var (
cfg *ConfigFlags
log logs.Logger
log *logs.Logger
spawn func(func())
)
// loadBlockDB opens the block database and returns a handle to it.
func loadBlockDB() (database.DB, error) {
// The database name is based on the database type.
dbName := blockDbNamePrefix + "_" + cfg.DbType
dbPath := filepath.Join(cfg.DataDir, dbName)
log.Infof("Loading block database from '%s'", dbPath)
db, err := database.Open(cfg.DbType, dbPath, ActiveConfig().NetParams().Net)
if err != nil {
// Return the error if it's not because the database doesn't
// exist.
if dbErr, ok := err.(database.Error); !ok || dbErr.ErrorCode !=
database.ErrDbDoesNotExist {
return nil, err
}
// Create the db if it does not exist.
err = os.MkdirAll(cfg.DataDir, 0700)
if err != nil {
return nil, err
}
db, err = database.Create(cfg.DbType, dbPath, ActiveConfig().NetParams().Net)
if err != nil {
return nil, err
}
}
log.Info("Block database loaded")
return db, nil
}
// realMain is the real main function for the utility. It is necessary to work
// around the fact that deferred functions do not run when os.Exit() is called.
func realMain() error {
@@ -74,14 +40,6 @@ func realMain() error {
log = backendLogger.Logger("MAIN")
spawn = panics.GoroutineWrapperFunc(log)
// Load the block database.
db, err := loadBlockDB()
if err != nil {
log.Errorf("Failed to load database: %s", err)
return err
}
defer db.Close()
fi, err := os.Open(cfg.InFile)
if err != nil {
log.Errorf("Failed to open file %s: %s", cfg.InFile, err)
@@ -92,7 +50,7 @@ func realMain() error {
// Create a block importer for the database and input file and start it.
// The done channel returned from start will contain an error if
// anything went wrong.
importer, err := newBlockImporter(db, fi)
importer, err := newBlockImporter(fi)
if err != nil {
log.Errorf("Failed create block importer: %s", err)
return err

View File

@@ -6,20 +6,15 @@ package main
import (
"fmt"
flags "github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
"os"
"path/filepath"
"strings"
flags "github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/database"
_ "github.com/kaspanet/kaspad/database/ffldb"
"github.com/kaspanet/kaspad/util"
)
const (
defaultDbType = "ffldb"
defaultDataFile = "bootstrap.dat"
defaultProgress = 10
)
@@ -27,7 +22,6 @@ const (
var (
kaspadHomeDir = util.AppDataDir("kaspad", false)
defaultDataDir = filepath.Join(kaspadHomeDir, "data")
knownDbTypes = database.SupportedDrivers()
activeConfig *ConfigFlags
)
@@ -40,12 +34,10 @@ func ActiveConfig() *ConfigFlags {
//
// See loadConfig for details on the configuration load process.
type ConfigFlags struct {
DataDir string `short:"b" long:"datadir" description:"Location of the kaspad data directory"`
DbType string `long:"dbtype" description:"Database backend to use for the Block DAG"`
InFile string `short:"i" long:"infile" description:"File containing the block(s)"`
TxIndex bool `long:"txindex" description:"Build a full hash-based transaction index which makes all transactions available via the getrawtransaction RPC"`
AddrIndex bool `long:"addrindex" description:"Build a full address-based transaction index which makes the searchrawtransactions RPC available"`
Progress int `short:"p" long:"progress" description:"Show a progress message each time this number of seconds have passed -- Use 0 to disable progress announcements"`
DataDir string `short:"b" long:"datadir" description:"Location of the kaspad data directory"`
InFile string `short:"i" long:"infile" description:"File containing the block(s)"`
Progress int `short:"p" long:"progress" description:"Show a progress message each time this number of seconds have passed -- Use 0 to disable progress announcements"`
AcceptanceIndex bool `long:"acceptanceindex" description:"Maintain a full hash-based acceptance index which makes the getChainFromBlock RPC available"`
config.NetworkFlags
}
@@ -59,23 +51,11 @@ func fileExists(name string) bool {
return true
}
// validDbType returns whether or not dbType is a supported database type.
func validDbType(dbType string) bool {
for _, knownType := range knownDbTypes {
if dbType == knownType {
return true
}
}
return false
}
// loadConfig initializes and parses the config using command line options.
func loadConfig() (*ConfigFlags, []string, error) {
// Default config.
activeConfig = &ConfigFlags{
DataDir: defaultDataDir,
DbType: defaultDbType,
InFile: defaultDataFile,
Progress: defaultProgress,
}
@@ -84,7 +64,8 @@ func loadConfig() (*ConfigFlags, []string, error) {
parser := flags.NewParser(&activeConfig, flags.Default)
remainingArgs, err := parser.Parse()
if err != nil {
if e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp {
var flagsErr *flags.Error
if ok := errors.As(err, &flagsErr); !ok || flagsErr.Type != flags.ErrHelp {
parser.WriteHelp(os.Stderr)
}
return nil, nil, err
@@ -95,16 +76,6 @@ func loadConfig() (*ConfigFlags, []string, error) {
return nil, nil, err
}
// Validate database type.
if !validDbType(activeConfig.DbType) {
str := "%s: The specified database type [%s] is invalid -- " +
"supported types %s"
err := errors.Errorf(str, "loadConfig", activeConfig.DbType, strings.Join(knownDbTypes, ", "))
fmt.Fprintln(os.Stderr, err)
parser.WriteHelp(os.Stderr)
return nil, nil, err
}
// Append the network type to the data directory so it is "namespaced"
// per network. In addition to the block database, there are other
// pieces of data that are saved to disk such as address manager state.

View File

@@ -6,14 +6,13 @@ package main
import (
"encoding/binary"
"github.com/kaspanet/kaspad/blockdag/indexers"
"github.com/pkg/errors"
"io"
"sync"
"time"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/blockdag/indexers"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
)
@@ -28,7 +27,6 @@ type importResults struct {
// blockImporter houses information about an ongoing import from a block data
// file to the block database.
type blockImporter struct {
db database.DB
dag *blockdag.BlockDAG
r io.ReadSeeker
processQueue chan []byte
@@ -101,14 +99,14 @@ func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
// Skip blocks that already exist.
blockHash := block.Hash()
if bi.dag.HaveBlock(blockHash) {
if bi.dag.IsKnownBlock(blockHash) {
return false, nil
}
// Don't bother trying to process orphans.
parentHashes := block.MsgBlock().Header.ParentHashes
if len(parentHashes) > 0 {
if !bi.dag.HaveBlocks(parentHashes) {
if !bi.dag.AreKnownBlocks(parentHashes) {
return false, errors.Errorf("import file contains block "+
"%v which does not link to the available "+
"block DAG", parentHashes)
@@ -287,29 +285,12 @@ func (bi *blockImporter) Import() chan *importResults {
// newBlockImporter returns a new importer for the provided file reader seeker
// and database.
func newBlockImporter(db database.DB, r io.ReadSeeker) (*blockImporter, error) {
// Create the transaction and address indexes if needed.
//
// CAUTION: the txindex needs to be first in the indexes array because
// the addrindex uses data from the txindex during catchup. If the
// addrindex is run first, it may not have the transactions from the
// current block indexed.
func newBlockImporter(r io.ReadSeeker) (*blockImporter, error) {
// Create the acceptance index if needed.
var indexes []indexers.Indexer
if cfg.TxIndex || cfg.AddrIndex {
// Enable transaction index if address index is enabled since it
// requires it.
if !cfg.TxIndex {
log.Infof("Transaction index enabled because it is " +
"required by the address index")
cfg.TxIndex = true
} else {
log.Info("Transaction index is enabled")
}
indexes = append(indexes, indexers.NewTxIndex())
}
if cfg.AddrIndex {
log.Info("Address index is enabled")
indexes = append(indexes, indexers.NewAddrIndex(ActiveConfig().NetParams()))
if cfg.AcceptanceIndex {
log.Info("Acceptance index is enabled")
indexes = append(indexes, indexers.NewAcceptanceIndex())
}
// Create an index manager if any of the optional indexes are enabled.
@@ -319,9 +300,8 @@ func newBlockImporter(db database.DB, r io.ReadSeeker) (*blockImporter, error) {
}
dag, err := blockdag.New(&blockdag.Config{
DB: db,
DAGParams: ActiveConfig().NetParams(),
TimeSource: blockdag.NewMedianTime(),
TimeSource: blockdag.NewTimeSource(),
IndexManager: indexManager,
})
if err != nil {
@@ -329,7 +309,6 @@ func newBlockImporter(db database.DB, r io.ReadSeeker) (*blockImporter, error) {
}
return &blockImporter{
db: db,
r: r,
processQueue: make(chan []byte, 2),
doneChan: make(chan bool),

View File

@@ -1,87 +0,0 @@
package main
import (
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/rpcclient"
"github.com/kaspanet/kaspad/rpcmodel"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/pkg/errors"
"time"
)
const (
getSubnetworkRetryDelay = 5 * time.Second
maxGetSubnetworkRetries = 12
)
func main() {
cfg, err := parseConfig()
if err != nil {
panic(errors.Errorf("error parsing command-line arguments: %s", err))
}
privateKey, addrPubKeyHash, err := decodeKeys(cfg)
if err != nil {
panic(errors.Errorf("error decoding public key: %s", err))
}
client, err := connect(cfg)
if err != nil {
panic(errors.Errorf("could not connect to RPC server: %s", err))
}
log.Infof("Connected to server %s", cfg.RPCServer)
fundingOutpoint, fundingTx, err := findUnspentTXO(cfg, client, addrPubKeyHash)
if err != nil {
panic(errors.Errorf("error finding unspent transactions: %s", err))
}
if fundingOutpoint == nil || fundingTx == nil {
panic(errors.Errorf("could not find any unspent transactions this for key"))
}
log.Infof("Found transaction to spend: %s:%d", fundingOutpoint.TxID, fundingOutpoint.Index)
registryTx, err := buildSubnetworkRegistryTx(cfg, fundingOutpoint, fundingTx, privateKey)
if err != nil {
panic(errors.Errorf("error building subnetwork registry tx: %s", err))
}
_, err = client.SendRawTransaction(registryTx, true)
if err != nil {
panic(errors.Errorf("failed sending subnetwork registry tx: %s", err))
}
log.Infof("Successfully sent subnetwork registry transaction")
subnetworkID, err := blockdag.TxToSubnetworkID(registryTx)
if err != nil {
panic(errors.Errorf("could not build subnetwork ID: %s", err))
}
err = waitForSubnetworkToBecomeAccepted(client, subnetworkID)
if err != nil {
panic(errors.Errorf("error waiting for subnetwork to become accepted: %s", err))
}
log.Infof("Subnetwork '%s' was successfully registered.", subnetworkID)
}
func waitForSubnetworkToBecomeAccepted(client *rpcclient.Client, subnetworkID *subnetworkid.SubnetworkID) error {
retries := 0
for {
_, err := client.GetSubnetwork(subnetworkID.String())
if err != nil {
if rpcError, ok := err.(*rpcmodel.RPCError); ok && rpcError.Code == rpcmodel.ErrRPCSubnetworkNotFound {
log.Infof("Subnetwork not found")
retries++
if retries == maxGetSubnetworkRetries {
return errors.Errorf("failed to get subnetwork %d times: %s", maxGetSubnetworkRetries, err)
}
log.Infof("Waiting %d seconds...", int(getSubnetworkRetryDelay.Seconds()))
<-time.After(getSubnetworkRetryDelay)
continue
}
return errors.Errorf("failed getting subnetwork: %s", err)
}
return nil
}
}

View File

@@ -1,71 +0,0 @@
package main
import (
"github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/config"
"github.com/pkg/errors"
)
var activeConfig *ConfigFlags
// ActiveConfig returns the active configuration struct
func ActiveConfig() *ConfigFlags {
return activeConfig
}
// ConfigFlags holds the configurations set by the command line argument
type ConfigFlags struct {
PrivateKey string `short:"k" long:"private-key" description:"Private key" required:"true"`
RPCUser string `short:"u" long:"rpcuser" description:"RPC username" required:"true"`
RPCPassword string `short:"P" long:"rpcpass" default-mask:"-" description:"RPC password" required:"true"`
RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to" required:"true"`
RPCCert string `short:"c" long:"rpccert" description:"RPC server certificate chain for validation"`
DisableTLS bool `long:"notls" description:"Disable TLS"`
GasLimit uint64 `long:"gaslimit" description:"The gas limit of the new subnetwork"`
RegistryTxFee uint64 `long:"regtxfee" description:"The fee for the subnetwork registry transaction"`
config.NetworkFlags
}
const (
defaultSubnetworkGasLimit = 1000
defaultRegistryTxFee = 3000
)
func parseConfig() (*ConfigFlags, error) {
activeConfig = &ConfigFlags{}
parser := flags.NewParser(activeConfig, flags.PrintErrors|flags.HelpFlag)
_, err := parser.Parse()
if err != nil {
return nil, err
}
if activeConfig.RPCCert == "" && !activeConfig.DisableTLS {
return nil, errors.New("--notls has to be disabled if --cert is used")
}
if activeConfig.RPCCert != "" && activeConfig.DisableTLS {
return nil, errors.New("--cert should be omitted if --notls is used")
}
err = activeConfig.ResolveNetwork(parser)
if err != nil {
return nil, err
}
if activeConfig.GasLimit < 0 {
return nil, errors.Errorf("gaslimit may not be smaller than 0")
}
if activeConfig.GasLimit == 0 {
activeConfig.GasLimit = defaultSubnetworkGasLimit
}
if activeConfig.RegistryTxFee < 0 {
return nil, errors.Errorf("regtxfee may not be smaller than 0")
}
if activeConfig.RegistryTxFee == 0 {
activeConfig.RegistryTxFee = defaultRegistryTxFee
}
return activeConfig, nil
}

View File

@@ -1,37 +0,0 @@
package main
import (
"github.com/kaspanet/kaspad/rpcclient"
"github.com/pkg/errors"
"io/ioutil"
)
func connect(cfg *ConfigFlags) (*rpcclient.Client, error) {
var cert []byte
if !cfg.DisableTLS {
var err error
cert, err = ioutil.ReadFile(cfg.RPCCert)
if err != nil {
return nil, errors.Errorf("error reading certificates file: %s", err)
}
}
connCfg := &rpcclient.ConnConfig{
Host: cfg.RPCServer,
Endpoint: "ws",
User: cfg.RPCUser,
Pass: cfg.RPCPassword,
DisableTLS: cfg.DisableTLS,
}
if !cfg.DisableTLS {
connCfg.Certificates = cert
}
client, err := rpcclient.New(connCfg, nil)
if err != nil {
return nil, errors.Errorf("error connecting to address %s: %s", cfg.RPCServer, err)
}
return client, nil
}

View File

@@ -1,19 +0,0 @@
package main
import (
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/base58"
)
func decodeKeys(cfg *ConfigFlags) (*ecc.PrivateKey, *util.AddressPubKeyHash, error) {
privateKeyBytes := base58.Decode(cfg.PrivateKey)
privateKey, _ := ecc.PrivKeyFromBytes(ecc.S256(), privateKeyBytes)
serializedPrivateKey := privateKey.PubKey().SerializeCompressed()
addr, err := util.NewAddressPubKeyHashFromPublicKey(serializedPrivateKey, ActiveConfig().NetParams().Prefix)
if err != nil {
return nil, nil, err
}
return privateKey, addr, nil
}

View File

@@ -1,10 +0,0 @@
package main
import (
"github.com/kaspanet/kaspad/logs"
)
var (
backendLog = logs.NewBackend()
log = backendLog.Logger("ASUB")
)

View File

@@ -1,29 +0,0 @@
package main
import (
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
)
func buildSubnetworkRegistryTx(cfg *ConfigFlags, fundingOutpoint *wire.Outpoint, fundingTx *wire.MsgTx, privateKey *ecc.PrivateKey) (*wire.MsgTx, error) {
txIn := &wire.TxIn{
PreviousOutpoint: *fundingOutpoint,
Sequence: wire.MaxTxInSequenceNum,
}
txOut := &wire.TxOut{
ScriptPubKey: fundingTx.TxOut[fundingOutpoint.Index].ScriptPubKey,
Value: fundingTx.TxOut[fundingOutpoint.Index].Value - cfg.RegistryTxFee,
}
registryTx := wire.NewRegistryMsgTx(1, []*wire.TxIn{txIn}, []*wire.TxOut{txOut}, cfg.GasLimit)
SignatureScript, err := txscript.SignatureScript(registryTx, 0, fundingTx.TxOut[fundingOutpoint.Index].ScriptPubKey,
txscript.SigHashAll, privateKey, true)
if err != nil {
return nil, errors.Errorf("failed to build signature script: %s", err)
}
txIn.SignatureScript = SignatureScript
return registryTx, nil
}

View File

@@ -1,112 +0,0 @@
package main
import (
"bytes"
"encoding/hex"
"github.com/kaspanet/kaspad/rpcclient"
"github.com/kaspanet/kaspad/rpcmodel"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
)
const (
resultsCount = 1000
minConfirmations = 10
)
func findUnspentTXO(cfg *ConfigFlags, client *rpcclient.Client, addrPubKeyHash *util.AddressPubKeyHash) (*wire.Outpoint, *wire.MsgTx, error) {
txs, err := collectTransactions(client, addrPubKeyHash)
if err != nil {
return nil, nil, err
}
utxos := buildUTXOs(txs)
for outpoint, tx := range utxos {
// Skip TXOs that can't pay for registration
if tx.TxOut[outpoint.Index].Value < cfg.RegistryTxFee {
continue
}
return &outpoint, tx, nil
}
return nil, nil, nil
}
func collectTransactions(client *rpcclient.Client, addrPubKeyHash *util.AddressPubKeyHash) ([]*wire.MsgTx, error) {
txs := make([]*wire.MsgTx, 0)
skip := 0
for {
results, err := client.SearchRawTransactionsVerbose(addrPubKeyHash, skip, resultsCount, true, false, nil)
if err != nil {
// Break when there are no further txs
if rpcError, ok := err.(*rpcmodel.RPCError); ok && rpcError.Code == rpcmodel.ErrRPCNoTxInfo {
break
}
return nil, err
}
for _, result := range results {
// Mempool transactions bring about unnecessary complexity, so
// simply don't bother processing them
if result.IsInMempool {
continue
}
tx, err := parseRawTransactionResult(result)
if err != nil {
return nil, errors.Errorf("failed to process SearchRawTransactionResult: %s", err)
}
if tx == nil {
continue
}
if !isTxMatured(tx, *result.Confirmations) {
continue
}
txs = append(txs, tx)
}
skip += resultsCount
}
return txs, nil
}
func parseRawTransactionResult(result *rpcmodel.SearchRawTransactionsResult) (*wire.MsgTx, error) {
txBytes, err := hex.DecodeString(result.Hex)
if err != nil {
return nil, errors.Errorf("failed to decode transaction bytes: %s", err)
}
var tx wire.MsgTx
reader := bytes.NewReader(txBytes)
err = tx.Deserialize(reader)
if err != nil {
return nil, errors.Errorf("failed to deserialize transaction: %s", err)
}
return &tx, nil
}
func isTxMatured(tx *wire.MsgTx, confirmations uint64) bool {
if !tx.IsCoinBase() {
return confirmations >= minConfirmations
}
return confirmations >= ActiveConfig().NetParams().BlockCoinbaseMaturity
}
func buildUTXOs(txs []*wire.MsgTx) map[wire.Outpoint]*wire.MsgTx {
utxos := make(map[wire.Outpoint]*wire.MsgTx)
for _, tx := range txs {
for i := range tx.TxOut {
outpoint := wire.NewOutpoint(tx.TxID(), uint32(i))
utxos[*outpoint] = tx
}
}
for _, tx := range txs {
for _, input := range tx.TxIn {
delete(utxos, input.PreviousOutpoint)
}
}
return utxos
}
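For example (hypothetical transactions), if tx2 in the slice spends tx1's only output, the first pass records the outputs of both transactions and the second pass deletes tx1's outpoint, so the returned map contains only tx2's outputs, i.e. only outputs that remain unspent within the given set.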

View File

@@ -6,6 +6,7 @@ package main
import (
"fmt"
"github.com/pkg/errors"
"io/ioutil"
"os"
"path/filepath"
@@ -32,7 +33,8 @@ func main() {
parser := flags.NewParser(&cfg, flags.Default)
_, err := parser.Parse()
if err != nil {
if e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp {
var flagsErr *flags.Error
if ok := errors.As(err, &flagsErr); !ok || flagsErr.Type != flags.ErrHelp {
parser.WriteHelp(os.Stderr)
}
return

View File

@@ -7,9 +7,9 @@ package main
import (
"fmt"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/version"
"github.com/pkg/errors"
"io/ioutil"
"net"
"os"
"path/filepath"
"regexp"
@@ -36,11 +36,6 @@ var (
activeConfig *ConfigFlags
)
// ActiveConfig returns the active configuration struct
func ActiveConfig() *ConfigFlags {
return activeConfig
}
// listCommands categorizes and lists all of the usable commands along with
// their one-line usage.
func listCommands() {
@@ -108,28 +103,6 @@ type ConfigFlags struct {
config.NetworkFlags
}
// normalizeAddress returns addr with the passed default port appended if
// there is not already a port specified.
func normalizeAddress(addr string, useTestnet, useSimnet, useDevnet bool) string {
_, _, err := net.SplitHostPort(addr)
if err != nil {
var defaultPort string
switch {
case useDevnet:
defaultPort = "16610"
case useTestnet:
defaultPort = "16210"
case useSimnet:
defaultPort = "16510"
default:
defaultPort = "16110"
}
return net.JoinHostPort(addr, defaultPort)
}
return addr
}
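For reference, the removed helper only appended the network's default port when the address had none; with the values in the switch above, normalizeAddress("127.0.0.1", false, false, false) would return "127.0.0.1:16110", normalizeAddress("127.0.0.1", true, false, false) would return "127.0.0.1:16210", and an address that already includes a port is returned unchanged.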
// cleanAndExpandPath expands environment variables and leading ~ in the
// passed path, cleans the result, and returns it.
func cleanAndExpandPath(path string) string {
@@ -172,7 +145,8 @@ func loadConfig() (*ConfigFlags, []string, error) {
preParser := flags.NewParser(preCfg, flags.HelpFlag)
_, err := preParser.Parse()
if err != nil {
if e, ok := err.(*flags.Error); ok && e.Type == flags.ErrHelp {
var flagsErr *flags.Error
if ok := errors.As(err, &flagsErr); ok && flagsErr.Type == flags.ErrHelp {
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, "")
fmt.Fprintln(os.Stderr, "The special parameter `-` "+
@@ -188,7 +162,7 @@ func loadConfig() (*ConfigFlags, []string, error) {
appName = strings.TrimSuffix(appName, filepath.Ext(appName))
usageMessage := fmt.Sprintf("Use %s -h to show options", appName)
if preCfg.ShowVersion {
fmt.Println(appName, "version", version())
fmt.Println(appName, "version", version.Version())
os.Exit(0)
}
@@ -216,7 +190,7 @@ func loadConfig() (*ConfigFlags, []string, error) {
parser := flags.NewParser(activeConfig, flags.Default)
err = flags.NewIniParser(parser).ParseFile(preCfg.ConfigFile)
if err != nil {
if _, ok := err.(*os.PathError); !ok {
if pErr := &(os.PathError{}); !errors.As(err, &pErr) {
fmt.Fprintf(os.Stderr, "Error parsing config file: %s\n",
err)
fmt.Fprintln(os.Stderr, usageMessage)
@@ -227,7 +201,8 @@ func loadConfig() (*ConfigFlags, []string, error) {
// Parse command line options again to ensure they take precedence.
remainingArgs, err := parser.Parse()
if err != nil {
if e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp {
var flagsErr *flags.Error
if ok := errors.As(err, &flagsErr); !ok || flagsErr.Type != flags.ErrHelp {
fmt.Fprintln(os.Stderr, usageMessage)
}
return nil, nil, err
@@ -242,8 +217,10 @@ func loadConfig() (*ConfigFlags, []string, error) {
// Add default port to RPC server based on --testnet and --simnet flags
// if needed.
activeConfig.RPCServer = normalizeAddress(activeConfig.RPCServer, activeConfig.Testnet,
activeConfig.Simnet, activeConfig.Devnet)
activeConfig.RPCServer, err = activeConfig.NetParams().NormalizeRPCServerAddress(activeConfig.RPCServer)
if err != nil {
return nil, nil, err
}
return activeConfig, remainingArgs, nil
}

View File

@@ -95,11 +95,12 @@ func sendPostRequest(marshalledJSON []byte, cfg *ConfigFlags) ([]byte, error) {
}
// Read the raw bytes and close the response.
respBytes, err := ioutil.ReadAll(httpResponse.Body)
httpResponse.Body.Close()
respBytes, err := func() ([]byte, error) {
defer httpResponse.Body.Close()
return ioutil.ReadAll(httpResponse.Body)
}()
if err != nil {
err = errors.Errorf("error reading json reply: %s", err)
return nil, err
return nil, errors.Wrap(err, "error reading json reply")
}
// Handle unsuccessful HTTP responses

View File

@@ -5,6 +5,7 @@ import (
"bytes"
"encoding/json"
"fmt"
"github.com/pkg/errors"
"io"
"os"
"path/filepath"
@@ -110,9 +111,10 @@ func main() {
// rpcmodel.Error as it realistically will always be since the
// NewCommand function is only supposed to return errors of that
// type.
if jerr, ok := err.(rpcmodel.Error); ok {
var rpcModelErr rpcmodel.Error
if ok := errors.As(err, &rpcModelErr); ok {
fmt.Fprintf(os.Stderr, "%s error: %s (command code: %s)\n",
method, err, jerr.ErrorCode)
method, err, rpcModelErr.ErrorCode)
commandUsage(method)
os.Exit(1)
}

View File

@@ -1,75 +0,0 @@
// Copyright (c) 2013 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"fmt"
"strings"
)
// semanticAlphabet
const semanticAlphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-"
// These constants define the application version and follow the semantic
// versioning 2.0.0 spec (http://semver.org/).
const (
appMajor uint = 0
appMinor uint = 12
appPatch uint = 0
// appPreRelease MUST only contain characters from semanticAlphabet
// per the semantic versioning spec.
appPreRelease = "beta"
)
// appBuild is defined as a variable so it can be overridden during the build
// process with '-ldflags "-X main.appBuild foo"' if needed. It MUST only
// contain characters from semanticAlphabet per the semantic versioning spec.
var appBuild string
// version returns the application version as a properly formed string per the
// semantic versioning 2.0.0 spec (http://semver.org/).
func version() string {
// Start with the major, minor, and patch versions.
version := fmt.Sprintf("%d.%d.%d", appMajor, appMinor, appPatch)
// Append pre-release version if there is one. The hyphen called for
// by the semantic versioning spec is automatically appended and should
// not be contained in the pre-release string. The pre-release version
// is not appended if it contains invalid characters.
preRelease := normalizeVerString(appPreRelease)
if preRelease != "" {
version = fmt.Sprintf("%s-%s", version, preRelease)
}
// Append build metadata if there is any. The plus called for
// by the semantic versioning spec is automatically appended and should
// not be contained in the build metadata string. The build metadata
// string is not appended if it contains invalid characters.
build := normalizeVerString(appBuild)
if build != "" {
version = fmt.Sprintf("%s+%s", version, build)
}
return version
}
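Concretely, with the constants above and appBuild left empty, this removed helper produces "0.12.0-beta"; building with a hypothetical -ldflags "-X main.appBuild=1234" would produce "0.12.0-beta+1234".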
// normalizeVerString returns the passed string stripped of all characters which
// are not valid according to the semantic versioning guidelines for pre-release
// version and build metadata strings. In particular they MUST only contain
// characters in semanticAlphabet.
func normalizeVerString(str string) string {
var result bytes.Buffer
for _, r := range str {
if strings.ContainsRune(semanticAlphabet, r) {
// Ignoring the error here since it can only fail if
// the system is out of memory and there are much
// bigger issues at that point.
_, _ = result.WriteRune(r)
}
}
return result.String()
}

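The deleted version.go above filtered pre-release and build-metadata strings against semanticAlphabet before assembling the version string. A compact stdlib sketch of that normalization, with illustrative version numbers:

package main

import (
	"fmt"
	"strings"
)

const semanticAlphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-"

// normalizeVerString drops every rune that the semantic versioning spec does
// not allow in pre-release and build-metadata strings.
func normalizeVerString(str string) string {
	var result strings.Builder
	for _, r := range str {
		if strings.ContainsRune(semanticAlphabet, r) {
			result.WriteRune(r)
		}
	}
	return result.String()
}

func main() {
	version := fmt.Sprintf("%d.%d.%d", 0, 12, 0)
	if preRelease := normalizeVerString("beta!"); preRelease != "" {
		version = fmt.Sprintf("%s-%s", version, preRelease)
	}
	fmt.Println(version) // 0.12.0-beta
}
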
View File

@@ -6,7 +6,6 @@ import (
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
"io/ioutil"
"net"
"time"
)
@@ -43,8 +42,13 @@ func connectToServer(cfg *configFlags) (*minerClient, error) {
return nil, err
}
rpcAddr, err := cfg.NetParams().NormalizeRPCServerAddress(cfg.RPCServer)
if err != nil {
return nil, err
}
connCfg := &rpcclient.ConnConfig{
Host: normalizeRPCServerAddress(cfg.RPCServer, cfg),
Host: rpcAddr,
Endpoint: "ws",
User: cfg.RPCUser,
Pass: cfg.RPCPassword,
@@ -63,16 +67,6 @@ func connectToServer(cfg *configFlags) (*minerClient, error) {
return client, nil
}
// normalizeRPCServerAddress returns addr with the current network default
// port appended if there is not already a port specified.
func normalizeRPCServerAddress(addr string, cfg *configFlags) string {
_, _, err := net.SplitHostPort(addr)
if err != nil {
return net.JoinHostPort(addr, cfg.NetParams().RPCPort)
}
return addr
}
func readCert(cfg *configFlags) ([]byte, error) {
if cfg.DisableTLS {
return nil, nil

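The removed normalizeRPCServerAddress helper is superseded by the shared NetParams().NormalizeRPCServerAddress. A sketch of the underlying stdlib idiom the helper used; the default port below is illustrative, not a real network value:

package main

import (
	"fmt"
	"net"
)

// normalizeServerAddress appends defaultPort to addr when addr has no port.
// net.SplitHostPort fails for a bare host, which is the signal to append one.
func normalizeServerAddress(addr, defaultPort string) string {
	if _, _, err := net.SplitHostPort(addr); err != nil {
		return net.JoinHostPort(addr, defaultPort)
	}
	return addr
}

func main() {
	fmt.Println(normalizeServerAddress("localhost", "18110"))      // localhost:18110
	fmt.Println(normalizeServerAddress("localhost:1234", "18110")) // localhost:1234
}
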
View File

@@ -2,16 +2,18 @@ package main
import (
"fmt"
"github.com/kaspanet/kaspad/config"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
"github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/cmd/kaspaminer/version"
"github.com/kaspanet/kaspad/version"
)
const (
@@ -28,15 +30,17 @@ var (
)
type configFlags struct {
ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"`
RPCUser string `short:"u" long:"rpcuser" description:"RPC username"`
RPCPassword string `short:"P" long:"rpcpass" default-mask:"-" description:"RPC password"`
RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
RPCCert string `short:"c" long:"rpccert" description:"RPC server certificate chain for validation"`
DisableTLS bool `long:"notls" description:"Disable TLS"`
Verbose bool `long:"verbose" short:"v" description:"Enable logging of RPC requests"`
NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine. If omitted, will mine until the process is interrupted."`
BlockDelay uint64 `long:"block-delay" description:"Delay for block submission (in milliseconds). This is used only for testing purposes."`
ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"`
RPCUser string `short:"u" long:"rpcuser" description:"RPC username"`
RPCPassword string `short:"P" long:"rpcpass" default-mask:"-" description:"RPC password"`
RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
RPCCert string `short:"c" long:"rpccert" description:"RPC server certificate chain for validation"`
DisableTLS bool `long:"notls" description:"Disable TLS"`
Verbose bool `long:"verbose" short:"v" description:"Enable logging of RPC requests"`
NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine. If omitted, will mine until the process is interrupted."`
BlockDelay uint64 `long:"block-delay" description:"Delay for block submission (in milliseconds). This is used only for testing purposes."`
MineWhenNotSynced bool `long:"mine-when-not-synced" description:"Mine even if the node is not synced with the rest of the network."`
Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
config.NetworkFlags
}
@@ -72,12 +76,19 @@ func parseConfig() (*configFlags, error) {
}
if cfg.RPCCert == "" && !cfg.DisableTLS {
return nil, errors.New("--notls has to be disabled if --cert is used")
return nil, errors.New("either --notls or --rpccert must be specified")
}
if cfg.RPCCert != "" && cfg.DisableTLS {
return nil, errors.New("--rpccert should be omitted if --notls is used")
}
if cfg.Profile != "" {
profilePort, err := strconv.Atoi(cfg.Profile)
if err != nil || profilePort < 1024 || profilePort > 65535 {
return nil, errors.New("The profile port must be between 1024 and 65535")
}
}
initLog(defaultLogFile, defaultErrLogFile)
return cfg, nil

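The new --profile validation above parses the flag and bounds-checks it before the profiling server may start. A minimal sketch of the same check (names are illustrative):

package main

import (
	"fmt"
	"strconv"

	"github.com/pkg/errors"
)

// validateProfilePort mirrors the bounds check added above: the profiling
// HTTP server may only listen on a non-privileged port.
func validateProfilePort(profile string) error {
	if profile == "" {
		return nil // profiling disabled
	}
	profilePort, err := strconv.Atoi(profile)
	if err != nil || profilePort < 1024 || profilePort > 65535 {
		return errors.New("the profile port must be between 1024 and 65535")
	}
	return nil
}

func main() {
	for _, p := range []string{"", "6060", "80", "not-a-port"} {
		fmt.Printf("%q: %v\n", p, validateProfilePort(p))
	}
}
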
View File

@@ -1,5 +1,5 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.13-alpine AS build
FROM golang:1.14-alpine AS build
RUN mkdir -p /go/src/github.com/kaspanet/kaspad
@@ -20,7 +20,7 @@ WORKDIR /go/src/github.com/kaspanet/kaspad/cmd/kaspaminer
RUN GOFMT_RESULT=`go fmt ./...`; echo $GOFMT_RESULT; test -z "$GOFMT_RESULT"
RUN go vet ./...
RUN golint -set_exit_status ./...
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o kaspaminer .
RUN GOOS=linux go build -a -installsuffix cgo -o kaspaminer .
# --- multistage docker build: stage #2: runtime image
FROM alpine

View File

@@ -28,7 +28,5 @@ func initLog(logFile, errLogFile string) {
}
func enableRPCLogging() {
rpclog := backendLog.Logger("RPCC")
rpclog.SetLevel(logs.LevelTrace)
rpcclient.UseLogger(rpclog)
rpcclient.UseLogger(backendLog, logs.LevelTrace)
}

View File

@@ -4,15 +4,19 @@ import (
"fmt"
"os"
"github.com/kaspanet/kaspad/version"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/cmd/kaspaminer/version"
_ "net/http/pprof"
"github.com/kaspanet/kaspad/signal"
"github.com/kaspanet/kaspad/util/panics"
"github.com/kaspanet/kaspad/util/profiling"
)
func main() {
defer panics.HandlePanic(log, nil, nil)
defer panics.HandlePanic(log, nil)
interrupt := signal.InterruptListener()
cfg, err := parseConfig()
@@ -28,6 +32,11 @@ func main() {
enableRPCLogging()
}
// Enable http profiling server if requested.
if cfg.Profile != "" {
profiling.Start(cfg.Profile, log)
}
client, err := connectToServer(cfg)
if err != nil {
panic(errors.Wrap(err, "Error connecting to the RPC server"))
@@ -36,7 +45,7 @@ func main() {
doneChan := make(chan struct{})
spawn(func() {
err = mineLoop(client, cfg.NumberOfBlocks, cfg.BlockDelay)
err = mineLoop(client, cfg.NumberOfBlocks, cfg.BlockDelay, cfg.MineWhenNotSynced)
if err != nil {
panic(errors.Errorf("Error in mine loop: %s", err))
}

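main now calls profiling.Start(cfg.Profile, log) when --profile is set. The sketch below shows the standard net/http/pprof pattern such a helper typically wraps; it is an assumption about the helper's behaviour, not the repository's implementation:

package main

import (
	"log"
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof handlers on the default mux
)

// startProfilingServer exposes the pprof endpoints on the given port.
// Hypothetical helper; kaspad's profiling.Start may differ in detail.
func startProfilingServer(port string) {
	go func() {
		addr := "localhost:" + port
		log.Printf("profiling server listening on %s", addr)
		if err := http.ListenAndServe(addr, nil); err != nil {
			log.Printf("profiling server error: %v", err)
		}
	}()
}

func main() {
	startProfilingServer("6060")
	select {} // block forever so the example keeps serving
}
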
View File

@@ -7,6 +7,7 @@ import (
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/kaspanet/kaspad/rpcclient"
@@ -20,8 +21,11 @@ import (
)
var random = rand.New(rand.NewSource(time.Now().UnixNano()))
var hashesTried uint64
func mineLoop(client *minerClient, numberOfBlocks uint64, blockDelay uint64) error {
const logHashRateInterval = 10 * time.Second
func mineLoop(client *minerClient, numberOfBlocks uint64, blockDelay uint64, mineWhenNotSynced bool) error {
errChan := make(chan error)
templateStopChan := make(chan struct{})
@@ -31,7 +35,7 @@ func mineLoop(client *minerClient, numberOfBlocks uint64, blockDelay uint64) err
wg := sync.WaitGroup{}
for i := uint64(0); numberOfBlocks == 0 || i < numberOfBlocks; i++ {
foundBlock := make(chan *util.Block)
mineNextBlock(client, foundBlock, templateStopChan, errChan)
mineNextBlock(client, foundBlock, mineWhenNotSynced, templateStopChan, errChan)
block := <-foundBlock
templateStopChan <- struct{}{}
wg.Add(1)
@@ -50,6 +54,8 @@ func mineLoop(client *minerClient, numberOfBlocks uint64, blockDelay uint64) err
doneChan <- struct{}{}
})
logHashRate()
select {
case err := <-errChan:
return err
@@ -58,10 +64,32 @@ func mineLoop(client *minerClient, numberOfBlocks uint64, blockDelay uint64) err
}
}
func mineNextBlock(client *minerClient, foundBlock chan *util.Block, templateStopChan chan struct{}, errChan chan error) {
func logHashRate() {
spawn(func() {
lastCheck := time.Now()
for range time.Tick(logHashRateInterval) {
currentHashesTried := hashesTried
currentTime := time.Now()
kiloHashesTried := float64(currentHashesTried) / 1000.0
hashRate := kiloHashesTried / currentTime.Sub(lastCheck).Seconds()
log.Infof("Current hash rate is %.2f Khash/s", hashRate)
lastCheck = currentTime
// subtract from hashesTried the hashes we already sampled
atomic.AddUint64(&hashesTried, -currentHashesTried)
}
})
}
func mineNextBlock(client *minerClient, foundBlock chan *util.Block, mineWhenNotSynced bool,
templateStopChan chan struct{}, errChan chan error) {
newTemplateChan := make(chan *rpcmodel.GetBlockTemplateResult)
go templatesLoop(client, newTemplateChan, errChan, templateStopChan)
go solveLoop(newTemplateChan, foundBlock, errChan)
spawn(func() {
templatesLoop(client, newTemplateChan, errChan, templateStopChan)
})
spawn(func() {
solveLoop(newTemplateChan, foundBlock, mineWhenNotSynced, errChan)
})
}
func handleFoundBlock(client *minerClient, block *util.Block) error {
@@ -131,6 +159,7 @@ func solveBlock(block *util.Block, stopChan chan struct{}, foundBlock chan *util
default:
msgBlock.Header.Nonce = i
hash := msgBlock.BlockHash()
atomic.AddUint64(&hashesTried, 1)
if daghash.HashToBig(hash).Cmp(targetDifficulty) <= 0 {
foundBlock <- block
return
@@ -180,12 +209,23 @@ func getBlockTemplate(client *minerClient, longPollID string) (*rpcmodel.GetBloc
return client.GetBlockTemplate([]string{"coinbasetxn"}, longPollID)
}
func solveLoop(newTemplateChan chan *rpcmodel.GetBlockTemplateResult, foundBlock chan *util.Block, errChan chan error) {
func solveLoop(newTemplateChan chan *rpcmodel.GetBlockTemplateResult, foundBlock chan *util.Block,
mineWhenNotSynced bool, errChan chan error) {
var stopOldTemplateSolving chan struct{}
for template := range newTemplateChan {
if stopOldTemplateSolving != nil {
close(stopOldTemplateSolving)
}
if !template.IsSynced {
if !mineWhenNotSynced {
errChan <- errors.Errorf("got template with isSynced=false")
return
}
log.Warnf("Got template with isSynced=false")
}
stopOldTemplateSolving = make(chan struct{})
block, err := parseBlock(template)
if err != nil {
@@ -193,7 +233,9 @@ func solveLoop(newTemplateChan chan *rpcmodel.GetBlockTemplateResult, foundBlock
return
}
go solveBlock(block, stopOldTemplateSolving, foundBlock)
spawn(func() {
solveBlock(block, stopOldTemplateSolving, foundBlock)
})
}
if stopOldTemplateSolving != nil {
close(stopOldTemplateSolving)

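The mining loop now counts attempts in a shared uint64 and logs a rate every logHashRateInterval, subtracting the sampled count so each interval starts from zero. A compact, self-contained sketch of the same bookkeeping; the busy loop below stands in for solveBlock and the interval is shortened for illustration:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

var hashesTried uint64

const logHashRateInterval = time.Second

// logHashRate samples the shared counter on a ticker and resets it by
// subtracting what was sampled, as the diff above does.
func logHashRate() {
	go func() {
		lastCheck := time.Now()
		for range time.Tick(logHashRateInterval) {
			currentHashesTried := atomic.LoadUint64(&hashesTried)
			now := time.Now()
			kiloHashes := float64(currentHashesTried) / 1000.0
			fmt.Printf("Current hash rate is %.2f Khash/s\n", kiloHashes/now.Sub(lastCheck).Seconds())
			lastCheck = now
			// -currentHashesTried wraps around for uint64, which is the
			// idiomatic way to subtract from an atomic unsigned counter.
			atomic.AddUint64(&hashesTried, -currentHashesTried)
		}
	}()
}

func main() {
	logHashRate()
	deadline := time.Now().Add(3 * time.Second)
	for time.Now().Before(deadline) {
		atomic.AddUint64(&hashesTried, 1) // simulated hash attempt
	}
}
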
View File

@@ -1,50 +0,0 @@
package version
import (
"fmt"
"strings"
)
// validCharacters is a list of characters valid in the appBuild string
const validCharacters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-"
const (
appMajor uint = 0
appMinor uint = 1
appPatch uint = 0
)
// appBuild is defined as a variable so it can be overridden during the build
// process with '-ldflags "-X github.com/kaspanet/kaspad/cmd/kaspaminer/version.appBuild=foo"' if needed.
// It MUST only contain characters from validCharacters.
var appBuild string
var version = "" // string used for memoization of version
// Version returns the application version as a properly formed string
func Version() string {
if version == "" {
// Start with the major, minor, and patch versions.
version = fmt.Sprintf("%d.%d.%d", appMajor, appMinor, appPatch)
// Append build metadata if there is any.
// Panic if any invalid characters are encountered
if appBuild != "" {
checkAppBuild(appBuild)
version = fmt.Sprintf("%s-%s", version, appBuild)
}
}
return version
}
// checkAppBuild verifies that appBuild does not contain any characters outside of validCharacters.
// In case of any invalid characters, checkAppBuild panics
func checkAppBuild(appBuild string) {
for _, r := range appBuild {
if !strings.ContainsRune(validCharacters, r) {
panic(fmt.Errorf("appBuild string (%s) contains forbidden characters. Only alphanumeric characters and dashes are allowed", appBuild))
}
}
}

View File

@@ -4,10 +4,11 @@ import (
"bytes"
"encoding/hex"
"fmt"
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
"os"
)
@@ -27,7 +28,11 @@ func main() {
printErrorAndExit(err, "Failed to decode transaction")
}
scriptPubKey, err := createScriptPubKey(privateKey.PubKey())
pubkey, err := privateKey.SchnorrPublicKey()
if err != nil {
printErrorAndExit(err, "Failed to generate a public key")
}
scriptPubKey, err := createScriptPubKey(pubkey)
if err != nil {
printErrorAndExit(err, "Failed to create scriptPubKey")
}
@@ -45,26 +50,38 @@ func main() {
fmt.Printf("Signed Transaction (hex): %s\n\n", serializedTransaction)
}
func parsePrivateKey(privateKeyHex string) (*ecc.PrivateKey, error) {
func parsePrivateKey(privateKeyHex string) (*secp256k1.PrivateKey, error) {
privateKeyBytes, err := hex.DecodeString(privateKeyHex)
privateKey, _ := ecc.PrivKeyFromBytes(ecc.S256(), privateKeyBytes)
return privateKey, err
if err != nil {
return nil, errors.Errorf("'%s' isn't a valid hex. err: '%s' ", privateKeyHex, err)
}
return secp256k1.DeserializePrivateKeyFromSlice(privateKeyBytes)
}
func parseTransaction(transactionHex string) (*wire.MsgTx, error) {
serializedTx, err := hex.DecodeString(transactionHex)
if err != nil {
return nil, errors.Wrap(err, "couldn't decode transaction hex")
}
var transaction wire.MsgTx
err = transaction.Deserialize(bytes.NewReader(serializedTx))
return &transaction, err
}
func createScriptPubKey(publicKey *ecc.PublicKey) ([]byte, error) {
p2pkhAddress, err := util.NewAddressPubKeyHashFromPublicKey(publicKey.SerializeCompressed(), ActiveConfig().NetParams().Prefix)
func createScriptPubKey(publicKey *secp256k1.SchnorrPublicKey) ([]byte, error) {
serializedKey, err := publicKey.SerializeCompressed()
if err != nil {
return nil, err
}
p2pkhAddress, err := util.NewAddressPubKeyHashFromPublicKey(serializedKey, ActiveConfig().NetParams().Prefix)
if err != nil {
return nil, err
}
scriptPubKey, err := txscript.PayToAddrScript(p2pkhAddress)
return scriptPubKey, err
}
func signTransaction(transaction *wire.MsgTx, privateKey *ecc.PrivateKey, scriptPubKey []byte) error {
func signTransaction(transaction *wire.MsgTx, privateKey *secp256k1.PrivateKey, scriptPubKey []byte) error {
for i, transactionInput := range transaction.TxIn {
signatureScript, err := txscript.SignatureScript(transaction, i, scriptPubKey, txscript.SigHashAll, privateKey, true)
if err != nil {

Some files were not shown because too many files have changed in this diff.