Compare commits

...

72 Commits

Author SHA1 Message Date
Svarog
1e6458973b [NOD-1064] Don't send GetBlockInvsMsg with lowHash = nil (#769) 2020-06-21 09:09:07 +03:00
stasatdaglabs
03cb6cbd4d [NOD-1048] Use a smaller writeBuffer and use disableSeeksCompaction directly. (#759) 2020-06-11 16:11:22 +03:00
stasatdaglabs
3e5a840c5a [NOD-1052] Add a lock around clearOldEntries to protect against concurrent access of utxoDiffStore.loaded. (#758) 2020-06-11 11:56:25 +03:00
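The race this commit closes is easiest to see in miniature. A minimal sketch with stand-in types (kaspad's real utxoDiffStore is richer than this): clearOldEntries mutates the loaded set, so it must hold the same lock that readers of the set take.

```go
package main

import "sync"

// Stand-in types for illustration; not kaspad's actual definitions.
type utxoDiffStore struct {
	mtx    sync.RWMutex
	loaded map[string]struct{} // keyed by block, simplified here
}

func (s *utxoDiffStore) loadedHas(key string) bool {
	s.mtx.RLock()
	defer s.mtx.RUnlock()
	_, ok := s.loaded[key]
	return ok
}

func (s *utxoDiffStore) clearOldEntries(stale []string) {
	s.mtx.Lock() // write lock: excludes concurrent loadedHas readers
	defer s.mtx.Unlock()
	for _, key := range stale {
		delete(s.loaded, key)
	}
}

func main() {}
```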
Ori Newman
d6d34238d2 [NOD-1049] Allow empty addr messages (#753) 2020-06-10 16:13:13 +03:00
Ori Newman
8bbced5925 [NOD-1051] Don't disconnect from sync peer if it sends an orphan (#757) 2020-06-10 16:05:48 +03:00
stasatdaglabs
20da1b9c9a [NOD-1048] Make leveldb compaction much less frequent (#756)
* [NOD-1048] Make leveldb compaction much less frequent. Also, allocate an entire gigabyte for leveldb's blockCache and writeBuffer.

* [NOD-1048] Implement changing the options for testing purposes.

* [NOD-1048] Rename originalOptions to originalLDBOptions.

* [NOD-1048] Add a comment.
2020-06-10 16:05:02 +03:00
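For reference, a sketch of tuning goleveldb in the direction these commits describe. The values below are illustrative, not the ones kaspad settled on: #756 allocated a gigabyte to leveldb's blockCache and writeBuffer, and #759 above later shrank the writeBuffer and set disableSeeksCompaction directly.

```go
package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	options := &opt.Options{
		BlockCacheCapacity:     512 * opt.MiB, // large read cache reduces read-triggered compactions
		WriteBuffer:            64 * opt.MiB,  // bigger memtable means fewer flushes and compactions
		DisableSeeksCompaction: true,          // don't schedule compactions based on seek statistics
	}
	db, err := leveldb.OpenFile("/tmp/example-ldb", options)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```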
stasatdaglabs
222477b33e [NOD-1040] Don't remove DAG tips from the diffStore's loaded set (#750)
* [NOD-1040] Don't remove DAG tips from the diffStore's loaded set

* [NOD-1040] Remove a debug log.
2020-06-08 12:14:58 +03:00
Mike Zak
4a50d94633 Update to v0.4.1 2020-06-07 17:54:30 +03:00
stasatdaglabs
b4dba782fb [NOD-1040] Increase maxBlueScoreDifferenceToKeepLoaded to 1500 (#746)
* [NOD-1040] Don't remove DAG tips from the diffStore's loaded set

* [NOD-1040] Fix TestClearOldEntries.

* Revert "[NOD-1040] Fix TestClearOldEntries."

This reverts commit e0705814

* Revert "[NOD-1040] Don't remove DAG tips from the diffStore's loaded set"

This reverts commit d3eba1c1

* [NOD-1040] Increase maxBlueScoreDifferenceToKeepLoaded to 1500.
2020-06-07 17:50:57 +03:00
stasatdaglabs
9c78a797e4 [NOD-1041] Call outboundPeerConnected and outboundPeerConnectionFailed directly instead of routing them through peerHandler (#748)
* [NOD-1041] Fix a deadlock between connHandler and peerHandler.

* [NOD-1041] Simplified the fix.
2020-06-07 16:35:48 +03:00
Ori Newman
35c733a4c1 [NOD-970] Add isSyncing flag (#747)
* [NOD-970] Add isSyncing flag

* [NOD-970] Rename shouldSendSelectedTip->peerShouldSendSelectedTip
2020-06-07 16:31:17 +03:00
stasatdaglabs
96930bd6ea [NOD-1039] Remove the call to SetGCPercent. (#745) 2020-06-07 09:19:28 +03:00
stasatdaglabs
d15c009b3c [NOD-1030] Disconnect from syncPeers that send orphan blocks (#744)
* [NOD-1030] Disconnect from syncPeers that send orphan blocks.

* [NOD-1030] Remove debug log.

* [NOD-1030] Remove unnecessary call to stopSyncFromPeer.
2020-06-04 15:11:05 +03:00
Ori Newman
6219b93430 [NOD-1018] Exit after 2 minutes if graceful shutdown fails (#732)
* [NOD-1018] Exit after 2 minutes if graceful shutdown fails

* [NOD-1018] Change time.Tick to time.After
2020-05-25 14:30:43 +03:00
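The time.Tick-to-time.After change in the second bullet matters because the watchdog only needs to fire once, and an abandoned time.Tick leaks its underlying ticker. A sketch of the pattern, assuming a simple done-channel shutdown:

```go
package main

import (
	"fmt"
	"os"
	"time"
)

func main() {
	done := make(chan struct{})
	go func() {
		// ...stop subsystems, flush state, then signal completion:
		close(done)
	}()

	// Watchdog: if graceful shutdown doesn't finish within 2 minutes,
	// exit forcibly. time.After fires exactly once, which is all we need.
	go func() {
		<-time.After(2 * time.Minute)
		fmt.Fprintln(os.Stderr, "graceful shutdown timed out; exiting")
		os.Exit(1)
	}()

	<-done
}
```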
Ori Newman
6463a4b5d0 [NOD-1011] Don't cache isSynced on getBlockTemplate (#728) 2020-05-20 14:38:24 +03:00
Svarog
0ca127853d [NOD-974] UTXO-Commitments shouldn't include the new block's transactions (#727)
* [NOD-975] Don't include block transactions inside its UTXO commitment (#711)

* [NOD-975] Don't include block transactions inside its UTXO commitment.

* Revert "[NOD-975] Don't include block transactions inside its UTXO commitment."

This reverts commit b1a2ae66

* [NOD-975] Implement a (currently failing) TestUTXOCommitment.

* [NOD-975] Remove the block's own transactions from calcMultiset.

* [NOD-975] Simplify calcMultiset.

* [NOD-975] Add a comment on top of selectedParentMultiset.

* [NOD-975] Use pastUTXO instead of selectedParentUTXO in calcMultiset.

* [NOD-975] Use selected parent's pastUTXO instead of this block's pastUTXO in calcMultiset.

* [NOD-975] Extract selectedParentPastUTXO to a separate function.

* [NOD-975] Remove selectedParentUTXO from pastUTXO's return values.

* [NOD-975] Add txs to TestUTXOCommitment.

* [NOD-975] Remove debug code.

* [NOD-975] In pastUTXOMultiSet, copy the multiset to avoid modifying the original.

* [NOD-975] Add a test: TestPastUTXOMultiSet.

* [NOD-975] Improve TestPastUTXOMultiSet.

* [NOD-976] Implement tests for UTXO commitments (#715)

* [NOD-975] Don't include block transactions inside its UTXO commitment.

* Revert "[NOD-975] Don't include block transactions inside its UTXO commitment."

This reverts commit b1a2ae66

* [NOD-975] Implement a (currently failing) TestUTXOCommitment.

* [NOD-975] Remove the block's own transactions from calcMultiset.

* [NOD-975] Simplify calcMultiset.

* [NOD-975] Add a comment on top of selectedParentMultiset.

* [NOD-975] Use pastUTXO instead of selectedParentUTXO in calcMultiset.

* [NOD-975] Use selected parent's pastUTXO instead of this block's pastUTXO in calcMultiset.

* [NOD-975] Extract selectedParentPastUTXO to a separate function.

* [NOD-975] Remove selectedParentUTXO from pastUTXO's return values.

* [NOD-975] Add txs to TestUTXOCommitment.

* [NOD-976] Generate new blockDB blocks for tests.

* [NOD-976] Fix TestBlueBlockWindow.

* [NOD-976] Fix TestIsKnownBlock.

* [NOD-976] Fix TestGHOSTDAG.

* [NOD-976] Fix TestUTXOCommitment.

* [NOD-976] Remove kaka.

* [NOD-990] Save utxo diffs of past UTXO (#724)

* [NOD-990] Save UTXO diffs of past UTXO

* [NOD-990] Check for block double spends with its past instead of building its UTXO

* [NOD-990] Call resetExtraNonceForTest in TestUTXOCommitment

* [NOD-990] Remove redundant functions diffFromTx and diffFromAcceptedTx

* [NOD-990] Rename i->j to avoid confusion

* [NOD-990] Break long lines

* [NOD-990] Rename ErrDoubleSpendsWithBlockTransaction -> ErrDoubleSpendInSameBlock

* [NOD-990] Make ErrDoubleSpendInSameBlock more detailed

* [NOD-990] Add testProcessBlockRuleError

* [NOD-990] Fix comment

* [NOD-990] Add test for duplicate transactions on the same block

* [NOD-990] Use pkg/errors on panic

* [NOD-990] Make cloneWithoutBase method

* [NOD-990] Break long lines

* [NOD-990] Fix comment

* [NOD-990] Fix wrong variable names

* [NOD-990] Fix comment

* [NOD-974] Generate new test blocks.

* [NOD-974] Fix TestIsKnownBlock and TestGHOSTDAG.

* [NOD-974] Fix TestUTXOCommitment.

* [NOD-974] Fix comments

Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
Co-authored-by: stasatdaglabs <stas@daglabs.com>
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2020-05-20 12:43:52 +03:00
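The thread running through these commits: a block's UTXO commitment is built from its selected parent's past UTXO plus the transactions the block accepts, and deliberately excludes the block's own transactions. A toy illustration of the multiset mechanics only; kaspad uses an ECMH multiset, and the XOR construction below stands in for the commutative add/remove algebra, not for a secure commitment.

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// Multiset is a toy order-independent commitment: elements are hashed and
// XORed together, so Add and Remove commute. This is NOT collision-resistant
// the way an ECMH multiset is; it only illustrates the mechanics.
type Multiset [sha256.Size]byte

func (m Multiset) Add(element []byte) Multiset    { return m.xor(element) }
func (m Multiset) Remove(element []byte) Multiset { return m.xor(element) }

func (m Multiset) xor(element []byte) Multiset {
	h := sha256.Sum256(element)
	for i := range m {
		m[i] ^= h[i]
	}
	return m
}

func main() {
	var commitment Multiset
	// Build from the selected parent's past UTXO, then apply the
	// transactions this block accepts. The block's own transactions are
	// NOT added -- the rule NOD-974/NOD-975 establish.
	commitment = commitment.Add([]byte("utxo: outpoint-1"))
	commitment = commitment.Add([]byte("utxo: outpoint-2"))
	commitment = commitment.Remove([]byte("utxo: outpoint-1")) // spent by an accepted tx
	fmt.Printf("%x\n", commitment)
}
```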
stasatdaglabs
b884ba128e [NOD-1008] In utxoDiffStore, keep diffData in memory for blocks whose blueScore is at least virtualBlueScore - X (#726)
* [NOD-1008] Use *blockNode as keys in utxoDiffStore.loaded and .dirty.

* [NOD-1008] Implement clearOldEntries.

* [NOD-1008] Increase maxBlueScoreDifferenceToKeepLoaded to 100.

* [NOD-1008] Fix a typo.

* [NOD-1008] Add clearOldEntries to saveChangesFromBlock.

* [NOD-1008] Begin implementing TestClearOldEntries.

* [NOD-1008] Finish implementing TestClearOldEntries.

* [NOD-1008] Fix a comment.

* [NOD-1008] Rename diffDataByHash to diffDataByBlockNode.

* [NOD-1008] Use dag.TipHashes instead of tracking tips manually.
2020-05-20 10:47:01 +03:00
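A sketch of the retention rule this PR describes, with stand-in types (the constant was later raised to 1500 in #746, and #750 additionally keeps DAG tips loaded):

```go
package main

import "fmt"

const maxBlueScoreDifferenceToKeepLoaded = 100 // raised to 1500 in #746

// Stand-in types; kaspad's real blockNode and diff data are richer.
type blockNode struct{ blueScore uint64 }
type blockUTXODiffData struct{}

type utxoDiffStore struct {
	loaded map[*blockNode]*blockUTXODiffData
}

// clearOldEntries evicts diff data for blocks whose blueScore has fallen
// more than the threshold behind the virtual block's blueScore.
func (s *utxoDiffStore) clearOldEntries(virtualBlueScore uint64) {
	minBlueScore := uint64(0)
	if virtualBlueScore > maxBlueScoreDifferenceToKeepLoaded {
		minBlueScore = virtualBlueScore - maxBlueScoreDifferenceToKeepLoaded
	}
	for node := range s.loaded {
		if node.blueScore < minBlueScore {
			delete(s.loaded, node)
		}
	}
}

func main() {
	old, recent := &blockNode{blueScore: 10}, &blockNode{blueScore: 950}
	s := &utxoDiffStore{loaded: map[*blockNode]*blockUTXODiffData{old: {}, recent: {}}}
	s.clearOldEntries(1000)
	fmt.Println(len(s.loaded)) // 1: only the recent block stays loaded
}
```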
Svarog
fe25ea3d8c [NOD-1001] Make an error in Peer.start() stop the connection process from continuing. (#723)
* [NOD-1001] Move side-effects of connection out of OnVersion

* [NOD-1001] Make AssociateConnection synchronous

* [NOD-1001] Wait for 2 veracks in TestPeerListeners

* [NOD-1001] Made AssociateConnection return error

* [NOD-1001] Remove temporary logs

* [NOD-1001] Fix typos and find-and-replace errors

* [NOD-1001] Move example_test back out of peer package + fix some typos

* [NOD-1001] Use correct remote address in setupPeersWithConns and return to address string literals

* [NOD-1001] Use separate verack channels for inPeer and outPeer

* [NOD-1001] Make verack channels buffered

* [NOD-1001] Removed temporary sleep of 1 second

* [NOD-1001] Removed redundant //
2020-05-20 10:36:44 +03:00
stasatdaglabs
e0f587f599 [NOD-877] Separate UTXO header code to two fields in serialization: blue score and packed flags (#725)
* [NOD-877] In UTXOEntry serialization, extract packedFlags out to a separate Uint8.

* [NOD-877] Generate new test blocks.

* [NOD-877] Fix TestIsKnownBlock.

* [NOD-877] Fix TestBlueBlockWindow.

* [NOD-877] Fix TestUTXOSerialization and TestGHOSTDAG.

* [NOD-877] Fix TestVirtualBlock.
2020-05-19 17:56:07 +03:00
stasatdaglabs
e9e1ef4772 [NOD-1006] Make use of a pool to avoid excessive allocation of big.Ints (#722)
* [NOD-1006] Make CompactToBig take an out param so that we can reuse the same big.Int in averageTarget.

* [NOD-1006] Fix merge errors.

* [NOD-1006] Use CompactToBigWithDestination only in averageTarget.

* [NOD-1006] Fix refactor errors.

* [NOD-1006] Fix refactor errors.

* [NOD-1006] Optimize averageTarget with a big.Int pool.

* [NOD-1006] Defer releasing bigInts.

* [NOD-1006] Use a pool for requiredDifficulty as well.

* [NOD-1006] Move the big int pool to utils.

* [NOD-1006] Remove unnecessary line.
2020-05-19 16:29:21 +03:00
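A minimal sketch of the pooling pattern described here, using sync.Pool. The acquire/release pairing and the deferred release mirror the bullets above; the averaging itself is simplified from the real difficulty-window logic.

```go
package main

import (
	"fmt"
	"math/big"
	"sync"
)

// A pool of big.Ints: difficulty code that repeatedly expands compact
// targets can reuse allocations instead of churning the GC.
var bigIntPool = sync.Pool{
	New: func() interface{} { return new(big.Int) },
}

func acquireBigInt() *big.Int  { return bigIntPool.Get().(*big.Int) }
func releaseBigInt(n *big.Int) { bigIntPool.Put(n) }

func averageTarget(targets []*big.Int) *big.Int {
	sum := acquireBigInt().SetInt64(0) // reset: pooled values may hold stale data
	defer releaseBigInt(sum)           // deferred release, as in the commit

	for _, t := range targets {
		sum.Add(sum, t)
	}
	// Return a fresh copy so the pooled accumulator can be safely reused.
	return new(big.Int).Div(sum, big.NewInt(int64(len(targets))))
}

func main() {
	targets := []*big.Int{big.NewInt(100), big.NewInt(200), big.NewInt(300)}
	fmt.Println(averageTarget(targets)) // 200
}
```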
Svarog
eb8b841850 [NOD-1005] Use sm.isSynced to check whether should request blocks from invs (#721)
* [NOD-1005] Moved isSyncedForMining to netsync manager, and renamed to isSynced + removed isCurrent

* [NOD-1005] Use sm.isSynced to check whether should request blocks from invs

* [NOD-1005] Use private version of isSynced to avoid infinite loop

* [NOD-1005] Fix a few typos
2020-05-18 10:42:58 +03:00
Svarog
28681affda [NOD-994] Greatly increase the amount of logs kaspad keeps before rotating them away (#720)
* [NOD-994] Greatly increased the amount of logs kaspad keeps before rotating them away

* [NOD-994] Actually increase the log file

* [NOD-994] Update comments

* [NOD-994] Fix typo
2020-05-14 10:58:46 +03:00
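Assuming the btcd-lineage rotator (github.com/jrick/logrotate) that kaspad inherited, a sketch of raising the rotation limits; the numbers are illustrative, not the values #720 settled on.

```go
package main

import (
	"log"
	"os"
	"path/filepath"

	"github.com/jrick/logrotate/rotator"
)

func main() {
	logDir := "logs"
	if err := os.MkdirAll(logDir, 0700); err != nil {
		log.Fatal(err)
	}
	// Rotate after 100 MiB (the threshold argument is in KiB) and keep
	// 8 rolls before discarding the oldest.
	r, err := rotator.New(filepath.Join(logDir, "kaspad.log"), 100*1024, false, 8)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()
	r.Write([]byte("hello log\n"))
}
```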
Svarog
378f0b659a [NOD-993] Get rid of redundant error types + Use %+v when printing startup errors (#719)
* [NOD-993] Use %+v when printing errors

* [NOD-993] Get rid of AssertError

* [NOD-993] Made ruleError use github.com/pkg/errors

* [NOD-993] remove redundant TODO

* [NOD-993] remove redundant Comment

* [NOD-993] Removed DeploymentError
2020-05-13 17:27:53 +03:00
stasatdaglabs
35b943e04f [NOD-996] Disable kaspad logs in TestScripts (#718)
* [NOD-996] Disable kaspad logs in TestScripts.

* [NOD-996] Return the log level to its original state after TestScripts is done.
2020-05-13 15:57:30 +03:00
stasatdaglabs
65f75c17fc [NOD-982] Log message with level WARN when getting MsgReject (#717)
* [NOD-982] Log message with level WARN when getting MsgReject.

* [NOD-982] Fix wrong logLevel in Write and Writef.

* [NOD-982] Use Write and Writef inside Trace, Tracef, Debug, Debugf, etc...

* [NOD-982] Move peer message logging to a separate file.
2020-05-13 10:03:37 +03:00
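The Write/Writef refactor in the middle bullets is what makes the WARN-level MsgReject log possible: every leveled method funnels through one choke point that takes the level as a parameter, so a message's level can be chosen dynamically. A minimal sketch with illustrative names:

```go
package main

import "log"

type logLevel int

const (
	levelTrace logLevel = iota
	levelDebug
	levelWarn
)

var currentLevel = levelDebug

// writef is the single choke point: every leveled helper delegates here.
func writef(level logLevel, format string, args ...interface{}) {
	if level < currentLevel {
		return
	}
	log.Printf(format, args...)
}

func tracef(format string, args ...interface{}) { writef(levelTrace, format, args...) }
func debugf(format string, args ...interface{}) { writef(levelDebug, format, args...) }
func warnf(format string, args ...interface{})  { writef(levelWarn, format, args...) }

func main() {
	debugf("received message of type %s", "MsgBlock")
	warnf("received MsgReject: %s", "block rejected") // surfaced at WARN per NOD-982
}
```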
stasatdaglabs
806eab817c [NOD-820] When the node isn't synced, make getBlockTemplate return a boolean isSynced instead of an error (#716)
* [NOD-820] Add IsSynced to GetBlockTemplateResult.

* [NOD-820] Add isSynced to the help file.

* [NOD-820] Add MineWhenNotSynced to the kaspaminer config.

* [NOD-820] Implement miner MineWhenNotSynced logic.

* [NOD-820] Fixed capitalization in an error message.
2020-05-12 15:08:24 +03:00
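A sketch of the resulting flow, with illustrative shapes: the template result carries isSynced instead of an error, and the miner combines it with its MineWhenNotSynced setting.

```go
package main

import (
	"errors"
	"fmt"
)

// Illustrative result shape; the real GetBlockTemplateResult has many fields.
type getBlockTemplateResult struct {
	IsSynced bool
	// ...template fields elided
}

func checkMiningAllowed(result getBlockTemplateResult, mineWhenNotSynced bool) error {
	if !result.IsSynced && !mineWhenNotSynced {
		return errors.New("kaspad is not synced; enable MineWhenNotSynced to mine anyway")
	}
	return nil
}

func main() {
	result := getBlockTemplateResult{IsSynced: false}
	if err := checkMiningAllowed(result, false); err != nil {
		fmt.Println(err)
	}
}
```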
Ori Newman
585510d76c [NOD-847] Fix CIDR protection and prevent connecting to the same address twice (#714)
* [NOD-847] Fix CIDR protection and prevent connecting to the same address twice

* [NOD-847] Fix Tests

* [NOD-847] Add TestDuplicateOutboundConnections and TestSameOutboundGroupConnections

* [NOD-847] Fix TestRetryPermanent, TestNetworkFailure and wait 10 ms before restoring the previous active config

* [NOD-847] Add "is" before boolean methods

* [NOD-847] Fix Connect's lock

* [NOD-847] Make numAddressesInAddressManager an argument

* [NOD-847] Add teardown function for address manager

* [NOD-847] Add stack trace to ConnManager errors

* [NOD-847] Change emptyAddressManagerForTest->createEmptyAddressManagerForTest and fix typos

* [NOD-847] Fix wrong test name for addressManagerForTest

* [NOD-847] Change error message if New fails

* [NOD-847] Add new line on releaseAddress

* [NOD-847] Always try to reconnect on disconnect
2020-05-12 13:47:15 +03:00
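A sketch of the duplicate-connection guard in the spirit of this PR; the grouping function and bookkeeping below are illustrative stand-ins for kaspad's address manager, not its actual logic.

```go
package main

import (
	"fmt"
	"net"
	"sync"
)

// outboundTracker refuses a new outbound connection if we already dial that
// exact address, or any address in the same netgroup (approximated here by
// the /16 network for IPv4).
type outboundTracker struct {
	mtx       sync.Mutex
	addresses map[string]bool
	groups    map[string]bool
}

func netGroupKey(ip net.IP) string {
	if ip4 := ip.To4(); ip4 != nil {
		return ip4.Mask(net.CIDRMask(16, 32)).String()
	}
	return ip.Mask(net.CIDRMask(32, 128)).String()
}

func (t *outboundTracker) tryReserve(addr string) bool {
	host, _, err := net.SplitHostPort(addr)
	if err != nil {
		return false
	}
	ip := net.ParseIP(host)
	if ip == nil {
		return false
	}
	group := netGroupKey(ip)

	t.mtx.Lock()
	defer t.mtx.Unlock()
	if t.addresses[addr] || t.groups[group] {
		return false // duplicate address or same outbound group
	}
	t.addresses[addr] = true
	t.groups[group] = true
	return true
}

func main() {
	tracker := &outboundTracker{addresses: map[string]bool{}, groups: map[string]bool{}}
	fmt.Println(tracker.tryReserve("203.0.113.5:16111")) // true
	fmt.Println(tracker.tryReserve("203.0.113.9:16111")) // false: same /16 group
}
```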
Svarog
c8a381d5bb [NOD-981] Fixed error message when both --notls and --rpccert omitted (#713) 2020-05-06 13:05:48 +03:00
Ori Newman
3d04e6bded [NOD-943] Add acceptedBlockHashes to GetBlockVerboseResult (#708)
* [NOD-943] Add acceptedBlockHashes to GetBlockVerboseResult

* [NOD-943] Remove intermediate variables

* [NOD-943] Add block hash to error message

* [NOD-943] Change comment
2020-05-05 17:26:54 +03:00
Svarog
f8e851a6ed [NOD-968] Wrap all ldb errors with pkg/errors (#712) 2020-05-04 16:33:23 +03:00
stasatdaglabs
e70a615135 [NOD-872] Defer all currently undeferred unlocks in the database package (#706)
* [NOD-872] Defer unlocks in write.go.

* [NOD-872] Defer unlocks in rollback.go.

* [NOD-872] Defer unlocks in read.go.

* [NOD-872] Fix duplicate RUnlock.

* [NOD-872] Remove a redundant empty line.

* [NOD-872] Extract closeCurrentWriteCursorFile to a separate method.
2020-05-04 13:07:40 +03:00
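The pattern applied throughout this PR, in miniature: take the lock and defer the unlock immediately, so every return path (early returns included) releases it. flatFileStore here is a minimal stand-in.

```go
package main

import "sync"

type flatFileStore struct {
	lock sync.RWMutex
	data []byte
}

func (s *flatFileStore) read(offset, length int) ([]byte, bool) {
	s.lock.RLock()
	defer s.lock.RUnlock() // runs on every return path below

	if offset+length > len(s.data) {
		return nil, false // early return: the unlock still happens
	}
	out := make([]byte, length)
	copy(out, s.data[offset:offset+length])
	return out, true
}

func main() {
	store := &flatFileStore{data: []byte("hello")}
	store.read(0, 5)
}
```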
Ori Newman
73ad0adf72 [NOD-913] Use sync rate in getBlockTemplate (#705)
* [NOD-913] Use sync rate in getBlockTemplate

* [NOD-913] Rename addBlockProcessTime->addBlockProcessTimestamp, maxDiff->maxTipAge

* [NOD-913] Pass maxDeviation as an argument

* [NOD-913] Change maxDeviation to +5%

* [NOD-913] Rename variables

* [NOD-913] Rename variables and functions and change comments

* [NOD-913] Split addBlockProcessingTimestamp
2020-05-04 09:09:23 +03:00
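A sketch of a sync-rate heuristic along these lines; the names, window handling, and target rate are illustrative, but the +5% deviation mirrors the bullet above.

```go
package main

import (
	"fmt"
	"time"
)

const (
	targetBlocksPerSecond = 1.0
	maxDeviation          = 1.05 // +5%
)

type syncRateTracker struct {
	timestamps []time.Time
}

// addBlockProcessingTimestamp records that a block was just processed.
func (t *syncRateTracker) addBlockProcessingTimestamp() {
	t.timestamps = append(t.timestamps, time.Now())
}

// isSyncRateAboveThreshold reports whether blocks arrived faster than the
// target rate over the tracked window, suggesting the node is catching up
// rather than sitting at the tip.
func (t *syncRateTracker) isSyncRateAboveThreshold() bool {
	if len(t.timestamps) < 2 {
		return false
	}
	window := t.timestamps[len(t.timestamps)-1].Sub(t.timestamps[0]).Seconds()
	if window == 0 {
		return false
	}
	rate := float64(len(t.timestamps)) / window
	return rate > targetBlocksPerSecond*maxDeviation
}

func main() {
	tracker := &syncRateTracker{}
	tracker.addBlockProcessingTimestamp()
	tracker.addBlockProcessingTimestamp()
	fmt.Println(tracker.isSyncRateAboveThreshold())
}
```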
stasatdaglabs
5b74e51db1 [NOD-956] Increase K to 15. (#710) 2020-05-03 14:56:47 +03:00
stasatdaglabs
2e2492cc5d [NOD-849] Database tests (#695)
* [NOD-849] Cover ffldb/transaction with tests.

* [NOD-849] Cover cursor.go with tests.

* [NOD-849] Cover ldb/transaction with tests.

* [NOD-849] Cover location.go with tests.

* [NOD-849] Write TestFlatFileMultiFileRollback.

* [NOD-849] Fix merge errors.

* [NOD-849] Fix a comment.

* [NOD-849] Fix a comment.

* [NOD-849] Add a test that makes sure that files get deleted on rollback.

* [NOD-849] Add a test that makes sure that serializeLocation serialized to an expected value.

* [NOD-849] Improve TestFlatFileLocationDeserializationErrors.

* [NOD-849] Fix a copy+paste error.

* [NOD-849] Explain maxFileSize = 16.

* [NOD-849] Remove redundant RollbackUnlessClosed call.

* [NOD-849] Extract bucket to a variable in TestCursorSanity.

* [NOD-849] Rename TestKeyValueTransactionCommit to TestTransactionCommitForLevelDBMethods.

* [NOD-849] Extract prepareXXX into separate functions.

* [NOD-849] Simplify function calls in TestTransactionCloseErrors.

* [NOD-849] Extract validateCurrentCursorKeyAndValue to a separate function.

* [NOD-849] Add a comment over TestCursorSanity.

* [NOD-849] Add a comment over function in TestCursorCloseErrors.

* [NOD-849] Add a comment over function in TestTransactionCloseErrors.

* [NOD-849] Separate TestTransactionCloseErrors to TestTransactionCommitErrors and TestTransactionRollbackErrors.

* [NOD-849] Separate TestTransactionCloseErrors to TestTransactionCommitErrors and TestTransactionRollbackErrors.

* [NOD-849] Fix copy+paste error in comments.

* [NOD-849] Fix merge errors.

* [NOD-849] Merge TestTransactionCommitErrors and TestTransactionRollbackErrors into TestTransactionCloseErrors.

* [NOD-849] Move prepareDatabaseForTest into ffldb_test.go.

* [NOD-849] Add cursorKey to Value error messages in validateCurrentCursorKeyAndValue.
2020-05-03 12:19:09 +03:00
Ori Newman
2ef5c2cbac [NOD-915] Check if lockableFile underlying file is nil before closing it (#709) 2020-04-30 14:43:38 +03:00
Ori Newman
3c89e1f7b3 [NOD-952] Fix nil dereference bug on outboundPeerConnectionFailed (#704) 2020-04-27 13:50:09 +03:00
stasatdaglabs
2910724b49 [NOD-934] Fix addresses not getting their retry attempt counter incremented if they fail to connect (#702)
* [NOD-934] Fix addresses not getting their retry attempt counter incremented if they fail to connect.

* [NOD-922] Inline parseNetAddress.

* [NOD-922] Fix debug logs.
2020-04-23 17:01:09 +03:00
stasatdaglabs
3af945692e [NOD-922] Panic from cursor Next and First (#703)
* [NOD-922] Panic in Cursor First and Next if the cursor is closed.

* [NOD-922] Fix broken tests.

* [NOD-922] Fix a comment.
2020-04-23 16:55:25 +03:00
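A minimal sketch of the guard, with an illustrative cursor type: using a closed cursor becomes a loud panic instead of silent misbehavior.

```go
package main

type cursor struct {
	isClosed bool
	position int
	keys     [][]byte
}

func (c *cursor) First() bool {
	if c.isClosed {
		panic("cannot call First on a closed cursor")
	}
	c.position = 0
	return len(c.keys) > 0
}

func (c *cursor) Next() bool {
	if c.isClosed {
		panic("cannot call Next on a closed cursor")
	}
	c.position++
	return c.position < len(c.keys)
}

func (c *cursor) Close() {
	c.isClosed = true
}

func main() {
	c := &cursor{keys: [][]byte{[]byte("a")}}
	c.First()
	c.Close()
	// c.Next() would now panic: "cannot call Next on a closed cursor"
}
```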
stasatdaglabs
5fe9dae557 [NOD-863] Write interface tests for the new database (#697)
* [NOD-863] Write TestCursorNext.

* [NOD-863] Write TestCursorFirst.

* [NOD-863] Fix merge errors.

* [NOD-863] Add TestCursorSeek.

* [NOD-863] Add TestCursorCloseErrors.

* [NOD-863] Add TestCursorCloseFirstAndNext.

* [NOD-863] Add TestDataAccessorPut.

* [NOD-863] Add TestDataAccessorGet.

* [NOD-863] Add TestDataAccessorHas.

* [NOD-863] Add TestDatabaseDelete.

* [NOD-863] Add TestDatabaseAppendToStoreAndRetrieveFromStore.

* [NOD-863] Add TestTransactionAppendToStoreAndRetrieveFromStore.

* [NOD-863] Add TestTransactionDelete.

* [NOD-863] Add TestTransactionHas.

* [NOD-863] Add TestTransactionGet.

* [NOD-863] Add TestTransactionPut.

* [NOD-863] Move cursor tests to the bottom of interface_test.go.

* [NOD-863] Move interface_test.go to a database_test package.

* [NOD-863] Make each test in interface_test.go run for every database driver. Currently, only ffldb.

* [NOD-863] Make each cursor test in interface_test.go run for every database driver. Currently, only ffldb.

* [NOD-863] Split interface_test.go into separate files.

* [NOD-863] Rename interface_test.go to common_test.go.

* [NOD-863] Extract testForAllDatabaseTypes to a separate function.

* [NOD-863] Reorganize how test data gets added to the database.

* [NOD-863] Add explanations about testForAllDatabaseTypes.

* [NOD-863] Add tests that make sure that database changes don't affect previously opened transactions.

* [NOD-863] Extract databasePrepareFunc to a type alias.

* [NOD-863] Fix comments.

* [NOD-863] Add cursor exhaustion test to testCursorFirst.

* [NOD-863] Add cursor Next clause to testCursorSeek.

* [NOD-863] Add additional verification to testDatabasePut.

* [NOD-863] Add an additional verification to testTransactionGet.

* [NOD-863] Add TestTransactionCommit.

* [NOD-863] Add TestTransactionRollback.

* [NOD-863] Add TestTransactionRollbackUnlessClosed.

* [NOD-863] Remove equals sign from databasePrepareFunc declaration.
2020-04-20 12:14:55 +03:00
Svarog
42c53ec3e2 [NOD-869] Add a print after os.Exit(1) to see if it is ever called (#701) 2020-04-16 16:08:32 +03:00
Ori Newman
291df8bfef [NOD-858] Don't switch sync peer if the syncing process hasn't yet started with the current sync peer (#700)
* [NOD-858] Don't switch sync peer if the syncing process hasn't yet started with the current sync peer

* [NOD-858] SetShouldSendBlockLocator(false) on OnBlockLocator

* [NOD-858] Rename shouldSendBlockLocator->wasBlockLocatorRequested

* [NOD-858] Move panic to shouldReplaceSyncPeer
2020-04-13 15:50:55 +03:00
Ori Newman
d015286f65 [NOD-909] Add tests for double spends (#694)
* [NOD-909] Add tests for double spends

* [NOD-909] Add prepareAndProcessBlock that gets parent hashes and transactions as argument

* [NOD-909] Use PrepareAndProcessBlockForTest where possible

* [NOD-909] Use more meaningful names

* [NOD-909] Change a comment

* [NOD-909] Fix comment

* [NOD-909] Fix comment
2020-04-13 12:28:59 +03:00
Ori Newman
fe91b4c878 [NOD-914] Make LevelDB.Cursor receive bucket instead of prefix (#696) 2020-04-12 09:25:40 +03:00
Ori Newman
7609c50641 [NOD-885] Use database.Key and database.Bucket instead of byte slices (#692)
* [NOD-885] Create database.Key type

* [NOD-885] Rename FullKey()->FullKeyBytes() and Key()->KeyBytes()

* [NOD-885] Make Key.String return a hex string

* [NOD-885] Rename key parts

* [NOD-885] Rename separator->bucketSeparator

* [NOD-885] Rename SuffixBytes->Suffix and PrefixBytes->Prefix

* [NOD-885] Change comments

* [NOD-885] Change key prefix to bucket

* [NOD-885] Don't use database.NewKey inside dbaccess

* [NOD-885] Fix nil bug in Bucket.Path()

* [NOD-885] Rename helpers.go -> keys.go

* [NOD-885] Unexport database.NewKey

* [NOD-885] Remove redundant code in Bucket.Path()
2020-04-08 12:12:21 +03:00
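A sketch of the typed-key idea, simplified from what the bullets describe: a Bucket is a path of segments joined by a bucketSeparator, a Key is a bucket plus a suffix, and String() hex-encodes because suffixes such as block hashes aren't printable. The details below are illustrative, not kaspad's exact implementation.

```go
package main

import (
	"encoding/hex"
	"fmt"
)

const bucketSeparator = byte('/')

type Bucket struct{ path [][]byte }

func MakeBucket(segments ...[]byte) *Bucket { return &Bucket{path: segments} }

// Bucket returns a child bucket, copying the path so callers can't alias it.
func (b *Bucket) Bucket(segment []byte) *Bucket {
	newPath := make([][]byte, len(b.path), len(b.path)+1)
	copy(newPath, b.path)
	return &Bucket{path: append(newPath, segment)}
}

func (b *Bucket) Path() []byte {
	var path []byte
	for _, segment := range b.path {
		path = append(path, segment...)
		path = append(path, bucketSeparator)
	}
	return path
}

type Key struct {
	bucket *Bucket
	suffix []byte
}

func (b *Bucket) Key(suffix []byte) *Key { return &Key{bucket: b, suffix: suffix} }

func (k *Key) Bytes() []byte { return append(k.bucket.Path(), k.suffix...) }

// String hex-encodes, since raw key bytes are generally unprintable.
func (k *Key) String() string { return hex.EncodeToString(k.Bytes()) }

func main() {
	blockIndex := MakeBucket([]byte("block-index"))
	key := blockIndex.Key([]byte{0xde, 0xad, 0xbe, 0xef})
	fmt.Println(key.String())
}
```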
Ori Newman
df934990d7 [NOD-822] Don't return rule errors from utxoset code (#693)
* [NOD-822] Remove rule errors from the UTXO diff code

* [NOD-822] Rename applyTransactions -> applyAndVerifyBlockTransactionsToPastUTXO

* [NOD-822] Fix comment
2020-04-07 12:45:12 +03:00
stasatdaglabs
3c4a80f16d [NOD-899] Inside the database, in case we're out of disk space, panic without printing the stack trace (#691)
* [NOD-899] Inside the database, in case we're out of disk space, panic without printing the stack trace.

* [NOD-899] Fix bad variable name.

* [NOD-899] Reduce code duplication.
2020-04-06 16:00:48 +03:00
stasatdaglabs
a31139d4a5 [NOD-895] Break down initDAGState to sub-routines (#690) 2020-04-06 11:08:57 +03:00
Mike Zak
6da3606721 Update to version 0.4.0 2020-04-05 16:23:01 +03:00
Ori Newman
bfbc72724d [NOD-873] Reuse allocated space when updating the UTXO set in database (#688) 2020-04-05 11:46:16 +03:00
stasatdaglabs
956b6f7d95 [NOD-900] Fix bad key in Seek (#687)
* [NOD-900] Fix Seek not working as expected.

* [NOD-900] Wrap error messages.

* [NOD-900] Use ldbIterator.Key instead of LevelDBCursor.Key.

* [NOD-900] Add a comment.
2020-04-02 17:47:51 +03:00
stasatdaglabs
c1a039de3f [NOD-900] Fix Seek not working as expected (#686)
* [NOD-900] Fix Seek not working as expected.

* [NOD-900] Wrap error messages.
2020-04-02 17:05:58 +03:00
stasatdaglabs
f8b18e09d6 [NOD-805] Redesign the database (#685)
* [NOD-828] Reimplement FFLDB (#663)

* [NOD-828] Create the database2 package that will some day replace the database package.

* [NOD-828] Implement a "bucket" key mechanism.

* [NOD-828] Move bucket.go into the ffldb2 package.

* [NOD-828] Delete the un-interfaced ffldb package from database2, since we aren't going to be using it anyway.

* [NOD-828] Copy over + fixup flat file structs from the old ffldb.

* [NOD-828] Implement flatFilePath.

* [NOD-828] Implement flatFileStore.write().

* [NOD-828] Implement flatFileStore.read().

* [NOD-828] Implement flatFileStore.rollback().

* [NOD-828] Sync the file to disk at the end of write().

* [NOD-828] Extract crc32ByteOrder to a separate variable.

* [NOD-828] Add a sanity test.

* [NOD-828] Remove context-unrelated methods from the Database interface.

* [NOD-828] Create an ffldb object. Simply work against a context.

* [NOD-828] Open the new database on start.

* [NOD-828] Create the leveldb package.

* [NOD-828] Implement opening/closing leveldb.

* [NOD-828] Implement get/put out of/into leveldb.

* [NOD-828] Implement transactions and make them implement a generic database interface.

* [NOD-828] Write sanity tests for leveldb with and without transactions.

* [NOD-828] Add another case to the transaction sanity test.

* [NOD-828] Implement AppendBlock/RetrieveBlock.

* [NOD-828] Refactor so that concepts such as "block" and "metadata" don't leak into the database package.

* [NOD-828] Add RollbackFlatData to the database interface.

* [NOD-828] Remove anything from dbaccess that I'm not planning to implement as part of this ticket.

* [NOD-828] Implement StoreBlock.

* [NOD-828] Implement FetchBlock.

* [NOD-828] Implement HasBlock.

* [NOD-828] Write a sanity test for block insertion.

* [NOD-828] Implement CurrentFlatDataLocation.

* [NOD-828] Implement storing the current block location.

* [NOD-828] Implement initializing/syncing the flat file block store and the "metadata".

* [NOD-828] Add InitBlockStore to TestBlockStoreSanity.

* [NOD-828] Fix rename errors.

* [NOD-828] Fix lint errors in the root database package.

* [NOD-828] Fix lint errors in the ffldb.go.

* [NOD-828] Fix lint errors in the flatfile/db.go.

* [NOD-828] Rename packages in such a way to make the linter happy.

* [NOD-828] Finish satisfying the linter.

* [NOD-828] Fix doc.go.

* [NOD-828] Fix comments in block.go.

* [NOD-828] Move dbaccess out of the database package.

* [NOD-828] Move opening/closing the database to dbaccess.

* [NOD-828] Move the Database interface to the root database package, since it's meant to be generic.

* [NOD-828] Make ffldb generic to later support additional databases.

* [NOD-828] Make ffldb.Open return DatabaseHandle, since ffldb is no longer exported.

* [NOD-828] Fix comments.

* [NOD-828] Rename AppendFlatData to AppendToStore and RetrieveFlatData to RetrieveFromStore.

* [NOD-828] Make buckets nicer to use.

* [NOD-828] Implement cursors that iterate over some bucket.

* [NOD-828] Generalize flat-file repairing and move block database repairing into ffldb.

* [NOD-828] Write a test making sure that flat file repair works.

* [NOD-828] Properly close the database in TestRepairFlatFiles.

* [NOD-828] Add a comment warning against putting and getting the same data within the same transaction.

* [NOD-828] Fix the flatFilesBucket description.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Fix the ffldb description.

* [NOD-828] Rename Database to DataAccessor and rename Handle to Database.

* [NOD-828] Make Get return nil if the value doesn't exist.

* [NOD-828] Attempt to close leveldb even if closing ffdb failed.

* [NOD-828] Fix a bug where the wrong location would be written to the current store location bucket.

* [NOD-828] Fix not updating the store location in ffldb transactions.

* [NOD-828] Make scanFlatFiles return an error if os.Stat fails for any reason other than file-not-found.

* [NOD-828] Update the README and doc.go.

* [NOD-828] Simplify Bucket.Path().

* [NOD-828] Since LevelDBCursor satisfied the database2.Cursor interface, use it directly.

* [NOD-828] Combine two lines into one.

* [NOD-828] Combine another two lines into one.

* [NOD-828] Move a misplaced comment.

* [NOD-828] Use Wrapf instead of Errorf where appropriate.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Fix Wrapf calls.

* [NOD-828] Fix comments.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Add a comment explaining the use of batches and snapshots.

* [NOD-828] Implement RollbackUnlessClosed().

* [NOD-828] Return both errors in StoreBlock rollback.

* [NOD-828] Move rollback-on-error logic into ffldb. Remove CurrentStoreLocation and RollbackStore from DataAccessor.

* [NOD-828] Make bucket a type alias instead of a struct.

* [NOD-828] Fix a typo.

* [NOD-828] Use copy instead of append in Bucket.

* [NOD-828] Extract flatFileLocationSerializedSize to a const.

* [NOD-828] Debugf -> Warnf in rollback.go.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Remove data length from flat file data format.

* [NOD-828] Rearrange TestLevelDBTransactionSanity a bit.

* [NOD-828] Add stack traces to all errors that come out of library functions.

* [NOD-828] Return errors from rollback().

* [NOD-828] Remove an irrelevant comment.

* [NOD-828] Remove redundant whitespace.

* [NOD-828] Handle nil in FetchBlock.

* [NOD-828] Move the explanation about batches and snapshots to the LevelDBTransaction struct.

* Revert "[NOD-828] Make bucket a type alias instead of a struct."

This reverts commit 1fd39652

* [NOD-828] Fix revert errors.

* Revert "[NOD-828] Remove data length from flat file data format."

This reverts commit ef408e32

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-828] Add Delete to DataAccessor.

* [NOD-828] Fix a couple of places that erroneously referenced blocks.

* [NOD-828] Add a comment on top of flatFileLocationSerializedSize.

* [NOD-828] Add Seek to Cursor.

* [NOD-828] Add First to Cursor.

* [NOD-828] Rename db to accessor in Context.

* [NOD-828] Make Get/Fetch calls return a boolean to indicate whether the requested item was found.

* [NOD-828] Name the output parameters of all Get functions.

* [NOD-828] Make RetrieveFromStore return whether the data was found.

* [NOD-887] Add a couple of QoL features to Cursor (#674)

* [NOD-887] Changed First to not return an error.

* [NOD-887] Fix merge error.

* [NOD-887] Make Cursor.Key not return the entire key path.

* [NOD-888] Add RollbackUnlessClosed to Context (#676)

* [NOD-888] Add RollbackUnlessClosed to Context.

* [NOD-888] Fix copy+paste error.

* [NOD-889] Instead of returning a boolean for not-found, return an error (#677)

* [NOD-889] Instead of returning a boolean for not-found, return an error.

* [NOD-889] Wrapped ErrNotFound for Get calls with nicer error messages.

* [NOD-889] Fix format.

* [NOD-889] Fix double space in a comment.

* [NOD-889] Add IsNotFoundError to dbaccess.

* [NOD-862] Replace calls to Tx.StoreBlock, Tx.HasBlock, Tx.FetchBlock with appropriate calls in dbaccess (#672)

* [NOD-862] Use dbaccess.HasBlock instead of Tx.HasBlock in initDAGState.

* [NOD-862] Use dbaccess.StoreBlock instead of dbStoreBlock.

* [NOD-862] Use dbaccess.FetchBlock instead of various block fetching mechanisms.

* [NOD-828] Implement a dbaccess.BlockNode struct.

* [NOD-828] Implement toDBBlockNode and fromDBBlockNode.

* Revert "[NOD-828] Make bucket a type alias instead of a struct."

This reverts commit 1fd39652

* [NOD-828] Fix revert errors.

* Revert "[NOD-828] Remove data length from flat file data format."

This reverts commit ef408e32

* [NOD-862] Implement storing index blocks.

* [NOD-862] Use database transactions where appropriate.

* [NOD-862] Fix tests failing on DAGSetup.

* [NOD-862] Fix bad make call.

* [NOD-862] Fix remaining database opening problems in tests.

* [NOD-862] Iterate over the new block index in dagio.

* [NOD-862] Fix block index key.

* [NOD-862] Pass byte slices to dbaccess instead of objects.

* [NOD-862] Fix errors.

* [NOD-862] Fix maybeAcceptBlock not checking block existence.

* [NOD-862] Fix TestAcceptanceIndexRecover.

* [NOD-862] Add comments to StoreBlockIndex and BlockIndexCursor.

* [NOD-862] Fix merge errors.

* [NOD-862] Fix DAGSetup using bad temp directories.

* [NOD-862] Fix TestProcessDelayedBlocks not closing the database properly.

* [NOD-862] Fix merge errors.

* [NOD-862] Merge flushToDBWithContext and flushToDB.

* [NOD-862] Remove TODO.

* [NOD-862] Add prefix to the temp dir in DAGSetup.

* [NOD-862] Bring back dbFetchBlockByHash.

* [NOD-862] Use BlockDAG.BlockByHash in p2p and rpc.

* [NOD-862] Use daghash.Hash in dbaccess.

* [NOD-862] Add defer to RollbackUnlessClosed after NewTx().

* [NOD-862] Extract dbStoreBlock to a separate function.

* [NOD-862] Fix grammar in comment.

* [NOD-862] Fix merge errors.

* [NOD-867] Migrate database logic in blockdag/dagio.go to dbaccess (#675)

* [NOD-867] Remove blockIndexBucket from dagio.

* [NOD-867] Fix wrong key in StoreIndexBucket.

* [NOD-867] Migrate DAG state to dbaccess.

* [NOD-867] Remove utxoSetVersionKeyName.

* [NOD-862] Fix merge errors.

* [NOD-867] Move localSubnetworkID into dagState.

* [NOD-867] Fix a comment.

* [NOD-867] Remove an unused function.

* [NOD-867] Migrate the database's UTXO set to dbaccess.

* [NOD-867] Add missing error check.

* [NOD-867] Changed First to not return an error.

* [NOD-867] Make Cursor.Key not return the entire key path.

* [NOD-887] Fix the comment above BlockIndexCursorFrom.

* [NOD-867] Remove TODOs.

* [NOD-867] Fix merge errors.

* [NOD-867] Fix comments and errors.

* [NOD-867] Unexport blockIndexKey.

* [NOD-867] Fix merge errors.

* [NOD-867] Move a misplaced comment.

* [NOD-867] Fix an error message.

* [NOD-867] Remove preallocation in initDAGState.

* [NOD-866] Migrate database logic in blockdag/indexers package to dbaccess (#682)

* [NOD-865] Delete blockidhash.go.

* [NOD-865] Remove a lot of no-longer relevant logic from indexers.

* [NOD-865] Pass TxContext to ConnectBlock.

* [NOD-865] Migrate the acceptance index to dbaccess.

* [NOD-865] Fix a block not being sent to ConnectBlock.

* [NOD-865] Pass the block's hash instead of the whole block.

* [NOD-865] Add forgotten Commit call.

* [NOD-865] Add comments.

* [NOD-866] Fix a comment.

* [NOD-866] Fix a comment.

* [NOD-866] Remove pointless indirection in acceptanceindex.

* [NOD-866] Fix comment over ForEachHash.

* [NOD-866] Rename ClearAcceptanceIndex to DropAcceptanceIndex.

* [NOD-866] Explain collecting keys before deleting them.

* [NOD-865] Move misc db logic to db access (#681)

* [NOD-865] Move fee data db operations to dbaccess

* [NOD-865] Move reachability data db operations to dbaccess

* [NOD-865] Move UTXO diff data db operations to dbaccess

* [NOD-865] Move subnetwork data db operations to dbaccess

* [NOD-865] Fix createDAGState

* [NOD-865] Remove old Get signature with "exists"

* [NOD-865] Move multiset db operations to dbaccess

* [NOD-865] Use dbaccess transactions where possible

* [NOD-865] Remove old Get signature with "exists"

* [NOD-881] Recover TestGHOSTDAGErrors

* [NOD-865] Create function for db keys

* [NOD-865] Change Exists to Has, and use accessor.Has where possible

* [NOD-865] Make ClearReachabilityData transactional

* [NOD-865] Don't iterate cursors while changing db data

* [NOD-865] Rename RegisterSubnetwork -> StoreSubnetwork

* [NOD-865] Change bucket from utxodiffs to utxo-diffs

* [NOD-865] Rename SubnetworkExists->HasSubnetwork

* [NOD-865] Change a comment

* [NOD-865] Fix tests

* [NOD-865] Fix comment

* [NOD-865] Remove the prefix "db" from some functions

* [NOD-865] Remove redundant comments

* [NOD-865] Make clearBucket function

* [NOD-865] Make clear functions get a dbTx as an arg

* [NOD-865] Remove erroneous tx commit

Co-authored-by: stasatdaglabs <stas@daglabs.com>

* [NOD-868] Delete the old database package (#683)

* [NOD-828] Create the database2 package that will some day replace the database package.

* [NOD-828] Implement a "bucket" key mechanism.

* [NOD-828] Move bucket.go into the ffldb2 package.

* [NOD-828] Delete the un-interfaced ffldb package from database2, since we aren't going to be using it anyway.

* [NOD-828] Copy over + fixup flat file structs from the old ffldb.

* [NOD-828] Implement flatFilePath.

* [NOD-828] Implement flatFileStore.write().

* [NOD-828] Implement flatFileStore.read().

* [NOD-828] Implement flatFileStore.rollback().

* [NOD-828] Sync the file to disk at the end of write().

* [NOD-828] Extract crc32ByteOrder to a separate variable.

* [NOD-828] Add a sanity test.

* [NOD-828] Remove context-unrelated methods from the Database interface.

* [NOD-828] Create an ffldb object. Simply work against a context.

* [NOD-828] Open the new database on start.

* [NOD-828] Create the leveldb package.

* [NOD-828] Implement opening/closing leveldb.

* [NOD-828] Implement get/put out of/into leveldb.

* [NOD-828] Implement transactions and make them implement a generic database interface.

* [NOD-828] Write sanity tests for leveldb with and without transactions.

* [NOD-828] Add another case to the transaction sanity test.

* [NOD-828] Implement AppendBlock/RetrieveBlock.

* [NOD-828] Refactor so that concepts such as "block" and "metadata" don't leak into the database package.

* [NOD-828] Add RollbackFlatData to the database interface.

* [NOD-828] Remove anything from dbaccess that I'm not planning to implement as part of this ticket.

* [NOD-828] Implement StoreBlock.

* [NOD-828] Implement FetchBlock.

* [NOD-828] Implement HasBlock.

* [NOD-828] Write a sanity test for block insertion.

* [NOD-828] Implement CurrentFlatDataLocation.

* [NOD-828] Implement storing the current block location.

* [NOD-828] Implement initializing/syncing the flat file block store and the "metadata".

* [NOD-828] Add InitBlockStore to TestBlockStoreSanity.

* [NOD-828] Fix rename errors.

* [NOD-828] Fix lint errors in the root database package.

* [NOD-828] Fix lint errors in the ffldb.go.

* [NOD-828] Fix lint errors in the flatfile/db.go.

* [NOD-828] Rename packages in such a way to make the linter happy.

* [NOD-828] Finish satisfying the linter.

* [NOD-828] Fix doc.go.

* [NOD-828] Fix comments in block.go.

* [NOD-828] Move dbaccess out of the database package.

* [NOD-828] Move opening/closing the database to dbaccess.

* [NOD-828] Move the Database interface to the root database package, since it's meant to be generic.

* [NOD-828] Make ffldb generic to later support additional databases.

* [NOD-828] Make ffldb.Open return DatabaseHandle, since ffldb is no longer exported.

* [NOD-828] Fix comments.

* [NOD-828] Rename AppendFlatData to AppendToStore and RetrieveFlatData to RetrieveFromStore.

* [NOD-828] Make buckets nicer to use.

* [NOD-828] Implement cursors that iterate over some bucket.

* [NOD-828] Generalize flat-file repairing and move block database repairing into ffldb.

* [NOD-828] Write a test making sure that flat file repair works.

* [NOD-828] Properly close the database in TestRepairFlatFiles.

* [NOD-828] Add a comment warning against putting and getting the same data within the same transaction.

* [NOD-828] Fix the flatFilesBucket description.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Fix the ffldb description.

* [NOD-828] Rename Database to DataAccessor and rename Handle to Database.

* [NOD-828] Make Get return nil if the value doesn't exist.

* [NOD-828] Attempt to close leveldb even if closing ffdb failed.

* [NOD-828] Fix a bug where the wrong location would be written to the current store location bucket.

* [NOD-828] Fix not updating the store location in ffldb transactions.

* [NOD-828] Make scanFlatFiles return an error if os.Stat fails for any reason other than file-not-found.

* [NOD-828] Update the README and doc.go.

* [NOD-828] Simplify Bucket.Path().

* [NOD-828] Since LevelDBCursor satisfies the database2.Cursor interface, use it directly.

* [NOD-828] Combine two lines into one.

* [NOD-828] Combine another two lines into one.

* [NOD-828] Move a misplaced comment.

* [NOD-828] Use Wrapf instead of Errorf where appropriate.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Fix Wrapf calls.

* [NOD-828] Fix comments.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Add a comment explaining the use of batches and snapshots.

* [NOD-828] Implement RollbackUnlessClosed().

* [NOD-828] Return both errors in StoreBlock rollback.

* [NOD-828] Move rollback-on-error logic into ffldb. Remove CurrentStoreLocation and RollbackStore from DataAccessor.

* [NOD-828] Make bucket a type alias instead of a struct.

* [NOD-828] Fix a typo.

* [NOD-828] Use copy instead of append in Bucket.

* [NOD-828] Extract flatFileLocationSerializedSize to a const.

* [NOD-828] Debugf -> Warnf in rollback.go.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Remove data length from flat file data format.

* [NOD-828] Rearrange TestLevelDBTransactionSanity a bit.

* [NOD-828] Add stack traces to all errors that come out of library functions.

* [NOD-828] Return errors from rollback().

* [NOD-828] Remove an irrelevant comment.

* [NOD-828] Remove redundant whitespace.

* [NOD-828] Handle nil in FetchBlock.

* [NOD-828] Move the explanation about batches and snapshots to the LevelDBTransaction struct.

* Revert "[NOD-828] Make bucket a type alias instead of a struct."

This reverts commit 1fd39652

* [NOD-828] Fix revert errors.

* Revert "[NOD-828] Remove data length from flat file data format."

This reverts commit ef408e32

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-828] Add Delete to DataAccessor.

* [NOD-865] Move fee data db operations to dbaccess

* [NOD-865] Move reachability data db operations to dbaccess

* [NOD-865] Move UTXO diff data db operations to dbaccess

* [NOD-865] Move subnetwork data db operations to dbaccess

* [NOD-865] Fix createDAGState

* [NOD-865] Remove old Get signature with "exists"

* [NOD-865] Move multiset db operations to dbaccess

* [NOD-865] Use dbaccess transactions where possible

* [NOD-865] Remove old Get signature with "exists"

* [NOD-881] Recover TestGHOSTDAGErrors

* [NOD-865] Create function for db keys

* [NOD-865] Change Exists to Has, and use accessor.Has where possible

* [NOD-865] Make ClearReachabilityData transactional

* [NOD-865] Don't iterate cursors while changing db data

* [NOD-865] Rename RegisterSubnetwork -> StoreSubnetwork

* [NOD-865] Change bucket from utxodiffs to utxo-diffs

* [NOD-865] Rename SubnetworkExists->HasSubnetwork

* [NOD-865] Change a comment

* [NOD-868] Remove all tests from old database.

* [NOD-868] Remove all unused methods from the old database's interfaces.

* [NOD-865] Fix tests

* [NOD-868] Remove references to DB.

* [NOD-865] Fix comment

* [NOD-868] Remove the old ffldb besides the interface and errors.go.

* [NOD-868] Remove errors.go.

* [NOD-868] Remove the old database package.

* [NOD-868] Add openDB to DAGSetup to emulate the old dbpath in dag.config.

* [NOD-868] Rename database2 to database.

* [NOD-868] Use NewTx instead of NoTx where required.

* [NOD-868] Fix merge errors.

* [NOD-868] Rename dbXXX functions to just xxx.

* [NOD-868] Rename putDAGState to saveDAGState.

* [NOD-868] Replace comments in initDAGState with logs.

* [NOD-868] Explain the openDB parameter in DAGSetup.

* [NOD-868] Fixup doc.go and README.md.

* [NOD-868] Remove pointless transactions.

Co-authored-by: Ori Newman <orinewman1@gmail.com>

* [NOD-805] Fix merge errors.

* [NOD-805] Fix a comment.

* [NOD-805] Don't return virtualTxsAcceptanceData from applyDAGChanges.

* [NOD-805] Add missing error handling in TestAcceptanceDataIndexRecover.

* [NOD-805] Rename blockDAG to dag in indexers/manager.go.

* [NOD-805] Defer cursor.Close() everywhere.

* [NOD-805] Rename scanFlatFiles to findCurrentLocation.

* [NOD-805] Extract crc32ChecksumLength and dataLengthLength to constants.

* [NOD-805] Handle open files properly in rollback.go.

* [NOD-805] Remove unnecessary func wrapper.

* [NOD-805] Remove unnecessary trimming in initialize.

* [NOD-805] Made StoreBlock accept only TxContext.

* [NOD-805] Changed the log level of an error message to Error.

* [NOD-805] Add a note about holding mutexes over deleteFile.

* [NOD-805] Remove a false comment.

* [NOD-805] Fix a comment.

* [NOD-805] Rename blk to block.

* [NOD-805] Extract utxoKey to a separate function.

* [NOD-805] Move dbaccess.xxxKey functions to the tops of their respective files.

* [NOD-805] Fix grammar in dbaccess/db.go.

* [NOD-805] Wrap a failed database corruption recovery error.

* [NOD-805] Split lines with WithStack in them.

* [NOD-805] Fix the comment over initialize.

* [NOD-805] Rename ffdb to flatFileDB and ldb to levelDB.

* [NOD-805] Fix a comment.

* [NOD-805] Fix a comment.

* [NOD-805] Use s.writeCursor instead of cursor.

* [NOD-805] Embed file in lockableFile.

* [NOD-805] the the -> the

* [NOD-805] openDB -> db

* [NOD-805] Use TxContext in all flushToDB functions.

* [NOD-805] Rename context -> dbContext.

* [NOD-805] Reword the comment at the beginning on initDAGState.

* [NOD-805] Explain cursor key trimming.

* [NOD-805] Remove Error from Cursor.

* [NOD-805] Return ErrNotFound from done Cursor Key and Value.

* [NOD-805] Add missing error handling.

* [NOD-805] Fix a comment.

* [NOD-805] Fix a variable name.

* [NOD-805] Remove pointless underscore.

* [NOD-805] Fix a comment.

* [NOD-805] Fix a variable name.

Co-authored-by: Mike Zak <feanorr@gmail.com>
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2020-04-02 13:56:32 +03:00
Ori Newman
b20a7a679b [NOD-874] If the node is not current call sm.restartSyncIfNeeded() on handleInvMsg (#684)
* [NOD-874] If the node is not current call sm.restartSyncIfNeeded() on handleInvMsg

* [NOD-874] Check haveUnknownInvBlock before restartSyncIfNeeded

* [NOD-874] Fix comment

* [NOD-874] Fix comment

* [NOD-874] Fix comment
2020-04-01 12:56:10 +03:00
Ori Newman
36d866375e [NOD-881] Don't recalculate subtreesize for children (#678)
* [NOD-881] Don't recalculate subtreesize for children

* [NOD-881] Make BenchmarkReindexInterval clearer

* [NOD-881] Use b.ResetTimer

* [NOD-881] Fix BenchmarkReindexInterval to use b.N
2020-03-31 12:43:02 +03:00
Svarog
024edc30a3 [NOD-857] Add generalized profiler package and use it everywhere (#679)
* [NOD-857] Add generalized profiler package and use it everywhere

* [NOD-857] Dependency-inject log into profiling.Start()
2020-03-31 12:41:21 +03:00
Ori Newman
6aa5e0b5a8 [NOD-882] Remove ecc and hdkeychain (#680)
* [NOD-882] Remove ecc and hdkeychain

* [NOD-882] Remove HDCoinType from dagParams
2020-03-31 10:58:11 +03:00
Mike Zak
1a38550fdd Update to version 0.3.0 2020-03-29 14:15:17 +03:00
stasatdaglabs
3e7ebb5a84 [NOD-861] Get rid of dbtool/fetchblockregion.go. (#667) 2020-03-29 12:47:13 +03:00
Svarog
4bca7342d3 [NOD-883] Fix dockerfile in kaspaminer + set real version for go-libsecp256k1 (#673) 2020-03-26 17:50:09 +02:00
Elichai Turkel
f80908fb4e [NOD-876] Replace ecc with go-secp256k1 for public keys (#670)
* Replace ecc with go-secp256k1 in txscript

* Replace ecc with go-secp256k1 in util and cmd

* Replace ecc.Multiset with secp256k1.MultiSet
2020-03-26 17:03:39 +02:00
stasatdaglabs
e000e10738 [NOD-880] Remove CGO_ENABLED=0 from Dockerfile. (#671) 2020-03-26 14:02:57 +02:00
Ori Newman
d83862f36c [NOD-855] Save ECMH for block utxo and not diff utxo (#669)
* [NOD-855] Save ECMH for each block UTXO

* [NOD-855] Remove UpdateExtraNonce method

* [NOD-855] Remove multiset data from UTXO diffs

* [NOD-855] Fix to fetch multiset of selected parent

* [NOD-855] Don't remove coinbase inputs from multiset

* [NOD-855] Create multisetBucketName on startup

* [NOD-855] Remove multiset from UTXO diff tests

* [NOD-855] clear new entries from multisetstore on saveChangesFromBlock

* [NOD-855] Fix tests

* [NOD-855] Use UnacceptedBlueScore when adding current block transactions to multiset

* [NOD-855] Hash utxo before adding it to multiset

* [NOD-855] Pass isCoinbase to NewUTXOEntry

* [NOD-855] Do not use hash when adding entries to multiset

* [NOD-855] When calculating multiset, replace the unaccepted blue score of selected parent transaction with the block blue score

* [NOD-855] Manually add a chained transaction to a block in TestChainedTransactions

* [NOD-855] Change name and comments

* [NOD-855] Use FindAcceptanceData to find a specific block acceptance data

* [NOD-855] Remove redundant copy of txIn.PreviousOutpoint

* [NOD-855] Use fmt.Sprintf when creating internalRPCError
2020-03-26 13:06:12 +02:00
Svarog
1020402b34 [NOD-869] Close panicHandlerDone instead of sending an empty struct + use time.After instead of time.Tick (#668) 2020-03-25 16:14:08 +02:00
Mike Zak
bc6ce6ed53 Update version to v0.2.0 2020-03-25 11:51:14 +02:00
Ori Newman
d3b1953deb [NOD-848] optimize utxo diffs serialize allocations (#666)
* [NOD-848] Optimize allocations when serializing UTXO diffs

* [NOD-848] Use same UTXO serialization everywhere, and use compression as well

* [NOD-848] Fix usage of wrong buffer

* [NOD-848] Fix tests

* [NOD-848] Fix wire tests

* [NOD-848] Fix tests

* [NOD-848] Remove VLQ

* [NOD-848] Fix comments

* [NOD-848] Add varint for big endian encoding

* [NOD-848] In TestVarIntWire, assume the expected decoded value is the same as the serialization input

* [NOD-848] Serialize outpoint index with big endian varint

* [NOD-848] Remove p2pk from compression support

* [NOD-848] Fix comments

* [NOD-848] Remove p2pk from decompression support

* [NOD-848] Make entry compression optional

* [NOD-848] Fix tests

* [NOD-848] Fix comments and var names

* [NOD-848] Remove UTXO compression

* [NOD-848] Fix tests

* [NOD-848] Remove big endian varint

* [NOD-848] Fix comments

* [NOD-848] Rename ReadVarIntLittleEndian->ReadVarInt and fix WriteVarInt comment

* [NOD-848] Add outpointIndexByteOrder variable

* [NOD-848] Remove redundant comment

* [NOD-848] Fix outpointMaxSerializeSize to the correct value

* [NOD-848] Move subBuffer to utils
2020-03-24 16:44:41 +02:00
Svarog
3c67215e76 [NOD-796] Upgrade to go 1.14 (#665) 2020-03-22 14:50:13 +02:00
Svarog
586624c836 [NOD-853] Add profiler server to kaspaminer (#664) 2020-03-19 17:19:31 +02:00
Svarog
49855e6333 [NOD-823] Use WithDiffInPlace for the implementation of WithDiff (#657)
* [NOD-823] Use WithDiffInPlace for the implementation of WithDiff

* [NOD-823] Unexport withDiffInPlace
2020-03-17 11:19:02 +02:00
Ori Newman
624249c0f3 [NOD-842] Use flushToDB with the same transaction as everything else in saveChangesFromBlock and never ignore flushToDB errors (#662) 2020-03-16 11:05:17 +02:00
Ori Newman
1cf443a63b [NOD-841] Fix tests to not be dependent on block rate (#661)
* [NOD-841] Fix TestDifficulty

* [NOD-841] Fix TestProcessDelayedBlocks

* [NOD-841] Fix TestCheckBlockSanity

* [NOD-841] Fix TestProcessDelayedBlocks

* [NOD-841] Shorten long lines
2020-03-15 18:08:03 +02:00
Ori Newman
8909679f44 [NOD-818] Remove time adjustment (#658)
* [NOD-818] Remove time adjustment

* [NOD-818] Remove interface ensuring and copyright message

* [NOD-818] Update comment
2020-03-15 17:37:01 +02:00
Ori Newman
e58efbf0ea [NOD-839] Panic from non-rule error from ProcessBlock (#660) 2020-03-15 17:26:53 +02:00
271 changed files with 9538 additions and 27521 deletions

View File

@@ -6,7 +6,7 @@ package blockdag
import (
"fmt"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
)
@@ -16,7 +16,17 @@ func (dag *BlockDAG) addNodeToIndexWithInvalidAncestor(block *util.Block) error
newNode, _ := dag.newBlockNode(blockHeader, newBlockSet())
newNode.status = statusInvalidAncestor
dag.index.AddNode(newNode)
return dag.index.flushToDB()
dbTx, err := dbaccess.NewTx()
if err != nil {
return err
}
defer dbTx.RollbackUnlessClosed()
err = dag.index.flushToDB(dbTx)
if err != nil {
return err
}
return dbTx.Commit()
}
// maybeAcceptBlock potentially accepts a block into the block DAG. It
@@ -62,13 +72,26 @@ func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) er
// expensive connection logic. It also has some other nice properties
// such as making blocks that never become part of the DAG or
// blocks that fail to connect available for further analysis.
err = dag.db.Update(func(dbTx database.Tx) error {
err := dbStoreBlock(dbTx, block)
dbTx, err := dbaccess.NewTx()
if err != nil {
return err
}
defer dbTx.RollbackUnlessClosed()
blockExists, err := dbaccess.HasBlock(dbTx, block.Hash())
if err != nil {
return err
}
if !blockExists {
err := storeBlock(dbTx, block)
if err != nil {
return err
}
return dag.index.flushToDBWithTx(dbTx)
})
}
err = dag.index.flushToDB(dbTx)
if err != nil {
return err
}
err = dbTx.Commit()
if err != nil {
return err
}
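
Taken together, the hunks above replace the old db.Update closure with the new dbaccess transaction idiom. A minimal sketch of that pattern (doWrites is a hypothetical stand-in for the real work; NewTx, RollbackUnlessClosed and Commit are the calls shown in the diff):

dbTx, err := dbaccess.NewTx()
if err != nil {
	return err
}
// Deferring RollbackUnlessClosed gives rollback-on-error for free:
// it presumably does nothing once Commit has closed the transaction.
defer dbTx.RollbackUnlessClosed()
if err := doWrites(dbTx); err != nil { // hypothetical helper
	return err
}
return dbTx.Commit()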

View File

@@ -10,7 +10,7 @@ import (
func TestMaybeAcceptBlockErrors(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestMaybeAcceptBlockErrors", Config{
dag, teardownFunc, err := DAGSetup("TestMaybeAcceptBlockErrors", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {

View File

@@ -10,7 +10,7 @@ import (
// TestBlockHeap tests pushing, popping, and determining the length of the heap.
func TestBlockHeap(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestBlockHeap", Config{
dag, teardownFunc, err := DAGSetup("TestBlockHeap", true, Config{
DAGParams: &dagconfig.MainnetParams,
})
if err != nil {

View File

@@ -1,136 +0,0 @@
package blockdag
import (
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/pkg/errors"
)
var (
// idByHashIndexBucketName is the name of the db bucket used to house
// the block hash -> block id index.
idByHashIndexBucketName = []byte("idbyhashidx")
// hashByIDIndexBucketName is the name of the db bucket used to house
// the block id -> block hash index.
hashByIDIndexBucketName = []byte("hashbyididx")
currentBlockIDKey = []byte("currentblockid")
)
// -----------------------------------------------------------------------------
// This is a mapping between block hashes and unique IDs. The ID
// is simply a sequentially incremented uint64 that is used instead of block hash
// for the indexers. This is useful because it is only 8 bytes versus 32 bytes
// hashes and thus saves a ton of space when a block is referenced in an index.
// It consists of three buckets: the first bucket maps the hash of each
// block to the unique ID and the second maps that ID back to the block hash.
// The third bucket contains the last received block ID, and is used
// when starting the node to check that the enabled indexes are up to date
// with the latest received block, and if not, initiate recovery process.
//
// The serialized format for keys and values in the block hash to ID bucket is:
// <hash> = <ID>
//
// Field Type Size
// hash daghash.Hash 32 bytes
// ID uint64 8 bytes
// -----
// Total: 40 bytes
//
// The serialized format for keys and values in the ID to block hash bucket is:
// <ID> = <hash>
//
// Field Type Size
// ID uint64 8 bytes
// hash daghash.Hash 32 bytes
// -----
// Total: 40 bytes
//
// -----------------------------------------------------------------------------
const blockIDSize = 8 // 8 bytes for block ID
// DBFetchBlockIDByHash uses an existing database transaction to retrieve the
// block id for the provided hash from the index.
func DBFetchBlockIDByHash(dbTx database.Tx, hash *daghash.Hash) (uint64, error) {
hashIndex := dbTx.Metadata().Bucket(idByHashIndexBucketName)
serializedID := hashIndex.Get(hash[:])
if serializedID == nil {
return 0, errors.Errorf("no entry in the block ID index for block with hash %s", hash)
}
return DeserializeBlockID(serializedID), nil
}
// DBFetchBlockHashBySerializedID uses an existing database transaction to
// retrieve the hash for the provided serialized block id from the index.
func DBFetchBlockHashBySerializedID(dbTx database.Tx, serializedID []byte) (*daghash.Hash, error) {
idIndex := dbTx.Metadata().Bucket(hashByIDIndexBucketName)
hashBytes := idIndex.Get(serializedID)
if hashBytes == nil {
return nil, errors.Errorf("no entry in the block ID index for block with id %d", byteOrder.Uint64(serializedID))
}
var hash daghash.Hash
copy(hash[:], hashBytes)
return &hash, nil
}
// dbPutBlockIDIndexEntry uses an existing database transaction to update or add
// the index entries for the hash to id and id to hash mappings for the provided
// values.
func dbPutBlockIDIndexEntry(dbTx database.Tx, hash *daghash.Hash, serializedID []byte) error {
// Add the block hash to ID mapping to the index.
meta := dbTx.Metadata()
hashIndex := meta.Bucket(idByHashIndexBucketName)
if err := hashIndex.Put(hash[:], serializedID[:]); err != nil {
return err
}
// Add the block ID to hash mapping to the index.
idIndex := meta.Bucket(hashByIDIndexBucketName)
return idIndex.Put(serializedID[:], hash[:])
}
// DBFetchCurrentBlockID returns the last known block ID.
func DBFetchCurrentBlockID(dbTx database.Tx) uint64 {
serializedID := dbTx.Metadata().Get(currentBlockIDKey)
if serializedID == nil {
return 0
}
return DeserializeBlockID(serializedID)
}
// DeserializeBlockID returns a deserialized block id
func DeserializeBlockID(serializedID []byte) uint64 {
return byteOrder.Uint64(serializedID)
}
// SerializeBlockID returns a serialized block id
func SerializeBlockID(blockID uint64) []byte {
serializedBlockID := make([]byte, blockIDSize)
byteOrder.PutUint64(serializedBlockID, blockID)
return serializedBlockID
}
// DBFetchBlockHashByID uses an existing database transaction to retrieve the
// hash for the provided block id from the index.
func DBFetchBlockHashByID(dbTx database.Tx, id uint64) (*daghash.Hash, error) {
return DBFetchBlockHashBySerializedID(dbTx, SerializeBlockID(id))
}
func createBlockID(dbTx database.Tx, blockHash *daghash.Hash) (uint64, error) {
currentBlockID := DBFetchCurrentBlockID(dbTx)
newBlockID := currentBlockID + 1
serializedNewBlockID := SerializeBlockID(newBlockID)
err := dbTx.Metadata().Put(currentBlockIDKey, serializedNewBlockID)
if err != nil {
return 0, err
}
err = dbPutBlockIDIndexEntry(dbTx, blockHash, serializedNewBlockID)
if err != nil {
return 0, err
}
return newBlockID, nil
}
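
For reference, a self-contained sketch of the 8-byte block ID round trip described above, assuming the package-level byteOrder is little-endian (an assumption; the diff does not show its definition):

package main

import (
	"encoding/binary"
	"fmt"
)

// serializeBlockID mirrors SerializeBlockID above, with the byte order spelled out.
func serializeBlockID(blockID uint64) []byte {
	serialized := make([]byte, 8) // blockIDSize
	binary.LittleEndian.PutUint64(serialized, blockID)
	return serialized
}

// deserializeBlockID mirrors DeserializeBlockID above.
func deserializeBlockID(serialized []byte) uint64 {
	return binary.LittleEndian.Uint64(serialized)
}

func main() {
	fmt.Println(deserializeBlockID(serializeBlockID(42))) // prints 42
}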

View File

@@ -5,10 +5,10 @@
package blockdag
import (
"github.com/kaspanet/kaspad/dbaccess"
"sync"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/daghash"
)
@@ -18,7 +18,6 @@ type blockIndex struct {
// The following fields are set when the instance is created and can't
// be changed afterwards, so there is no need to protect them with a
// separate mutex.
db database.DB
dagParams *dagconfig.Params
sync.RWMutex
@@ -29,9 +28,8 @@ type blockIndex struct {
// newBlockIndex returns a new empty instance of a block index. The index will
// be dynamically populated as block nodes are loaded from the database and
// manually added.
func newBlockIndex(db database.DB, dagParams *dagconfig.Params) *blockIndex {
func newBlockIndex(dagParams *dagconfig.Params) *blockIndex {
return &blockIndex{
db: db,
dagParams: dagParams,
index: make(map[daghash.Hash]*blockNode),
dirty: make(map[*blockNode]struct{}),
@@ -111,17 +109,8 @@ func (bi *blockIndex) UnsetStatusFlags(node *blockNode, flags blockStatus) {
bi.dirty[node] = struct{}{}
}
// flushToDB writes all dirty block nodes to the database. If all writes
// succeed, this clears the dirty set.
func (bi *blockIndex) flushToDB() error {
return bi.db.Update(func(dbTx database.Tx) error {
return bi.flushToDBWithTx(dbTx)
})
}
// flushToDBWithTx writes all dirty block nodes to the database. If all
// writes succeed, this clears the dirty set.
func (bi *blockIndex) flushToDBWithTx(dbTx database.Tx) error {
// flushToDB writes all dirty block nodes to the database.
func (bi *blockIndex) flushToDB(dbContext *dbaccess.TxContext) error {
bi.Lock()
defer bi.Unlock()
if len(bi.dirty) == 0 {
@@ -129,7 +118,12 @@ func (bi *blockIndex) flushToDBWithTx(dbTx database.Tx) error {
}
for node := range bi.dirty {
err := dbStoreBlockNode(dbTx, node)
serializedBlockNode, err := serializeBlockNode(node)
if err != nil {
return err
}
key := blockIndexKey(node.hash, node.blueScore)
err = dbaccess.StoreIndexBlock(dbContext, key, serializedBlockNode)
if err != nil {
return err
}
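
The rewritten flushToDB keeps the same contract as the old pair of functions: collect dirty nodes under the mutex, write them all, and clear the set only if every write succeeded. A stripped-down sketch of that idiom (the types and store callback are illustrative, not kaspad API):

import "sync"

type dirtyIndex struct {
	sync.Mutex
	dirty map[string][]byte // key -> serialized node
}

func (di *dirtyIndex) flush(store func(key string, value []byte) error) error {
	di.Lock()
	defer di.Unlock()
	for key, value := range di.dirty {
		if err := store(key, value); err != nil {
			return err
		}
	}
	// Clear the dirty set only after all writes succeeded.
	di.dirty = make(map[string][]byte)
	return nil
}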

View File

@@ -10,7 +10,7 @@ import (
func TestAncestorErrors(t *testing.T) {
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimnetParams
dag, teardownFunc, err := DAGSetup("TestAncestorErrors", Config{
dag, teardownFunc, err := DAGSetup("TestAncestorErrors", true, Config{
DAGParams: &params,
})
if err != nil {

View File

@@ -110,7 +110,7 @@ func (dag *BlockDAG) newBlockNode(blockHeader *wire.BlockHeader, parents blockSe
parents: parents,
children: make(blockSet),
blueScore: math.MaxUint64, // Initialized to the max value to avoid collisions with the genesis block
timestamp: dag.AdjustedTime().Unix(),
timestamp: dag.Now().Unix(),
bluesAnticoneSizes: make(map[*blockNode]dagconfig.KType),
}

View File

@@ -9,7 +9,7 @@ import (
// This test ensures that BlueAnticoneSizesSize is serialized to the size of KType.
// We verify this by serializing and deserializing the block while making sure we stay within the expected range.
func TestBlueAnticoneSizesSize(t *testing.T) {
dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizesSize", Config{
dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizesSize", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {

View File

@@ -2,6 +2,7 @@ package blockdag
import (
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/bigintpool"
"github.com/pkg/errors"
"math"
"math/big"
@@ -53,13 +54,19 @@ func (window blockWindow) minMaxTimestamps() (min, max int64) {
return
}
func (window blockWindow) averageTarget() *big.Int {
averageTarget := big.NewInt(0)
func (window blockWindow) averageTarget(averageTarget *big.Int) {
averageTarget.SetInt64(0)
target := bigintpool.Acquire(0)
defer bigintpool.Release(target)
for _, node := range window {
target := util.CompactToBig(node.bits)
util.CompactToBigWithDestination(node.bits, target)
averageTarget.Add(averageTarget, target)
}
return averageTarget.Div(averageTarget, big.NewInt(int64(len(window))))
windowLen := bigintpool.Acquire(int64(len(window)))
defer bigintpool.Release(windowLen)
averageTarget.Div(averageTarget, windowLen)
}
func (window blockWindow) medianTimestamp() (int64, error) {
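
The bigintpool usage above is consistent with a sync.Pool-backed recycler for big.Int values. A plausible implementation sketch (assumed; the actual util/bigintpool source is not shown in this diff):

package bigintpool

import (
	"math/big"
	"sync"
)

var pool = sync.Pool{
	New: func() interface{} { return new(big.Int) },
}

// Acquire returns a pooled big.Int initialized to x.
func Acquire(x int64) *big.Int {
	result := pool.Get().(*big.Int)
	result.SetInt64(x)
	return result
}

// Release puts a big.Int back into the pool for reuse.
func Release(toRelease *big.Int) {
	pool.Put(toRelease)
}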

View File

@@ -12,7 +12,7 @@ import (
func TestBlueBlockWindow(t *testing.T) {
params := dagconfig.SimnetParams
params.K = 1
dag, teardownFunc, err := DAGSetup("TestBlueBlockWindow", Config{
dag, teardownFunc, err := DAGSetup("TestBlueBlockWindow", true, Config{
DAGParams: &params,
})
if err != nil {
@@ -53,12 +53,12 @@ func TestBlueBlockWindow(t *testing.T) {
{
parents: []string{"C", "D"},
id: "E",
expectedWindowWithGenesisPadding: []string{"C", "D", "B", "A", "A", "A", "A", "A", "A", "A"},
expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"},
},
{
parents: []string{"C", "D"},
id: "F",
expectedWindowWithGenesisPadding: []string{"C", "D", "B", "A", "A", "A", "A", "A", "A", "A"},
expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"},
},
{
parents: []string{"A"},
@@ -73,37 +73,37 @@ func TestBlueBlockWindow(t *testing.T) {
{
parents: []string{"H", "F"},
id: "I",
expectedWindowWithGenesisPadding: []string{"F", "C", "D", "B", "A", "A", "A", "A", "A", "A"},
expectedWindowWithGenesisPadding: []string{"F", "D", "C", "B", "A", "A", "A", "A", "A", "A"},
},
{
parents: []string{"I"},
id: "J",
expectedWindowWithGenesisPadding: []string{"I", "F", "C", "D", "B", "A", "A", "A", "A", "A"},
expectedWindowWithGenesisPadding: []string{"I", "F", "D", "C", "B", "A", "A", "A", "A", "A"},
},
{
parents: []string{"J"},
id: "K",
expectedWindowWithGenesisPadding: []string{"J", "I", "F", "C", "D", "B", "A", "A", "A", "A"},
expectedWindowWithGenesisPadding: []string{"J", "I", "F", "D", "C", "B", "A", "A", "A", "A"},
},
{
parents: []string{"K"},
id: "L",
expectedWindowWithGenesisPadding: []string{"K", "J", "I", "F", "C", "D", "B", "A", "A", "A"},
expectedWindowWithGenesisPadding: []string{"K", "J", "I", "F", "D", "C", "B", "A", "A", "A"},
},
{
parents: []string{"L"},
id: "M",
expectedWindowWithGenesisPadding: []string{"L", "K", "J", "I", "F", "C", "D", "B", "A", "A"},
expectedWindowWithGenesisPadding: []string{"L", "K", "J", "I", "F", "D", "C", "B", "A", "A"},
},
{
parents: []string{"M"},
id: "N",
expectedWindowWithGenesisPadding: []string{"M", "L", "K", "J", "I", "F", "C", "D", "B", "A"},
expectedWindowWithGenesisPadding: []string{"M", "L", "K", "J", "I", "F", "D", "C", "B", "A"},
},
{
parents: []string{"N"},
id: "O",
expectedWindowWithGenesisPadding: []string{"N", "M", "L", "K", "J", "I", "F", "C", "D", "B"},
expectedWindowWithGenesisPadding: []string{"N", "M", "L", "K", "J", "I", "F", "D", "C", "B"},
},
}

View File

@@ -4,12 +4,12 @@ import (
"bufio"
"bytes"
"encoding/binary"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/pkg/errors"
"io"
"math"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/txsort"
@@ -73,55 +73,24 @@ func (cfr *compactFeeIterator) next() (uint64, error) {
}
// The following functions relate to storing and retrieving fee data from the database
var feeBucket = []byte("fees")
// getBluesFeeData returns the compactFeeData for all of the node's blues,
// used to calculate the fees this blockNode needs to pay
func (node *blockNode) getBluesFeeData(dag *BlockDAG) (map[daghash.Hash]compactFeeData, error) {
bluesFeeData := make(map[daghash.Hash]compactFeeData)
err := dag.db.View(func(dbTx database.Tx) error {
for _, blueBlock := range node.blues {
feeData, err := dbFetchFeeData(dbTx, blueBlock.hash)
if err != nil {
return errors.Errorf("Error getting fee data for block %s: %s", blueBlock.hash, err)
}
bluesFeeData[*blueBlock.hash] = feeData
for _, blueBlock := range node.blues {
feeData, err := dbaccess.FetchFeeData(dbaccess.NoTx(), blueBlock.hash)
if err != nil {
return nil, err
}
return nil
})
if err != nil {
return nil, err
bluesFeeData[*blueBlock.hash] = feeData
}
return bluesFeeData, nil
}
func dbStoreFeeData(dbTx database.Tx, blockHash *daghash.Hash, feeData compactFeeData) error {
feeBucket, err := dbTx.Metadata().CreateBucketIfNotExists(feeBucket)
if err != nil {
return errors.Errorf("Error creating or retrieving fee bucket: %s", err)
}
return feeBucket.Put(blockHash.CloneBytes(), feeData)
}
func dbFetchFeeData(dbTx database.Tx, blockHash *daghash.Hash) (compactFeeData, error) {
feeBucket := dbTx.Metadata().Bucket(feeBucket)
if feeBucket == nil {
return nil, errors.New("Fee bucket does not exist")
}
feeData := feeBucket.Get(blockHash.CloneBytes())
if feeData == nil {
return nil, errors.Errorf("No fee data found for block %s", blockHash)
}
return feeData, nil
}
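
The read side of the same migration: where no atomicity is needed, dbaccess.NoTx() supplies a context for one-off reads, as in the getBluesFeeData hunk above. A minimal sketch:

// Fetch a single block's fee data outside any explicit transaction.
feeData, err := dbaccess.FetchFeeData(dbaccess.NoTx(), blockHash)
if err != nil {
	return nil, err
}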
// The following functions deal with building and validating the coinbase transaction
func (node *blockNode) validateCoinbaseTransaction(dag *BlockDAG, block *util.Block, txsAcceptanceData MultiBlockTxsAcceptanceData) error {

View File

@@ -7,17 +7,16 @@ package blockdag
import (
"compress/bzip2"
"encoding/binary"
"github.com/pkg/errors"
"io"
"os"
"path/filepath"
"reflect"
"strings"
"testing"
"time"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/dagconfig"
_ "github.com/kaspanet/kaspad/database/ffldb"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
@@ -73,15 +72,8 @@ func loadUTXOSet(filename string) (UTXOSet, error) {
return nil, err
}
// Serialized utxo entry.
serialized := make([]byte, numBytes)
_, err = io.ReadAtLeast(r, serialized, int(numBytes))
if err != nil {
return nil, err
}
// Deserialize it and add it to the view.
entry, err := deserializeUTXOEntry(serialized)
// Deserialize the UTXO entry and add it to the UTXO set.
entry, err := deserializeUTXOEntry(r)
if err != nil {
return nil, err
}
@@ -102,11 +94,11 @@ func (dag *BlockDAG) TestSetCoinbaseMaturity(maturity uint64) {
// it is not usable with all functions and the tests must take care when making
// use of it.
func newTestDAG(params *dagconfig.Params) *BlockDAG {
index := newBlockIndex(nil, params)
index := newBlockIndex(params)
targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second)
dag := &BlockDAG{
dagParams: params,
timeSource: NewMedianTime(),
timeSource: NewTimeSource(),
targetTimePerBlock: targetTimePerBlock,
difficultyAdjustmentWindowSize: params.DifficultyAdjustmentWindowSize,
TimestampDeviationTolerance: params.TimestampDeviationTolerance,
@@ -152,56 +144,37 @@ func addNodeAsChildToParents(node *blockNode) {
// same type (either both nil or both of type RuleError) and their error codes
// match when not nil.
func checkRuleError(gotErr, wantErr error) error {
// Ensure the error code is of the expected type and the error
// code matches the value specified in the test instance.
if reflect.TypeOf(gotErr) != reflect.TypeOf(wantErr) {
return errors.Errorf("wrong error - got %T (%[1]v), want %T",
gotErr, wantErr)
}
if gotErr == nil {
if wantErr == nil && gotErr == nil {
return nil
}
// Ensure the want error type is a script error.
werr, ok := wantErr.(RuleError)
if !ok {
return errors.Errorf("unexpected test error type %T", wantErr)
var gotRuleErr RuleError
if ok := errors.As(gotErr, &gotRuleErr); !ok {
return errors.Errorf("gotErr expected to be RuleError, but got %+v instead", gotErr)
}
var wantRuleErr RuleError
if ok := errors.As(wantErr, &wantRuleErr); !ok {
return errors.Errorf("wantErr expected to be RuleError, but got %+v instead", wantErr)
}
// Ensure the error codes match. It's safe to use a raw type assert
// here since the code above already proved they are the same type and
// the want error is a script error.
gotErrorCode := gotErr.(RuleError).ErrorCode
if gotErrorCode != werr.ErrorCode {
if gotRuleErr.ErrorCode != wantRuleErr.ErrorCode {
return errors.Errorf("mismatched error code - got %v (%v), want %v",
gotErrorCode, gotErr, werr.ErrorCode)
gotRuleErr.ErrorCode, gotErr, wantRuleErr.ErrorCode)
}
return nil
}
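
The switch to errors.As in checkRuleError is more than cosmetic: unlike the old raw type assertion, errors.As also matches a RuleError that has been wrapped (given an Unwrap-aware wrapper such as github.com/pkg/errors). Illustrative usage:

var ruleErr RuleError
if errors.As(err, &ruleErr) {
	// Matches even if err was wrapped, e.g. errors.Wrapf(ruleErr, "context").
	fmt.Println(ruleErr.ErrorCode)
}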
func prepareAndProcessBlock(t *testing.T, dag *BlockDAG, parents ...*wire.MsgBlock) *wire.MsgBlock {
func prepareAndProcessBlockByParentMsgBlocks(t *testing.T, dag *BlockDAG, parents ...*wire.MsgBlock) *wire.MsgBlock {
parentHashes := make([]*daghash.Hash, len(parents))
for i, parent := range parents {
parentHashes[i] = parent.BlockHash()
}
daghash.Sort(parentHashes)
block, err := PrepareBlockForTest(dag, parentHashes, nil)
if err != nil {
t.Fatalf("error in PrepareBlockForTest: %s", err)
}
utilBlock := util.NewBlock(block)
isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, BFNoPoWCheck)
if err != nil {
t.Fatalf("unexpected error in ProcessBlock: %s", err)
}
if isDelayed {
t.Fatalf("block is too far in the future")
}
if isOrphan {
t.Fatalf("block was unexpectedly orphan")
}
return block
return PrepareAndProcessBlockForTest(t, dag, parentHashes, nil)
}
func nodeByMsgBlock(t *testing.T, dag *BlockDAG, block *wire.MsgBlock) *blockNode {
@@ -211,3 +184,15 @@ func nodeByMsgBlock(t *testing.T, dag *BlockDAG, block *wire.MsgBlock) *blockNod
}
return node
}
type fakeTimeSource struct {
time time.Time
}
func (fts *fakeTimeSource) Now() time.Time {
return time.Unix(fts.time.Unix(), 0)
}
func newFakeTimeSource(fakeTime time.Time) TimeSource {
return &fakeTimeSource{time: fakeTime}
}
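
fakeTimeSource implies the minimal TimeSource contract introduced alongside NewTimeSource; presumably something like:

// Assumed shape of the TimeSource interface that fakeTimeSource satisfies.
type TimeSource interface {
	// Now returns the current time according to this source.
	Now() time.Time
}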

View File

@@ -1,584 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/kaspad/txscript"
)
// -----------------------------------------------------------------------------
// A variable length quantity (VLQ) is an encoding that uses an arbitrary number
// of binary octets to represent an arbitrarily large integer. The scheme
// employs a most significant byte (MSB) base-128 encoding where the high bit in
// each byte indicates whether or not the byte is the final one. In addition,
// to ensure there are no redundant encodings, an offset is subtracted every
// time a group of 7 bits is shifted out. Therefore each integer can be
// represented in exactly one way, and each representation stands for exactly
// one integer.
//
// Another nice property of this encoding is that it provides a compact
// representation of values that are typically used to indicate sizes. For
// example, the values 0 - 127 are represented with a single byte, 128 - 16511
// with two bytes, and 16512 - 2113663 with three bytes.
//
// While the encoding allows arbitrarily large integers, it is artificially
// limited in this code to an unsigned 64-bit integer for efficiency purposes.
//
// Example encodings:
// 0 -> [0x00]
// 127 -> [0x7f] * Max 1-byte value
// 128 -> [0x80 0x00]
// 129 -> [0x80 0x01]
// 255 -> [0x80 0x7f]
// 256 -> [0x81 0x00]
// 16511 -> [0xff 0x7f] * Max 2-byte value
// 16512 -> [0x80 0x80 0x00]
// 32895 -> [0x80 0xff 0x7f]
// 2113663 -> [0xff 0xff 0x7f] * Max 3-byte value
// 270549119 -> [0xff 0xff 0xff 0x7f] * Max 4-byte value
// 2^64-1 -> [0x80 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0x7f]
//
// References:
// https://en.wikipedia.org/wiki/Variable-length_quantity
// http://www.codecodex.com/wiki/Variable-Length_Integers
// -----------------------------------------------------------------------------
// serializeSizeVLQ returns the number of bytes it would take to serialize the
// passed number as a variable-length quantity according to the format described
// above.
func serializeSizeVLQ(n uint64) int {
size := 1
for ; n > 0x7f; n = (n >> 7) - 1 {
size++
}
return size
}
// putVLQ serializes the provided number to a variable-length quantity according
// to the format described above and returns the number of bytes of the encoded
// value. The result is placed directly into the passed byte slice which must
// be at least large enough to handle the number of bytes returned by the
// serializeSizeVLQ function or it will panic.
func putVLQ(target []byte, n uint64) int {
offset := 0
for ; ; offset++ {
// The high bit is set when another byte follows.
highBitMask := byte(0x80)
if offset == 0 {
highBitMask = 0x00
}
target[offset] = byte(n&0x7f) | highBitMask
if n <= 0x7f {
break
}
n = (n >> 7) - 1
}
// Reverse the bytes so it is MSB-encoded.
for i, j := 0, offset; i < j; i, j = i+1, j-1 {
target[i], target[j] = target[j], target[i]
}
return offset + 1
}
// deserializeVLQ deserializes the provided variable-length quantity according
// to the format described above. It also returns the number of bytes
// deserialized.
func deserializeVLQ(serialized []byte) (uint64, int) {
var n uint64
var size int
for _, val := range serialized {
size++
n = (n << 7) | uint64(val&0x7f)
if val&0x80 != 0x80 {
break
}
n++
}
return n, size
}
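
A worked round trip for one entry of the example table above (128 -> [0x80 0x00]), using the three helpers just defined:

buf := make([]byte, serializeSizeVLQ(128)) // serializeSizeVLQ(128) == 2
putVLQ(buf, 128)                           // buf == []byte{0x80, 0x00}
value, bytesRead := deserializeVLQ(buf)    // value == 128, bytesRead == 2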
// -----------------------------------------------------------------------------
// In order to reduce the size of stored scripts, a domain specific compression
// algorithm is used which recognizes standard scripts and stores them using
// less bytes than the original script.
//
// The general serialized format is:
//
// <script size or type><script data>
//
// Field Type Size
// script size or type VLQ variable
// script data []byte variable
//
// The specific serialized format for each recognized standard script is:
//
// - Pay-to-pubkey-hash: (21 bytes) - <0><20-byte pubkey hash>
// - Pay-to-script-hash: (21 bytes) - <1><20-byte script hash>
// - Pay-to-pubkey**: (33 bytes) - <2, 3, 4, or 5><32-byte pubkey X value>
// 2, 3 = compressed pubkey with bit 0 specifying the y coordinate to use
// 4, 5 = uncompressed pubkey with bit 0 specifying the y coordinate to use
// ** Only valid public keys starting with 0x02, 0x03, and 0x04 are supported.
//
// Any scripts which are not recognized as one of the aforementioned standard
// scripts are encoded using the general serialized format and encode the script
// size as the sum of the actual size of the script and the number of special
// cases.
// -----------------------------------------------------------------------------
// The following constants specify the special constants used to identify a
// special script type in the domain-specific compressed script encoding.
//
// NOTE: This section specifically does not use iota since these values are
// serialized and must be stable for long-term storage.
const (
// cstPayToPubKeyHash identifies a compressed pay-to-pubkey-hash script.
cstPayToPubKeyHash = 0
// cstPayToScriptHash identifies a compressed pay-to-script-hash script.
cstPayToScriptHash = 1
// cstPayToPubKeyComp2 identifies a compressed pay-to-pubkey script to
// a compressed pubkey. Bit 0 specifies which y-coordinate to use
// to reconstruct the full uncompressed pubkey.
cstPayToPubKeyComp2 = 2
// cstPayToPubKeyComp3 identifies a compressed pay-to-pubkey script to
// a compressed pubkey. Bit 0 specifies which y-coordinate to use
// to reconstruct the full uncompressed pubkey.
cstPayToPubKeyComp3 = 3
// cstPayToPubKeyUncomp4 identifies a compressed pay-to-pubkey script to
// an uncompressed pubkey. Bit 0 specifies which y-coordinate to use
// to reconstruct the full uncompressed pubkey.
cstPayToPubKeyUncomp4 = 4
// cstPayToPubKeyUncomp5 identifies a compressed pay-to-pubkey script to
// an uncompressed pubkey. Bit 0 specifies which y-coordinate to use
// to reconstruct the full uncompressed pubkey.
cstPayToPubKeyUncomp5 = 5
// numSpecialScripts is the number of special scripts recognized by the
// domain-specific script compression algorithm.
numSpecialScripts = 6
)
// isPubKeyHash returns whether or not the passed public key script is a
// standard pay-to-pubkey-hash script along with the pubkey hash it is paying to
// if it is.
func isPubKeyHash(script []byte) (bool, []byte) {
if len(script) == 25 && script[0] == txscript.OpDup &&
script[1] == txscript.OpHash160 &&
script[2] == txscript.OpData20 &&
script[23] == txscript.OpEqualVerify &&
script[24] == txscript.OpCheckSig {
return true, script[3:23]
}
return false, nil
}
// isScriptHash returns whether or not the passed public key script is a
// standard pay-to-script-hash script along with the script hash it is paying to
// if it is.
func isScriptHash(script []byte) (bool, []byte) {
if len(script) == 23 && script[0] == txscript.OpHash160 &&
script[1] == txscript.OpData20 &&
script[22] == txscript.OpEqual {
return true, script[2:22]
}
return false, nil
}
// isPubKey returns whether or not the passed public key script is a standard
// pay-to-pubkey script that pays to a valid compressed or uncompressed public
// key along with the serialized pubkey it is paying to if it is.
//
// NOTE: This function ensures the public key is actually valid since the
// compression algorithm requires valid pubkeys. It does not support hybrid
// pubkeys. This means that even if the script has the correct form for a
// pay-to-pubkey script, this function will only return true when it is paying
// to a valid compressed or uncompressed pubkey.
func isPubKey(script []byte) (bool, []byte) {
// Pay-to-compressed-pubkey script.
if len(script) == 35 && script[0] == txscript.OpData33 &&
script[34] == txscript.OpCheckSig && (script[1] == 0x02 ||
script[1] == 0x03) {
// Ensure the public key is valid.
serializedPubKey := script[1:34]
_, err := ecc.ParsePubKey(serializedPubKey, ecc.S256())
if err == nil {
return true, serializedPubKey
}
}
// Pay-to-uncompressed-pubkey script.
if len(script) == 67 && script[0] == txscript.OpData65 &&
script[66] == txscript.OpCheckSig && script[1] == 0x04 {
// Ensure the public key is valid.
serializedPubKey := script[1:66]
_, err := ecc.ParsePubKey(serializedPubKey, ecc.S256())
if err == nil {
return true, serializedPubKey
}
}
return false, nil
}
// compressedScriptSize returns the number of bytes the passed script would take
// when encoded with the domain specific compression algorithm described above.
func compressedScriptSize(scriptPubKey []byte) int {
// Pay-to-pubkey-hash script.
if valid, _ := isPubKeyHash(scriptPubKey); valid {
return 21
}
// Pay-to-script-hash script.
if valid, _ := isScriptHash(scriptPubKey); valid {
return 21
}
// Pay-to-pubkey (compressed or uncompressed) script.
if valid, _ := isPubKey(scriptPubKey); valid {
return 33
}
// When none of the above special cases apply, encode the script as is
// preceded by the sum of its size and the number of special cases
// encoded as a variable length quantity.
return serializeSizeVLQ(uint64(len(scriptPubKey)+numSpecialScripts)) +
len(scriptPubKey)
}
// decodeCompressedScriptSize treats the passed serialized bytes as a compressed
// script, possibly followed by other data, and returns the number of bytes it
// occupies taking into account the special encoding of the script size by the
// domain specific compression algorithm described above.
func decodeCompressedScriptSize(serialized []byte) int {
scriptSize, bytesRead := deserializeVLQ(serialized)
if bytesRead == 0 {
return 0
}
switch scriptSize {
case cstPayToPubKeyHash:
return 21
case cstPayToScriptHash:
return 21
case cstPayToPubKeyComp2, cstPayToPubKeyComp3, cstPayToPubKeyUncomp4,
cstPayToPubKeyUncomp5:
return 33
}
scriptSize -= numSpecialScripts
scriptSize += uint64(bytesRead)
return int(scriptSize)
}
// putCompressedScript compresses the passed script according to the domain
// specific compression algorithm described above directly into the passed
// target byte slice. The target byte slice must be at least large enough to
// handle the number of bytes returned by the compressedScriptSize function or
// it will panic.
func putCompressedScript(target, scriptPubKey []byte) int {
// Pay-to-pubkey-hash script.
if valid, hash := isPubKeyHash(scriptPubKey); valid {
target[0] = cstPayToPubKeyHash
copy(target[1:21], hash)
return 21
}
// Pay-to-script-hash script.
if valid, hash := isScriptHash(scriptPubKey); valid {
target[0] = cstPayToScriptHash
copy(target[1:21], hash)
return 21
}
// Pay-to-pubkey (compressed or uncompressed) script.
if valid, serializedPubKey := isPubKey(scriptPubKey); valid {
pubKeyFormat := serializedPubKey[0]
switch pubKeyFormat {
case 0x02, 0x03:
target[0] = pubKeyFormat
copy(target[1:33], serializedPubKey[1:33])
return 33
case 0x04:
// Encode the oddness of the serialized pubkey into the
// compressed script type.
target[0] = pubKeyFormat | (serializedPubKey[64] & 0x01)
copy(target[1:33], serializedPubKey[1:33])
return 33
}
}
// When none of the above special cases apply, encode the unmodified
// script preceded by the sum of its size and the number of special
// cases encoded as a variable length quantity.
encodedSize := uint64(len(scriptPubKey) + numSpecialScripts)
vlqSizeLen := putVLQ(target, encodedSize)
copy(target[vlqSizeLen:], scriptPubKey)
return vlqSizeLen + len(scriptPubKey)
}
// decompressScript returns the original script obtained by decompressing the
// passed compressed script according to the domain specific compression
// algorithm described above.
//
// NOTE: The script parameter must already have been proven to be long enough
// to contain the number of bytes returned by decodeCompressedScriptSize or it
// will panic. This is acceptable since it is only an internal function.
func decompressScript(compressedScriptPubKey []byte) []byte {
// In practice this function will not be called with a zero-length or
// nil script since the nil script encoding includes the length, however
// the code below assumes the length exists, so just return nil now if
// the function ever ends up being called with a nil script in the
// future.
if len(compressedScriptPubKey) == 0 {
return nil
}
// Decode the script size and examine it for the special cases.
encodedScriptSize, bytesRead := deserializeVLQ(compressedScriptPubKey)
switch encodedScriptSize {
// Pay-to-pubkey-hash script. The resulting script is:
// <OP_DUP><OP_HASH160><20 byte hash><OP_EQUALVERIFY><OP_CHECKSIG>
case cstPayToPubKeyHash:
scriptPubKey := make([]byte, 25)
scriptPubKey[0] = txscript.OpDup
scriptPubKey[1] = txscript.OpHash160
scriptPubKey[2] = txscript.OpData20
copy(scriptPubKey[3:], compressedScriptPubKey[bytesRead:bytesRead+20])
scriptPubKey[23] = txscript.OpEqualVerify
scriptPubKey[24] = txscript.OpCheckSig
return scriptPubKey
// Pay-to-script-hash script. The resulting script is:
// <OP_HASH160><20 byte script hash><OP_EQUAL>
case cstPayToScriptHash:
scriptPubKey := make([]byte, 23)
scriptPubKey[0] = txscript.OpHash160
scriptPubKey[1] = txscript.OpData20
copy(scriptPubKey[2:], compressedScriptPubKey[bytesRead:bytesRead+20])
scriptPubKey[22] = txscript.OpEqual
return scriptPubKey
// Pay-to-compressed-pubkey script. The resulting script is:
// <OP_DATA_33><33 byte compressed pubkey><OP_CHECKSIG>
case cstPayToPubKeyComp2, cstPayToPubKeyComp3:
scriptPubKey := make([]byte, 35)
scriptPubKey[0] = txscript.OpData33
scriptPubKey[1] = byte(encodedScriptSize)
copy(scriptPubKey[2:], compressedScriptPubKey[bytesRead:bytesRead+32])
scriptPubKey[34] = txscript.OpCheckSig
return scriptPubKey
// Pay-to-uncompressed-pubkey script. The resulting script is:
// <OP_DATA_65><65 byte uncompressed pubkey><OP_CHECKSIG>
case cstPayToPubKeyUncomp4, cstPayToPubKeyUncomp5:
// Change the leading byte to the appropriate compressed pubkey
// identifier (0x02 or 0x03) so it can be decoded as a
// compressed pubkey. This really should never fail since the
// encoding ensures it is valid before compressing to this type.
compressedKey := make([]byte, 33)
compressedKey[0] = byte(encodedScriptSize - 2)
copy(compressedKey[1:], compressedScriptPubKey[1:])
key, err := ecc.ParsePubKey(compressedKey, ecc.S256())
if err != nil {
return nil
}
scriptPubKey := make([]byte, 67)
scriptPubKey[0] = txscript.OpData65
copy(scriptPubKey[1:], key.SerializeUncompressed())
scriptPubKey[66] = txscript.OpCheckSig
return scriptPubKey
}
// When none of the special cases apply, the script was encoded using
// the general format, so reduce the script size by the number of
// special cases and return the unmodified script.
scriptSize := int(encodedScriptSize - numSpecialScripts)
scriptPubKey := make([]byte, scriptSize)
copy(scriptPubKey, compressedScriptPubKey[bytesRead:bytesRead+scriptSize])
return scriptPubKey
}
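
As a usage example, the first P2PKH vector from TestScriptCompression further down compresses from 25 bytes to 21 and round-trips cleanly:

scriptPubKey := hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac")
compressed := make([]byte, compressedScriptSize(scriptPubKey)) // 21 bytes
putCompressedScript(compressed, scriptPubKey)
// compressed == hexToBytes("001018853670f9f3b0582c5b9ee8ce93764ac32b93"):
// the cstPayToPubKeyHash type byte followed by the 20-byte pubkey hash.
original := decompressScript(compressed) // bytes.Equal(original, scriptPubKey)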
// -----------------------------------------------------------------------------
// In order to reduce the size of stored amounts, a domain specific compression
// algorithm is used which relies on there typically being a lot of zeroes at
// end of the amounts.
//
// While this is simply exchanging one uint64 for another, the resulting value
// for typical amounts has a much smaller magnitude which results in fewer bytes
// when encoded as variable length quantity. For example, consider the amount
// of 0.1 KAS which is 10000000 sompi. Encoding 10000000 as a VLQ would take
// 4 bytes while encoding the compressed value of 8 as a VLQ only takes 1 byte.
//
// Essentially the compression is achieved by splitting the value into an
// exponent in the range [0-9] and a digit in the range [1-9], when possible,
// and encoding them in a way that can be decoded. More specifically, the
// encoding is as follows:
// - 0 is 0
// - Find the exponent, e, as the largest power of 10 that evenly divides the
// value up to a maximum of 9
// - When e < 9, the final digit can't be 0 so store it as d and remove it by
// dividing the value by 10 (call the result n). The encoded value is thus:
// 1 + 10*(9*n + d-1) + e
// - When e==9, the only thing known is the amount is not 0. The encoded value
// is thus:
// 1 + 10*(n-1) + e == 10 + 10*(n-1)
//
// Example encodings:
// (The numbers in parenthesis are the number of bytes when serialized as a VLQ)
// 0 (1) -> 0 (1) * 0.00000000 KAS
// 1000 (2) -> 4 (1) * 0.00001000 KAS
// 10000 (2) -> 5 (1) * 0.00010000 KAS
// 12345678 (4) -> 111111101(4) * 0.12345678 KAS
// 50000000 (4) -> 47 (1) * 0.50000000 KAS
// 100000000 (4) -> 9 (1) * 1.00000000 KAS
// 500000000 (5) -> 49 (1) * 5.00000000 KAS
// 1000000000 (5) -> 10 (1) * 10.00000000 KAS
// -----------------------------------------------------------------------------
// compressTxOutAmount compresses the passed amount according to the domain
// specific compression algorithm described above.
func compressTxOutAmount(amount uint64) uint64 {
// No need to do any work if it's zero.
if amount == 0 {
return 0
}
// Find the largest power of 10 (max of 9) that evenly divides the
// value.
exponent := uint64(0)
for amount%10 == 0 && exponent < 9 {
amount /= 10
exponent++
}
// The compressed result for exponents less than 9 is:
// 1 + 10*(9*n + d-1) + e
if exponent < 9 {
lastDigit := amount % 10
amount /= 10
return 1 + 10*(9*amount+lastDigit-1) + exponent
}
// The compressed result for an exponent of 9 is:
// 1 + 10*(n-1) + e == 10 + 10*(n-1)
return 10 + 10*(amount-1)
}
// decompressTxOutAmount returns the original amount the passed compressed
// amount represents according to the domain specific compression algorithm
// described above.
func decompressTxOutAmount(amount uint64) uint64 {
// No need to do any work if it's zero.
if amount == 0 {
return 0
}
// The decompressed amount is either of the following two equations:
// x = 1 + 10*(9*n + d - 1) + e
// x = 1 + 10*(n - 1) + 9
amount--
// The decompressed amount is now one of the following two equations:
// x = 10*(9*n + d - 1) + e
// x = 10*(n - 1) + 9
exponent := amount % 10
amount /= 10
// The decompressed amount is now one of the following two equations:
// x = 9*n + d - 1 | where e < 9
// x = n - 1 | where e = 9
n := uint64(0)
if exponent < 9 {
lastDigit := amount%9 + 1
amount /= 9
n = amount*10 + lastDigit
} else {
n = amount + 1
}
// Apply the exponent.
for ; exponent > 0; exponent-- {
n *= 10
}
return n
}
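
The 0.1 KAS example from the commentary above, as code:

compressed := compressTxOutAmount(10000000)   // == 8, one byte as a VLQ
original := decompressTxOutAmount(compressed) // == 10000000 again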
// -----------------------------------------------------------------------------
// Compressed transaction outputs consist of an amount and a public key script
// both compressed using the domain specific compression algorithms previously
// described.
//
// The serialized format is:
//
// <compressed amount><compressed script>
//
// Field Type Size
// compressed amount VLQ variable
// compressed script []byte variable
// -----------------------------------------------------------------------------
// compressedTxOutSize returns the number of bytes the passed transaction output
// fields would take when encoded with the format described above.
func compressedTxOutSize(amount uint64, scriptPubKey []byte) int {
return serializeSizeVLQ(compressTxOutAmount(amount)) +
compressedScriptSize(scriptPubKey)
}
// putCompressedTxOut compresses the passed amount and script according to their
// domain specific compression algorithms and encodes them directly into the
// passed target byte slice with the format described above. The target byte
// slice must be at least large enough to handle the number of bytes returned by
// the compressedTxOutSize function or it will panic.
func putCompressedTxOut(target []byte, amount uint64, scriptPubKey []byte) int {
offset := putVLQ(target, compressTxOutAmount(amount))
offset += putCompressedScript(target[offset:], scriptPubKey)
return offset
}
// decodeCompressedTxOut decodes the passed compressed txout, possibly followed
// by other data, into its uncompressed amount and script and returns them along
// with the number of bytes they occupied prior to decompression.
func decodeCompressedTxOut(serialized []byte) (uint64, []byte, int, error) {
// Deserialize the compressed amount and ensure there are bytes
// remaining for the compressed script.
compressedAmount, bytesRead := deserializeVLQ(serialized)
if bytesRead >= len(serialized) {
return 0, nil, bytesRead, errDeserialize("unexpected end of " +
"data after compressed amount")
}
// Decode the compressed script size and ensure there are enough bytes
// left in the slice for it.
scriptSize := decodeCompressedScriptSize(serialized[bytesRead:])
if len(serialized[bytesRead:]) < scriptSize {
return 0, nil, bytesRead, errDeserialize("unexpected end of " +
"data after script size")
}
// Decompress and return the amount and script.
amount := decompressTxOutAmount(compressedAmount)
script := decompressScript(serialized[bytesRead : bytesRead+scriptSize])
return amount, script, bytesRead + scriptSize, nil
}
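
Combining the two compressions, a full txout round trip per the <compressed amount><compressed script> format above (reusing the P2PKH vector from the tests below):

amount := uint64(10000000)
scriptPubKey := hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac")
target := make([]byte, compressedTxOutSize(amount, scriptPubKey))
putCompressedTxOut(target, amount, scriptPubKey)
gotAmount, gotScript, _, err := decodeCompressedTxOut(target)
// err == nil, gotAmount == amount, bytes.Equal(gotScript, scriptPubKey)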

View File

@@ -1,436 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"bytes"
"encoding/hex"
"testing"
)
// hexToBytes converts the passed hex string into bytes and will panic if there
// is an error. This is only provided for the hard-coded constants so errors in
// the source code can be detected. It will only (and must only) be called with
// hard-coded values.
func hexToBytes(s string) []byte {
b, err := hex.DecodeString(s)
if err != nil {
panic("invalid hex in source file: " + s)
}
return b
}
// TestVLQ ensures the variable length quantity serialization, deserialization,
// and size calculation works as expected.
func TestVLQ(t *testing.T) {
t.Parallel()
tests := []struct {
val uint64
serialized []byte
}{
{0, hexToBytes("00")},
{1, hexToBytes("01")},
{127, hexToBytes("7f")},
{128, hexToBytes("8000")},
{129, hexToBytes("8001")},
{255, hexToBytes("807f")},
{256, hexToBytes("8100")},
{16383, hexToBytes("fe7f")},
{16384, hexToBytes("ff00")},
{16511, hexToBytes("ff7f")}, // Max 2-byte value
{16512, hexToBytes("808000")},
{16513, hexToBytes("808001")},
{16639, hexToBytes("80807f")},
{32895, hexToBytes("80ff7f")},
{2113663, hexToBytes("ffff7f")}, // Max 3-byte value
{2113664, hexToBytes("80808000")},
{270549119, hexToBytes("ffffff7f")}, // Max 4-byte value
{270549120, hexToBytes("8080808000")},
{2147483647, hexToBytes("86fefefe7f")},
{2147483648, hexToBytes("86fefeff00")},
{4294967295, hexToBytes("8efefefe7f")}, // Max uint32, 5 bytes
// Max uint64, 10 bytes
{18446744073709551615, hexToBytes("80fefefefefefefefe7f")},
}
for _, test := range tests {
// Ensure the function to calculate the serialized size without
// actually serializing the value is calculated properly.
gotSize := serializeSizeVLQ(test.val)
if gotSize != len(test.serialized) {
t.Errorf("serializeSizeVLQ: did not get expected size "+
"for %d - got %d, want %d", test.val, gotSize,
len(test.serialized))
continue
}
// Ensure the value serializes to the expected bytes.
gotBytes := make([]byte, gotSize)
gotBytesWritten := putVLQ(gotBytes, test.val)
if !bytes.Equal(gotBytes, test.serialized) {
t.Errorf("putVLQUnchecked: did not get expected bytes "+
"for %d - got %x, want %x", test.val, gotBytes,
test.serialized)
continue
}
if gotBytesWritten != len(test.serialized) {
t.Errorf("putVLQUnchecked: did not get expected number "+
"of bytes written for %d - got %d, want %d",
test.val, gotBytesWritten, len(test.serialized))
continue
}
// Ensure the serialized bytes deserialize to the expected
// value.
gotVal, gotBytesRead := deserializeVLQ(test.serialized)
if gotVal != test.val {
t.Errorf("deserializeVLQ: did not get expected value "+
"for %x - got %d, want %d", test.serialized,
gotVal, test.val)
continue
}
if gotBytesRead != len(test.serialized) {
t.Errorf("deserializeVLQ: did not get expected number "+
"of bytes read for %d - got %d, want %d",
test.serialized, gotBytesRead,
len(test.serialized))
continue
}
}
}
// TestScriptCompression ensures the domain-specific script compression and
// decompression works as expected.
func TestScriptCompression(t *testing.T) {
t.Parallel()
tests := []struct {
name string
uncompressed []byte
compressed []byte
}{
{
name: "nil",
uncompressed: nil,
compressed: hexToBytes("06"),
},
{
name: "pay-to-pubkey-hash 1",
uncompressed: hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac"),
compressed: hexToBytes("001018853670f9f3b0582c5b9ee8ce93764ac32b93"),
},
{
name: "pay-to-pubkey-hash 2",
uncompressed: hexToBytes("76a914e34cce70c86373273efcc54ce7d2a491bb4a0e8488ac"),
compressed: hexToBytes("00e34cce70c86373273efcc54ce7d2a491bb4a0e84"),
},
{
name: "pay-to-script-hash 1",
uncompressed: hexToBytes("a914da1745e9b549bd0bfa1a569971c77eba30cd5a4b87"),
compressed: hexToBytes("01da1745e9b549bd0bfa1a569971c77eba30cd5a4b"),
},
{
name: "pay-to-script-hash 2",
uncompressed: hexToBytes("a914f815b036d9bbbce5e9f2a00abd1bf3dc91e9551087"),
compressed: hexToBytes("01f815b036d9bbbce5e9f2a00abd1bf3dc91e95510"),
},
{
name: "pay-to-pubkey compressed 0x02",
uncompressed: hexToBytes("2102192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4ac"),
compressed: hexToBytes("02192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
},
{
name: "pay-to-pubkey compressed 0x03",
uncompressed: hexToBytes("2103b0bd634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e65ac"),
compressed: hexToBytes("03b0bd634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e65"),
},
{
name: "pay-to-pubkey uncompressed 0x04 even",
uncompressed: hexToBytes("4104192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b40d45264838c0bd96852662ce6a847b197376830160c6d2eb5e6a4c44d33f453eac"),
compressed: hexToBytes("04192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
},
{
name: "pay-to-pubkey uncompressed 0x04 odd",
uncompressed: hexToBytes("410411db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5cb2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3ac"),
compressed: hexToBytes("0511db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c"),
},
{
name: "pay-to-pubkey invalid pubkey",
uncompressed: hexToBytes("3302aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
compressed: hexToBytes("293302aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
},
{
name: "requires 2 size bytes - data push 200 bytes",
uncompressed: append(hexToBytes("4cc8"), bytes.Repeat([]byte{0x00}, 200)...),
// [0x80, 0x50] = 208 as a variable length quantity
// [0x4c, 0xc8] = OP_PUSHDATA1 200
compressed: append(hexToBytes("80504cc8"), bytes.Repeat([]byte{0x00}, 200)...),
},
}
for _, test := range tests {
// Ensure the serialized size is calculated properly without
// actually serializing the value.
gotSize := compressedScriptSize(test.uncompressed)
if gotSize != len(test.compressed) {
t.Errorf("compressedScriptSize (%s): did not get "+
"expected size - got %d, want %d", test.name,
gotSize, len(test.compressed))
continue
}
// Ensure the script compresses to the expected bytes.
gotCompressed := make([]byte, gotSize)
gotBytesWritten := putCompressedScript(gotCompressed,
test.uncompressed)
if !bytes.Equal(gotCompressed, test.compressed) {
t.Errorf("putCompressedScript (%s): did not get "+
"expected bytes - got %x, want %x", test.name,
gotCompressed, test.compressed)
continue
}
if gotBytesWritten != len(test.compressed) {
t.Errorf("putCompressedScript (%s): did not get "+
"expected number of bytes written - got %d, "+
"want %d", test.name, gotBytesWritten,
len(test.compressed))
continue
}
// Ensure the compressed script size is properly decoded from
// the compressed script.
gotDecodedSize := decodeCompressedScriptSize(test.compressed)
if gotDecodedSize != len(test.compressed) {
t.Errorf("decodeCompressedScriptSize (%s): did not get "+
"expected size - got %d, want %d", test.name,
gotDecodedSize, len(test.compressed))
continue
}
// Ensure the script decompresses to the expected bytes.
gotDecompressed := decompressScript(test.compressed)
if !bytes.Equal(gotDecompressed, test.uncompressed) {
t.Errorf("decompressScript (%s): did not get expected "+
"bytes - got %x, want %x", test.name,
gotDecompressed, test.uncompressed)
continue
}
}
}
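// The script compression exercised above reserves six one-byte prefixes
// for standard script templates and falls back to the raw script bytes
// for everything else. The table below is an editorial summary
// reconstructed from the test vectors, not part of the original file:
//
// 0x00 <20-byte hash> pay-to-pubkey-hash
// 0x01 <20-byte hash> pay-to-script-hash
// 0x02 <32-byte x> pay-to-pubkey, compressed, even y
// 0x03 <32-byte x> pay-to-pubkey, compressed, odd y
// 0x04 <32-byte x> pay-to-pubkey, uncompressed, even y
// 0x05 <32-byte x> pay-to-pubkey, uncompressed, odd y
//
// Any other script is stored as VLQ(len(script)+6) followed by the raw
// script bytes, which is why the nil script compresses to 0x06 and the
// 202-byte data push above is preceded by the VLQ encoding of 208.
// exampleDecompressedScriptSize sketches how the uncompressed script
// size falls out of the prefix byte; the -1 return is a stand-in for
// the generic case, which decodes the VLQ length and subtracts 6.
func exampleDecompressedScriptSize(prefix byte) int {
switch prefix {
case 0x00:
return 25 // OP_DUP OP_HASH160 <20 bytes> OP_EQUALVERIFY OP_CHECKSIG
case 0x01:
return 23 // OP_HASH160 <20 bytes> OP_EQUAL
case 0x02, 0x03:
return 35 // OP_DATA_33 <33-byte pubkey> OP_CHECKSIG
case 0x04, 0x05:
return 67 // OP_DATA_65 <65-byte pubkey> OP_CHECKSIG
default:
return -1 // generic scripts: decode the VLQ length and subtract 6
}
}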
// TestScriptCompressionErrors ensures calling various functions related to
// script compression with incorrect data returns the expected results.
func TestScriptCompressionErrors(t *testing.T) {
t.Parallel()
// A nil script must result in a decoded size of 0.
if gotSize := decodeCompressedScriptSize(nil); gotSize != 0 {
t.Fatalf("decodeCompressedScriptSize with nil script did not "+
"return 0 - got %d", gotSize)
}
// A nil script must result in a nil decompressed script.
if gotScript := decompressScript(nil); gotScript != nil {
t.Fatalf("decompressScript with nil script did not return nil "+
"decompressed script - got %x", gotScript)
}
// A compressed script for a pay-to-pubkey (uncompressed) that results
// in an invalid pubkey must result in a nil decompressed script.
compressedScript := hexToBytes("04012d74d0cb94344c9569c2e77901573d8d" +
"7903c3ebec3a957724895dca52c6b4")
if gotScript := decompressScript(compressedScript); gotScript != nil {
t.Fatalf("decompressScript with compressed pay-to-"+
"uncompressed-pubkey that is invalid did not return "+
"nil decompressed script - got %x", gotScript)
}
}
// TestAmountCompression ensures the domain-specific transaction output amount
// compression and decompression work as expected.
func TestAmountCompression(t *testing.T) {
t.Parallel()
tests := []struct {
name string
uncompressed uint64
compressed uint64
}{
{
name: "0 KAS",
uncompressed: 0,
compressed: 0,
},
{
name: "546 Sompi (current network dust value)",
uncompressed: 546,
compressed: 4911,
},
{
name: "0.00001 KAS (typical transaction fee)",
uncompressed: 1000,
compressed: 4,
},
{
name: "0.0001 KAS (typical transaction fee)",
uncompressed: 10000,
compressed: 5,
},
{
name: "0.12345678 KAS",
uncompressed: 12345678,
compressed: 111111101,
},
{
name: "0.5 KAS",
uncompressed: 50000000,
compressed: 48,
},
{
name: "1 KAS",
uncompressed: 100000000,
compressed: 9,
},
{
name: "5 KAS",
uncompressed: 500000000,
compressed: 49,
},
{
name: "21000000 KAS (max minted coins)",
uncompressed: 2100000000000000,
compressed: 21000000,
},
}
for _, test := range tests {
// Ensure the amount compresses to the expected value.
gotCompressed := compressTxOutAmount(test.uncompressed)
if gotCompressed != test.compressed {
t.Errorf("compressTxOutAmount (%s): did not get "+
"expected value - got %d, want %d", test.name,
gotCompressed, test.compressed)
continue
}
// Ensure the value decompresses to the expected value.
gotDecompressed := decompressTxOutAmount(test.compressed)
if gotDecompressed != test.uncompressed {
t.Errorf("decompressTxOutAmount (%s): did not get "+
"expected value - got %d, want %d", test.name,
gotDecompressed, test.uncompressed)
continue
}
}
}
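// The amount codec exercised above exploits the fact that real amounts
// are usually round numbers of sompi: up to nine trailing decimal zeros
// are folded into a small exponent, and the lowest non-zero digit is
// packed beside it. exampleCompressAmount is an editorial reconstruction
// that reproduces every vector in this test; the canonical
// implementation is compressTxOutAmount in this package.
func exampleCompressAmount(amount uint64) uint64 {
if amount == 0 {
return 0
}
// Strip up to nine factors of ten, remembering how many were removed.
exponent := uint64(0)
for amount%10 == 0 && exponent < 9 {
amount /= 10
exponent++
}
if exponent < 9 {
lastDigit := amount % 10 // in 1..9, since amount%10 != 0 here
amount /= 10
return 1 + 10*(9*amount+lastDigit-1) + exponent
}
return 10 + 10*(amount-1)
}
// For example, 546 sompi has exponent 0, last digit 6 and remaining
// amount 54, giving 1 + 10*(9*54+6-1) + 0 = 4911, the dust vector above.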
// TestCompressedTxOut ensures the transaction output serialization and
// deserialization work as expected.
func TestCompressedTxOut(t *testing.T) {
t.Parallel()
tests := []struct {
name string
amount uint64
scriptPubKey []byte
compressed []byte
}{
{
name: "pay-to-pubkey-hash dust",
amount: 546,
scriptPubKey: hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac"),
compressed: hexToBytes("a52f001018853670f9f3b0582c5b9ee8ce93764ac32b93"),
},
{
name: "pay-to-pubkey uncompressed 1 KAS",
amount: 100000000,
scriptPubKey: hexToBytes("4104192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b40d45264838c0bd96852662ce6a847b197376830160c6d2eb5e6a4c44d33f453eac"),
compressed: hexToBytes("0904192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
},
}
for _, test := range tests {
// Ensure the serialized size is calculated properly without
// actually serializing the txout.
gotSize := compressedTxOutSize(test.amount, test.scriptPubKey)
if gotSize != len(test.compressed) {
t.Errorf("compressedTxOutSize (%s): did not get "+
"expected size - got %d, want %d", test.name,
gotSize, len(test.compressed))
continue
}
// Ensure the txout compresses to the expected value.
gotCompressed := make([]byte, gotSize)
gotBytesWritten := putCompressedTxOut(gotCompressed,
test.amount, test.scriptPubKey)
if !bytes.Equal(gotCompressed, test.compressed) {
t.Errorf("compressTxOut (%s): did not get expected "+
"bytes - got %x, want %x", test.name,
gotCompressed, test.compressed)
continue
}
if gotBytesWritten != len(test.compressed) {
t.Errorf("compressTxOut (%s): did not get expected "+
"number of bytes written - got %d, want %d",
test.name, gotBytesWritten,
len(test.compressed))
continue
}
// Ensure the serialized bytes are decoded back to the expected
// uncompressed values.
gotAmount, gotScript, gotBytesRead, err := decodeCompressedTxOut(
test.compressed)
if err != nil {
t.Errorf("decodeCompressedTxOut (%s): unexpected "+
"error: %v", test.name, err)
continue
}
if gotAmount != test.amount {
t.Errorf("decodeCompressedTxOut (%s): did not get "+
"expected amount - got %d, want %d",
test.name, gotAmount, test.amount)
continue
}
if !bytes.Equal(gotScript, test.scriptPubKey) {
t.Errorf("decodeCompressedTxOut (%s): did not get "+
"expected script - got %x, want %x",
test.name, gotScript, test.scriptPubKey)
continue
}
if gotBytesRead != len(test.compressed) {
t.Errorf("decodeCompressedTxOut (%s): did not get "+
"expected number of bytes read - got %d, want %d",
test.name, gotBytesRead, len(test.compressed))
continue
}
}
}
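// A compressed txout is the VLQ encoding of the compressed amount
// followed by the compressed script. Decomposing the first vector above
// (an editorial note, not part of the original file):
//
// a5 2f -> VLQ for 4911 = compressTxOutAmount(546)
// 00 1018...2b93 -> prefix 0x00 plus the 20-byte pubkey hash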
// TestTxOutCompressionErrors ensures calling various functions related to
// txout compression with incorrect data returns the expected results.
func TestTxOutCompressionErrors(t *testing.T) {
t.Parallel()
// A compressed txout with missing compressed script must error.
compressedTxOut := hexToBytes("00")
_, _, _, err := decodeCompressedTxOut(compressedTxOut)
if !isDeserializeErr(err) {
t.Fatalf("decodeCompressedTxOut with missing compressed script "+
"did not return expected error type - got %T, want "+
"errDeserialize", err)
}
// A compressed txout with short compressed script must error.
compressedTxOut = hexToBytes("0010")
_, _, _, err = decodeCompressedTxOut(compressedTxOut)
if !isDeserializeErr(err) {
t.Fatalf("decodeCompressedTxOut with short compressed script "+
"did not return expected error type - got %T, want "+
"errDeserialize", err)
}
}


@@ -11,12 +11,14 @@ import (
"sync"
"time"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
@@ -59,9 +61,8 @@ type BlockDAG struct {
// The following fields are set when the instance is created and can't
// be changed afterwards, so there is no need to protect them with a
// separate mutex.
db database.DB
dagParams *dagconfig.Params
timeSource MedianTimeSource
timeSource TimeSource
sigCache *txscript.SigCache
indexManager IndexManager
genesis *blockNode
@@ -150,9 +151,12 @@ type BlockDAG struct {
lastFinalityPoint *blockNode
SubnetworkStore *SubnetworkStore
utxoDiffStore *utxoDiffStore
reachabilityStore *reachabilityStore
multisetStore *multisetStore
recentBlockProcessingTimestamps []time.Time
startTime time.Time
}
// IsKnownBlock returns whether or not the DAG instance has the block represented
@@ -486,25 +490,24 @@ func (dag *BlockDAG) addBlock(node *blockNode,
if err != nil {
if errors.As(err, &RuleError{}) {
dag.index.SetStatusFlags(node, statusValidateFailed)
} else {
return nil, err
}
} else {
dag.blockCount++
}
// Intentionally ignore errors writing updated node status to DB. If
// it fails to write, it's not the end of the world. If the block is
// invalid, the worst that can happen is we revalidate the block
// after a restart.
if writeErr := dag.index.flushToDB(); writeErr != nil {
log.Warnf("Error flushing block index changes to disk: %s",
writeErr)
}
// If dag.connectBlock returned a rule error, return it here after updating DB
if err != nil {
dbTx, err := dbaccess.NewTx()
if err != nil {
return nil, err
}
defer dbTx.RollbackUnlessClosed()
err = dag.index.flushToDB(dbTx)
if err != nil {
return nil, err
}
err = dbTx.Commit()
if err != nil {
return nil, err
}
}
return nil, err
}
dag.blockCount++
return chainUpdates, nil
}
@@ -571,14 +574,14 @@ func (dag *BlockDAG) connectBlock(node *blockNode,
return nil, err
}
newBlockUTXO, txsAcceptanceData, newBlockFeeData, err := node.verifyAndBuildUTXO(dag, block.Transactions(), fastAdd)
newBlockPastUTXO, txsAcceptanceData, newBlockFeeData, newBlockMultiSet, err :=
node.verifyAndBuildUTXO(dag, block.Transactions(), fastAdd)
if err != nil {
newErrString := fmt.Sprintf("error verifying UTXO for %s: %s", node, err)
var ruleErr RuleError
if ok := errors.As(err, &ruleErr); ok {
return nil, ruleError(ruleErr.ErrorCode, newErrString)
return nil, ruleError(ruleErr.ErrorCode, fmt.Sprintf("error verifying UTXO for %s: %s", node, err))
}
return nil, errors.New(newErrString)
return nil, errors.Wrapf(err, "error verifying UTXO for %s", node)
}
err = node.validateCoinbaseTransaction(dag, block, txsAcceptanceData)
@@ -587,7 +590,8 @@ func (dag *BlockDAG) connectBlock(node *blockNode,
}
// Apply all changes to the DAG.
virtualUTXODiff, virtualTxsAcceptanceData, chainUpdates, err := dag.applyDAGChanges(node, newBlockUTXO, selectedParentAnticone)
virtualUTXODiff, chainUpdates, err :=
dag.applyDAGChanges(node, newBlockPastUTXO, newBlockMultiSet, selectedParentAnticone)
if err != nil {
// Since all validation logic has already ran, if applyDAGChanges errors out,
// this means we have a problem in the internal structure of the DAG - a problem which is
@@ -596,7 +600,7 @@ func (dag *BlockDAG) connectBlock(node *blockNode,
panic(err)
}
err = dag.saveChangesFromBlock(block, virtualUTXODiff, txsAcceptanceData, virtualTxsAcceptanceData, newBlockFeeData)
err = dag.saveChangesFromBlock(block, virtualUTXODiff, txsAcceptanceData, newBlockFeeData)
if err != nil {
return nil, err
}
@@ -604,77 +608,167 @@ func (dag *BlockDAG) connectBlock(node *blockNode,
return chainUpdates, nil
}
func (dag *BlockDAG) saveChangesFromBlock(block *util.Block, virtualUTXODiff *UTXODiff,
txsAcceptanceData MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData MultiBlockTxsAcceptanceData,
feeData compactFeeData) error {
// calcMultiset returns the multiset of the past UTXO of the given block.
func (node *blockNode) calcMultiset(dag *BlockDAG, acceptanceData MultiBlockTxsAcceptanceData,
selectedParentPastUTXO UTXOSet) (*secp256k1.MultiSet, error) {
// Write any block status changes to DB before updating the DAG state.
err := dag.index.flushToDB()
return node.pastUTXOMultiSet(dag, acceptanceData, selectedParentPastUTXO)
}
func (node *blockNode) pastUTXOMultiSet(dag *BlockDAG, acceptanceData MultiBlockTxsAcceptanceData,
selectedParentPastUTXO UTXOSet) (*secp256k1.MultiSet, error) {
ms, err := node.selectedParentMultiset(dag)
if err != nil {
return err
return nil, err
}
// Atomically insert info into the database.
err = dag.db.Update(func(dbTx database.Tx) error {
err := dag.utxoDiffStore.flushToDB(dbTx)
if err != nil {
return err
}
for _, blockAcceptanceData := range acceptanceData {
for _, txAcceptanceData := range blockAcceptanceData.TxAcceptanceData {
if !txAcceptanceData.IsAccepted {
continue
}
err = dag.reachabilityStore.flushToDB(dbTx)
if err != nil {
return err
}
tx := txAcceptanceData.Tx.MsgTx()
// Update best block state.
state := &dagState{
TipHashes: dag.TipHashes(),
LastFinalityPoint: dag.lastFinalityPoint.hash,
}
err = dbPutDAGState(dbTx, state)
if err != nil {
return err
}
// Update the UTXO set using the diffSet that was melded into the
// full UTXO set.
err = dbPutUTXODiff(dbTx, virtualUTXODiff)
if err != nil {
return err
}
// Scan all accepted transactions and register any subnetwork registry
// transaction. If any subnetwork registry transaction is not well-formed,
// fail the entire block.
err = registerSubnetworks(dbTx, block.Transactions())
if err != nil {
return err
}
blockID, err := createBlockID(dbTx, block.Hash())
if err != nil {
return err
}
// Allow the index manager to call each of the currently active
// optional indexes with the block being connected so they can
// update themselves accordingly.
if dag.indexManager != nil {
err := dag.indexManager.ConnectBlock(dbTx, block, blockID, dag, txsAcceptanceData, virtualTxsAcceptanceData)
var err error
ms, err = addTxToMultiset(ms, tx, selectedParentPastUTXO, node.blueScore)
if err != nil {
return err
return nil, err
}
}
}
return ms, nil
}
// Apply the fee data to the database
return dbStoreFeeData(dbTx, block.Hash(), feeData)
})
// selectedParentMultiset returns the multiset of the node's selected
// parent. If the node is the genesis blockNode then it does not have
// a selected parent, in which case a new, empty multiset is returned.
func (node *blockNode) selectedParentMultiset(dag *BlockDAG) (*secp256k1.MultiSet, error) {
if node.isGenesis() {
return secp256k1.NewMultiset(), nil
}
ms, err := dag.multisetStore.multisetByBlockNode(node.selectedParent)
if err != nil {
return nil, err
}
return ms, nil
}
func addTxToMultiset(ms *secp256k1.MultiSet, tx *wire.MsgTx, pastUTXO UTXOSet, blockBlueScore uint64) (*secp256k1.MultiSet, error) {
isCoinbase := tx.IsCoinBase()
if !isCoinbase {
for _, txIn := range tx.TxIn {
entry, ok := pastUTXO.Get(txIn.PreviousOutpoint)
if !ok {
return nil, errors.Errorf("Couldn't find entry for outpoint %s", txIn.PreviousOutpoint)
}
var err error
ms, err = removeUTXOFromMultiset(ms, entry, &txIn.PreviousOutpoint)
if err != nil {
return nil, err
}
}
}
for i, txOut := range tx.TxOut {
outpoint := *wire.NewOutpoint(tx.TxID(), uint32(i))
entry := NewUTXOEntry(txOut, isCoinbase, blockBlueScore)
var err error
ms, err = addUTXOToMultiset(ms, entry, &outpoint)
if err != nil {
return nil, err
}
}
return ms, nil
}
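// Editorial note, not part of the diff: the multiset used above behaves
// as an order-independent incremental hash, so UTXO entries can be
// added and removed in any order, and removing exactly what was added
// returns the multiset to its previous hash. That property is what lets
// calcMultiset start from the selected parent's stored multiset and
// apply only this block's accepted transactions, instead of rehashing
// the entire UTXO set for every block.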
func (dag *BlockDAG) saveChangesFromBlock(block *util.Block, virtualUTXODiff *UTXODiff,
txsAcceptanceData MultiBlockTxsAcceptanceData, feeData compactFeeData) error {
dbTx, err := dbaccess.NewTx()
if err != nil {
return err
}
defer dbTx.RollbackUnlessClosed()
err = dag.index.flushToDB(dbTx)
if err != nil {
return err
}
err = dag.utxoDiffStore.flushToDB(dbTx)
if err != nil {
return err
}
err = dag.reachabilityStore.flushToDB(dbTx)
if err != nil {
return err
}
err = dag.multisetStore.flushToDB(dbTx)
if err != nil {
return err
}
// Update DAG state.
state := &dagState{
TipHashes: dag.TipHashes(),
LastFinalityPoint: dag.lastFinalityPoint.hash,
LocalSubnetworkID: dag.subnetworkID,
}
err = saveDAGState(dbTx, state)
if err != nil {
return err
}
// Update the UTXO set using the diffSet that was melded into the
// full UTXO set.
err = updateUTXOSet(dbTx, virtualUTXODiff)
if err != nil {
return err
}
// Scan all accepted transactions and register any subnetwork registry
// transaction. If any subnetwork registry transaction is not well-formed,
// fail the entire block.
err = registerSubnetworks(dbTx, block.Transactions())
if err != nil {
return err
}
// Allow the index manager to call each of the currently active
// optional indexes with the block being connected so they can
// update themselves accordingly.
if dag.indexManager != nil {
err := dag.indexManager.ConnectBlock(dbTx, block.Hash(), txsAcceptanceData)
if err != nil {
return err
}
}
// Apply the fee data to the database
err = dbaccess.StoreFeeData(dbTx, block.Hash(), feeData)
if err != nil {
return err
}
err = dbTx.Commit()
if err != nil {
return err
}
dag.index.clearDirtyEntries()
dag.utxoDiffStore.clearDirtyEntries()
dag.utxoDiffStore.clearOldEntries()
dag.reachabilityStore.clearDirtyEntries()
dag.multisetStore.clearNewEntries()
return nil
}
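// Editorial sketch, not part of the diff: the rewritten
// saveChangesFromBlock above follows a flush-then-commit-then-clear
// discipline. Each store writes its dirty entries into a single
// dbaccess transaction, and the in-memory dirty sets are cleared only
// after Commit succeeds, so a failed commit leaves entries marked dirty
// for the next flush attempt. Reduced to its skeleton (flushDirtyStores
// is a hypothetical name, and the remaining stores are elided):
func (dag *BlockDAG) flushDirtyStores() error {
dbTx, err := dbaccess.NewTx()
if err != nil {
return err
}
defer dbTx.RollbackUnlessClosed()
if err := dag.index.flushToDB(dbTx); err != nil {
return err
}
if err := dag.utxoDiffStore.flushToDB(dbTx); err != nil {
return err
}
if err := dbTx.Commit(); err != nil {
return err
}
// Only after a successful commit is it safe to forget what was dirty.
dag.index.clearDirtyEntries()
dag.utxoDiffStore.clearDirtyEntries()
return nil
}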
@@ -698,7 +792,7 @@ func (dag *BlockDAG) validateGasLimit(block *util.Block) error {
if !msgTx.SubnetworkID.IsEqual(currentSubnetworkID) {
currentSubnetworkID = &msgTx.SubnetworkID
currentGasUsage = 0
currentSubnetworkGasLimit, err = dag.SubnetworkStore.GasLimit(currentSubnetworkID)
currentSubnetworkGasLimit, err = GasLimit(currentSubnetworkID)
if err != nil {
return errors.Errorf("Error getting gas limit for subnetworkID '%s': %s", currentSubnetworkID, err)
}
@@ -778,9 +872,9 @@ func (dag *BlockDAG) finalizeNodesBelowFinalityPoint(deleteDiffData bool) {
for parent := range dag.lastFinalityPoint.parents {
queue = append(queue, parent)
}
var blockHashesToDelete []*daghash.Hash
var nodesToDelete []*blockNode
if deleteDiffData {
blockHashesToDelete = make([]*daghash.Hash, 0, dag.dagParams.FinalityInterval)
nodesToDelete = make([]*blockNode, 0, dag.dagParams.FinalityInterval)
}
for len(queue) > 0 {
var current *blockNode
@@ -788,7 +882,7 @@ func (dag *BlockDAG) finalizeNodesBelowFinalityPoint(deleteDiffData bool) {
if !current.isFinalized {
current.isFinalized = true
if deleteDiffData {
blockHashesToDelete = append(blockHashesToDelete, current.hash)
nodesToDelete = append(nodesToDelete, current)
}
for parent := range current.parents {
queue = append(queue, parent)
@@ -796,9 +890,7 @@ func (dag *BlockDAG) finalizeNodesBelowFinalityPoint(deleteDiffData bool) {
}
}
if deleteDiffData {
err := dag.db.Update(func(dbTx database.Tx) error {
return dag.utxoDiffStore.removeBlocksDiffData(dbTx, blockHashesToDelete)
})
err := dag.utxoDiffStore.removeBlocksDiffData(dbaccess.NoTx(), nodesToDelete)
if err != nil {
panic(fmt.Sprintf("Error removing diff data from utxoDiffStore: %s", err))
}
@@ -851,7 +943,7 @@ func (dag *BlockDAG) NextAcceptedIDMerkleRootNoLock() (*daghash.Hash, error) {
//
// This function MUST be called with the DAG read-lock held
func (dag *BlockDAG) TxsAcceptedByVirtual() (MultiBlockTxsAcceptanceData, error) {
_, txsAcceptanceData, err := dag.pastUTXO(&dag.virtual.blockNode)
_, _, txsAcceptanceData, err := dag.pastUTXO(&dag.virtual.blockNode)
return txsAcceptanceData, err
}
@@ -863,7 +955,7 @@ func (dag *BlockDAG) TxsAcceptedByBlockHash(blockHash *daghash.Hash) (MultiBlock
if node == nil {
return nil, errors.Errorf("Couldn't find block %s", blockHash)
}
_, txsAcceptanceData, err := dag.pastUTXO(node)
_, _, txsAcceptanceData, err := dag.pastUTXO(node)
return txsAcceptanceData, err
}
@@ -874,38 +966,41 @@ func (dag *BlockDAG) TxsAcceptedByBlockHash(blockHash *daghash.Hash) (MultiBlock
// 4. Updates each of the tips' utxoDiff.
// 5. Applies the new virtual's blue score to all the unaccepted UTXOs
// 6. Adds the block to the reachability structures
// 7. Updates the finality point of the DAG (if required).
// 7. Adds the multiset of the block to the multiset store.
// 8. Updates the finality point of the DAG (if required).
//
// It returns the diff in the virtual block's UTXO set.
//
// This function MUST be called with the DAG state lock held (for writes).
func (dag *BlockDAG) applyDAGChanges(node *blockNode, newBlockUTXO UTXOSet, selectedParentAnticone []*blockNode) (
virtualUTXODiff *UTXODiff, virtualTxsAcceptanceData MultiBlockTxsAcceptanceData,
chainUpdates *chainUpdates, err error) {
func (dag *BlockDAG) applyDAGChanges(node *blockNode, newBlockPastUTXO UTXOSet,
newBlockMultiset *secp256k1.MultiSet, selectedParentAnticone []*blockNode) (
virtualUTXODiff *UTXODiff, chainUpdates *chainUpdates, err error) {
// Add the block to the reachability structures
err = dag.updateReachability(node, selectedParentAnticone)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "failed updating reachability")
return nil, nil, errors.Wrap(err, "failed updating reachability")
}
if err = node.updateParents(dag, newBlockUTXO); err != nil {
return nil, nil, nil, errors.Wrapf(err, "failed updating parents of %s", node)
dag.multisetStore.setMultiset(node, newBlockMultiset)
if err = node.updateParents(dag, newBlockPastUTXO); err != nil {
return nil, nil, errors.Wrapf(err, "failed updating parents of %s", node)
}
// Update the virtual block's parents (the DAG tips) to include the new block.
chainUpdates = dag.virtual.AddTip(node)
// Build a UTXO set for the new virtual block
newVirtualUTXO, virtualTxsAcceptanceData, err := dag.pastUTXO(&dag.virtual.blockNode)
newVirtualUTXO, _, _, err := dag.pastUTXO(&dag.virtual.blockNode)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "could not restore past UTXO for virtual")
return nil, nil, errors.Wrap(err, "could not restore past UTXO for virtual")
}
// Apply new utxoDiffs to all the tips
err = updateTipsUTXO(dag, newVirtualUTXO)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "failed updating the tips' UTXO")
return nil, nil, errors.Wrap(err, "failed updating the tips' UTXO")
}
// It is now safe to meld the UTXO set to base.
@@ -913,7 +1008,7 @@ func (dag *BlockDAG) applyDAGChanges(node *blockNode, newBlockUTXO UTXOSet, sele
virtualUTXODiff = diffSet.UTXODiff
err = dag.meldVirtualUTXO(diffSet)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "failed melding the virtual UTXO")
return nil, nil, errors.Wrap(err, "failed melding the virtual UTXO")
}
dag.index.SetStatusFlags(node, statusValid)
@@ -921,7 +1016,7 @@ func (dag *BlockDAG) applyDAGChanges(node *blockNode, newBlockUTXO UTXOSet, sele
// And now we can update the finality point of the DAG (if required)
dag.updateFinalityPoint()
return virtualUTXODiff, virtualTxsAcceptanceData, chainUpdates, nil
return virtualUTXODiff, chainUpdates, nil
}
func (dag *BlockDAG) meldVirtualUTXO(newVirtualUTXODiffSet *DiffUTXOSet) error {
@@ -930,60 +1025,60 @@ func (dag *BlockDAG) meldVirtualUTXO(newVirtualUTXODiffSet *DiffUTXOSet) error {
return newVirtualUTXODiffSet.meldToBase()
}
func (node *blockNode) diffFromTxs(pastUTXO UTXOSet, transactions []*util.Tx) (*UTXODiff, error) {
diff := NewUTXODiff()
for _, tx := range transactions {
txDiff, err := pastUTXO.diffFromTx(tx.MsgTx(), UnacceptedBlueScore)
if err != nil {
return nil, err
// checkDoubleSpendsWithBlockPast checks that each block transaction
// has a corresponding UTXO in the block pastUTXO.
func checkDoubleSpendsWithBlockPast(pastUTXO UTXOSet, blockTransactions []*util.Tx) error {
for _, tx := range blockTransactions {
if tx.IsCoinBase() {
continue
}
diff, err = diff.WithDiff(txDiff)
if err != nil {
return nil, err
for _, txIn := range tx.MsgTx().TxIn {
if _, ok := pastUTXO.Get(txIn.PreviousOutpoint); !ok {
return ruleError(ErrMissingTxOut, fmt.Sprintf("missing transaction "+
"output %s in the utxo set", txIn.PreviousOutpoint))
}
}
}
return diff, nil
return nil
}
// verifyAndBuildUTXO verifies all transactions in the given block and builds its UTXO
// to save extra traversals it returns the transactions acceptance data and the compactFeeData for the new block
// to save extra traversals it returns the transactions acceptance data, the compactFeeData
// for the new block and its multiset.
func (node *blockNode) verifyAndBuildUTXO(dag *BlockDAG, transactions []*util.Tx, fastAdd bool) (
newBlockUTXO UTXOSet, txsAcceptanceData MultiBlockTxsAcceptanceData, newBlockFeeData compactFeeData, err error) {
newBlockUTXO UTXOSet, txsAcceptanceData MultiBlockTxsAcceptanceData, newBlockFeeData compactFeeData, multiset *secp256k1.MultiSet, err error) {
pastUTXO, txsAcceptanceData, err := dag.pastUTXO(node)
pastUTXO, selectedParentPastUTXO, txsAcceptanceData, err := dag.pastUTXO(node)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, err
}
err = node.validateAcceptedIDMerkleRoot(dag, txsAcceptanceData)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, err
}
feeData, err := dag.checkConnectToPastUTXO(node, pastUTXO, transactions, fastAdd)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, err
}
diffFromTxs, err := node.diffFromTxs(pastUTXO, transactions)
multiset, err = node.calcMultiset(dag, txsAcceptanceData, selectedParentPastUTXO)
if err != nil {
return nil, nil, nil, err
}
utxo, err := pastUTXO.WithDiff(diffFromTxs)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, err
}
calculatedMultisetHash := utxo.Multiset().Hash()
calculatedMultisetHash := daghash.Hash(*multiset.Finalize())
if !calculatedMultisetHash.IsEqual(node.utxoCommitment) {
str := fmt.Sprintf("block %s UTXO commitment is invalid - block "+
"header indicates %s, but calculated value is %s", node.hash,
node.utxoCommitment, calculatedMultisetHash)
return nil, nil, nil, ruleError(ErrBadUTXOCommitment, str)
return nil, nil, nil, nil, ruleError(ErrBadUTXOCommitment, str)
}
return utxo, txsAcceptanceData, feeData, nil
return pastUTXO, txsAcceptanceData, feeData, multiset, nil
}
// TxAcceptanceData stores a transaction together with an indication
@@ -1026,51 +1121,46 @@ func genesisPastUTXO(virtual *virtualBlock) UTXOSet {
return genesisPastUTXO
}
func (node *blockNode) fetchBlueBlocks(db database.DB) ([]*util.Block, error) {
func (node *blockNode) fetchBlueBlocks() ([]*util.Block, error) {
blueBlocks := make([]*util.Block, len(node.blues))
err := db.View(func(dbTx database.Tx) error {
for i, blueBlockNode := range node.blues {
blueBlock, err := dbFetchBlockByNode(dbTx, blueBlockNode)
if err != nil {
return err
}
blueBlocks[i] = blueBlock
for i, blueBlockNode := range node.blues {
blueBlock, err := fetchBlockByHash(dbaccess.NoTx(), blueBlockNode.hash)
if err != nil {
return nil, err
}
return nil
})
return blueBlocks, err
blueBlocks[i] = blueBlock
}
return blueBlocks, nil
}
// applyBlueBlocks adds all transactions in the blue blocks to the selectedParent's UTXO set
// applyBlueBlocks adds all transactions in the blue blocks to the selectedParent's past UTXO set
// Purposefully ignoring failures - these are just unaccepted transactions
// Writing down which transactions were accepted or not in txsAcceptanceData
func (node *blockNode) applyBlueBlocks(acceptedSelectedParentUTXO UTXOSet, selectedParentAcceptanceData []TxAcceptanceData, blueBlocks []*util.Block) (
func (node *blockNode) applyBlueBlocks(selectedParentPastUTXO UTXOSet, blueBlocks []*util.Block) (
pastUTXO UTXOSet, multiBlockTxsAcceptanceData MultiBlockTxsAcceptanceData, err error) {
pastUTXO = acceptedSelectedParentUTXO
multiBlockTxsAcceptanceData = MultiBlockTxsAcceptanceData{BlockTxsAcceptanceData{
BlockHash: *node.selectedParent.hash,
TxAcceptanceData: selectedParentAcceptanceData,
}}
pastUTXO = selectedParentPastUTXO.(*DiffUTXOSet).cloneWithoutBase()
multiBlockTxsAcceptanceData = make(MultiBlockTxsAcceptanceData, len(blueBlocks))
// Add blueBlocks to multiBlockTxsAcceptanceData in topological order. This
// is so that anyone who iterates over it would process blocks (and transactions)
// in their order of appearance in the DAG.
// We skip the selected parent, because we calculated its UTXO in acceptSelectedParentTransactions.
for i := 1; i < len(blueBlocks); i++ {
for i := 0; i < len(blueBlocks); i++ {
blueBlock := blueBlocks[i]
transactions := blueBlock.Transactions()
blockTxsAcceptanceData := BlockTxsAcceptanceData{
BlockHash: *blueBlock.Hash(),
TxAcceptanceData: make([]TxAcceptanceData, len(transactions)),
}
for i, tx := range blueBlock.Transactions() {
isSelectedParent := i == 0
for j, tx := range blueBlock.Transactions() {
var isAccepted bool
// Coinbase transaction outputs are added to the UTXO
// only if they are in the selected parent chain.
if tx.IsCoinBase() {
if !isSelectedParent && tx.IsCoinBase() {
isAccepted = false
} else {
isAccepted, err = pastUTXO.AddTx(tx.MsgTx(), node.blueScore)
@@ -1078,9 +1168,9 @@ func (node *blockNode) applyBlueBlocks(acceptedSelectedParentUTXO UTXOSet, selec
return nil, nil, err
}
}
blockTxsAcceptanceData.TxAcceptanceData[i] = TxAcceptanceData{Tx: tx, IsAccepted: isAccepted}
blockTxsAcceptanceData.TxAcceptanceData[j] = TxAcceptanceData{Tx: tx, IsAccepted: isAccepted}
}
multiBlockTxsAcceptanceData = append(multiBlockTxsAcceptanceData, blockTxsAcceptanceData)
multiBlockTxsAcceptanceData[i] = blockTxsAcceptanceData
}
return pastUTXO, multiBlockTxsAcceptanceData, nil
@@ -1111,7 +1201,7 @@ func (node *blockNode) updateParentsDiffs(dag *BlockDAG, newBlockUTXO UTXOSet) e
return err
}
if diffChild == nil {
parentUTXO, err := dag.restoreUTXO(parent)
parentPastUTXO, err := dag.restorePastUTXO(parent)
if err != nil {
return err
}
@@ -1119,7 +1209,7 @@ func (node *blockNode) updateParentsDiffs(dag *BlockDAG, newBlockUTXO UTXOSet) e
if err != nil {
return err
}
diff, err := newBlockUTXO.diffFrom(parentUTXO)
diff, err := newBlockUTXO.diffFrom(parentPastUTXO)
if err != nil {
return err
}
@@ -1137,56 +1227,32 @@ func (node *blockNode) updateParentsDiffs(dag *BlockDAG, newBlockUTXO UTXOSet) e
// To save traversals over the blue blocks, it also returns the transaction acceptance data for
// all blue blocks
func (dag *BlockDAG) pastUTXO(node *blockNode) (
pastUTXO UTXOSet, bluesTxsAcceptanceData MultiBlockTxsAcceptanceData, err error) {
pastUTXO, selectedParentPastUTXO UTXOSet, bluesTxsAcceptanceData MultiBlockTxsAcceptanceData, err error) {
if node.isGenesis() {
return genesisPastUTXO(dag.virtual), MultiBlockTxsAcceptanceData{}, nil
}
selectedParentUTXO, err := dag.restoreUTXO(node.selectedParent)
if err != nil {
return nil, nil, err
return genesisPastUTXO(dag.virtual), nil, MultiBlockTxsAcceptanceData{}, nil
}
blueBlocks, err := node.fetchBlueBlocks(dag.db)
selectedParentPastUTXO, err = dag.restorePastUTXO(node.selectedParent)
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
selectedParent := blueBlocks[0]
acceptedSelectedParentUTXO, selectedParentAcceptanceData, err := node.acceptSelectedParentTransactions(selectedParent, selectedParentUTXO)
blueBlocks, err := node.fetchBlueBlocks()
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
return node.applyBlueBlocks(acceptedSelectedParentUTXO, selectedParentAcceptanceData, blueBlocks)
pastUTXO, bluesTxsAcceptanceData, err = node.applyBlueBlocks(selectedParentPastUTXO, blueBlocks)
if err != nil {
return nil, nil, nil, err
}
return pastUTXO, selectedParentPastUTXO, bluesTxsAcceptanceData, nil
}
func (node *blockNode) acceptSelectedParentTransactions(selectedParent *util.Block, selectedParentUTXO UTXOSet) (acceptedSelectedParentUTXO UTXOSet, txAcceptanceData []TxAcceptanceData, err error) {
diff := NewUTXODiff()
txAcceptanceData = make([]TxAcceptanceData, len(selectedParent.Transactions()))
for i, tx := range selectedParent.Transactions() {
txAcceptanceData[i] = TxAcceptanceData{
Tx: tx,
IsAccepted: true,
}
acceptanceDiff, err := selectedParentUTXO.diffFromAcceptedTx(tx.MsgTx(), node.blueScore)
if err != nil {
return nil, nil, err
}
diff, err = diff.WithDiff(acceptanceDiff)
if err != nil {
return nil, nil, err
}
}
acceptedSelectedParentUTXO, err = selectedParentUTXO.WithDiff(diff)
if err != nil {
return nil, nil, err
}
return acceptedSelectedParentUTXO, txAcceptanceData, nil
}
// restoreUTXO restores the UTXO of a given block from its diff
func (dag *BlockDAG) restoreUTXO(node *blockNode) (UTXOSet, error) {
// restorePastUTXO restores the UTXO of a given block from its diff
func (dag *BlockDAG) restorePastUTXO(node *blockNode) (UTXOSet, error) {
stack := []*blockNode{}
// Iterate over the chain of diff-children from node until virtual and add them
@@ -1214,8 +1280,8 @@ func (dag *BlockDAG) restoreUTXO(node *blockNode) (UTXOSet, error) {
if err != nil {
return nil, err
}
// Use WithDiffInPlace, otherwise copying the diffs again and again creates polynomial overhead
err = accumulatedDiff.WithDiffInPlace(diff)
// Use withDiffInPlace, otherwise copying the diffs again and again creates polynomial overhead
err = accumulatedDiff.withDiffInPlace(diff)
if err != nil {
return nil, err
}
@@ -1227,11 +1293,11 @@ func (dag *BlockDAG) restoreUTXO(node *blockNode) (UTXOSet, error) {
// updateTipsUTXO builds and applies new diff UTXOs for all the DAG's tips
func updateTipsUTXO(dag *BlockDAG, virtualUTXO UTXOSet) error {
for tip := range dag.virtual.parents {
tipUTXO, err := dag.restoreUTXO(tip)
tipPastUTXO, err := dag.restorePastUTXO(tip)
if err != nil {
return err
}
diff, err := virtualUTXO.diffFrom(tipUTXO)
diff, err := virtualUTXO.diffFrom(tipPastUTXO)
if err != nil {
return err
}
@@ -1265,14 +1331,14 @@ func (dag *BlockDAG) isCurrent() bool {
dagTimestamp = selectedTip.timestamp
}
dagTime := time.Unix(dagTimestamp, 0)
return dag.AdjustedTime().Sub(dagTime) <= isDAGCurrentMaxDiff
return dag.Now().Sub(dagTime) <= isDAGCurrentMaxDiff
}
// AdjustedTime returns the adjusted time according to
// dag.timeSource. See MedianTimeSource.AdjustedTime for
// Now returns the adjusted time according to
// dag.timeSource. See TimeSource.Now for
// more details.
func (dag *BlockDAG) AdjustedTime() time.Time {
return dag.timeSource.AdjustedTime()
func (dag *BlockDAG) Now() time.Time {
return dag.timeSource.Now()
}
// IsCurrent returns whether or not the DAG believes it is current. Several
@@ -1349,6 +1415,21 @@ func (dag *BlockDAG) BlueScoreByBlockHash(hash *daghash.Hash) (uint64, error) {
return node.blueScore, nil
}
// BluesByBlockHash returns the blues of the block for the given hash.
func (dag *BlockDAG) BluesByBlockHash(hash *daghash.Hash) ([]*daghash.Hash, error) {
node := dag.index.LookupNode(hash)
if node == nil {
return nil, errors.Errorf("block %s is unknown", hash)
}
hashes := make([]*daghash.Hash, len(node.blues))
for i, blue := range node.blues {
hashes[i] = blue.hash
}
return hashes, nil
}
// BlockConfirmationsByHash returns the confirmations number for a block with the
// given hash. See blockConfirmations for further details.
//
@@ -1393,11 +1474,6 @@ func (dag *BlockDAG) UTXOConfirmations(outpoint *wire.Outpoint) (uint64, bool) {
return confirmations, true
}
// UTXOCommitment returns a commitment to the dag's current UTXOSet
func (dag *BlockDAG) UTXOCommitment() string {
return dag.UTXOSet().UTXOMultiset.Hash().String()
}
// blockConfirmations returns the current confirmations number of the given node
// The confirmations number is defined as follows:
// * If the node is in the selected tip red set -> 0
@@ -1810,8 +1886,23 @@ func (dag *BlockDAG) SubnetworkID() *subnetworkid.SubnetworkID {
return dag.subnetworkID
}
// ForEachHash runs the given fn on every hash that's currently known to
// the DAG.
//
// This function is NOT safe for concurrent access. It is meant to be
// used either on initialization or when the dag lock is held for reads.
func (dag *BlockDAG) ForEachHash(fn func(hash daghash.Hash) error) error {
for hash := range dag.index.index {
err := fn(hash)
if err != nil {
return err
}
}
return nil
}
func (dag *BlockDAG) addDelayedBlock(block *util.Block, delay time.Duration) error {
processTime := dag.AdjustedTime().Add(delay)
processTime := dag.Now().Add(delay)
log.Debugf("Adding block to delayed blocks queue (block hash: %s, process time: %s)", block.Hash().String(), processTime)
delayedBlock := &delayedBlock{
block: block,
@@ -1829,7 +1920,7 @@ func (dag *BlockDAG) processDelayedBlocks() error {
// Check if the delayed block with the earliest process time should be processed
for dag.delayedBlocksQueue.Len() > 0 {
earliestDelayedBlockProcessTime := dag.peekDelayedBlock().processTime
if earliestDelayedBlockProcessTime.After(dag.AdjustedTime()) {
if earliestDelayedBlockProcessTime.After(dag.Now()) {
break
}
delayedBlock := dag.popDelayedBlock()
@@ -1863,25 +1954,16 @@ func (dag *BlockDAG) peekDelayedBlock() *delayedBlock {
// connected to the DAG for the purpose of supporting optional indexes.
type IndexManager interface {
// Init is invoked during DAG initialize in order to allow the index
// manager to initialize itself and any indexes it is managing. The
// channel parameter specifies a channel the caller can close to signal
// that the process should be interrupted. It can be nil if that
// behavior is not desired.
Init(database.DB, *BlockDAG, <-chan struct{}) error
// manager to initialize itself and any indexes it is managing.
Init(*BlockDAG) error
// ConnectBlock is invoked when a new block has been connected to the
// DAG.
ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *BlockDAG, acceptedTxsData MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData MultiBlockTxsAcceptanceData) error
ConnectBlock(dbContext *dbaccess.TxContext, blockHash *daghash.Hash, acceptedTxsData MultiBlockTxsAcceptanceData) error
}
// Config is a descriptor which specifies the blockDAG instance configuration.
type Config struct {
// DB defines the database which houses the blocks and will be used to
// store all metadata created by this package such as the utxo set.
//
// This field is required.
DB database.DB
// Interrupt specifies a channel the caller can close to signal that
// long running operations, such as catching up indexes or performing
// database migrations, should be interrupted.
@@ -1895,13 +1977,9 @@ type Config struct {
// This field is required.
DAGParams *dagconfig.Params
// TimeSource defines the median time source to use for things such as
// TimeSource defines the time source to use for things such as
// block processing and determining whether or not the DAG is current.
//
// The caller is expected to keep a reference to the time source as well
// and add time samples from other peers on the network so the local
// time is adjusted to be in agreement with other peers.
TimeSource MedianTimeSource
TimeSource TimeSource
// SigCache defines a signature cache to use when when validating
// signatures. This is typically most useful when individual
@@ -1929,22 +2007,18 @@ type Config struct {
// New returns a BlockDAG instance using the provided configuration details.
func New(config *Config) (*BlockDAG, error) {
// Enforce required config fields.
if config.DB == nil {
return nil, AssertError("BlockDAG.New database is nil")
}
if config.DAGParams == nil {
return nil, AssertError("BlockDAG.New DAG parameters nil")
return nil, errors.New("BlockDAG.New DAG parameters nil")
}
if config.TimeSource == nil {
return nil, AssertError("BlockDAG.New timesource is nil")
return nil, errors.New("BlockDAG.New timesource is nil")
}
params := config.DAGParams
targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second)
index := newBlockIndex(config.DB, params)
index := newBlockIndex(params)
dag := &BlockDAG{
db: config.DB,
dagParams: params,
timeSource: config.TimeSource,
sigCache: config.SigCache,
@@ -1961,13 +2035,14 @@ func New(config *Config) (*BlockDAG, error) {
warningCaches: newThresholdCaches(vbNumBits),
deploymentCaches: newThresholdCaches(dagconfig.DefinedDeployments),
blockCount: 0,
SubnetworkStore: newSubnetworkStore(config.DB),
subnetworkID: config.SubnetworkID,
startTime: time.Now(),
}
dag.virtual = newVirtualBlock(dag, nil)
dag.utxoDiffStore = newUTXODiffStore(dag)
dag.reachabilityStore = newReachabilityStore(dag)
dag.multisetStore = newMultisetStore(dag)
// Initialize the DAG state from the passed database. When the db
// does not yet contain any DAG state, both it and the DAG state
@@ -1976,19 +2051,11 @@ func New(config *Config) (*BlockDAG, error) {
if err != nil {
return nil, err
}
defer func() {
if err != nil {
err := dag.removeDAGState()
if err != nil {
panic(fmt.Sprintf("Couldn't remove the DAG State: %s", err))
}
}
}()
// Initialize and catch up all of the currently active optional indexes
// as needed.
if config.IndexManager != nil {
err = config.IndexManager.Init(dag.db, dag, config.Interrupt)
err = config.IndexManager.Init(dag)
if err != nil {
return nil, err
}


@@ -6,14 +6,17 @@ package blockdag
import (
"fmt"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/pkg/errors"
"math"
"os"
"path/filepath"
"reflect"
"testing"
"time"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
@@ -40,7 +43,7 @@ func TestBlockCount(t *testing.T) {
}
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestBlockCount", Config{
dag, teardownFunc, err := DAGSetup("TestBlockCount", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
@@ -93,7 +96,7 @@ func TestIsKnownBlock(t *testing.T) {
}
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("haveblock", Config{
dag, teardownFunc, err := DAGSetup("haveblock", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
@@ -204,7 +207,7 @@ func TestIsKnownBlock(t *testing.T) {
{hash: dagconfig.SimnetParams.GenesisHash.String(), want: true},
// Block 3b should be present (as a second child of Block 2).
{hash: "264176fb6072e2362db18f92d3f4b739cff071a206736df7c407c0bf9a1d7fef", want: true},
{hash: "48a752afbe36ad66357f751f8dee4f75665d24e18f644d83a3409b398405b46b", want: true},
// Block 100000 should be present (as an orphan).
{hash: "65b20b048a074793ebfd1196e49341c8d194dabfc6b44a4fd0c607406e122baf", want: true},
@@ -550,18 +553,17 @@ func TestNew(t *testing.T) {
dbPath := filepath.Join(tempDir, "TestNew")
_ = os.RemoveAll(dbPath)
db, err := database.Create(testDbType, dbPath, blockDataNet)
err := dbaccess.Open(dbPath)
if err != nil {
t.Fatalf("error creating db: %s", err)
}
defer func() {
db.Close()
dbaccess.Close()
os.RemoveAll(dbPath)
}()
config := &Config{
DAGParams: &dagconfig.SimnetParams,
DB: db,
TimeSource: NewMedianTime(),
TimeSource: NewTimeSource(),
SigCache: txscript.NewSigCache(1000),
}
_, err = New(config)
@@ -590,20 +592,19 @@ func TestAcceptingInInit(t *testing.T) {
// Create a test database
dbPath := filepath.Join(tempDir, "TestAcceptingInInit")
_ = os.RemoveAll(dbPath)
db, err := database.Create(testDbType, dbPath, blockDataNet)
err := dbaccess.Open(dbPath)
if err != nil {
t.Fatalf("error creating db: %s", err)
}
defer func() {
db.Close()
dbaccess.Close()
os.RemoveAll(dbPath)
}()
// Create a DAG to add the test block into
config := &Config{
DAGParams: &dagconfig.SimnetParams,
DB: db,
TimeSource: NewMedianTime(),
TimeSource: NewTimeSource(),
SigCache: txscript.NewSigCache(1000),
}
dag, err := New(config)
@@ -625,16 +626,30 @@ func TestAcceptingInInit(t *testing.T) {
testNode.status = statusDataStored
// Manually add the test block to the database
err = db.Update(func(dbTx database.Tx) error {
err := dbStoreBlock(dbTx, testBlock)
if err != nil {
return err
}
return dbStoreBlockNode(dbTx, testNode)
})
dbTx, err := dbaccess.NewTx()
if err != nil {
t.Fatalf("Failed to open database "+
"transaction: %s", err)
}
defer dbTx.RollbackUnlessClosed()
err = storeBlock(dbTx, testBlock)
if err != nil {
t.Fatalf("Failed to store block: %s", err)
}
dbTestNode, err := serializeBlockNode(testNode)
if err != nil {
t.Fatalf("Failed to serialize blockNode: %s", err)
}
key := blockIndexKey(testNode.hash, testNode.blueScore)
err = dbaccess.StoreIndexBlock(dbTx, key, dbTestNode)
if err != nil {
t.Fatalf("Failed to update block index: %s", err)
}
err = dbTx.Commit()
if err != nil {
t.Fatalf("Failed to commit database "+
"transaction: %s", err)
}
// Create a new DAG. We expect this DAG to process the
// test node
@@ -654,7 +669,7 @@ func TestConfirmations(t *testing.T) {
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimnetParams
params.K = 1
dag, teardownFunc, err := DAGSetup("TestConfirmations", Config{
dag, teardownFunc, err := DAGSetup("TestConfirmations", true, Config{
DAGParams: &params,
})
if err != nil {
@@ -676,7 +691,7 @@ func TestConfirmations(t *testing.T) {
chainBlocks := make([]*wire.MsgBlock, 5)
chainBlocks[0] = dag.dagParams.GenesisBlock
for i := uint32(1); i < 5; i++ {
chainBlocks[i] = prepareAndProcessBlock(t, dag, chainBlocks[i-1])
chainBlocks[i] = prepareAndProcessBlockByParentMsgBlocks(t, dag, chainBlocks[i-1])
}
// Make sure that each one of the chain blocks has the expected confirmations number
@@ -695,8 +710,8 @@ func TestConfirmations(t *testing.T) {
branchingBlocks := make([]*wire.MsgBlock, 2)
// Add two branching blocks
branchingBlocks[0] = prepareAndProcessBlock(t, dag, chainBlocks[1])
branchingBlocks[1] = prepareAndProcessBlock(t, dag, branchingBlocks[0])
branchingBlocks[0] = prepareAndProcessBlockByParentMsgBlocks(t, dag, chainBlocks[1])
branchingBlocks[1] = prepareAndProcessBlockByParentMsgBlocks(t, dag, branchingBlocks[0])
// Check that the genesis has a confirmations number == len(chainBlocks)
genesisConfirmations, err = dag.blockConfirmations(dag.genesis)
@@ -726,7 +741,7 @@ func TestConfirmations(t *testing.T) {
// Generate 100 blocks to force the "main" chain to become red
branchingChainTip := branchingBlocks[1]
for i := uint32(0); i < 100; i++ {
nextBranchingChainTip := prepareAndProcessBlock(t, dag, branchingChainTip)
nextBranchingChainTip := prepareAndProcessBlockByParentMsgBlocks(t, dag, branchingChainTip)
branchingChainTip = nextBranchingChainTip
}
@@ -757,7 +772,7 @@ func TestAcceptingBlock(t *testing.T) {
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimnetParams
params.K = 3
dag, teardownFunc, err := DAGSetup("TestAcceptingBlock", Config{
dag, teardownFunc, err := DAGSetup("TestAcceptingBlock", true, Config{
DAGParams: &params,
})
if err != nil {
@@ -785,7 +800,7 @@ func TestAcceptingBlock(t *testing.T) {
chainBlocks := make([]*wire.MsgBlock, numChainBlocks)
chainBlocks[0] = dag.dagParams.GenesisBlock
for i := uint32(1); i <= numChainBlocks-1; i++ {
chainBlocks[i] = prepareAndProcessBlock(t, dag, chainBlocks[i-1])
chainBlocks[i] = prepareAndProcessBlockByParentMsgBlocks(t, dag, chainBlocks[i-1])
}
// Make sure that each chain block (including the genesis) is accepted by its child
@@ -813,7 +828,7 @@ func TestAcceptingBlock(t *testing.T) {
// Generate a chain tip that will be in the anticone of the selected tip and
// in dag.virtual.blues.
branchingChainTip := prepareAndProcessBlock(t, dag, chainBlocks[len(chainBlocks)-3])
branchingChainTip := prepareAndProcessBlockByParentMsgBlocks(t, dag, chainBlocks[len(chainBlocks)-3])
// Make sure that branchingChainTip is not in the selected parent chain
isBranchingChainTipInSelectedParentChain, err := dag.IsInSelectedParentChain(branchingChainTip.BlockHash())
@@ -851,7 +866,7 @@ func TestAcceptingBlock(t *testing.T) {
intersectionBlock := chainBlocks[1]
sideChainTip := intersectionBlock
for i := 0; i < len(chainBlocks)-3; i++ {
sideChainTip = prepareAndProcessBlock(t, dag, sideChainTip)
sideChainTip = prepareAndProcessBlockByParentMsgBlocks(t, dag, sideChainTip)
}
// Make sure that the accepting block of the parent of the branching block didn't change
@@ -867,7 +882,7 @@ func TestAcceptingBlock(t *testing.T) {
// Make sure that a block that is found in the red set of the selected tip
// doesn't have an accepting block
prepareAndProcessBlock(t, dag, sideChainTip, chainBlocks[len(chainBlocks)-1])
prepareAndProcessBlockByParentMsgBlocks(t, dag, sideChainTip, chainBlocks[len(chainBlocks)-1])
sideChainTipAcceptingBlock, err := acceptingBlockByMsgBlock(sideChainTip)
if err != nil {
@@ -887,7 +902,7 @@ func TestFinalizeNodesBelowFinalityPoint(t *testing.T) {
func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
params := dagconfig.SimnetParams
params.K = 1
dag, teardownFunc, err := DAGSetup("testFinalizeNodesBelowFinalityPoint", Config{
dag, teardownFunc, err := DAGSetup("testFinalizeNodesBelowFinalityPoint", true, Config{
DAGParams: &params,
})
if err != nil {
@@ -899,13 +914,20 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
blockTime := dag.genesis.Header().Timestamp
flushUTXODiffStore := func() {
err := dag.db.Update(func(dbTx database.Tx) error {
return dag.utxoDiffStore.flushToDB(dbTx)
})
dbTx, err := dbaccess.NewTx()
if err != nil {
t.Fatalf("Failed to open database transaction: %s", err)
}
defer dbTx.RollbackUnlessClosed()
err = dag.utxoDiffStore.flushToDB(dbTx)
if err != nil {
t.Fatalf("Error flushing utxoDiffStore data to DB: %s", err)
}
dag.utxoDiffStore.clearDirtyEntries()
err = dbTx.Commit()
if err != nil {
t.Fatalf("Failed to commit database transaction: %s", err)
}
}
addNode := func(parent *blockNode) *blockNode {
@@ -934,6 +956,11 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
// Manually set the last finality point
dag.lastFinalityPoint = nodes[finalityInterval-1]
// Don't unload diffData
currentDifference := maxBlueScoreDifferenceToKeepLoaded
maxBlueScoreDifferenceToKeepLoaded = math.MaxUint64
defer func() { maxBlueScoreDifferenceToKeepLoaded = currentDifference }()
dag.finalizeNodesBelowFinalityPoint(deleteDiffData)
flushUTXODiffStore()
@@ -941,17 +968,27 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
if !node.isFinalized {
t.Errorf("Node with blue score %d expected to be finalized", node.blueScore)
}
if _, ok := dag.utxoDiffStore.loaded[*node.hash]; deleteDiffData && ok {
if _, ok := dag.utxoDiffStore.loaded[node]; deleteDiffData && ok {
t.Errorf("The diff data of node with blue score %d should have been unloaded if deleteDiffData is %T", node.blueScore, deleteDiffData)
} else if !deleteDiffData && !ok {
t.Errorf("The diff data of node with blue score %d shouldn't have been unloaded if deleteDiffData is %T", node.blueScore, deleteDiffData)
}
if diffData, err := dag.utxoDiffStore.diffDataFromDB(node.hash); err != nil {
_, err := dag.utxoDiffStore.diffDataFromDB(node.hash)
exists := !dbaccess.IsNotFoundError(err)
if exists && err != nil {
t.Errorf("diffDataFromDB: %s", err)
} else if deleteDiffData && diffData != nil {
continue
}
if deleteDiffData && exists {
t.Errorf("The diff data of node with blue score %d should have been deleted from the database if deleteDiffData is %T", node.blueScore, deleteDiffData)
} else if !deleteDiffData && diffData == nil {
continue
}
if !deleteDiffData && !exists {
t.Errorf("The diff data of node with blue score %d shouldn't have been deleted from the database if deleteDiffData is %T", node.blueScore, deleteDiffData)
continue
}
}
@@ -959,7 +996,7 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
if node.isFinalized {
t.Errorf("Node with blue score %d wasn't expected to be finalized", node.blueScore)
}
if _, ok := dag.utxoDiffStore.loaded[*node.hash]; !ok {
if _, ok := dag.utxoDiffStore.loaded[node]; !ok {
t.Errorf("The diff data of node with blue score %d shouldn't have been unloaded", node.blueScore)
}
if diffData, err := dag.utxoDiffStore.diffDataFromDB(node.hash); err != nil {
@@ -972,7 +1009,7 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
func TestDAGIndexFailedStatus(t *testing.T) {
params := dagconfig.SimnetParams
dag, teardownFunc, err := DAGSetup("TestDAGIndexFailedStatus", Config{
dag, teardownFunc, err := DAGSetup("TestDAGIndexFailedStatus", true, Config{
DAGParams: &params,
})
if err != nil {
@@ -1088,3 +1125,277 @@ func TestIsDAGCurrentMaxDiff(t *testing.T) {
}
}
}
func testProcessBlockRuleError(t *testing.T, dag *BlockDAG, block *wire.MsgBlock, expectedRuleErr error) {
isOrphan, isDelayed, err := dag.ProcessBlock(util.NewBlock(block), BFNoPoWCheck)
err = checkRuleError(err, expectedRuleErr)
if err != nil {
t.Errorf("checkRuleError: %s", err)
}
if isDelayed {
t.Fatalf("ProcessBlock: block " +
"is too far in the future")
}
if isOrphan {
t.Fatalf("ProcessBlock: block got unexpectedly orphaned")
}
}
func TestDoubleSpends(t *testing.T) {
params := dagconfig.SimnetParams
params.BlockCoinbaseMaturity = 0
// Create a new database and dag instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestDoubleSpends", true, Config{
DAGParams: &params,
})
if err != nil {
t.Fatalf("Failed to setup dag instance: %v", err)
}
defer teardownFunc()
fundingBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{params.GenesisHash}, nil)
cbTx := fundingBlock.Transactions[0]
signatureScript, err := txscript.PayToScriptHashSignatureScript(OpTrueScript, nil)
if err != nil {
t.Fatalf("Failed to build signature script: %s", err)
}
txIn := &wire.TxIn{
PreviousOutpoint: wire.Outpoint{TxID: *cbTx.TxID(), Index: 0},
SignatureScript: signatureScript,
Sequence: wire.MaxTxInSequenceNum,
}
txOut := &wire.TxOut{
ScriptPubKey: OpTrueScript,
Value: uint64(1),
}
tx1 := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut})
doubleSpendTxOut := &wire.TxOut{
ScriptPubKey: OpTrueScript,
Value: uint64(2),
}
doubleSpendTx1 := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{doubleSpendTxOut})
blockWithTx1 := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{fundingBlock.BlockHash()}, []*wire.MsgTx{tx1})
// Check that a block will be rejected if it has a transaction that already exists in its past.
anotherBlockWithTx1, err := PrepareBlockForTest(dag, []*daghash.Hash{blockWithTx1.BlockHash()}, nil)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
// Manually add tx1.
anotherBlockWithTx1.Transactions = append(anotherBlockWithTx1.Transactions, tx1)
anotherBlockWithTx1UtilTxs := make([]*util.Tx, len(anotherBlockWithTx1.Transactions))
for i, tx := range anotherBlockWithTx1.Transactions {
anotherBlockWithTx1UtilTxs[i] = util.NewTx(tx)
}
anotherBlockWithTx1.Header.HashMerkleRoot = BuildHashMerkleTreeStore(anotherBlockWithTx1UtilTxs).Root()
testProcessBlockRuleError(t, dag, anotherBlockWithTx1, ruleError(ErrOverwriteTx, ""))
// Check that a block will be rejected if it has a transaction that double spends
// a transaction from its past.
blockWithDoubleSpendForTx1, err := PrepareBlockForTest(dag, []*daghash.Hash{blockWithTx1.BlockHash()}, nil)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
// Manually add a transaction that double spends an output from the block's past.
blockWithDoubleSpendForTx1.Transactions = append(blockWithDoubleSpendForTx1.Transactions, doubleSpendTx1)
blockWithDoubleSpendForTx1UtilTxs := make([]*util.Tx, len(blockWithDoubleSpendForTx1.Transactions))
for i, tx := range blockWithDoubleSpendForTx1.Transactions {
blockWithDoubleSpendForTx1UtilTxs[i] = util.NewTx(tx)
}
blockWithDoubleSpendForTx1.Header.HashMerkleRoot = BuildHashMerkleTreeStore(blockWithDoubleSpendForTx1UtilTxs).Root()
testProcessBlockRuleError(t, dag, blockWithDoubleSpendForTx1, ruleError(ErrMissingTxOut, ""))
blockInAnticoneOfBlockWithTx1, err := PrepareBlockForTest(dag, []*daghash.Hash{fundingBlock.BlockHash()}, []*wire.MsgTx{doubleSpendTx1})
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
// Check that a block will not get rejected if it has a transaction that double spends
// a transaction from its anticone.
testProcessBlockRuleError(t, dag, blockInAnticoneOfBlockWithTx1, nil)
// Check that a block will be rejected if it has two transactions that spend the same UTXO.
blockWithDoubleSpendWithItself, err := PrepareBlockForTest(dag, []*daghash.Hash{fundingBlock.BlockHash()}, nil)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
// Manually add tx1 and doubleSpendTx1.
blockWithDoubleSpendWithItself.Transactions = append(blockWithDoubleSpendWithItself.Transactions, tx1, doubleSpendTx1)
blockWithDoubleSpendWithItselfUtilTxs := make([]*util.Tx, len(blockWithDoubleSpendWithItself.Transactions))
for i, tx := range blockWithDoubleSpendWithItself.Transactions {
blockWithDoubleSpendWithItselfUtilTxs[i] = util.NewTx(tx)
}
blockWithDoubleSpendWithItself.Header.HashMerkleRoot = BuildHashMerkleTreeStore(blockWithDoubleSpendWithItselfUtilTxs).Root()
testProcessBlockRuleError(t, dag, blockWithDoubleSpendWithItself, ruleError(ErrDoubleSpendInSameBlock, ""))
// Check that a block will be rejected if it has the same transaction twice.
blockWithDuplicateTransaction, err := PrepareBlockForTest(dag, []*daghash.Hash{fundingBlock.BlockHash()}, nil)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
// Manually add tx1 twice.
blockWithDuplicateTransaction.Transactions = append(blockWithDuplicateTransaction.Transactions, tx1, tx1)
blockWithDuplicateTransactionUtilTxs := make([]*util.Tx, len(blockWithDuplicateTransaction.Transactions))
for i, tx := range blockWithDuplicateTransaction.Transactions {
blockWithDuplicateTransactionUtilTxs[i] = util.NewTx(tx)
}
blockWithDuplicateTransaction.Header.HashMerkleRoot = BuildHashMerkleTreeStore(blockWithDuplicateTransactionUtilTxs).Root()
testProcessBlockRuleError(t, dag, blockWithDuplicateTransaction, ruleError(ErrDuplicateTx, ""))
}
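Each of the negative cases above repeats the same three-step dance: append transactions to a prepared block, rewrap them as util.Tx values, and recompute the header's hash merkle root. A hedged sketch of a helper that would factor this out follows; addTxsAndFixMerkleRoot is hypothetical and not part of the package, but it is built only from calls that already appear in the test:

// addTxsAndFixMerkleRoot is a hypothetical helper mirroring the repeated
// pattern in TestDoubleSpends: append transactions to a block and recompute
// its hash merkle root so the header stays consistent with the body.
func addTxsAndFixMerkleRoot(block *wire.MsgBlock, txs ...*wire.MsgTx) {
	block.Transactions = append(block.Transactions, txs...)
	utilTxs := make([]*util.Tx, len(block.Transactions))
	for i, tx := range block.Transactions {
		utilTxs[i] = util.NewTx(tx)
	}
	block.Header.HashMerkleRoot = BuildHashMerkleTreeStore(utilTxs).Root()
}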
func TestUTXOCommitment(t *testing.T) {
// Create a new database and dag instance to run tests against.
params := dagconfig.DevnetParams
params.BlockCoinbaseMaturity = 0
dag, teardownFunc, err := DAGSetup("TestUTXOCommitment", true, Config{
DAGParams: &params,
})
if err != nil {
t.Fatalf("TestUTXOCommitment: Failed to setup dag instance: %v", err)
}
defer teardownFunc()
resetExtraNonceForTest()
createTx := func(txToSpend *wire.MsgTx) *wire.MsgTx {
scriptPubKey, err := txscript.PayToScriptHashScript(OpTrueScript)
if err != nil {
t.Fatalf("TestUTXOCommitment: failed to build script pub key: %s", err)
}
signatureScript, err := txscript.PayToScriptHashSignatureScript(OpTrueScript, nil)
if err != nil {
t.Fatalf("TestUTXOCommitment: failed to build signature script: %s", err)
}
txIn := &wire.TxIn{
PreviousOutpoint: wire.Outpoint{TxID: *txToSpend.TxID(), Index: 0},
SignatureScript: signatureScript,
Sequence: wire.MaxTxInSequenceNum,
}
txOut := &wire.TxOut{
ScriptPubKey: scriptPubKey,
Value: uint64(1),
}
return wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut})
}
// Build the following DAG:
// G <- A <- B <- D
// <- C <-
genesis := params.GenesisBlock
// Block A:
blockA := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{genesis.BlockHash()}, nil)
// Block B:
blockB := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockA.BlockHash()}, nil)
// Block C:
txSpendBlockACoinbase := createTx(blockA.Transactions[0])
blockCTxs := []*wire.MsgTx{txSpendBlockACoinbase}
blockC := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockA.BlockHash()}, blockCTxs)
// Block D:
txSpendTxInBlockC := createTx(txSpendBlockACoinbase)
blockDTxs := []*wire.MsgTx{txSpendTxInBlockC}
blockD := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockB.BlockHash(), blockC.BlockHash()}, blockDTxs)
// Get the pastUTXO of blockD
blockNodeD := dag.index.LookupNode(blockD.BlockHash())
if blockNodeD == nil {
t.Fatalf("TestUTXOCommitment: blockNode for block D not found")
}
blockDPastUTXO, _, _, _ := dag.pastUTXO(blockNodeD)
blockDPastDiffUTXOSet := blockDPastUTXO.(*DiffUTXOSet)
// Build a Multiset for block D
multiset := secp256k1.NewMultiset()
for outpoint, entry := range blockDPastDiffUTXOSet.base.utxoCollection {
var err error
multiset, err = addUTXOToMultiset(multiset, entry, &outpoint)
if err != nil {
t.Fatalf("TestUTXOCommitment: addUTXOToMultiset unexpectedly failed")
}
}
for outpoint, entry := range blockDPastDiffUTXOSet.UTXODiff.toAdd {
var err error
multiset, err = addUTXOToMultiset(multiset, entry, &outpoint)
if err != nil {
t.Fatalf("TestUTXOCommitment: addUTXOToMultiset unexpectedly failed")
}
}
for outpoint, entry := range blockDPastDiffUTXOSet.UTXODiff.toRemove {
var err error
multiset, err = removeUTXOFromMultiset(multiset, entry, &outpoint)
if err != nil {
t.Fatalf("TestUTXOCommitment: removeUTXOFromMultiset unexpectedly failed")
}
}
// Turn the multiset into a UTXO commitment
utxoCommitment := daghash.Hash(*multiset.Finalize())
// Make sure that the two commitments are equal
if !utxoCommitment.IsEqual(blockNodeD.utxoCommitment) {
t.Fatalf("TestUTXOCommitment: calculated UTXO commitment and "+
"actual UTXO commitment don't match. Want: %s, got: %s",
utxoCommitment, blockNodeD.utxoCommitment)
}
}
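The test above recomputes the commitment by hand: every entry in the base UTXO collection and in the diff's toAdd set is hashed into a secp256k1 multiset, every entry in toRemove is hashed out of it, and the finalized multiset must equal the utxoCommitment stored on the block node. A hedged sketch folding those three loops into one helper; applyDiffSetToMultiset is hypothetical, and the *secp256k1.MultiSet type name is an assumption inferred from the NewMultiset call above:

// applyDiffSetToMultiset is a hypothetical helper equivalent to the three
// loops in TestUTXOCommitment: add the base collection and the diff's toAdd
// entries to the multiset, then remove the diff's toRemove entries.
func applyDiffSetToMultiset(ms *secp256k1.MultiSet, diffSet *DiffUTXOSet) (*secp256k1.MultiSet, error) {
	var err error
	for outpoint, entry := range diffSet.base.utxoCollection {
		if ms, err = addUTXOToMultiset(ms, entry, &outpoint); err != nil {
			return nil, err
		}
	}
	for outpoint, entry := range diffSet.UTXODiff.toAdd {
		if ms, err = addUTXOToMultiset(ms, entry, &outpoint); err != nil {
			return nil, err
		}
	}
	for outpoint, entry := range diffSet.UTXODiff.toRemove {
		if ms, err = removeUTXOFromMultiset(ms, entry, &outpoint); err != nil {
			return nil, err
		}
	}
	return ms, nil
}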
func TestPastUTXOMultiSet(t *testing.T) {
// Create a new database and dag instance to run tests against.
params := dagconfig.SimnetParams
dag, teardownFunc, err := DAGSetup("TestPastUTXOMultiSet", true, Config{
DAGParams: &params,
})
if err != nil {
t.Fatalf("TestPastUTXOMultiSet: Failed to setup dag instance: %v", err)
}
defer teardownFunc()
// Build a short chain
genesis := params.GenesisBlock
blockA := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{genesis.BlockHash()}, nil)
blockB := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockA.BlockHash()}, nil)
blockC := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockB.BlockHash()}, nil)
// Take blockC's selectedParentMultiset
blockNodeC := dag.index.LookupNode(blockC.BlockHash())
if blockNodeC == nil {
t.Fatalf("TestPastUTXOMultiSet: blockNode for blockC not found")
}
blockCSelectedParentMultiset, err := blockNodeC.selectedParentMultiset(dag)
if err != nil {
t.Fatalf("TestPastUTXOMultiSet: selectedParentMultiset unexpectedly failed: %s", err)
}
// Copy the multiset
blockCSelectedParentMultisetCopy := *blockCSelectedParentMultiset
blockCSelectedParentMultiset = &blockCSelectedParentMultisetCopy
// Add a block on top of blockC
PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{blockC.BlockHash()}, nil)
// Get blockC's selectedParentMultiset again
blockCSelectedParentMultiSetAfterAnotherBlock, err := blockNodeC.selectedParentMultiset(dag)
if err != nil {
t.Fatalf("TestPastUTXOMultiSet: selectedParentMultiset unexpectedly failed: %s", err)
}
// Make sure that blockC's selectedParentMultiset had not changed
if !reflect.DeepEqual(blockCSelectedParentMultiset, blockCSelectedParentMultiSetAfterAnotherBlock) {
t.Fatalf("TestPastUTXOMultiSet: selectedParentMultiset appears to have changed")
}
}
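The copy-by-dereference in the middle of the test is what makes the final DeepEqual meaningful: dereferencing the pointer takes a shallow snapshot of the multiset value, so if processing another block mutated blockC's cached multiset in place, the comparison would catch it. A minimal, self-contained illustration of the pattern (the counter type is made up for the example):

package main

import "fmt"

type counter struct{ n int }

func main() {
	orig := &counter{n: 1}
	snapshot := *orig       // shallow value copy, like the multiset copy above
	orig.n = 2              // mutate through the original pointer
	fmt.Println(snapshot.n) // prints 1: the snapshot is unaffected
}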

File diff suppressed because it is too large.

View File

@@ -6,11 +6,11 @@ package blockdag
import (
"bytes"
"encoding/hex"
"github.com/pkg/errors"
"reflect"
"testing"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/daghash"
)
@@ -36,9 +36,21 @@ func TestErrNotInDAG(t *testing.T) {
}
}
// TestUtxoSerialization ensures serializing and deserializing unspent
// hexToBytes converts the passed hex string into bytes and will panic if there
// is an error. This is only provided for the hard-coded constants so errors in
// the source code can be detected. It will only (and must only) be called with
// hard-coded values.
func hexToBytes(s string) []byte {
b, err := hex.DecodeString(s)
if err != nil {
panic("invalid hex in source file: " + s)
}
return b
}
// TestUTXOSerialization ensures serializing and deserializing unspent
// transaction output entries works as expected.
func TestUtxoSerialization(t *testing.T) {
func TestUTXOSerialization(t *testing.T) {
t.Parallel()
tests := []struct {
@@ -54,7 +66,7 @@ func TestUtxoSerialization(t *testing.T) {
blockBlueScore: 1,
packedFlags: tfCoinbase,
},
serialized: hexToBytes("03320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52"),
serialized: hexToBytes("01000000000000000100f2052a0100000043410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858eeac"),
},
{
name: "blue score 100001, not coinbase",
@@ -64,13 +76,21 @@ func TestUtxoSerialization(t *testing.T) {
blockBlueScore: 100001,
packedFlags: 0,
},
serialized: hexToBytes("8b99420700ee8bd501094a7d5ca318da2506de35e1cb025ddc"),
serialized: hexToBytes("a1860100000000000040420f00000000001976a914ee8bd501094a7d5ca318da2506de35e1cb025ddc88ac"),
},
}
for i, test := range tests {
// Ensure the utxo entry serializes to the expected value.
gotBytes := serializeUTXOEntry(test.entry)
w := &bytes.Buffer{}
err := serializeUTXOEntry(w, test.entry)
if err != nil {
t.Errorf("serializeUTXOEntry #%d (%s) unexpected "+
"error: %v", i, test.name, err)
continue
}
gotBytes := w.Bytes()
if !bytes.Equal(gotBytes, test.serialized) {
t.Errorf("serializeUTXOEntry #%d (%s): mismatched "+
"bytes - got %x, want %x", i, test.name,
@@ -78,8 +98,8 @@ func TestUtxoSerialization(t *testing.T) {
continue
}
// Deserialize to a utxo entry.
utxoEntry, err := deserializeUTXOEntry(test.serialized)
// Deserialize to a utxo entry.
utxoEntry, err := deserializeUTXOEntry(bytes.NewReader(test.serialized))
if err != nil {
t.Errorf("deserializeUTXOEntry #%d (%s) unexpected "+
"error: %v", i, test.name, err)
@@ -124,28 +144,24 @@ func TestUtxoEntryDeserializeErrors(t *testing.T) {
tests := []struct {
name string
serialized []byte
errType error
}{
{
name: "no data after header code",
serialized: hexToBytes("02"),
errType: errDeserialize(""),
},
{
name: "incomplete compressed txout",
serialized: hexToBytes("0232"),
errType: errDeserialize(""),
},
}
for _, test := range tests {
// Ensure the expected error type is returned and the returned
// entry is nil.
entry, err := deserializeUTXOEntry(test.serialized)
if reflect.TypeOf(err) != reflect.TypeOf(test.errType) {
t.Errorf("deserializeUTXOEntry (%s): expected error "+
"type does not match - got %T, want %T",
test.name, err, test.errType)
entry, err := deserializeUTXOEntry(bytes.NewReader(test.serialized))
if err == nil {
t.Errorf("deserializeUTXOEntry (%s): didn't return an error",
test.name)
continue
}
if entry != nil {
@@ -172,7 +188,7 @@ func TestDAGStateSerialization(t *testing.T) {
TipHashes: []*daghash.Hash{newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f")},
LastFinalityPoint: newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"),
},
serialized: []byte("{\"TipHashes\":[[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]}"),
serialized: []byte("{\"TipHashes\":[[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0],\"LocalSubnetworkID\":null}"),
},
{
name: "block 1",
@@ -180,7 +196,7 @@ func TestDAGStateSerialization(t *testing.T) {
TipHashes: []*daghash.Hash{newHashFromStr("00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048")},
LastFinalityPoint: newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"),
},
serialized: []byte("{\"TipHashes\":[[72,96,235,24,191,27,22,32,227,126,148,144,252,138,66,117,20,65,111,215,81,89,171,134,104,142,154,131,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]}"),
serialized: []byte("{\"TipHashes\":[[72,96,235,24,191,27,22,32,227,126,148,144,252,138,66,117,20,65,111,215,81,89,171,134,104,142,154,131,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0],\"LocalSubnetworkID\":null}"),
},
}
@@ -217,51 +233,6 @@ func TestDAGStateSerialization(t *testing.T) {
}
}
// TestDAGStateDeserializeErrors performs negative tests against
// deserializing the DAG state to ensure error paths work as expected.
func TestDAGStateDeserializeErrors(t *testing.T) {
t.Parallel()
tests := []struct {
name string
serialized []byte
errType error
}{
{
name: "nothing serialized",
serialized: hexToBytes(""),
errType: database.Error{ErrorCode: database.ErrCorruption},
},
{
name: "corrupted data",
serialized: []byte("[[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,7"),
errType: database.Error{ErrorCode: database.ErrCorruption},
},
}
for _, test := range tests {
// Ensure the expected error type and code is returned.
_, err := deserializeDAGState(test.serialized)
if reflect.TypeOf(err) != reflect.TypeOf(test.errType) {
t.Errorf("deserializeDAGState (%s): expected "+
"error type does not match - got %T, want %T",
test.name, err, test.errType)
continue
}
var dbErr database.Error
if ok := errors.As(err, &dbErr); ok {
tderr := test.errType.(database.Error)
if dbErr.ErrorCode != tderr.ErrorCode {
t.Errorf("deserializeDAGState (%s): "+
"wrong error code got: %v, want: %v",
test.name, dbErr.ErrorCode,
tderr.ErrorCode)
continue
}
}
}
}
// newHashFromStr converts the passed big-endian hex string into a
// daghash.Hash. It only differs from the one available in daghash in that
// it panics in case of an error since it will only (and must only) be

View File

@@ -5,7 +5,7 @@
package blockdag
import (
"math/big"
"github.com/kaspanet/kaspad/util/bigintpool"
"time"
"github.com/kaspanet/kaspad/util"
@@ -30,11 +30,20 @@ func (dag *BlockDAG) requiredDifficulty(bluestParent *blockNode, newBlockTime ti
// averageWindowTarget * ((windowMaxTimeStamp - windowMinTimestamp) / (targetTimePerBlock * windowSize))
// The result uses integer division which means it will be slightly
// rounded down.
newTarget := targetsWindow.averageTarget()
newTarget := bigintpool.Acquire(0)
defer bigintpool.Release(newTarget)
windowTimeStampDifference := bigintpool.Acquire(windowMaxTimeStamp - windowMinTimestamp)
defer bigintpool.Release(windowTimeStampDifference)
targetTimePerBlock := bigintpool.Acquire(dag.targetTimePerBlock)
defer bigintpool.Release(targetTimePerBlock)
difficultyAdjustmentWindowSize := bigintpool.Acquire(int64(dag.difficultyAdjustmentWindowSize))
defer bigintpool.Release(difficultyAdjustmentWindowSize)
targetsWindow.averageTarget(newTarget)
newTarget.
Mul(newTarget, big.NewInt(windowMaxTimeStamp-windowMinTimestamp)).
Div(newTarget, big.NewInt(dag.targetTimePerBlock)).
Div(newTarget, big.NewInt(int64(dag.difficultyAdjustmentWindowSize)))
Mul(newTarget, windowTimeStampDifference).
Div(newTarget, targetTimePerBlock).
Div(newTarget, difficultyAdjustmentWindowSize)
if newTarget.Cmp(dag.dagParams.PowMax) > 0 {
return dag.powMaxBits
}
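The rewrite above also swaps the per-call big.NewInt allocations for an acquire/release pattern, keeping the hot difficulty-adjustment path from churning the garbage collector. The bigintpool package itself is not shown in this diff; the Acquire/Release names are taken from the calls above, but backing the pool with sync.Pool is an assumption. A minimal sketch under that assumption:

package bigintpool

import (
	"math/big"
	"sync"
)

var pool = sync.Pool{
	New: func() interface{} { return new(big.Int) },
}

// Acquire returns a big.Int from the pool, initialized to x.
func Acquire(x int64) *big.Int {
	n := pool.Get().(*big.Int)
	n.SetInt64(x)
	return n
}

// Release zeroes the big.Int and returns it to the pool for reuse.
func Release(n *big.Int) {
	n.SetInt64(0)
	pool.Put(n)
}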

View File

@@ -82,7 +82,7 @@ func TestDifficulty(t *testing.T) {
params := dagconfig.SimnetParams
params.K = 1
params.DifficultyAdjustmentWindowSize = 264
dag, teardownFunc, err := DAGSetup("TestDifficulty", Config{
dag, teardownFunc, err := DAGSetup("TestDifficulty", true, Config{
DAGParams: &params,
})
if err != nil {
@@ -94,7 +94,8 @@ func TestDifficulty(t *testing.T) {
addNode := func(parents blockSet, blockTime time.Time) *blockNode {
bluestParent := parents.bluest()
if blockTime == zeroTime {
blockTime = time.Unix(bluestParent.timestamp+1, 0)
blockTime = time.Unix(bluestParent.timestamp, 0)
blockTime = blockTime.Add(params.TargetTimePerBlock)
}
block, err := PrepareBlockForTest(dag, parents.hashes(), nil)
if err != nil {
@@ -119,7 +120,8 @@ func TestDifficulty(t *testing.T) {
for i := uint64(0); i < dag.difficultyAdjustmentWindowSize; i++ {
tip = addNode(blockSetFromSlice(tip), zeroTime)
if tip.bits != dag.genesis.bits {
t.Fatalf("As long as the bluest parent's blue score is less then the difficulty adjustment window size, the difficulty should be the same as genesis'")
t.Fatalf("As long as the bluest parent's blue score is less then the difficulty adjustment " +
"window size, the difficulty should be the same as genesis'")
}
}
for i := uint64(0); i < dag.difficultyAdjustmentWindowSize+100; i++ {
@@ -140,7 +142,8 @@ func TestDifficulty(t *testing.T) {
}
tip = addNode(blockSetFromSlice(tip), zeroTime)
if compareBits(tip.bits, nodeInThePast.bits) >= 0 {
t.Fatalf("tip.bits should be smaller than nodeInThePast.bits because nodeInThePast increased the block rate, so the difficulty should increase as well")
t.Fatalf("tip.bits should be smaller than nodeInThePast.bits because nodeInThePast increased the " +
"block rate, so the difficulty should increase as well")
}
expectedBits := uint32(0x207f83df)
if tip.bits != expectedBits {
@@ -167,7 +170,9 @@ func TestDifficulty(t *testing.T) {
sameBitsCount = 0
}
}
slowNode := addNode(blockSetFromSlice(tip), time.Unix(tip.timestamp+2, 0))
slowBlockTime := time.Unix(tip.timestamp, 0)
slowBlockTime = slowBlockTime.Add(params.TargetTimePerBlock + time.Second)
slowNode := addNode(blockSetFromSlice(tip), slowBlockTime)
if slowNode.bits != tip.bits {
t.Fatalf("The difficulty should only change when slowNode is in the past of a block bluest parent")
}
@@ -180,7 +185,8 @@ func TestDifficulty(t *testing.T) {
}
tip = addNode(blockSetFromSlice(tip), zeroTime)
if compareBits(tip.bits, slowNode.bits) <= 0 {
t.Fatalf("tip.bits should be smaller than slowNode.bits because slowNode decreased the block rate, so the difficulty should decrease as well")
t.Fatalf("tip.bits should be smaller than slowNode.bits because slowNode decreased the block" +
" rate, so the difficulty should decrease as well")
}
splitNode := addNode(blockSetFromSlice(tip), zeroTime)
@@ -197,7 +203,8 @@ func TestDifficulty(t *testing.T) {
tipWithRedPast := addNode(blockSetFromSlice(redChainTip, blueTip), zeroTime)
tipWithoutRedPast := addNode(blockSetFromSlice(blueTip), zeroTime)
if tipWithoutRedPast.bits != tipWithRedPast.bits {
t.Fatalf("tipWithoutRedPast.bits should be the same as tipWithRedPast.bits because red blocks shouldn't affect the difficulty")
t.Fatalf("tipWithoutRedPast.bits should be the same as tipWithRedPast.bits because red blocks" +
" shouldn't affect the difficulty")
}
}

View File

@@ -6,28 +6,10 @@ package blockdag
import (
"fmt"
"github.com/pkg/errors"
)
// DeploymentError identifies an error that indicates a deployment ID was
// specified that does not exist.
type DeploymentError uint32
// Error returns the assertion error as a human-readable string and satisfies
// the error interface.
func (e DeploymentError) Error() string {
return fmt.Sprintf("deployment ID %d does not exist", uint32(e))
}
// AssertError identifies an error that indicates an internal code consistency
// issue and should be treated as a critical and unrecoverable error.
type AssertError string
// Error returns the assertion error as a human-readable string and satisfies
// the error interface.
func (e AssertError) Error() string {
return "assertion failed: " + string(e)
}
// ErrorCode identifies a kind of error.
type ErrorCode int
@@ -121,6 +103,11 @@ const (
// either does not exist or has already been spent.
ErrMissingTxOut
// ErrDoubleSpendInSameBlock indicates a transaction
// that spends an output that was already spent by another
// transaction in the same block.
ErrDoubleSpendInSameBlock
// ErrUnfinalizedTx indicates a transaction has not been finalized.
// A valid block may only contain finalized transactions.
ErrUnfinalizedTx
@@ -245,6 +232,7 @@ var errorCodeStrings = map[ErrorCode]string{
ErrDuplicateTxInputs: "ErrDuplicateTxInputs",
ErrBadTxInput: "ErrBadTxInput",
ErrMissingTxOut: "ErrMissingTxOut",
ErrDoubleSpendInSameBlock: "ErrDoubleSpendInSameBlock",
ErrUnfinalizedTx: "ErrUnfinalizedTx",
ErrDuplicateTx: "ErrDuplicateTx",
ErrOverwriteTx: "ErrOverwriteTx",
@@ -294,7 +282,6 @@ func (e RuleError) Error() string {
return e.Description
}
// ruleError creates a RuleError given a set of arguments.
func ruleError(c ErrorCode, desc string) RuleError {
return RuleError{ErrorCode: c, Description: desc}
func ruleError(c ErrorCode, desc string) error {
return errors.WithStack(RuleError{ErrorCode: c, Description: desc})
}
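Because ruleError now returns a stack-wrapped error rather than a bare RuleError value, callers that match on the concrete type should unwrap with errors.As instead of a direct type assertion. A hedged sketch of the calling pattern, using only identifiers already present in this file:

// Match a wrapped RuleError and branch on its code.
var ruleErr RuleError
if errors.As(err, &ruleErr) && ruleErr.ErrorCode == ErrMissingTxOut {
	// handle a missing-output rule violation specifically
}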

View File

@@ -5,7 +5,6 @@
package blockdag
import (
"fmt"
"testing"
)
@@ -99,46 +98,3 @@ func TestRuleError(t *testing.T) {
}
}
}
// TestDeploymentError tests the stringized output for the DeploymentError type.
func TestDeploymentError(t *testing.T) {
t.Parallel()
tests := []struct {
in DeploymentError
want string
}{
{
DeploymentError(0),
"deployment ID 0 does not exist",
},
{
DeploymentError(10),
"deployment ID 10 does not exist",
},
{
DeploymentError(123),
"deployment ID 123 does not exist",
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
result := test.in.Error()
if result != test.want {
t.Errorf("Error #%d\n got: %s want: %s", i, result,
test.want)
continue
}
}
}
func TestAssertError(t *testing.T) {
message := "abc 123"
err := AssertError(message)
expectedMessage := fmt.Sprintf("assertion failed: %s", message)
if expectedMessage != err.Error() {
t.Errorf("Unexpected AssertError message. "+
"Got: %s, want: %s", err.Error(), expectedMessage)
}
}

View File

@@ -4,6 +4,7 @@ import (
"fmt"
"github.com/pkg/errors"
"math"
"strings"
"testing"
"github.com/kaspanet/kaspad/util/subnetworkid"
@@ -40,7 +41,7 @@ func TestFinality(t *testing.T) {
params := dagconfig.SimnetParams
params.K = 1
params.FinalityInterval = 100
dag, teardownFunc, err := blockdag.DAGSetup("TestFinality", blockdag.Config{
dag, teardownFunc, err := blockdag.DAGSetup("TestFinality", true, blockdag.Config{
DAGParams: &params,
})
if err != nil {
@@ -185,7 +186,7 @@ func TestSubnetworkRegistry(t *testing.T) {
params := dagconfig.SimnetParams
params.K = 1
params.BlockCoinbaseMaturity = 0
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", blockdag.Config{
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", true, blockdag.Config{
DAGParams: &params,
})
if err != nil {
@@ -198,7 +199,7 @@ func TestSubnetworkRegistry(t *testing.T) {
if err != nil {
t.Fatalf("could not register network: %s", err)
}
limit, err := dag.SubnetworkStore.GasLimit(subnetworkID)
limit, err := blockdag.GasLimit(subnetworkID)
if err != nil {
t.Fatalf("could not retrieve gas limit: %s", err)
}
@@ -211,7 +212,7 @@ func TestChainedTransactions(t *testing.T) {
params := dagconfig.SimnetParams
params.BlockCoinbaseMaturity = 0
// Create a new database and dag instance to run tests against.
dag, teardownFunc, err := blockdag.DAGSetup("TestChainedTransactions", blockdag.Config{
dag, teardownFunc, err := blockdag.DAGSetup("TestChainedTransactions", true, blockdag.Config{
DAGParams: &params,
})
if err != nil {
@@ -267,11 +268,19 @@ func TestChainedTransactions(t *testing.T) {
}
chainedTx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{chainedTxIn}, []*wire.TxOut{chainedTxOut})
block2, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{tx, chainedTx}, true)
block2, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{tx}, false)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
// Manually add a chained transaction to block2
block2.Transactions = append(block2.Transactions, chainedTx)
block2UtilTxs := make([]*util.Tx, len(block2.Transactions))
for i, tx := range block2.Transactions {
block2UtilTxs[i] = util.NewTx(tx)
}
block2.Header.HashMerkleRoot = blockdag.BuildHashMerkleTreeStore(block2UtilTxs).Root()
// Checks that dag.ProcessBlock fails because we don't allow a transaction to spend another transaction from the same block
isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(block2), blockdag.BFNoPoWCheck)
if err == nil {
@@ -331,7 +340,7 @@ func TestOrderInDiffFromAcceptanceData(t *testing.T) {
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimnetParams
params.K = math.MaxUint8
dag, teardownFunc, err := blockdag.DAGSetup("TestOrderInDiffFromAcceptanceData", blockdag.Config{
dag, teardownFunc, err := blockdag.DAGSetup("TestOrderInDiffFromAcceptanceData", true, blockdag.Config{
DAGParams: &params,
})
if err != nil {
@@ -401,7 +410,7 @@ func TestGasLimit(t *testing.T) {
params := dagconfig.SimnetParams
params.K = 1
params.BlockCoinbaseMaturity = 0
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", blockdag.Config{
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", true, blockdag.Config{
DAGParams: &params,
})
if err != nil {
@@ -548,7 +557,7 @@ func TestGasLimit(t *testing.T) {
isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(nonExistentSubnetworkBlock), blockdag.BFNoPoWCheck)
expectedErrStr := fmt.Sprintf("Error getting gas limit for subnetworkID '%s': subnetwork '%s' not found",
nonExistentSubnetwork, nonExistentSubnetwork)
if err.Error() != expectedErrStr {
if !strings.Contains(err.Error(), expectedErrStr) {
t.Fatalf("ProcessBlock expected error \"%v\" but got \"%v\"", expectedErrStr, err)
}
if isDelayed {

View File

@@ -3,7 +3,7 @@ package blockdag
import (
"fmt"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"reflect"
@@ -33,7 +33,7 @@ func TestGHOSTDAG(t *testing.T) {
}{
{
k: 3,
expectedReds: []string{"F", "G", "H", "I", "O", "P"},
expectedReds: []string{"F", "G", "H", "I", "N", "O"},
dagData: []*testBlockData{
{
parents: []string{"A"},
@@ -166,7 +166,7 @@ func TestGHOSTDAG(t *testing.T) {
id: "T",
expectedScore: 13,
expectedSelectedParent: "S",
expectedBlues: []string{"S", "N", "Q"},
expectedBlues: []string{"S", "P", "Q"},
},
},
},
@@ -176,7 +176,7 @@ func TestGHOSTDAG(t *testing.T) {
func() {
resetExtraNonceForTest()
dagParams.K = test.k
dag, teardownFunc, err := DAGSetup(fmt.Sprintf("TestGHOSTDAG%d", i), Config{
dag, teardownFunc, err := DAGSetup(fmt.Sprintf("TestGHOSTDAG%d", i), true, Config{
DAGParams: &dagParams,
})
if err != nil {
@@ -282,7 +282,7 @@ func checkReds(expectedReds []string, reds map[string]bool) bool {
func TestBlueAnticoneSizeErrors(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizeErrors", Config{
dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizeErrors", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
@@ -293,14 +293,14 @@ func TestBlueAnticoneSizeErrors(t *testing.T) {
// Prepare a block chain with size K beginning with the genesis block
currentBlockA := dag.dagParams.GenesisBlock
for i := dagconfig.KType(0); i < dag.dagParams.K; i++ {
newBlock := prepareAndProcessBlock(t, dag, currentBlockA)
newBlock := prepareAndProcessBlockByParentMsgBlocks(t, dag, currentBlockA)
currentBlockA = newBlock
}
// Prepare another block chain with size K beginning with the genesis block
currentBlockB := dag.dagParams.GenesisBlock
for i := dagconfig.KType(0); i < dag.dagParams.K; i++ {
newBlock := prepareAndProcessBlock(t, dag, currentBlockB)
newBlock := prepareAndProcessBlockByParentMsgBlocks(t, dag, currentBlockB)
currentBlockB = newBlock
}
@@ -323,7 +323,7 @@ func TestBlueAnticoneSizeErrors(t *testing.T) {
func TestGHOSTDAGErrors(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestGHOSTDAGErrors", Config{
dag, teardownFunc, err := DAGSetup("TestGHOSTDAGErrors", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
@@ -332,27 +332,29 @@ func TestGHOSTDAGErrors(t *testing.T) {
defer teardownFunc()
// Add two child blocks to the genesis
block1 := prepareAndProcessBlock(t, dag, dag.dagParams.GenesisBlock)
block2 := prepareAndProcessBlock(t, dag, dag.dagParams.GenesisBlock)
block1 := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.dagParams.GenesisBlock)
block2 := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.dagParams.GenesisBlock)
// Add a child block to the previous two blocks
block3 := prepareAndProcessBlock(t, dag, block1, block2)
block3 := prepareAndProcessBlockByParentMsgBlocks(t, dag, block1, block2)
// Clear the reachability store
dag.reachabilityStore.loaded = map[daghash.Hash]*reachabilityData{}
err = dag.db.Update(func(dbTx database.Tx) error {
bucket := dbTx.Metadata().Bucket(reachabilityDataBucketName)
cursor := bucket.Cursor()
for ok := cursor.First(); ok; ok = cursor.Next() {
err := bucket.Delete(cursor.Key())
if err != nil {
return err
}
}
return nil
})
dbTx, err := dbaccess.NewTx()
if err != nil {
t.Fatalf("TestGHOSTDAGErrors: db.Update failed: %s", err)
t.Fatalf("NewTx: %s", err)
}
defer dbTx.RollbackUnlessClosed()
err = dbaccess.ClearReachabilityData(dbTx)
if err != nil {
t.Fatalf("ClearReachabilityData: %s", err)
}
err = dbTx.Commit()
if err != nil {
t.Fatalf("Commit: %s", err)
}
// Try to rerun GHOSTDAG on the last block. GHOSTDAG uses

View File

@@ -4,29 +4,16 @@ import (
"bytes"
"encoding/gob"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
)
const (
// acceptanceIndexName is the human-readable name for the index.
acceptanceIndexName = "acceptance index"
)
var (
// acceptanceIndexKey is the key of the acceptance index and the db bucket used
// to house it.
acceptanceIndexKey = []byte("acceptanceidx")
)
// AcceptanceIndex implements a txAcceptanceData by block hash index. That is to say,
// it stores a mapping between a block's hash and the set of transactions that the
// block accepts among its blue blocks.
type AcceptanceIndex struct {
db database.DB
dag *blockdag.BlockDAG
}
@@ -43,122 +30,82 @@ func NewAcceptanceIndex() *AcceptanceIndex {
return &AcceptanceIndex{}
}
// DropAcceptanceIndex drops the acceptance index from the provided database if it
// exists.
func DropAcceptanceIndex(db database.DB, interrupt <-chan struct{}) error {
return dropIndex(db, acceptanceIndexKey, acceptanceIndexName, interrupt)
}
// DropAcceptanceIndex drops the acceptance index.
func DropAcceptanceIndex() error {
dbTx, err := dbaccess.NewTx()
if err != nil {
return err
}
defer dbTx.RollbackUnlessClosed()
// Key returns the database key to use for the index as a byte slice.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Key() []byte {
return acceptanceIndexKey
}
err = dbaccess.DropAcceptanceIndex(dbTx)
if err != nil {
return err
}
// Name returns the human-readable name of the index.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Name() string {
return acceptanceIndexName
}
// Create is invoked when the indexer manager determines the index needs
// to be created for the first time. It creates the bucket for the
// acceptance index.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Create(dbTx database.Tx) error {
_, err := dbTx.Metadata().CreateBucket(acceptanceIndexKey)
return err
return dbTx.Commit()
}
// Init initializes the hash-based acceptance index.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Init(db database.DB, dag *blockdag.BlockDAG) error {
idx.db = db
func (idx *AcceptanceIndex) Init(dag *blockdag.BlockDAG) error {
idx.dag = dag
return nil
return idx.recover()
}
// recover attempts to insert any data that's missing from the
// acceptance index.
func (idx *AcceptanceIndex) recover() error {
dbTx, err := dbaccess.NewTx()
if err != nil {
return err
}
defer dbTx.RollbackUnlessClosed()
err = idx.dag.ForEachHash(func(hash daghash.Hash) error {
exists, err := dbaccess.HasAcceptanceData(dbTx, &hash)
if err != nil {
return err
}
if exists {
return nil
}
txAcceptanceData, err := idx.dag.TxsAcceptedByBlockHash(&hash)
if err != nil {
return err
}
return idx.ConnectBlock(dbTx, &hash, txAcceptanceData)
})
if err != nil {
return err
}
return dbTx.Commit()
}
// ConnectBlock is invoked by the index manager when a new block has been
// connected to the DAG.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) ConnectBlock(dbTx database.Tx, _ *util.Block, blockID uint64, _ *blockdag.BlockDAG,
txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData, _ blockdag.MultiBlockTxsAcceptanceData) error {
return dbPutTxsAcceptanceData(dbTx, blockID, txsAcceptanceData)
}
// TxsAcceptanceData returns the acceptance data of all the transactions that
// were accepted by the block with hash blockHash.
func (idx *AcceptanceIndex) TxsAcceptanceData(blockHash *daghash.Hash) (blockdag.MultiBlockTxsAcceptanceData, error) {
var txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData
err := idx.db.View(func(dbTx database.Tx) error {
var err error
txsAcceptanceData, err = dbFetchTxsAcceptanceDataByHash(dbTx, blockHash)
return err
})
if err != nil {
return nil, err
}
return txsAcceptanceData, nil
}
// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
for blockID := currentBlockID + 1; blockID <= lastKnownBlockID; blockID++ {
hash, err := blockdag.DBFetchBlockHashByID(dbTx, currentBlockID)
if err != nil {
return err
}
txAcceptanceData, err := idx.dag.TxsAcceptedByBlockHash(hash)
if err != nil {
return err
}
err = idx.ConnectBlock(dbTx, nil, blockID, nil, txAcceptanceData, nil)
if err != nil {
return err
}
}
return nil
}
func dbPutTxsAcceptanceData(dbTx database.Tx, blockID uint64,
func (idx *AcceptanceIndex) ConnectBlock(dbContext *dbaccess.TxContext, blockHash *daghash.Hash,
txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
serializedTxsAcceptanceData, err := serializeMultiBlockTxsAcceptanceData(txsAcceptanceData)
if err != nil {
return err
}
bucket := dbTx.Metadata().Bucket(acceptanceIndexKey)
return bucket.Put(blockdag.SerializeBlockID(blockID), serializedTxsAcceptanceData)
return dbaccess.StoreAcceptanceData(dbContext, blockHash, serializedTxsAcceptanceData)
}
func dbFetchTxsAcceptanceDataByHash(dbTx database.Tx,
hash *daghash.Hash) (blockdag.MultiBlockTxsAcceptanceData, error) {
blockID, err := blockdag.DBFetchBlockIDByHash(dbTx, hash)
// TxsAcceptanceData returns the acceptance data of all the transactions that
// were accepted by the block with hash blockHash.
func (idx *AcceptanceIndex) TxsAcceptanceData(blockHash *daghash.Hash) (blockdag.MultiBlockTxsAcceptanceData, error) {
serializedTxsAcceptanceData, err := dbaccess.FetchAcceptanceData(dbaccess.NoTx(), blockHash)
if err != nil {
return nil, err
}
return dbFetchTxsAcceptanceDataByID(dbTx, blockID)
}
func dbFetchTxsAcceptanceDataByID(dbTx database.Tx,
blockID uint64) (blockdag.MultiBlockTxsAcceptanceData, error) {
serializedBlockID := blockdag.SerializeBlockID(blockID)
bucket := dbTx.Metadata().Bucket(acceptanceIndexKey)
serializedTxsAcceptanceData := bucket.Get(serializedBlockID)
if serializedTxsAcceptanceData == nil {
return nil, errors.Errorf("no entry in the acceptance index for block id %d", blockID)
}
return deserializeMultiBlockTxsAcceptanceData(serializedTxsAcceptanceData)
}

View File

@@ -3,7 +3,7 @@ package indexers
import (
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
@@ -96,7 +96,7 @@ func TestAcceptanceIndexRecover(t *testing.T) {
}
defer os.RemoveAll(db1Path)
db1, err := database.Create("ffldb", db1Path, params.Net)
err = dbaccess.Open(db1Path)
if err != nil {
t.Fatalf("error creating db: %s", err)
}
@@ -104,10 +104,9 @@ func TestAcceptanceIndexRecover(t *testing.T) {
db1Config := blockdag.Config{
IndexManager: db1IndexManager,
DAGParams: params,
DB: db1,
}
db1DAG, teardown, err := blockdag.DAGSetup("", db1Config)
db1DAG, teardown, err := blockdag.DAGSetup("", false, db1Config)
if err != nil {
t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
}
@@ -130,11 +129,6 @@ func TestAcceptanceIndexRecover(t *testing.T) {
}
}
err = db1.FlushCache()
if err != nil {
t.Fatalf("Error flushing database to disk: %s", err)
}
db2Path, err := ioutil.TempDir("", "TestAcceptanceIndexRecover2")
if err != nil {
t.Fatalf("Error creating temporary directory: %s", err)
@@ -166,17 +160,20 @@ func TestAcceptanceIndexRecover(t *testing.T) {
t.Fatalf("Error fetching acceptance data: %s", err)
}
db2, err := database.Open("ffldb", db2Path, params.Net)
err = dbaccess.Close()
if err != nil {
t.Fatalf("Error opening database: %s", err)
t.Fatalf("Error closing the database: %s", err)
}
err = dbaccess.Open(db2Path)
if err != nil {
t.Fatalf("error creating db: %s", err)
}
db2Config := blockdag.Config{
DAGParams: params,
DB: db2,
}
db2DAG, teardown, err := blockdag.DAGSetup("", db2Config)
db2DAG, teardown, err := blockdag.DAGSetup("", false, db2Config)
if err != nil {
t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
}
@@ -199,10 +196,6 @@ func TestAcceptanceIndexRecover(t *testing.T) {
}
}
err = db2.FlushCache()
if err != nil {
t.Fatalf("Error flushing database to disk: %s", err)
}
db3Path, err := ioutil.TempDir("", "TestAcceptanceIndexRecover3")
if err != nil {
t.Fatalf("Error creating temporary directory: %s", err)
@@ -213,9 +206,13 @@ func TestAcceptanceIndexRecover(t *testing.T) {
t.Fatalf("copyDirectory: %s", err)
}
db3, err := database.Open("ffldb", db3Path, params.Net)
err = dbaccess.Close()
if err != nil {
t.Fatalf("Error opening database: %s", err)
t.Fatalf("Error closing the database: %s", err)
}
err = dbaccess.Open(db3Path)
if err != nil {
t.Fatalf("error creating db: %s", err)
}
db3AcceptanceIndex := NewAcceptanceIndex()
@@ -223,10 +220,9 @@ func TestAcceptanceIndexRecover(t *testing.T) {
db3Config := blockdag.Config{
IndexManager: db3IndexManager,
DAGParams: params,
DB: db3,
}
_, teardown, err = blockdag.DAGSetup("", db3Config)
_, teardown, err = blockdag.DAGSetup("", false, db3Config)
if err != nil {
t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
}

View File

@@ -1,112 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
/*
Package indexers implements optional block DAG indexes.
*/
package indexers
import (
"encoding/binary"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
)
var (
// byteOrder is the preferred byte order used for serializing numeric
// fields for storage in the database.
byteOrder = binary.LittleEndian
// errInterruptRequested indicates that an operation was cancelled due
// to a user-requested interrupt.
errInterruptRequested = errors.New("interrupt requested")
)
// NeedsInputser provides a generic interface for an indexer to specify that it
// requires the ability to look up inputs for a transaction.
type NeedsInputser interface {
NeedsInputs() bool
}
// Indexer provides a generic interface for an indexer that is managed by an
// index manager such as the Manager type provided by this package.
type Indexer interface {
// Key returns the key of the index as a byte slice.
Key() []byte
// Name returns the human-readable name of the index.
Name() string
// Create is invoked when the indexer manager determines the index needs
// to be created for the first time.
Create(dbTx database.Tx) error
// Init is invoked when the index manager is first initializing the
// index. This differs from the Create method in that it is called on
// every load, including the case the index was just created.
Init(db database.DB, dag *blockdag.BlockDAG) error
// ConnectBlock is invoked when the index manager is notified that a new
// block has been connected to the DAG.
ConnectBlock(dbTx database.Tx,
block *util.Block,
blockID uint64,
dag *blockdag.BlockDAG,
acceptedTxsData blockdag.MultiBlockTxsAcceptanceData,
virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error
// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error
}
// AssertError identifies an error that indicates an internal code consistency
// issue and should be treated as a critical and unrecoverable error.
type AssertError string
// Error returns the assertion error as a human-readable string and satisfies
// the error interface.
func (e AssertError) Error() string {
return "assertion failed: " + string(e)
}
// errDeserialize signifies that a problem was encountered when deserializing
// data.
type errDeserialize string
// Error implements the error interface.
func (e errDeserialize) Error() string {
return string(e)
}
// isDeserializeErr returns whether or not the passed error is an errDeserialize
// error.
func isDeserializeErr(err error) bool {
var deserializeErr errDeserialize
return errors.As(err, &deserializeErr)
}
// internalBucket is an abstraction over a database bucket. It is used to make
// the code easier to test since it allows mock objects in the tests to only
// implement these functions instead of everything a database.Bucket supports.
type internalBucket interface {
Get(key []byte) []byte
Put(key []byte, value []byte) error
Delete(key []byte) error
}
// interruptRequested returns true when the provided channel has been closed.
// This simplifies early shutdown slightly since the caller can just use an if
// statement instead of a select.
func interruptRequested(interrupted <-chan struct{}) bool {
select {
case <-interrupted:
return true
default:
}
return false
}

View File

@@ -0,0 +1,28 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
/*
Package indexers implements optional block DAG indexes.
*/
package indexers
import (
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util/daghash"
)
// Indexer provides a generic interface for an indexer that is managed by an
// index manager such as the Manager type provided by this package.
type Indexer interface {
// Init is invoked when the index manager is first initializing the
// index.
Init(dag *blockdag.BlockDAG) error
// ConnectBlock is invoked when the index manager is notified that a new
// block has been connected to the DAG.
ConnectBlock(dbContext *dbaccess.TxContext,
blockHash *daghash.Hash,
acceptedTxsData blockdag.MultiBlockTxsAcceptanceData) error
}
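The slimmed-down interface drops the Key/Name/Create/Recover machinery entirely; an index now only initializes itself and reacts to connected blocks inside a caller-supplied transaction context. A hypothetical minimal implementation, shown only to illustrate the reduced surface (it relies on the same imports as the interface above):

// noopIndex is a hypothetical Indexer that satisfies the new interface
// without persisting anything.
type noopIndex struct {
	dag *blockdag.BlockDAG
}

func (idx *noopIndex) Init(dag *blockdag.BlockDAG) error {
	idx.dag = dag
	return nil
}

func (idx *noopIndex) ConnectBlock(dbContext *dbaccess.TxContext, blockHash *daghash.Hash,
	acceptedTxsData blockdag.MultiBlockTxsAcceptanceData) error {
	// A real index would store data derived from acceptedTxsData via the
	// caller-supplied dbContext so the write commits atomically with the block.
	return nil
}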

View File

@@ -6,190 +6,30 @@ package indexers
import (
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util/daghash"
)
var (
// indexTipsBucketName is the name of the db bucket used to house the
// current tip of each index.
indexTipsBucketName = []byte("idxtips")
indexCurrentBlockIDBucketName = []byte("idxcurrentblockid")
)
// Manager defines an index manager that manages multiple optional indexes and
// implements the blockdag.IndexManager interface so it can be seamlessly
// plugged into normal DAG processing.
type Manager struct {
db database.DB
enabledIndexes []Indexer
}
// Ensure the Manager type implements the blockdag.IndexManager interface.
var _ blockdag.IndexManager = (*Manager)(nil)
// indexDropKey returns the key for an index which indicates it is in the
// process of being dropped.
func indexDropKey(idxKey []byte) []byte {
dropKey := make([]byte, len(idxKey)+1)
dropKey[0] = 'd'
copy(dropKey[1:], idxKey)
return dropKey
}
// maybeFinishDrops determines if each of the enabled indexes is in the middle
// of being dropped and finishes dropping them when they are. This is necessary
// because dropping an index has to be done in several atomic steps rather than
// one big atomic step due to the massive number of entries.
func (m *Manager) maybeFinishDrops(interrupt <-chan struct{}) error {
indexNeedsDrop := make([]bool, len(m.enabledIndexes))
err := m.db.View(func(dbTx database.Tx) error {
// None of the indexes needs to be dropped if the index tips
// bucket hasn't been created yet.
indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
if indexesBucket == nil {
return nil
}
// Mark the indexer as requiring a drop if one is already in
// progress.
for i, indexer := range m.enabledIndexes {
dropKey := indexDropKey(indexer.Key())
if indexesBucket.Get(dropKey) != nil {
indexNeedsDrop[i] = true
}
}
return nil
})
if err != nil {
return err
}
if interruptRequested(interrupt) {
return errInterruptRequested
}
// Finish dropping any of the enabled indexes that are already in the
// middle of being dropped.
for i, indexer := range m.enabledIndexes {
if !indexNeedsDrop[i] {
continue
}
log.Infof("Resuming %s drop", indexer.Name())
err := dropIndex(m.db, indexer.Key(), indexer.Name(), interrupt)
if err != nil {
return err
}
}
return nil
}
// maybeCreateIndexes determines if each of the enabled indexes have already
// been created and creates them if not.
func (m *Manager) maybeCreateIndexes(dbTx database.Tx) error {
indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
for _, indexer := range m.enabledIndexes {
// Nothing to do if the index tip already exists.
idxKey := indexer.Key()
if indexesBucket.Get(idxKey) != nil {
continue
}
// The tip for the index does not exist, so create it and
// invoke the create callback for the index so it can perform
// any one-time initialization it requires.
if err := indexer.Create(dbTx); err != nil {
return err
}
// TODO (Mike): this is a temporary solution to prevent the node from not starting
// because it thinks indexers are not initialized.
// Indexers, however, do not work properly, and a general solution for their operation is required.
indexesBucket.Put(idxKey, []byte{0})
}
return nil
}
// Init initializes the enabled indexes. This is called during DAG
// initialization and primarily consists of catching up all indexes to the
// current tips. This is necessary since each index can be disabled
// and re-enabled at any time and attempting to catch-up indexes at the same
// time new blocks are being downloaded would lead to an overall longer time to
// catch up due to the I/O contention.
//
// Init initializes the enabled indexes.
// This is part of the blockdag.IndexManager interface.
func (m *Manager) Init(db database.DB, blockDAG *blockdag.BlockDAG, interrupt <-chan struct{}) error {
// Nothing to do when no indexes are enabled.
if len(m.enabledIndexes) == 0 {
return nil
}
if interruptRequested(interrupt) {
return errInterruptRequested
}
m.db = db
// Finish and drops that were previously interrupted.
if err := m.maybeFinishDrops(interrupt); err != nil {
return err
}
// Create the initial state for the indexes as needed.
err := m.db.Update(func(dbTx database.Tx) error {
// Create the bucket for the current tips as needed.
meta := dbTx.Metadata()
_, err := meta.CreateBucketIfNotExists(indexTipsBucketName)
if err != nil {
return err
}
if _, err := meta.CreateBucketIfNotExists(indexCurrentBlockIDBucketName); err != nil {
return err
}
return m.maybeCreateIndexes(dbTx)
})
if err != nil {
return err
}
// Initialize each of the enabled indexes.
func (m *Manager) Init(dag *blockdag.BlockDAG) error {
for _, indexer := range m.enabledIndexes {
if err := indexer.Init(db, blockDAG); err != nil {
if err := indexer.Init(dag); err != nil {
return err
}
}
return m.recoverIfNeeded()
}
// recoverIfNeeded checks if the node worked for some time
// without one of the current enabled indexes, and if it's
// the case, recovers the missing blocks from the index.
func (m *Manager) recoverIfNeeded() error {
return m.db.Update(func(dbTx database.Tx) error {
lastKnownBlockID := blockdag.DBFetchCurrentBlockID(dbTx)
for _, indexer := range m.enabledIndexes {
serializedCurrentIdxBlockID := dbTx.Metadata().Bucket(indexCurrentBlockIDBucketName).Get(indexer.Key())
currentIdxBlockID := uint64(0)
if serializedCurrentIdxBlockID != nil {
currentIdxBlockID = blockdag.DeserializeBlockID(serializedCurrentIdxBlockID)
}
if lastKnownBlockID > currentIdxBlockID {
err := indexer.Recover(dbTx, currentIdxBlockID, lastKnownBlockID)
if err != nil {
return err
}
}
}
return nil
})
return nil
}
// ConnectBlock must be invoked when a block is added to the DAG. It
@@ -197,32 +37,13 @@ func (m *Manager) recoverIfNeeded() error {
// checks, and invokes each indexer.
//
// This is part of the blockdag.IndexManager interface.
func (m *Manager) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
func (m *Manager) ConnectBlock(dbContext *dbaccess.TxContext, blockHash *daghash.Hash, txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
// Call each of the currently active optional indexes with the block
// being connected so they can update accordingly.
for _, index := range m.enabledIndexes {
// Notify the indexer with the connected block so it can index it.
if err := index.ConnectBlock(dbTx, block, blockID, dag, txsAcceptanceData, virtualTxsAcceptanceData); err != nil {
return err
}
}
// Add the new block ID index entry for the block being connected and
// update the current internal block ID accordingly.
err := m.updateIndexersWithCurrentBlockID(dbTx, block.Hash(), blockID)
if err != nil {
return err
}
return nil
}
func (m *Manager) updateIndexersWithCurrentBlockID(dbTx database.Tx, blockHash *daghash.Hash, blockID uint64) error {
serializedBlockID := blockdag.SerializeBlockID(blockID)
for _, index := range m.enabledIndexes {
err := dbTx.Metadata().Bucket(indexCurrentBlockIDBucketName).Put(index.Key(), serializedBlockID)
if err != nil {
if err := index.ConnectBlock(dbContext, blockHash, txsAcceptanceData); err != nil {
return err
}
}
@@ -238,155 +59,3 @@ func NewManager(enabledIndexes []Indexer) *Manager {
enabledIndexes: enabledIndexes,
}
}
// dropIndex drops the passed index from the database. Since indexes can be
// massive, it deletes the index in multiple database transactions in order to
// keep memory usage to reasonable levels. It also marks the drop in progress
// so the drop can be resumed if it is stopped before it is done before the
// index can be used again.
func dropIndex(db database.DB, idxKey []byte, idxName string, interrupt <-chan struct{}) error {
// Nothing to do if the index doesn't already exist.
var needsDelete bool
err := db.View(func(dbTx database.Tx) error {
indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
if indexesBucket != nil && indexesBucket.Get(idxKey) != nil {
needsDelete = true
}
return nil
})
if err != nil {
return err
}
if !needsDelete {
log.Infof("Not dropping %s because it does not exist", idxName)
return nil
}
// Mark that the index is in the process of being dropped so that it
// can be resumed on the next start if interrupted before the process is
// complete.
log.Infof("Dropping all %s entries. This might take a while...",
idxName)
err = db.Update(func(dbTx database.Tx) error {
indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
return indexesBucket.Put(indexDropKey(idxKey), idxKey)
})
if err != nil {
return err
}
// Since the indexes can be so large, attempting to simply delete
// the bucket in a single database transaction would result in massive
// memory usage and likely crash many systems due to ulimits. In order
// to avoid this, use a cursor to delete a maximum number of entries out
// of the bucket at a time. Recurse buckets depth-first to delete any
// sub-buckets.
const maxDeletions = 2000000
var totalDeleted uint64
// Recurse through all buckets in the index, cataloging each for
// later deletion.
var subBuckets [][][]byte
var subBucketClosure func(database.Tx, []byte, [][]byte) error
subBucketClosure = func(dbTx database.Tx,
subBucket []byte, tlBucket [][]byte) error {
// Get full bucket name and append to subBuckets for later
// deletion.
var bucketName [][]byte
if (tlBucket == nil) || (len(tlBucket) == 0) {
bucketName = append(bucketName, subBucket)
} else {
bucketName = append(tlBucket, subBucket)
}
subBuckets = append(subBuckets, bucketName)
// Recurse sub-buckets to append to subBuckets slice.
bucket := dbTx.Metadata()
for _, subBucketName := range bucketName {
bucket = bucket.Bucket(subBucketName)
}
return bucket.ForEachBucket(func(k []byte) error {
return subBucketClosure(dbTx, k, bucketName)
})
}
// Call subBucketClosure with top-level bucket.
err = db.View(func(dbTx database.Tx) error {
return subBucketClosure(dbTx, idxKey, nil)
})
if err != nil {
return err
}
// Iterate through each sub-bucket in reverse, deepest-first, deleting
// all keys inside them and then dropping the buckets themselves.
for i := range subBuckets {
bucketName := subBuckets[len(subBuckets)-1-i]
// Delete maxDeletions key/value pairs at a time.
for numDeleted := maxDeletions; numDeleted == maxDeletions; {
numDeleted = 0
err := db.Update(func(dbTx database.Tx) error {
subBucket := dbTx.Metadata()
for _, subBucketName := range bucketName {
subBucket = subBucket.Bucket(subBucketName)
}
cursor := subBucket.Cursor()
for ok := cursor.First(); ok; ok = cursor.Next() &&
numDeleted < maxDeletions {
if err := cursor.Delete(); err != nil {
return err
}
numDeleted++
}
return nil
})
if err != nil {
return err
}
if numDeleted > 0 {
totalDeleted += uint64(numDeleted)
log.Infof("Deleted %d keys (%d total) from %s",
numDeleted, totalDeleted, idxName)
}
}
if interruptRequested(interrupt) {
return errInterruptRequested
}
// Drop the bucket itself.
err = db.Update(func(dbTx database.Tx) error {
bucket := dbTx.Metadata()
for j := 0; j < len(bucketName)-1; j++ {
bucket = bucket.Bucket(bucketName[j])
}
return bucket.DeleteBucket(bucketName[len(bucketName)-1])
})
if err != nil {
return err
}
}
// Remove the index tip, index bucket, and in-progress drop flag now
// that all index entries have been removed.
err = db.Update(func(dbTx database.Tx) error {
meta := dbTx.Metadata()
indexesBucket := meta.Bucket(indexTipsBucketName)
if err := indexesBucket.Delete(idxKey); err != nil {
return err
}
if err := meta.Bucket(indexCurrentBlockIDBucketName).Delete(idxKey); err != nil {
return err
}
return indexesBucket.Delete(indexDropKey(idxKey))
})
if err != nil {
return err
}
log.Infof("Dropped %s", idxName)
return nil
}

View File

@@ -1,206 +0,0 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"math"
"sort"
"sync"
"time"
)
const (
// maxAllowedOffsetSecs is the maximum number of seconds in either
// direction that local clock will be adjusted. When the median time
// of the network is outside of this range, no offset will be applied.
maxAllowedOffsetSecs = 70 * 60 // 1 hour 10 minutes
// similarTimeSecs is the number of seconds in either direction from the
// local clock that is used to determine that it is likley wrong and
// hence to show a warning.
similarTimeSecs = 5 * 60 // 5 minutes
)
var (
// maxMedianTimeEntries is the maximum number of entries allowed in the
// median time data. This is a variable as opposed to a constant so the
// test code can modify it.
maxMedianTimeEntries = 200
)
// MedianTimeSource provides a mechanism to add several time samples which are
// used to determine a median time which is then used as an offset to the local
// clock.
type MedianTimeSource interface {
// AdjustedTime returns the current time adjusted by the median time
// offset as calculated from the time samples added by AddTimeSample.
AdjustedTime() time.Time
// AddTimeSample adds a time sample that is used when determining the
// median time of the added samples.
AddTimeSample(id string, timeVal time.Time)
// Offset returns the number of seconds to adjust the local clock based
// upon the median of the time samples added by AddTimeSample.
Offset() time.Duration
}
// int64Sorter implements sort.Interface to allow a slice of 64-bit integers to
// be sorted.
type int64Sorter []int64
// Len returns the number of 64-bit integers in the slice. It is part of the
// sort.Interface implementation.
func (s int64Sorter) Len() int {
return len(s)
}
// Swap swaps the 64-bit integers at the passed indices. It is part of the
// sort.Interface implementation.
func (s int64Sorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
// Less returns whether the 64-bit integer with index i should sort before the
// 64-bit integer with index j. It is part of the sort.Interface
// implementation.
func (s int64Sorter) Less(i, j int) bool {
return s[i] < s[j]
}
// medianTime provides an implementation of the MedianTimeSource interface.
type medianTime struct {
mtx sync.Mutex
knownIDs map[string]struct{}
offsets []int64
offsetSecs int64
invalidTimeChecked bool
}
// Ensure the medianTime type implements the MedianTimeSource interface.
var _ MedianTimeSource = (*medianTime)(nil)
// AdjustedTime returns the current time adjusted by the median time offset as
// calculated from the time samples added by AddTimeSample.
//
// This function is safe for concurrent access and is part of the
// MedianTimeSource interface implementation.
func (m *medianTime) AdjustedTime() time.Time {
m.mtx.Lock()
defer m.mtx.Unlock()
// Limit the adjusted time to 1 second precision.
now := time.Unix(time.Now().Unix(), 0)
return now.Add(time.Duration(m.offsetSecs) * time.Second)
}
// AddTimeSample adds a time sample that is used when determining the median
// time of the added samples.
//
// This function is safe for concurrent access and is part of the
// MedianTimeSource interface implementation.
func (m *medianTime) AddTimeSample(sourceID string, timeVal time.Time) {
m.mtx.Lock()
defer m.mtx.Unlock()
// Don't add time data from the same source.
if _, exists := m.knownIDs[sourceID]; exists {
return
}
m.knownIDs[sourceID] = struct{}{}
// Truncate the provided offset to seconds and append it to the slice
// of offsets while respecting the maximum number of allowed entries by
// replacing the oldest entry with the new entry once the maximum number
// of entries is reached.
now := time.Unix(time.Now().Unix(), 0)
offsetSecs := int64(timeVal.Sub(now).Seconds())
numOffsets := len(m.offsets)
if numOffsets == maxMedianTimeEntries && maxMedianTimeEntries > 0 {
m.offsets = m.offsets[1:]
numOffsets--
}
m.offsets = append(m.offsets, offsetSecs)
numOffsets++
// Sort the offsets so the median can be obtained as needed later.
sortedOffsets := make([]int64, numOffsets)
copy(sortedOffsets, m.offsets)
sort.Sort(int64Sorter(sortedOffsets))
offsetDuration := time.Duration(offsetSecs) * time.Second
log.Debugf("Added time sample of %s (total: %d)", offsetDuration,
numOffsets)
// The median offset is only updated when there are enough offsets and
// the number of offsets is odd so the middle value is the true median.
// Thus, there is nothing to do when those conditions are not met.
if numOffsets < 5 || numOffsets&0x01 != 1 {
return
}
// At this point the number of offsets in the list is odd, so the
// middle value of the sorted offsets is the median.
median := sortedOffsets[numOffsets/2]
// Set the new offset when the median offset is within the allowed
// offset range.
if math.Abs(float64(median)) < maxAllowedOffsetSecs {
m.offsetSecs = median
} else {
// The median offset of all added time data is larger than the
// maximum allowed offset, so don't use an offset. This
// effectively limits how far the local clock can be skewed.
m.offsetSecs = 0
if !m.invalidTimeChecked {
m.invalidTimeChecked = true
// Find if any time samples have a time that is close
// to the local time.
var remoteHasCloseTime bool
for _, offset := range sortedOffsets {
if math.Abs(float64(offset)) < similarTimeSecs {
remoteHasCloseTime = true
break
}
}
// Warn if none of the time samples are close.
if !remoteHasCloseTime {
log.Warnf("Please check your date and time " +
"are correct! kaspad will not work " +
"properly with an invalid time")
}
}
}
medianDuration := time.Duration(m.offsetSecs) * time.Second
log.Debugf("New time offset: %d", medianDuration)
}
// Offset returns the number of seconds to adjust the local clock based upon the
// median of the time samples added by AddTimeSample.
//
// This function is safe for concurrent access and is part of the
// MedianTimeSource interface implementation.
func (m *medianTime) Offset() time.Duration {
m.mtx.Lock()
defer m.mtx.Unlock()
return time.Duration(m.offsetSecs) * time.Second
}
// NewMedianTime returns a new instance of a concurrency-safe implementation of
// the MedianTimeSource interface. The returned implementation contains the
// rules necessary for proper time handling in the DAG consensus rules and
// expects the time samples to be added from the timestamp field of the version
// message received from remote peers that successfully connect and negotiate.
func NewMedianTime() MedianTimeSource {
return &medianTime{
knownIDs: make(map[string]struct{}),
offsets: make([]int64, 0, maxMedianTimeEntries),
}
}
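
Since this file is removed by the changeset (it is replaced by the simpler TimeSource further down), a short usage sketch of the old API for context; the peer IDs are hypothetical and the sample offsets are the ones from the test table below, whose median is -12:

timeSource := NewMedianTime()
base := time.Now()
for i, offsetSecs := range []int64{-13, 57, -4, -23, -12} {
	// Five samples from five distinct sources: once the sample count
	// is odd and at least 5, the median offset (-12s) is applied.
	id := fmt.Sprintf("peer%d", i)
	timeSource.AddTimeSample(id, base.Add(time.Duration(offsetSecs)*time.Second))
}
offset := timeSource.Offset()         // -12 * time.Second
adjusted := timeSource.AdjustedTime() // local time shifted by the offset
_, _ = offset, adjusted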

View File

@@ -1,102 +0,0 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"strconv"
"testing"
"time"
)
// TestMedianTime tests the medianTime implementation.
func TestMedianTime(t *testing.T) {
tests := []struct {
in []int64
wantOffset int64
useDupID bool
}{
// Not enough samples must result in an offset of 0.
{in: []int64{1}, wantOffset: 0},
{in: []int64{1, 2}, wantOffset: 0},
{in: []int64{1, 2, 3}, wantOffset: 0},
{in: []int64{1, 2, 3, 4}, wantOffset: 0},
// Various numbers of entries. The expected offset is only
// updated when the number of elements is odd.
{in: []int64{-13, 57, -4, -23, -12}, wantOffset: -12},
{in: []int64{55, -13, 61, -52, 39, 55}, wantOffset: 39},
{in: []int64{-62, -58, -30, -62, 51, -30, 15}, wantOffset: -30},
{in: []int64{29, -47, 39, 54, 42, 41, 8, -33}, wantOffset: 39},
{in: []int64{37, 54, 9, -21, -56, -36, 5, -11, -39}, wantOffset: -11},
{in: []int64{57, -28, 25, -39, 9, 63, -16, 19, -60, 25}, wantOffset: 9},
{in: []int64{-5, -4, -3, -2, -1}, wantOffset: -3, useDupID: true},
// The offset stops being updated once the max number of entries
// has been reached.
{in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52}, wantOffset: 17},
{in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52, 45}, wantOffset: 17},
{in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52, 45, 4}, wantOffset: 17},
// Offsets that are too far away from the local time should
// be ignored.
{in: []int64{-4201, 4202, -4203, 4204, -4205}, wantOffset: 0},
// Exercise the condition where the median offset is greater
// than the max allowed adjustment, but there is at least one
// sample that is close enough to the current time to avoid
// triggering a warning about an invalid local clock.
{in: []int64{4201, 4202, 4203, 4204, -299}, wantOffset: 0},
}
// Modify the max number of allowed median time entries for these tests.
maxMedianTimeEntries = 10
defer func() { maxMedianTimeEntries = 200 }()
for i, test := range tests {
filter := NewMedianTime()
for j, offset := range test.in {
id := strconv.Itoa(j)
now := time.Unix(time.Now().Unix(), 0)
tOffset := now.Add(time.Duration(offset) * time.Second)
filter.AddTimeSample(id, tOffset)
// Ensure the duplicate IDs are ignored.
if test.useDupID {
// Modify the offsets to ensure the final median
// would be different if the duplicate is added.
tOffset = tOffset.Add(time.Duration(offset) *
time.Second)
filter.AddTimeSample(id, tOffset)
}
}
// Since it is possible that the time.Now call in AddTimeSample
// and the time.Now calls here in the tests will be off by one
// second, allow a fudge factor to compensate.
gotOffset := filter.Offset()
wantOffset := time.Duration(test.wantOffset) * time.Second
wantOffset2 := time.Duration(test.wantOffset-1) * time.Second
if gotOffset != wantOffset && gotOffset != wantOffset2 {
t.Errorf("Offset #%d: unexpected offset -- got %v, "+
"want %v or %v", i, gotOffset, wantOffset,
wantOffset2)
continue
}
// Since it is possible that the time.Now call in AdjustedTime
// and the time.Now call here in the tests will be off by one
// second, allow a fudge factor to compensate.
adjustedTime := filter.AdjustedTime()
now := time.Unix(time.Now().Unix(), 0)
wantTime := now.Add(filter.Offset())
wantTime2 := now.Add(filter.Offset() - time.Second)
if !adjustedTime.Equal(wantTime) && !adjustedTime.Equal(wantTime2) {
t.Errorf("AdjustedTime #%d: unexpected result -- got %v, "+
"want %v or %v", i, adjustedTime, wantTime,
wantTime2)
continue
}
}
}

View File

@@ -3,8 +3,10 @@ package blockdag
import (
"bytes"
"encoding/binary"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"time"
)
@@ -12,6 +14,8 @@ import (
// BlockForMining returns a block with the given transactions
// that points to the current DAG tips and that is valid in
// all aspects except proof of work.
//
// This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*wire.MsgBlock, error) {
blockTimestamp := dag.NextBlockTime()
requiredDifficulty := dag.NextRequiredDifficulty(blockTimestamp)
@@ -34,18 +38,17 @@ func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*wire.MsgBlock, er
msgBlock.AddTransaction(tx.MsgTx())
}
utxoWithTransactions, err := dag.UTXOSet().WithTransactions(msgBlock.Transactions, UnacceptedBlueScore, false)
multiset, err := dag.NextBlockMultiset()
if err != nil {
return nil, err
}
utxoCommitment := utxoWithTransactions.Multiset().Hash()
msgBlock.Header = wire.BlockHeader{
Version: nextBlockVersion,
ParentHashes: dag.TipHashes(),
HashMerkleRoot: hashMerkleTree.Root(),
AcceptedIDMerkleRoot: acceptedIDMerkleRoot,
UTXOCommitment: utxoCommitment,
UTXOCommitment: (*daghash.Hash)(multiset.Finalize()),
Timestamp: blockTimestamp,
Bits: requiredDifficulty,
}
@@ -53,6 +56,19 @@ func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*wire.MsgBlock, er
return &msgBlock, nil
}
// NextBlockMultiset returns the multiset of an assumed next block
// built on top of the current tips.
//
// This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) NextBlockMultiset() (*secp256k1.MultiSet, error) {
_, selectedParentPastUTXO, txsAcceptanceData, err := dag.pastUTXO(&dag.virtual.blockNode)
if err != nil {
return nil, err
}
return dag.virtual.blockNode.calcMultiset(dag, txsAcceptanceData, selectedParentPastUTXO)
}
// CoinbasePayloadExtraData returns coinbase payload extra data parameter
// which is built from extra nonce and coinbase flags.
func CoinbasePayloadExtraData(extraNonce uint64, coinbaseFlags string) ([]byte, error) {
@@ -101,7 +117,7 @@ func (dag *BlockDAG) NextBlockTime() time.Time {
// timestamp is truncated to a second boundary before comparison since a
// block timestamp does not support a precision greater than one
// second.
newTimestamp := dag.AdjustedTime()
newTimestamp := dag.Now()
minTimestamp := dag.NextBlockMinimumTime()
if newTimestamp.Before(minTimestamp) {
newTimestamp = minTimestamp

29 blockdag/multisetio.go Normal file
View File

@@ -0,0 +1,29 @@
package blockdag
import (
"encoding/binary"
"github.com/kaspanet/go-secp256k1"
"io"
)
const multisetPointSize = 32
// serializeMultiset serializes an ECMH multiset.
func serializeMultiset(w io.Writer, ms *secp256k1.MultiSet) error {
serialized := ms.Serialize()
err := binary.Write(w, byteOrder, serialized)
if err != nil {
return err
}
return nil
}
// deserializeMultiset deserializes an ECMH multiset.
func deserializeMultiset(r io.Reader) (*secp256k1.MultiSet, error) {
serialized := &secp256k1.SerializedMultiSet{}
err := binary.Read(r, byteOrder, serialized[:])
if err != nil {
return nil, err
}
return secp256k1.DeserializeMultiSet(serialized)
}
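
A minimal round-trip sketch of the two helpers above, assuming the surrounding blockdag package context (the bytes import and the go-secp256k1 types shown here); roundTripMultiset is an illustrative name:

func roundTripMultiset(ms *secp256k1.MultiSet) (*secp256k1.MultiSet, error) {
	// Serialize into an in-memory buffer, then deserialize it back.
	// A successful round trip yields a multiset with the same hash.
	w := &bytes.Buffer{}
	if err := serializeMultiset(w, ms); err != nil {
		return nil, err
	}
	return deserializeMultiset(bytes.NewReader(w.Bytes()))
}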

131 blockdag/multisetstore.go Normal file
View File

@@ -0,0 +1,131 @@
package blockdag
import (
"bytes"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/locks"
"github.com/pkg/errors"
)
type multisetStore struct {
dag *BlockDAG
new map[daghash.Hash]struct{}
loaded map[daghash.Hash]secp256k1.MultiSet
mtx *locks.PriorityMutex
}
func newMultisetStore(dag *BlockDAG) *multisetStore {
return &multisetStore{
dag: dag,
new: make(map[daghash.Hash]struct{}),
loaded: make(map[daghash.Hash]secp256k1.MultiSet),
}
}
func (store *multisetStore) setMultiset(node *blockNode, ms *secp256k1.MultiSet) {
store.loaded[*node.hash] = *ms
store.addToNewBlocks(node.hash)
}
func (store *multisetStore) addToNewBlocks(blockHash *daghash.Hash) {
store.new[*blockHash] = struct{}{}
}
func multisetNotFoundError(blockHash *daghash.Hash) error {
return errors.Errorf("Couldn't find multiset data for block %s", blockHash)
}
func (store *multisetStore) multisetByBlockNode(node *blockNode) (*secp256k1.MultiSet, error) {
ms, exists := store.multisetByBlockHash(node.hash)
if !exists {
return nil, multisetNotFoundError(node.hash)
}
return ms, nil
}
func (store *multisetStore) multisetByBlockHash(hash *daghash.Hash) (*secp256k1.MultiSet, bool) {
ms, ok := store.loaded[*hash]
return &ms, ok
}
// flushToDB writes all new multiset data to the database.
func (store *multisetStore) flushToDB(dbContext *dbaccess.TxContext) error {
if len(store.new) == 0 {
return nil
}
w := &bytes.Buffer{}
for hash := range store.new {
hash := hash // Copy hash to a new variable to avoid passing the same pointer
w.Reset()
ms, exists := store.loaded[hash]
if !exists {
return multisetNotFoundError(&hash)
}
err := serializeMultiset(w, &ms)
if err != nil {
return err
}
err = store.storeMultiset(dbContext, &hash, w.Bytes())
if err != nil {
return err
}
}
return nil
}
func (store *multisetStore) clearNewEntries() {
store.new = make(map[daghash.Hash]struct{})
}
func (store *multisetStore) init(dbContext dbaccess.Context) error {
cursor, err := dbaccess.MultisetCursor(dbContext)
if err != nil {
return err
}
defer cursor.Close()
for ok := cursor.First(); ok; ok = cursor.Next() {
key, err := cursor.Key()
if err != nil {
return err
}
hash, err := daghash.NewHash(key.Suffix())
if err != nil {
return err
}
serializedMS, err := cursor.Value()
if err != nil {
return err
}
ms, err := deserializeMultiset(bytes.NewReader(serializedMS))
if err != nil {
return err
}
store.loaded[*hash] = *ms
}
return nil
}
// storeMultiset stores the multiset data to the database.
func (store *multisetStore) storeMultiset(dbContext dbaccess.Context, blockHash *daghash.Hash, serializedMS []byte) error {
exists, err := dbaccess.HasMultiset(dbContext, blockHash)
if err != nil {
return err
}
if exists {
return errors.Errorf("Can't override an existing multiset database entry for block %s", blockHash)
}
return dbaccess.StoreMultiset(dbContext, blockHash, serializedMS)
}
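
A hypothetical sketch of how a flush of this store might be driven, mirroring the dbaccess transaction pattern (NewTx / RollbackUnlessClosed / Commit) that the utxoDiffStore test further down uses; flushMultisetStore is an illustrative name:

func flushMultisetStore(store *multisetStore) error {
	dbTx, err := dbaccess.NewTx()
	if err != nil {
		return err
	}
	// Roll back automatically if we return before committing.
	defer dbTx.RollbackUnlessClosed()
	if err := store.flushToDB(dbTx); err != nil {
		return err
	}
	if err := dbTx.Commit(); err != nil {
		return err
	}
	// Only forget the new entries once they are safely on disk.
	store.clearNewEntries()
	return nil
}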

View File

@@ -19,7 +19,7 @@ func TestNotifications(t *testing.T) {
}
// Create a new database and dag instance to run tests against.
dag, teardownFunc, err := DAGSetup("notifications", Config{
dag, teardownFunc, err := DAGSetup("notifications", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {

View File

@@ -253,6 +253,8 @@ func (dag *BlockDAG) processBlockNoLock(block *util.Block, flags BehaviorFlags)
}
}
dag.addBlockProcessingTimestamp()
log.Debugf("Accepted block %s", blockHash)
return false, false, nil
@@ -264,7 +266,7 @@ func (dag *BlockDAG) maxDelayOfParents(parentHashes []*daghash.Hash) (delay time
for _, parentHash := range parentHashes {
if delayedParent, exists := dag.delayedBlocks[*parentHash]; exists {
isDelayed = true
parentDelay := delayedParent.processTime.Sub(dag.AdjustedTime())
parentDelay := delayedParent.processTime.Sub(dag.Now())
if parentDelay > delay {
delay = parentDelay
}

View File

@@ -11,7 +11,7 @@ import (
)
func TestProcessOrphans(t *testing.T) {
dag, teardownFunc, err := DAGSetup("TestProcessOrphans", Config{
dag, teardownFunc, err := DAGSetup("TestProcessOrphans", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
@@ -72,45 +72,35 @@ func TestProcessOrphans(t *testing.T) {
}
}
type fakeTimeSource struct {
time time.Time
}
func (fts *fakeTimeSource) AdjustedTime() time.Time {
return fts.time
}
func (fts *fakeTimeSource) AddTimeSample(_ string, _ time.Time) {
}
func (fts *fakeTimeSource) Offset() time.Duration {
return 0
}
func TestProcessDelayedBlocks(t *testing.T) {
// We use dag1 so we can build the test blocks with the proper
// block header (UTXO commitment, acceptedIDMerkleroot, etc), and
// then we use dag2 for the actual test.
dag1, teardownFunc, err := DAGSetup("TestProcessDelayedBlocks1", Config{
dag1, teardownFunc, err := DAGSetup("TestProcessDelayedBlocks1", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
t.Fatalf("Failed to setup DAG instance: %v", err)
}
defer teardownFunc()
isDAG1Open := true
defer func() {
if isDAG1Open {
teardownFunc()
}
}()
initialTime := dag1.dagParams.GenesisBlock.Header.Timestamp
// Here we use a fake time source that returns a timestamp
// one hour into the future to make delayedBlock artificially
// valid.
dag1.timeSource = &fakeTimeSource{initialTime.Add(time.Hour)}
dag1.timeSource = newFakeTimeSource(initialTime.Add(time.Hour))
delayedBlock, err := PrepareBlockForTest(dag1, []*daghash.Hash{dag1.dagParams.GenesisBlock.BlockHash()}, nil)
if err != nil {
t.Fatalf("error in PrepareBlockForTest: %s", err)
}
blockDelay := time.Duration(dag1.dagParams.TimestampDeviationTolerance+5) * time.Second
blockDelay := time.Duration(dag1.dagParams.TimestampDeviationTolerance*uint64(dag1.targetTimePerBlock)+5) * time.Second
delayedBlock.Header.Timestamp = initialTime.Add(blockDelay)
isOrphan, isDelayed, err := dag1.ProcessBlock(util.NewBlock(delayedBlock), BFNoPoWCheck)
@@ -131,18 +121,21 @@ func TestProcessDelayedBlocks(t *testing.T) {
t.Fatalf("error in PrepareBlockForTest: %s", err)
}
teardownFunc()
isDAG1Open = false
// Here the actual test begins. We add a delayed block and
// its child and check that they are not added to the DAG,
// and check that they're added only if we add a new block
// after the delayed block timestamp is valid.
dag2, teardownFunc2, err := DAGSetup("TestProcessDelayedBlocks2", Config{
dag2, teardownFunc2, err := DAGSetup("TestProcessDelayedBlocks2", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
t.Fatalf("Failed to setup DAG instance: %v", err)
}
defer teardownFunc2()
dag2.timeSource = &fakeTimeSource{initialTime}
dag2.timeSource = newFakeTimeSource(initialTime)
isOrphan, isDelayed, err = dag2.ProcessBlock(util.NewBlock(delayedBlock), BFNoPoWCheck)
if err != nil {
@@ -209,10 +202,13 @@ func TestProcessDelayedBlocks(t *testing.T) {
}
// We advance the clock to the point where delayedBlock timestamp is valid.
secondsUntilDelayedBlockIsValid := delayedBlock.Header.Timestamp.Unix() - int64(dag2.TimestampDeviationTolerance) - dag2.AdjustedTime().Unix() + 1
dag2.timeSource = &fakeTimeSource{initialTime.Add(time.Duration(secondsUntilDelayedBlockIsValid) * time.Second)}
deviationTolerance := int64(dag2.TimestampDeviationTolerance) * dag2.targetTimePerBlock
secondsUntilDelayedBlockIsValid := delayedBlock.Header.Timestamp.Unix() - deviationTolerance - dag2.Now().Unix() + 1
dag2.timeSource = newFakeTimeSource(initialTime.Add(time.Duration(secondsUntilDelayedBlockIsValid) * time.Second))
blockAfterDelay, err := PrepareBlockForTest(dag2, []*daghash.Hash{dag2.dagParams.GenesisBlock.BlockHash()}, nil)
blockAfterDelay, err := PrepareBlockForTest(dag2,
[]*daghash.Hash{dag2.dagParams.GenesisBlock.BlockHash()},
nil)
if err != nil {
t.Fatalf("error in PrepareBlockForTest: %s", err)
}

View File

@@ -311,7 +311,7 @@ func (rtn *reachabilityTreeNode) countSubtrees(subTreeSizeMap map[*reachabilityT
if len(current.children) == 0 {
// We reached a leaf
subTreeSizeMap[current] = 1
} else if calculatedChildrenCount[current] <= uint64(len(current.children)) {
} else if _, ok := subTreeSizeMap[current]; !ok {
// We haven't yet calculated the subtree size of
// the current node. Add all its children to the
// queue

View File

@@ -609,6 +609,46 @@ func TestReindexIntervalErrors(t *testing.T) {
}
}
func BenchmarkReindexInterval(b *testing.B) {
for i := 0; i < b.N; i++ {
b.StopTimer()
root := newReachabilityTreeNode(&blockNode{})
const subTreeSize = 70000
// We set the interval of the root to subTreeSize*2 because
// its first child gets half of the interval, so a reindex
// from the root should happen after adding subTreeSize
// nodes.
root.setInterval(newReachabilityInterval(0, subTreeSize*2))
currentTreeNode := root
for i := 0; i < subTreeSize; i++ {
childTreeNode := newReachabilityTreeNode(&blockNode{})
_, err := currentTreeNode.addChild(childTreeNode)
if err != nil {
b.Fatalf("addChild: %s", err)
}
currentTreeNode = childTreeNode
}
remainingIntervalBefore := *root.remainingInterval
// After we added subTreeSize nodes, adding the next
// node should lead to a reindex from root.
fullReindexTriggeringNode := newReachabilityTreeNode(&blockNode{})
b.StartTimer()
_, err := currentTreeNode.addChild(fullReindexTriggeringNode)
b.StopTimer()
if err != nil {
b.Fatalf("addChild: %s", err)
}
if *root.remainingInterval == remainingIntervalBefore {
b.Fatal("Expected a reindex from root, but it didn't happen")
}
}
}
func TestFutureCoveringBlockSetString(t *testing.T) {
treeNodeA := newReachabilityTreeNode(&blockNode{})
treeNodeA.setInterval(newReachabilityInterval(123, 456))

View File

@@ -3,6 +3,7 @@ package blockdag
import (
"bytes"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
@@ -82,7 +83,7 @@ func (store *reachabilityStore) reachabilityDataByHash(hash *daghash.Hash) (*rea
}
// flushToDB writes all dirty reachability data to the database.
func (store *reachabilityStore) flushToDB(dbTx database.Tx) error {
func (store *reachabilityStore) flushToDB(dbContext *dbaccess.TxContext) error {
if len(store.dirty) == 0 {
return nil
}
@@ -90,7 +91,7 @@ func (store *reachabilityStore) flushToDB(dbTx database.Tx) error {
for hash := range store.dirty {
hash := hash // Copy hash to a new variable to avoid passing the same pointer
reachabilityData := store.loaded[hash]
err := store.dbStoreReachabilityData(dbTx, &hash, reachabilityData)
err := store.storeReachabilityData(dbContext, &hash, reachabilityData)
if err != nil {
return err
}
@@ -102,22 +103,25 @@ func (store *reachabilityStore) clearDirtyEntries() {
store.dirty = make(map[daghash.Hash]struct{})
}
func (store *reachabilityStore) init(dbTx database.Tx) error {
bucket := dbTx.Metadata().Bucket(reachabilityDataBucketName)
func (store *reachabilityStore) init(dbContext dbaccess.Context) error {
// TODO: (Stas) This is a quick and dirty hack.
// We iterate over the entire bucket twice:
// * First, populate the loaded set with all entries
// * Second, connect the parent/children pointers in each entry
// with other nodes, which are now guaranteed to exist
cursor := bucket.Cursor()
cursor, err := dbaccess.ReachabilityDataCursor(dbContext)
if err != nil {
return err
}
defer cursor.Close()
for ok := cursor.First(); ok; ok = cursor.Next() {
err := store.initReachabilityData(cursor)
if err != nil {
return err
}
}
cursor = bucket.Cursor()
for ok := cursor.First(); ok; ok = cursor.Next() {
err := store.loadReachabilityDataFromCursor(cursor)
if err != nil {
@@ -128,7 +132,12 @@ func (store *reachabilityStore) init(dbTx database.Tx) error {
}
func (store *reachabilityStore) initReachabilityData(cursor database.Cursor) error {
hash, err := daghash.NewHash(cursor.Key())
key, err := cursor.Key()
if err != nil {
return err
}
hash, err := daghash.NewHash(key.Suffix())
if err != nil {
return err
}
@@ -141,7 +150,12 @@ func (store *reachabilityStore) initReachabilityData(cursor database.Cursor) err
}
func (store *reachabilityStore) loadReachabilityDataFromCursor(cursor database.Cursor) error {
hash, err := daghash.NewHash(cursor.Key())
key, err := cursor.Key()
if err != nil {
return err
}
hash, err := daghash.NewHash(key.Suffix())
if err != nil {
return err
}
@@ -151,7 +165,12 @@ func (store *reachabilityStore) loadReachabilityDataFromCursor(cursor database.C
return errors.Errorf("cannot find reachability data for block hash: %s", hash)
}
err = store.deserializeReachabilityData(cursor.Value(), reachabilityData)
serializedReachabilityData, err := cursor.Value()
if err != nil {
return err
}
err = store.deserializeReachabilityData(serializedReachabilityData, reachabilityData)
if err != nil {
return err
}
@@ -162,15 +181,15 @@ func (store *reachabilityStore) loadReachabilityDataFromCursor(cursor database.C
return nil
}
// dbStoreReachabilityData stores the reachability data to the database.
// storeReachabilityData stores the reachability data to the database.
// This overwrites the current entry if there exists one.
func (store *reachabilityStore) dbStoreReachabilityData(dbTx database.Tx, hash *daghash.Hash, reachabilityData *reachabilityData) error {
func (store *reachabilityStore) storeReachabilityData(dbContext dbaccess.Context, hash *daghash.Hash, reachabilityData *reachabilityData) error {
serializedReachabilityData, err := store.serializeReachabilityData(reachabilityData)
if err != nil {
return err
}
return dbTx.Metadata().Bucket(reachabilityDataBucketName).Put(hash[:], serializedReachabilityData)
return dbaccess.StoreReachabilityData(dbContext, hash, serializedReachabilityData)
}
func (store *reachabilityStore) serializeReachabilityData(reachabilityData *reachabilityData) ([]byte, error) {

View File

@@ -4,31 +4,20 @@ import (
"bytes"
"encoding/binary"
"fmt"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/wire"
)
// SubnetworkStore stores the subnetworks data
type SubnetworkStore struct {
db database.DB
}
func newSubnetworkStore(db database.DB) *SubnetworkStore {
return &SubnetworkStore{
db: db,
}
}
// registerSubnetworks scans a list of transactions, singles out
// subnetwork registry transactions, validates them, and registers a new
// subnetwork based on each of them.
// This function returns an error if one or more transactions are invalid.
func registerSubnetworks(dbTx database.Tx, txs []*util.Tx) error {
func registerSubnetworks(dbContext dbaccess.Context, txs []*util.Tx) error {
subnetworkRegistryTxs := make([]*wire.MsgTx, 0)
for _, tx := range txs {
msgTx := tx.MsgTx()
@@ -50,13 +39,13 @@ func registerSubnetworks(dbTx database.Tx, txs []*util.Tx) error {
if err != nil {
return err
}
sNet, err := dbGetSubnetwork(dbTx, subnetworkID)
exists, err := dbaccess.HasSubnetwork(dbContext, subnetworkID)
if err != nil {
return err
}
if sNet == nil {
if !exists {
createdSubnetwork := newSubnetwork(registryTx)
err := dbRegisterSubnetwork(dbTx, subnetworkID, createdSubnetwork)
err := registerSubnetwork(dbContext, subnetworkID, createdSubnetwork)
if err != nil {
return errors.Errorf("failed registering subnetwork"+
"for tx '%s': %s", registryTx.TxHash(), err)
@@ -85,66 +74,39 @@ func TxToSubnetworkID(tx *wire.MsgTx) (*subnetworkid.SubnetworkID, error) {
return subnetworkid.New(util.Hash160(txHash[:]))
}
// subnetwork returns a registered subnetwork. If the subnetwork does not exist
// this method returns an error.
func (s *SubnetworkStore) subnetwork(subnetworkID *subnetworkid.SubnetworkID) (*subnetwork, error) {
var sNet *subnetwork
var err error
dbErr := s.db.View(func(dbTx database.Tx) error {
sNet, err = dbGetSubnetwork(dbTx, subnetworkID)
return nil
})
if dbErr != nil {
return nil, errors.Errorf("could not retrieve subnetwork '%d': %s", subnetworkID, dbErr)
}
// fetchSubnetwork returns a registered subnetwork.
func fetchSubnetwork(subnetworkID *subnetworkid.SubnetworkID) (*subnetwork, error) {
serializedSubnetwork, err := dbaccess.FetchSubnetworkData(dbaccess.NoTx(), subnetworkID)
if err != nil {
return nil, errors.Errorf("could not retrieve subnetwork '%d': %s", subnetworkID, err)
return nil, err
}
return sNet, nil
subnet, err := deserializeSubnetwork(serializedSubnetwork)
if err != nil {
return nil, err
}
return subnet, nil
}
// GasLimit returns the gas limit of a registered subnetwork. If the subnetwork does not
// exist this method returns an error.
func (s *SubnetworkStore) GasLimit(subnetworkID *subnetworkid.SubnetworkID) (uint64, error) {
sNet, err := s.subnetwork(subnetworkID)
func GasLimit(subnetworkID *subnetworkid.SubnetworkID) (uint64, error) {
sNet, err := fetchSubnetwork(subnetworkID)
if err != nil {
return 0, err
}
if sNet == nil {
return 0, errors.Errorf("subnetwork '%s' not found", subnetworkID)
}
return sNet.gasLimit, nil
}
// dbRegisterSubnetwork stores mappings from ID of the subnetwork to the subnetwork data.
func dbRegisterSubnetwork(dbTx database.Tx, subnetworkID *subnetworkid.SubnetworkID, network *subnetwork) error {
// Serialize the subnetwork
func registerSubnetwork(dbContext dbaccess.Context, subnetworkID *subnetworkid.SubnetworkID, network *subnetwork) error {
serializedSubnetwork, err := serializeSubnetwork(network)
if err != nil {
return errors.Errorf("failed to serialize sub-netowrk '%s': %s", subnetworkID, err)
}
// Store the subnetwork
subnetworksBucket := dbTx.Metadata().Bucket(subnetworksBucketName)
err = subnetworksBucket.Put(subnetworkID[:], serializedSubnetwork)
if err != nil {
return errors.Errorf("failed to write sub-netowrk '%s': %s", subnetworkID, err)
}
return nil
}
// dbGetSubnetwork returns the subnetwork associated with subnetworkID or nil if the subnetwork was not found.
func dbGetSubnetwork(dbTx database.Tx, subnetworkID *subnetworkid.SubnetworkID) (*subnetwork, error) {
bucket := dbTx.Metadata().Bucket(subnetworksBucketName)
serializedSubnetwork := bucket.Get(subnetworkID[:])
if serializedSubnetwork == nil {
return nil, nil
}
return deserializeSubnetwork(serializedSubnetwork)
return dbaccess.StoreSubnetwork(dbContext, subnetworkID, serializedSubnetwork)
}
type subnetwork struct {

57 blockdag/sync_rate.go Normal file
View File

@@ -0,0 +1,57 @@
package blockdag
import "time"
const syncRateWindowDuration = 15 * time.Minute
// addBlockProcessingTimestamp records the current time as a block processing timestamp in order to measure the recent sync rate.
//
// This function MUST be called with the DAG state lock held (for writes).
func (dag *BlockDAG) addBlockProcessingTimestamp() {
now := time.Now()
dag.recentBlockProcessingTimestamps = append(dag.recentBlockProcessingTimestamps, now)
dag.removeNonRecentTimestampsFromRecentBlockProcessingTimestamps()
}
// removeNonRecentTimestampsFromRecentBlockProcessingTimestamps removes timestamps older than syncRateWindowDuration
// from dag.recentBlockProcessingTimestamps
//
// This function MUST be called with the DAG state lock held (for writes).
func (dag *BlockDAG) removeNonRecentTimestampsFromRecentBlockProcessingTimestamps() {
dag.recentBlockProcessingTimestamps = dag.recentBlockProcessingTimestampsRelevantWindow()
}
func (dag *BlockDAG) recentBlockProcessingTimestampsRelevantWindow() []time.Time {
minTime := time.Now().Add(-syncRateWindowDuration)
windowStartIndex := len(dag.recentBlockProcessingTimestamps)
for i, processTime := range dag.recentBlockProcessingTimestamps {
if processTime.After(minTime) {
windowStartIndex = i
break
}
}
return dag.recentBlockProcessingTimestamps[windowStartIndex:]
}
// syncRate returns the rate of processed blocks
// over the last syncRateWindowDuration.
func (dag *BlockDAG) syncRate() float64 {
dag.RLock()
defer dag.RUnlock()
return float64(len(dag.recentBlockProcessingTimestampsRelevantWindow())) / syncRateWindowDuration.Seconds()
}
// IsSyncRateBelowThreshold checks whether the sync rate
// is below the expected threshold.
func (dag *BlockDAG) IsSyncRateBelowThreshold(maxDeviation float64) bool {
if dag.uptime() < syncRateWindowDuration {
return false
}
return dag.syncRate() < 1/dag.dagParams.TargetTimePerBlock.Seconds()*maxDeviation
}
func (dag *BlockDAG) uptime() time.Duration {
return time.Now().Sub(dag.startTime)
}
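
A worked example of the threshold check, with assumed numbers: only the 15-minute window comes from the code above, while the 1-second target block time and maxDeviation of 0.05 are hypothetical inputs:

const windowSecs = 15 * 60.0       // syncRateWindowDuration in seconds
const targetTimePerBlockSecs = 1.0 // assumed network parameter
const maxDeviation = 0.05          // assumed caller-supplied value

blocksInWindow := 30.0
syncRate := blocksInWindow / windowSecs                // ~0.033 blocks/s
threshold := 1 / targetTimePerBlockSecs * maxDeviation // 0.05 blocks/s
belowThreshold := syncRate < threshold                 // true: fewer than 45 blocks arrived in the window
_ = belowThreshold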

View File

@@ -5,45 +5,27 @@ package blockdag
import (
"compress/bzip2"
"encoding/binary"
"github.com/kaspanet/kaspad/database/ffldb/ldb"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
"github.com/syndtr/goleveldb/leveldb/opt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"testing"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/database"
_ "github.com/kaspanet/kaspad/database/ffldb" // blank import ffldb so that its init() function runs before tests
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
const (
// testDbType is the database backend type to use for the tests.
testDbType = "ffldb"
// blockDataNet is the expected network in the test block data.
blockDataNet = wire.Mainnet
)
// isSupportedDbType returns whether or not the passed database type is
// currently supported.
func isSupportedDbType(dbType string) bool {
supportedDrivers := database.SupportedDrivers()
for _, driver := range supportedDrivers {
if dbType == driver {
return true
}
}
return false
}
// FileExists returns whether or not the named file or directory exists.
func FileExists(name string) bool {
if _, err := os.Stat(name); err != nil {
@@ -57,11 +39,10 @@ func FileExists(name string) bool {
// DAGSetup is used to create a new db and DAG instance with the genesis
// block already inserted. In addition to the new DAG instance, it returns
// a teardown function the caller should invoke when done testing to clean up.
func DAGSetup(dbName string, config Config) (*BlockDAG, func(), error) {
if !isSupportedDbType(testDbType) {
return nil, nil, errors.Errorf("unsupported db type %s", testDbType)
}
// The openDb parameter instructs DAGSetup whether or not to also open the
// database. Setting it to false is useful in tests that handle database
// opening/closing by themselves.
func DAGSetup(dbName string, openDb bool, config Config) (*BlockDAG, func(), error) {
var teardown func()
// To make sure that the teardown function is not called before any goroutines finished to run -
@@ -76,13 +57,25 @@ func DAGSetup(dbName string, config Config) (*BlockDAG, func(), error) {
})
}
if config.DB == nil {
tmpDir := os.TempDir()
if openDb {
var err error
tmpDir, err := ioutil.TempDir("", "DAGSetup")
if err != nil {
return nil, nil, errors.Errorf("error creating temp dir: %s", err)
}
// We set ldb.Options here to return nil because normally
// the database is initialized with very large caches that
// can make opening/closing the database for every test
// quite heavy.
originalLDBOptions := ldb.Options
ldb.Options = func() *opt.Options {
return nil
}
dbPath := filepath.Join(tmpDir, dbName)
_ = os.RemoveAll(dbPath)
var err error
config.DB, err = database.Create(testDbType, dbPath, blockDataNet)
err = dbaccess.Open(dbPath)
if err != nil {
return nil, nil, errors.Errorf("error creating db: %s", err)
}
@@ -92,18 +85,18 @@ func DAGSetup(dbName string, config Config) (*BlockDAG, func(), error) {
teardown = func() {
spawnWaitGroup.Wait()
spawn = realSpawn
config.DB.Close()
dbaccess.Close()
ldb.Options = originalLDBOptions
os.RemoveAll(dbPath)
}
} else {
teardown = func() {
spawnWaitGroup.Wait()
spawn = realSpawn
config.DB.Close()
}
}
config.TimeSource = NewMedianTime()
config.TimeSource = NewTimeSource()
config.SigCache = txscript.NewSigCache(1000)
// Create the DAG instance.
@@ -173,7 +166,7 @@ func GetVirtualFromParentsForTest(dag *BlockDAG, parentHashes []*daghash.Hash) (
}
virtual := newVirtualBlock(dag, parents)
pastUTXO, _, err := dag.pastUTXO(&virtual.blockNode)
pastUTXO, _, _, err := dag.pastUTXO(&virtual.blockNode)
if err != nil {
return nil, err
}
@@ -299,6 +292,28 @@ func PrepareBlockForTest(dag *BlockDAG, parentHashes []*daghash.Hash, transactio
return block, nil
}
// PrepareAndProcessBlockForTest prepares a block that points to the given parent
// hashes and process it.
func PrepareAndProcessBlockForTest(t *testing.T, dag *BlockDAG, parentHashes []*daghash.Hash, transactions []*wire.MsgTx) *wire.MsgBlock {
daghash.Sort(parentHashes)
block, err := PrepareBlockForTest(dag, parentHashes, transactions)
if err != nil {
t.Fatalf("error in PrepareBlockForTest: %s", err)
}
utilBlock := util.NewBlock(block)
isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, BFNoPoWCheck)
if err != nil {
t.Fatalf("unexpected error in ProcessBlock: %s", err)
}
if isDelayed {
t.Fatalf("block is too far in the future")
}
if isOrphan {
t.Fatalf("block was unexpectedly orphan")
}
return block
}
// generateDeterministicExtraNonceForTest returns a unique deterministic extra nonce for coinbase data, in order to create unique coinbase transactions.
func generateDeterministicExtraNonceForTest() uint64 {
extraNonceForTest++

View File

@@ -1,14 +0,0 @@
package blockdag
import (
"testing"
)
func TestIsSupportedDbType(t *testing.T) {
if !isSupportedDbType("ffldb") {
t.Errorf("ffldb should be a supported DB driver")
}
if isSupportedDbType("madeUpDb") {
t.Errorf("madeUpDb should not be a supported DB driver")
}
}

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -8,6 +8,7 @@ import (
"fmt"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/pkg/errors"
)
// ThresholdState define the various threshold states used when voting on
@@ -177,9 +178,9 @@ func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdCondit
var ok bool
state, ok = cache.Lookup(prevNode.hash)
if !ok {
return ThresholdFailed, AssertError(fmt.Sprintf(
return ThresholdFailed, errors.Errorf(
"thresholdState: cache lookup failed for %s",
prevNode.hash))
prevNode.hash)
}
}
@@ -297,7 +298,7 @@ func (dag *BlockDAG) IsDeploymentActive(deploymentID uint32) (bool, error) {
// This function MUST be called with the DAG state lock held (for writes).
func (dag *BlockDAG) deploymentState(prevNode *blockNode, deploymentID uint32) (ThresholdState, error) {
if deploymentID > uint32(len(dag.dagParams.Deployments)) {
return ThresholdFailed, DeploymentError(deploymentID)
return ThresholdFailed, errors.Errorf("deployment ID %d does not exist", deploymentID)
}
deployment := &dag.dagParams.Deployments[deploymentID]

25 blockdag/timesource.go Normal file
View File

@@ -0,0 +1,25 @@
package blockdag
import (
"time"
)
// TimeSource is the interface to access time.
type TimeSource interface {
// Now returns the current time.
Now() time.Time
}
// timeSource provides an implementation of the TimeSource interface
// that simply returns the current local time.
type timeSource struct{}
// Now returns the current local time, with one second precision.
func (m *timeSource) Now() time.Time {
return time.Unix(time.Now().Unix(), 0)
}
// NewTimeSource returns a new instance of a TimeSource
func NewTimeSource() TimeSource {
return &timeSource{}
}
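
The delayed-blocks test above now calls newFakeTimeSource instead of defining its fake inline. A plausible implementation against this interface (an assumption; the actual test helper may differ) is:

// fakeTimeSource is a TimeSource that always reports a fixed time,
// letting tests make delayed blocks artificially valid or invalid.
type fakeTimeSource struct {
	time time.Time
}

func newFakeTimeSource(fakeTime time.Time) TimeSource {
	return &fakeTimeSource{time: fakeTime}
}

// Now returns the fixed time instead of the wall clock.
func (fts *fakeTimeSource) Now() time.Time {
	return fts.time
}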

View File

@@ -2,31 +2,26 @@ package blockdag
import (
"bytes"
"github.com/golang/groupcache/lru"
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/wire"
)
const ecmhCacheSize = 4_000_000
var (
utxoToECMHCache = lru.New(ecmhCacheSize)
)
func utxoMultiset(entry *UTXOEntry, outpoint *wire.Outpoint) (*ecc.Multiset, error) {
func addUTXOToMultiset(ms *secp256k1.MultiSet, entry *UTXOEntry, outpoint *wire.Outpoint) (*secp256k1.MultiSet, error) {
w := &bytes.Buffer{}
err := serializeUTXO(w, entry, outpoint)
if err != nil {
return nil, err
}
serializedUTXO := w.Bytes()
utxoHash := daghash.DoubleHashH(serializedUTXO)
if cachedMSPoint, ok := utxoToECMHCache.Get(utxoHash); ok {
return cachedMSPoint.(*ecc.Multiset), nil
}
msPoint := ecc.NewMultiset(ecc.S256()).Add(serializedUTXO)
utxoToECMHCache.Add(utxoHash, msPoint)
return msPoint, nil
ms.Add(w.Bytes())
return ms, nil
}
func removeUTXOFromMultiset(ms *secp256k1.MultiSet, entry *UTXOEntry, outpoint *wire.Outpoint) (*secp256k1.MultiSet, error) {
w := &bytes.Buffer{}
err := serializeUTXO(w, entry, outpoint)
if err != nil {
return nil, err
}
ms.Remove(w.Bytes())
return ms, nil
}
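
A sketch of how the two helpers combine when applying a transaction to a running multiset; updateMultisetForTx and its arguments are illustrative names, not part of the changeset:

func updateMultisetForTx(ms *secp256k1.MultiSet,
	spent, created map[wire.Outpoint]*UTXOEntry) (*secp256k1.MultiSet, error) {

	for outpoint, entry := range spent {
		outpoint := outpoint // copy: the helpers take a pointer
		var err error
		// Spent outputs leave the UTXO set, so remove them.
		ms, err = removeUTXOFromMultiset(ms, entry, &outpoint)
		if err != nil {
			return nil, err
		}
	}
	for outpoint, entry := range created {
		outpoint := outpoint
		var err error
		// Newly created outputs enter the UTXO set, so add them.
		ms, err = addUTXOToMultiset(ms, entry, &outpoint)
		if err != nil {
			return nil, err
		}
	}
	return ms, nil
}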

View File

@@ -2,14 +2,11 @@ package blockdag
import (
"bytes"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/locks"
"github.com/pkg/errors"
)
var multisetPointSize = 32
type blockUTXODiffData struct {
diff *UTXODiff
diffChild *blockNode
@@ -17,16 +14,16 @@ type blockUTXODiffData struct {
type utxoDiffStore struct {
dag *BlockDAG
dirty map[daghash.Hash]struct{}
loaded map[daghash.Hash]*blockUTXODiffData
dirty map[*blockNode]struct{}
loaded map[*blockNode]*blockUTXODiffData
mtx *locks.PriorityMutex
}
func newUTXODiffStore(dag *BlockDAG) *utxoDiffStore {
return &utxoDiffStore{
dag: dag,
dirty: make(map[daghash.Hash]struct{}),
loaded: make(map[daghash.Hash]*blockUTXODiffData),
dirty: make(map[*blockNode]struct{}),
loaded: make(map[*blockNode]*blockUTXODiffData),
mtx: locks.NewPriorityMutex(),
}
}
@@ -35,16 +32,15 @@ func (diffStore *utxoDiffStore) setBlockDiff(node *blockNode, diff *UTXODiff) er
diffStore.mtx.HighPriorityWriteLock()
defer diffStore.mtx.HighPriorityWriteUnlock()
// load the diff data from DB to diffStore.loaded
_, exists, err := diffStore.diffDataByHash(node.hash)
if err != nil {
_, err := diffStore.diffDataByBlockNode(node)
if dbaccess.IsNotFoundError(err) {
diffStore.loaded[node] = &blockUTXODiffData{}
} else if err != nil {
return err
}
if !exists {
diffStore.loaded[*node.hash] = &blockUTXODiffData{}
}
diffStore.loaded[*node.hash].diff = diff
diffStore.setBlockAsDirty(node.hash)
diffStore.loaded[node].diff = diff
diffStore.setBlockAsDirty(node)
return nil
}
@@ -52,22 +48,19 @@ func (diffStore *utxoDiffStore) setBlockDiffChild(node *blockNode, diffChild *bl
diffStore.mtx.HighPriorityWriteLock()
defer diffStore.mtx.HighPriorityWriteUnlock()
// load the diff data from DB to diffStore.loaded
_, exists, err := diffStore.diffDataByHash(node.hash)
_, err := diffStore.diffDataByBlockNode(node)
if err != nil {
return err
}
if !exists {
return diffNotFoundError(node)
}
diffStore.loaded[*node.hash].diffChild = diffChild
diffStore.setBlockAsDirty(node.hash)
diffStore.loaded[node].diffChild = diffChild
diffStore.setBlockAsDirty(node)
return nil
}
func (diffStore *utxoDiffStore) removeBlocksDiffData(dbTx database.Tx, blockHashes []*daghash.Hash) error {
for _, hash := range blockHashes {
err := diffStore.removeBlockDiffData(dbTx, hash)
func (diffStore *utxoDiffStore) removeBlocksDiffData(dbContext dbaccess.Context, nodes []*blockNode) error {
for _, node := range nodes {
err := diffStore.removeBlockDiffData(dbContext, node)
if err != nil {
return err
}
@@ -75,87 +68,64 @@ func (diffStore *utxoDiffStore) removeBlocksDiffData(dbTx database.Tx, blockHash
return nil
}
func (diffStore *utxoDiffStore) removeBlockDiffData(dbTx database.Tx, blockHash *daghash.Hash) error {
func (diffStore *utxoDiffStore) removeBlockDiffData(dbContext dbaccess.Context, node *blockNode) error {
diffStore.mtx.LowPriorityWriteLock()
defer diffStore.mtx.LowPriorityWriteUnlock()
delete(diffStore.loaded, *blockHash)
err := dbRemoveDiffData(dbTx, blockHash)
delete(diffStore.loaded, node)
err := dbaccess.RemoveDiffData(dbContext, node.hash)
if err != nil {
return err
}
return nil
}
func (diffStore *utxoDiffStore) setBlockAsDirty(blockHash *daghash.Hash) {
diffStore.dirty[*blockHash] = struct{}{}
func (diffStore *utxoDiffStore) setBlockAsDirty(node *blockNode) {
diffStore.dirty[node] = struct{}{}
}
func (diffStore *utxoDiffStore) diffDataByHash(hash *daghash.Hash) (*blockUTXODiffData, bool, error) {
if diffData, ok := diffStore.loaded[*hash]; ok {
return diffData, true, nil
func (diffStore *utxoDiffStore) diffDataByBlockNode(node *blockNode) (*blockUTXODiffData, error) {
if diffData, ok := diffStore.loaded[node]; ok {
return diffData, nil
}
diffData, err := diffStore.diffDataFromDB(hash)
diffData, err := diffStore.diffDataFromDB(node.hash)
if err != nil {
return nil, false, err
return nil, err
}
exists := diffData != nil
if exists {
diffStore.loaded[*hash] = diffData
}
return diffData, exists, nil
}
func diffNotFoundError(node *blockNode) error {
return errors.Errorf("Couldn't find diff data for block %s", node.hash)
diffStore.loaded[node] = diffData
return diffData, nil
}
func (diffStore *utxoDiffStore) diffByNode(node *blockNode) (*UTXODiff, error) {
diffStore.mtx.HighPriorityReadLock()
defer diffStore.mtx.HighPriorityReadUnlock()
diffData, exists, err := diffStore.diffDataByHash(node.hash)
diffData, err := diffStore.diffDataByBlockNode(node)
if err != nil {
return nil, err
}
if !exists {
return nil, diffNotFoundError(node)
}
return diffData.diff, nil
}
func (diffStore *utxoDiffStore) diffChildByNode(node *blockNode) (*blockNode, error) {
diffStore.mtx.HighPriorityReadLock()
defer diffStore.mtx.HighPriorityReadUnlock()
diffData, exists, err := diffStore.diffDataByHash(node.hash)
diffData, err := diffStore.diffDataByBlockNode(node)
if err != nil {
return nil, err
}
if !exists {
return nil, diffNotFoundError(node)
}
return diffData.diffChild, nil
}
func (diffStore *utxoDiffStore) diffDataFromDB(hash *daghash.Hash) (*blockUTXODiffData, error) {
var diffData *blockUTXODiffData
err := diffStore.dag.db.View(func(dbTx database.Tx) error {
bucket := dbTx.Metadata().Bucket(utxoDiffsBucketName)
serializedBlockDiffData := bucket.Get(hash[:])
if serializedBlockDiffData != nil {
var err error
diffData, err = diffStore.deserializeBlockUTXODiffData(serializedBlockDiffData)
return err
}
return nil
})
serializedBlockDiffData, err := dbaccess.FetchUTXODiffData(dbaccess.NoTx(), hash)
if err != nil {
return nil, err
}
return diffData, nil
return diffStore.deserializeBlockUTXODiffData(serializedBlockDiffData)
}
// flushToDB writes all dirty diff data to the database. If all writes
// succeed, this clears the dirty set.
func (diffStore *utxoDiffStore) flushToDB(dbTx database.Tx) error {
// flushToDB writes all dirty diff data to the database.
func (diffStore *utxoDiffStore) flushToDB(dbContext *dbaccess.TxContext) error {
diffStore.mtx.HighPriorityWriteLock()
defer diffStore.mtx.HighPriorityWriteUnlock()
if len(diffStore.dirty) == 0 {
@@ -165,11 +135,10 @@ func (diffStore *utxoDiffStore) flushToDB(dbTx database.Tx) error {
// Allocate a buffer here to avoid needless allocations/grows
// while writing each entry.
buffer := &bytes.Buffer{}
for hash := range diffStore.dirty {
hash := hash // Copy hash to a new variable to avoid passing the same pointer
for node := range diffStore.dirty {
buffer.Reset()
diffData := diffStore.loaded[hash]
err := dbStoreDiffData(dbTx, buffer, &hash, diffData)
diffData := diffStore.loaded[node]
err := storeDiffData(dbContext, buffer, node.hash, diffData)
if err != nil {
return err
}
@@ -178,31 +147,53 @@ func (diffStore *utxoDiffStore) flushToDB(dbTx database.Tx) error {
}
func (diffStore *utxoDiffStore) clearDirtyEntries() {
diffStore.dirty = make(map[daghash.Hash]struct{})
diffStore.dirty = make(map[*blockNode]struct{})
}
// dbStoreDiffData stores the UTXO diff data to the database.
// maxBlueScoreDifferenceToKeepLoaded is the maximum difference
// between the virtual's blueScore and a blockNode's blueScore
// under which to keep diff data loaded in memory.
var maxBlueScoreDifferenceToKeepLoaded uint64 = 100
// clearOldEntries removes entries whose blue score is lower than
// virtual.blueScore - maxBlueScoreDifferenceToKeepLoaded. Note
// that tips are not removed even if their blue score is
// lower than the above.
func (diffStore *utxoDiffStore) clearOldEntries() {
diffStore.mtx.HighPriorityWriteLock()
defer diffStore.mtx.HighPriorityWriteUnlock()
virtualBlueScore := diffStore.dag.VirtualBlueScore()
minBlueScore := virtualBlueScore - maxBlueScoreDifferenceToKeepLoaded
if maxBlueScoreDifferenceToKeepLoaded > virtualBlueScore {
minBlueScore = 0
}
tips := diffStore.dag.virtual.tips()
toRemove := make(map[*blockNode]struct{})
for node := range diffStore.loaded {
if node.blueScore < minBlueScore && !tips.contains(node) {
toRemove[node] = struct{}{}
}
}
for node := range toRemove {
delete(diffStore.loaded, node)
}
}
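
To make the eviction rule concrete: with maxBlueScoreDifferenceToKeepLoaded = 100, a virtual blue score of 1,000 gives minBlueScore = 900, so every loaded non-tip entry with a blue score below 900 is evicted; while the virtual blue score is still below 100, minBlueScore is clamped to 0 so the unsigned subtraction cannot underflow.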
// storeDiffData stores the UTXO diff data to the database.
// This overwrites the current entry if there exists one.
func dbStoreDiffData(dbTx database.Tx, writeBuffer *bytes.Buffer, hash *daghash.Hash, diffData *blockUTXODiffData) error {
// To avoid a ton of allocs, use the given writeBuffer
func storeDiffData(dbContext dbaccess.Context, w *bytes.Buffer, hash *daghash.Hash, diffData *blockUTXODiffData) error {
// To avoid a ton of allocs, use the io.Writer
// instead of allocating one. We expect the buffer to
// already be initalized and, in most cases, to already
// already be initialized and, in most cases, to already
// be large enough to accommodate the serialized data
// without growing.
err := serializeBlockUTXODiffData(writeBuffer, diffData)
err := serializeBlockUTXODiffData(w, diffData)
if err != nil {
return err
}
// Bucket.Put doesn't copy on its own, so we manually
// copy here. We do so because we expect the buffer
// to be reused once we're done with it.
serializedDiffData := make([]byte, writeBuffer.Len())
copy(serializedDiffData, writeBuffer.Bytes())
return dbTx.Metadata().Bucket(utxoDiffsBucketName).Put(hash[:], serializedDiffData)
}
func dbRemoveDiffData(dbTx database.Tx, hash *daghash.Hash) error {
return dbTx.Metadata().Bucket(utxoDiffsBucketName).Delete(hash[:])
return dbaccess.StoreUTXODiffData(dbContext, hash, w.Bytes())
}

View File

@@ -1,9 +1,8 @@
package blockdag
import (
"fmt"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"reflect"
@@ -12,7 +11,7 @@ import (
func TestUTXODiffStore(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestUTXODiffStore", Config{
dag, teardownFunc, err := DAGSetup("TestUTXODiffStore", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
@@ -31,9 +30,12 @@ func TestUTXODiffStore(t *testing.T) {
// Check that an error is returned when asking for non existing node
nonExistingNode := createNode()
_, err = dag.utxoDiffStore.diffByNode(nonExistingNode)
expectedErrString := fmt.Sprintf("Couldn't find diff data for block %s", nonExistingNode.hash)
if err == nil || err.Error() != expectedErrString {
t.Errorf("diffByNode: expected error %s but got %s", expectedErrString, err)
if !dbaccess.IsNotFoundError(err) {
if err != nil {
t.Errorf("diffByNode: %s", err)
} else {
t.Errorf("diffByNode: unexpectedly found diff data")
}
}
// Add node's diff data to the utxoDiffStore and check if it's checked correctly.
@@ -63,13 +65,20 @@ func TestUTXODiffStore(t *testing.T) {
// Flush changes to db, delete them from the dag.utxoDiffStore.loaded
// map, and check if the diff data is re-fetched from the database.
err = dag.db.Update(func(dbTx database.Tx) error {
return dag.utxoDiffStore.flushToDB(dbTx)
})
dbTx, err := dbaccess.NewTx()
if err != nil {
t.Fatalf("Failed to open database transaction: %s", err)
}
defer dbTx.RollbackUnlessClosed()
err = dag.utxoDiffStore.flushToDB(dbTx)
if err != nil {
t.Fatalf("Error flushing utxoDiffStore data to DB: %s", err)
}
delete(dag.utxoDiffStore.loaded, *node.hash)
err = dbTx.Commit()
if err != nil {
t.Fatalf("Failed to commit database transaction: %s", err)
}
delete(dag.utxoDiffStore.loaded, node)
if storeDiff, err := dag.utxoDiffStore.diffByNode(node); err != nil {
t.Fatalf("diffByNode: unexpected error: %s", err)
@@ -78,9 +87,80 @@ func TestUTXODiffStore(t *testing.T) {
}
// Check if getBlockDiff caches the result in dag.utxoDiffStore.loaded
if loadedDiffData, ok := dag.utxoDiffStore.loaded[*node.hash]; !ok {
if loadedDiffData, ok := dag.utxoDiffStore.loaded[node]; !ok {
t.Errorf("the diff data wasn't added to loaded map after requesting it")
} else if !reflect.DeepEqual(loadedDiffData.diff, diff) {
t.Errorf("Expected diff and loadedDiff to be equal")
}
}
func TestClearOldEntries(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestClearOldEntries", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
t.Fatalf("TestClearOldEntries: Failed to setup DAG instance: %v", err)
}
defer teardownFunc()
// Set maxBlueScoreDifferenceToKeepLoaded to 10 to make this test fast to run
currentDifference := maxBlueScoreDifferenceToKeepLoaded
maxBlueScoreDifferenceToKeepLoaded = 10
defer func() { maxBlueScoreDifferenceToKeepLoaded = currentDifference }()
// Add 10 blocks
blockNodes := make([]*blockNode, 10)
for i := 0; i < 10; i++ {
processedBlock := PrepareAndProcessBlockForTest(t, dag, dag.TipHashes(), nil)
node := dag.index.LookupNode(processedBlock.BlockHash())
if node == nil {
t.Fatalf("TestClearOldEntries: missing blockNode for hash %s", processedBlock.BlockHash())
}
blockNodes[i] = node
}
// Make sure that all of them exist in the loaded set
for _, node := range blockNodes {
_, ok := dag.utxoDiffStore.loaded[node]
if !ok {
t.Fatalf("TestClearOldEntries: diffData for node %s is not in the loaded set", node.hash)
}
}
// Add 10 more blocks on top of the others
for i := 0; i < 10; i++ {
PrepareAndProcessBlockForTest(t, dag, dag.TipHashes(), nil)
}
// Make sure that all the old nodes no longer exist in the loaded set
for _, node := range blockNodes {
_, ok := dag.utxoDiffStore.loaded[node]
if ok {
t.Fatalf("TestClearOldEntries: diffData for node %s is in the loaded set", node.hash)
}
}
// Add a block on top of the genesis to force the retrieval of all diffData
processedBlock := PrepareAndProcessBlockForTest(t, dag, []*daghash.Hash{dag.genesis.hash}, nil)
node := dag.index.LookupNode(processedBlock.BlockHash())
if node == nil {
t.Fatalf("TestClearOldEntries: missing blockNode for hash %s", processedBlock.BlockHash())
}
// Make sure that the child-of-genesis node is in the loaded set, since it
// is a tip.
_, ok := dag.utxoDiffStore.loaded[node]
if !ok {
t.Fatalf("TestClearOldEntries: diffData for node %s is not in the loaded set", node.hash)
}
// Make sure that all the old nodes still do not exist in the loaded set
for _, node := range blockNodes {
_, ok := dag.utxoDiffStore.loaded[node]
if ok {
t.Fatalf("TestClearOldEntries: diffData for node %s is in the loaded set", node.hash)
}
}
}

View File

@@ -2,15 +2,11 @@ package blockdag
import (
"bytes"
"encoding/binary"
"fmt"
"github.com/pkg/errors"
"io"
"math/big"
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/kaspad/util/binaryserializer"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
"io"
)
// serializeBlockUTXODiffData serializes diff data in the following format:
@@ -40,54 +36,26 @@ func serializeBlockUTXODiffData(w io.Writer, diffData *blockUTXODiffData) error
return nil
}
// utxoEntryHeaderCode returns the calculated header code to be used when
// serializing the provided utxo entry.
func utxoEntryHeaderCode(entry *UTXOEntry) uint64 {
// As described in the serialization format comments, the header code
// encodes the blue score shifted over one bit and the block reward flag
// in the lowest bit.
headerCode := uint64(entry.BlockBlueScore()) << 1
if entry.IsCoinbase() {
headerCode |= 0x01
}
return headerCode
}
func (diffStore *utxoDiffStore) deserializeBlockUTXODiffData(serializedDiffDataBytes []byte) (*blockUTXODiffData, error) {
func (diffStore *utxoDiffStore) deserializeBlockUTXODiffData(serializedDiffData []byte) (*blockUTXODiffData, error) {
diffData := &blockUTXODiffData{}
serializedDiffData := bytes.NewBuffer(serializedDiffDataBytes)
r := bytes.NewBuffer(serializedDiffData)
var hasDiffChild bool
err := wire.ReadElement(serializedDiffData, &hasDiffChild)
err := wire.ReadElement(r, &hasDiffChild)
if err != nil {
return nil, err
}
if hasDiffChild {
hash := &daghash.Hash{}
err := wire.ReadElement(serializedDiffData, hash)
err := wire.ReadElement(r, hash)
if err != nil {
return nil, err
}
diffData.diffChild = diffStore.dag.index.LookupNode(hash)
}
diffData.diff = &UTXODiff{
useMultiset: true,
}
diffData.diff.toAdd, err = deserializeDiffEntries(serializedDiffData)
if err != nil {
return nil, err
}
diffData.diff.toRemove, err = deserializeDiffEntries(serializedDiffData)
if err != nil {
return nil, err
}
diffData.diff.diffMultiset, err = deserializeMultiset(serializedDiffData)
diffData.diff, err = deserializeUTXODiff(r)
if err != nil {
return nil, err
}
@@ -95,38 +63,31 @@ func (diffStore *utxoDiffStore) deserializeBlockUTXODiffData(serializedDiffDataB
return diffData, nil
}
func deserializeDiffEntries(r io.Reader) (utxoCollection, error) {
func deserializeUTXODiff(r io.Reader) (*UTXODiff, error) {
diff := &UTXODiff{}
var err error
diff.toAdd, err = deserializeUTXOCollection(r)
if err != nil {
return nil, err
}
diff.toRemove, err = deserializeUTXOCollection(r)
if err != nil {
return nil, err
}
return diff, nil
}
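For reference, the wire layout implied by deserializeUTXODiff and deserializeUTXOCollection (a sketch, not part of the diff):
// UTXODiff   := collection(toAdd) || collection(toRemove)
// collection := varint(count) || count * (outpoint || UTXOEntry)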
func deserializeUTXOCollection(r io.Reader) (utxoCollection, error) {
count, err := wire.ReadVarInt(r)
if err != nil {
return nil, err
}
collection := utxoCollection{}
for i := uint64(0); i < count; i++ {
outpointSize, err := wire.ReadVarInt(r)
if err != nil {
return nil, err
}
serializedOutpoint := make([]byte, outpointSize)
err = binary.Read(r, byteOrder, serializedOutpoint)
if err != nil {
return nil, err
}
outpoint, err := deserializeOutpoint(serializedOutpoint)
if err != nil {
return nil, err
}
utxoEntrySize, err := wire.ReadVarInt(r)
if err != nil {
return nil, err
}
serializedEntry := make([]byte, utxoEntrySize)
err = binary.Read(r, byteOrder, serializedEntry)
if err != nil {
return nil, err
}
utxoEntry, err := deserializeUTXOEntry(serializedEntry)
utxoEntry, outpoint, err := deserializeUTXO(r)
if err != nil {
return nil, err
}
@@ -135,31 +96,22 @@ func deserializeDiffEntries(r io.Reader) (utxoCollection, error) {
return collection, nil
}
// deserializeMultiset deserializes an ECMH multiset.
// See serializeMultiset for more details.
func deserializeMultiset(r io.Reader) (*ecc.Multiset, error) {
xBytes := make([]byte, multisetPointSize)
yBytes := make([]byte, multisetPointSize)
err := binary.Read(r, byteOrder, xBytes)
func deserializeUTXO(r io.Reader) (*UTXOEntry, *wire.Outpoint, error) {
outpoint, err := deserializeOutpoint(r)
if err != nil {
return nil, err
return nil, nil, err
}
err = binary.Read(r, byteOrder, yBytes)
utxoEntry, err := deserializeUTXOEntry(r)
if err != nil {
return nil, err
return nil, nil, err
}
var x, y big.Int
x.SetBytes(xBytes)
y.SetBytes(yBytes)
return ecc.NewMultisetFromPoint(ecc.S256(), &x, &y), nil
return utxoEntry, outpoint, nil
}
// serializeUTXODiff serializes a UTXODiff by serializing
// UTXODiff.toAdd and UTXODiff.toRemove one after the other.
func serializeUTXODiff(w io.Writer, diff *UTXODiff) error {
if !diff.useMultiset {
return errors.New("Cannot serialize a UTXO diff without a multiset")
}
err := serializeUTXOCollection(w, diff.toAdd)
if err != nil {
return err
@@ -169,10 +121,7 @@ func serializeUTXODiff(w io.Writer, diff *UTXODiff) error {
if err != nil {
return err
}
err = serializeMultiset(w, diff.diffMultiset)
if err != nil {
return err
}
return nil
}
@@ -193,120 +142,93 @@ func serializeUTXOCollection(w io.Writer, collection utxoCollection) error {
return nil
}
// serializeMultiset serializes an ECMH multiset. The serialization
// is done by taking the (x,y) coordinates of the multiset point,
// padding each of them to 32 bytes (they will already be 32 bytes in
// most cases, unless one of the coordinates is zero) and writing
// them one after the other.
func serializeMultiset(w io.Writer, ms *ecc.Multiset) error {
x, y := ms.Point()
xBytes := make([]byte, multisetPointSize)
copy(xBytes, x.Bytes())
yBytes := make([]byte, multisetPointSize)
copy(yBytes, y.Bytes())
err := binary.Write(w, byteOrder, xBytes)
if err != nil {
return err
}
err = binary.Write(w, byteOrder, yBytes)
if err != nil {
return err
}
return nil
}
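A quick size check for the removed encoding above (assuming multisetPointSize is 32, as the padding comment suggests):
// two fixed-width coordinates, so a serialized multiset
// always occupies 2 * 32 = 64 bytes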
// serializeUTXO serializes a utxo entry-outpoint pair
func serializeUTXO(w io.Writer, entry *UTXOEntry, outpoint *wire.Outpoint) error {
serializedOutpoint := *outpointKey(*outpoint)
err := wire.WriteVarInt(w, uint64(len(serializedOutpoint)))
err := serializeOutpoint(w, outpoint)
if err != nil {
return err
}
err = binary.Write(w, byteOrder, serializedOutpoint)
if err != nil {
return err
}
serializedUTXOEntry := serializeUTXOEntry(entry)
err = wire.WriteVarInt(w, uint64(len(serializedUTXOEntry)))
if err != nil {
return err
}
err = binary.Write(w, byteOrder, serializedUTXOEntry)
err = serializeUTXOEntry(w, entry)
if err != nil {
return err
}
return nil
}
// serializeUTXOEntry returns the entry serialized to a format that is suitable
// for long-term storage. The format is described in detail above.
func serializeUTXOEntry(entry *UTXOEntry) []byte {
// Encode the header code.
headerCode := utxoEntryHeaderCode(entry)
// p2pkhUTXOEntrySerializeSize is the serialized size of a P2PKH UTXO entry:
// 8 bytes (header code) + 8 bytes (amount) + varint for a script pub key length of 25 (1 byte) + 25 bytes for the P2PKH script, i.e. 42 bytes in total.
var p2pkhUTXOEntrySerializeSize = 8 + 8 + wire.VarIntSerializeSize(25) + 25
// Calculate the size needed to serialize the entry.
size := serializeSizeVLQ(headerCode) +
compressedTxOutSize(uint64(entry.Amount()), entry.ScriptPubKey())
// Serialize the header code followed by the compressed unspent
// transaction output.
serialized := make([]byte, size)
offset := putVLQ(serialized, headerCode)
offset += putCompressedTxOut(serialized[offset:], uint64(entry.Amount()),
entry.ScriptPubKey())
return serialized
}
// deserializeOutpoint decodes an outpoint from the passed serialized byte
// slice into a new wire.Outpoint using a format that is suitable for long-
// term storage. This format is described in detail above.
func deserializeOutpoint(serialized []byte) (*wire.Outpoint, error) {
if len(serialized) <= daghash.HashSize {
return nil, errDeserialize("unexpected end of data")
}
txID := daghash.TxID{}
txID.SetBytes(serialized[:daghash.HashSize])
index, _ := deserializeVLQ(serialized[daghash.HashSize:])
return wire.NewOutpoint(&txID, uint32(index)), nil
}
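The layout this helper expects, recovered from the code above (a sketch):
// outpoint := txID (32 bytes) || index (VLQ-encoded)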
// deserializeUTXOEntry decodes a UTXO entry from the passed serialized byte
// slice into a new UTXOEntry using a format that is suitable for long-term
// storage. The format is described in detail above.
func deserializeUTXOEntry(serialized []byte) (*UTXOEntry, error) {
// Deserialize the header code.
code, offset := deserializeVLQ(serialized)
if offset >= len(serialized) {
return nil, errDeserialize("unexpected end of data after header")
}
// Decode the header code.
//
// Bit 0 indicates whether the containing transaction is a coinbase.
// Bits 1-x encode blue score of the containing transaction.
isCoinbase := code&0x01 != 0
blockBlueScore := code >> 1
// Decode the compressed unspent transaction output.
amount, scriptPubKey, _, err := decodeCompressedTxOut(serialized[offset:])
// serializeUTXOEntry encodes the entry to the given io.Writer.
// The serialization format is described in detail above.
func serializeUTXOEntry(w io.Writer, entry *UTXOEntry) error {
// Encode the blueScore.
err := binaryserializer.PutUint64(w, byteOrder, entry.blockBlueScore)
if err != nil {
return nil, errDeserialize(fmt.Sprintf("unable to decode "+
"UTXO: %s", err))
return err
}
// Encode the packedFlags.
err = binaryserializer.PutUint8(w, uint8(entry.packedFlags))
if err != nil {
return err
}
err = binaryserializer.PutUint64(w, byteOrder, entry.Amount())
if err != nil {
return err
}
err = wire.WriteVarInt(w, uint64(len(entry.ScriptPubKey())))
if err != nil {
return err
}
_, err = w.Write(entry.ScriptPubKey())
if err != nil {
return errors.WithStack(err)
}
return nil
}
// deserializeUTXOEntry decodes a UTXO entry from the passed reader
// into a new UTXOEntry, according to the format that is described
// in detail above.
func deserializeUTXOEntry(r io.Reader) (*UTXOEntry, error) {
// Deserialize the blueScore.
blockBlueScore, err := binaryserializer.Uint64(r, byteOrder)
if err != nil {
return nil, err
}
// Decode the packedFlags.
packedFlags, err := binaryserializer.Uint8(r)
if err != nil {
return nil, err
}
entry := &UTXOEntry{
amount: amount,
scriptPubKey: scriptPubKey,
blockBlueScore: blockBlueScore,
packedFlags: 0,
packedFlags: txoFlags(packedFlags),
}
if isCoinbase {
entry.packedFlags |= tfCoinbase
entry.amount, err = binaryserializer.Uint64(r, byteOrder)
if err != nil {
return nil, err
}
scriptPubKeyLen, err := wire.ReadVarInt(r)
if err != nil {
return nil, err
}
entry.scriptPubKey = make([]byte, scriptPubKeyLen)
_, err = r.Read(entry.scriptPubKey)
if err != nil {
return nil, errors.WithStack(err)
}
return entry, nil
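A minimal in-package round-trip sketch of the new entry format (field values are illustrative assumptions):
var buf bytes.Buffer
entry := &UTXOEntry{blockBlueScore: 7, packedFlags: tfCoinbase, amount: 5000, scriptPubKey: []byte{0x51}}
if err := serializeUTXOEntry(&buf, entry); err != nil {
	// handle the write error
}
decoded, err := deserializeUTXOEntry(&buf)
// on success, decoded mirrors entry: blueScore 7, coinbase flag set, amount 5000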


@@ -8,7 +8,7 @@ import (
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/wire"
)
@@ -153,29 +153,16 @@ func (uc utxoCollection) clone() utxoCollection {
// UTXODiff represents a diff between two UTXO Sets.
type UTXODiff struct {
toAdd utxoCollection
toRemove utxoCollection
diffMultiset *ecc.Multiset
useMultiset bool
toAdd utxoCollection
toRemove utxoCollection
}
// NewUTXODiffWithoutMultiset creates a new, empty utxoDiff
// NewUTXODiff creates a new, empty utxoDiff
// without a multiset.
func NewUTXODiffWithoutMultiset() *UTXODiff {
return &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
useMultiset: false,
}
}
// NewUTXODiff creates a new, empty utxoDiff.
func NewUTXODiff() *UTXODiff {
return &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
useMultiset: true,
diffMultiset: ecc.NewMultiset(ecc.S256()),
toAdd: utxoCollection{},
toRemove: utxoCollection{},
}
}
@@ -209,9 +196,8 @@ func NewUTXODiff() *UTXODiff {
// diffFrom results in the UTXO being added to toAdd
func (d *UTXODiff) diffFrom(other *UTXODiff) (*UTXODiff, error) {
result := UTXODiff{
toAdd: make(utxoCollection, len(d.toRemove)+len(other.toAdd)),
toRemove: make(utxoCollection, len(d.toAdd)+len(other.toRemove)),
useMultiset: d.useMultiset,
toAdd: make(utxoCollection, len(d.toRemove)+len(other.toAdd)),
toRemove: make(utxoCollection, len(d.toAdd)+len(other.toRemove)),
}
// Note that the following cases are not accounted for, as they are impossible
@@ -293,17 +279,12 @@ func (d *UTXODiff) diffFrom(other *UTXODiff) (*UTXODiff, error) {
}
}
if d.useMultiset {
// Create a new diffMultiset as the subtraction of the two diffs.
result.diffMultiset = other.diffMultiset.Subtract(d.diffMultiset)
}
return &result, nil
}
// WithDiffInPlace applies provided diff to this diff in-place, that would be the result if
// withDiffInPlace applies provided diff to this diff in-place, that would be the result if
// first d, and then diff were applied to the same base
func (d *UTXODiff) WithDiffInPlace(diff *UTXODiff) error {
func (d *UTXODiff) withDiffInPlace(diff *UTXODiff) error {
for outpoint, entryToRemove := range diff.toRemove {
if d.toAdd.containsWithBlueScore(outpoint, entryToRemove.blockBlueScore) {
// If already exists in toAdd with the same blueScore - remove from toAdd
@@ -312,8 +293,8 @@ func (d *UTXODiff) WithDiffInPlace(diff *UTXODiff) error {
}
if d.toRemove.contains(outpoint) {
// If already exists - this is an error
return ruleError(ErrWithDiff, fmt.Sprintf(
"WithDiffInPlace: outpoint %s both in d.toRemove and in diff.toRemove", outpoint))
return errors.Errorf(
"withDiffInPlace: outpoint %s both in d.toRemove and in diff.toRemove", outpoint)
}
// If not exists neither in toAdd nor in toRemove - add to toRemove
@@ -324,9 +305,9 @@ func (d *UTXODiff) WithDiffInPlace(diff *UTXODiff) error {
if d.toRemove.containsWithBlueScore(outpoint, entryToAdd.blockBlueScore) {
// If already exists in toRemove with the same blueScore - remove from toRemove
if d.toAdd.contains(outpoint) && !diff.toRemove.contains(outpoint) {
return ruleError(ErrWithDiff, fmt.Sprintf(
"WithDiffInPlace: outpoint %s both in d.toAdd and in diff.toAdd with no "+
"corresponding entry in diff.toRemove", outpoint))
return errors.Errorf(
"withDiffInPlace: outpoint %s both in d.toAdd and in diff.toAdd with no "+
"corresponding entry in diff.toRemove", outpoint)
}
d.toRemove.remove(outpoint)
continue
@@ -335,130 +316,35 @@ func (d *UTXODiff) WithDiffInPlace(diff *UTXODiff) error {
(existingEntry.blockBlueScore == entryToAdd.blockBlueScore ||
!diff.toRemove.containsWithBlueScore(outpoint, existingEntry.blockBlueScore)) {
// If already exists - this is an error
return ruleError(ErrWithDiff, fmt.Sprintf(
"WithDiffInPlace: outpoint %s both in d.toAdd and in diff.toAdd", outpoint))
return errors.Errorf(
"withDiffInPlace: outpoint %s both in d.toAdd and in diff.toAdd", outpoint)
}
// If not exists neither in toAdd nor in toRemove, or exists in toRemove with different blueScore - add to toAdd
d.toAdd.add(outpoint, entryToAdd)
}
// Apply diff.diffMultiset to d.diffMultiset
if d.useMultiset {
d.diffMultiset = d.diffMultiset.Union(diff.diffMultiset)
}
return nil
}
// WithDiff applies provided diff to this diff, creating a new utxoDiff, that would be the result if
// first d, and then diff were applied to the same base
//
// WithDiff follows a set of rules represented by the following 3 by 3 table:
//
// | | this | |
// ---------+-----------+-----------+-----------+-----------
// | | toAdd | toRemove | None
// ---------+-----------+-----------+-----------+-----------
// other | toAdd | X | - | toAdd
// ---------+-----------+-----------+-----------+-----------
// | toRemove | - | X | toRemove
// ---------+-----------+-----------+-----------+-----------
// | None | toAdd | toRemove | -
//
// Key:
// - Don't add anything to the result
// X Return an error
// toAdd Add the UTXO into the toAdd collection of the result
// toRemove Add the UTXO into the toRemove collection of the result
//
// Examples:
// 1. This diff contains a UTXO in toAdd, and the other diff contains it in toRemove
// WithDiff results in nothing being added
// 2. This diff contains a UTXO in toRemove, and the other diff does not contain it
// WithDiff results in the UTXO being added to toRemove
// first d, and then diff were applied to some base
func (d *UTXODiff) WithDiff(diff *UTXODiff) (*UTXODiff, error) {
result := UTXODiff{
toAdd: make(utxoCollection, len(d.toAdd)+len(diff.toAdd)),
toRemove: make(utxoCollection, len(d.toRemove)+len(diff.toRemove)),
useMultiset: d.useMultiset,
clone := d.clone()
err := clone.withDiffInPlace(diff)
if err != nil {
return nil, err
}
// All transactions in d.toAdd:
// If they are not in diff.toRemove - should be added in result.toAdd
// If they are in diff.toAdd - should throw an error
// Otherwise - should be ignored
for outpoint, utxoEntry := range d.toAdd {
if !diff.toRemove.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
result.toAdd.add(outpoint, utxoEntry)
}
if diffEntry, ok := diff.toAdd.get(outpoint); ok {
// An exception is made for entries with unequal blue scores
// as long as the appropriate entry exists in either d.toRemove
// or diff.toRemove.
// These are just "updates" to accepted blue score
if diffEntry.blockBlueScore != utxoEntry.blockBlueScore &&
diff.toRemove.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
continue
}
return nil, ruleError(ErrWithDiff, fmt.Sprintf("WithDiff: outpoint %s both in d.toAdd and in other.toAdd", outpoint))
}
}
// All transactions in d.toRemove:
// If they are not in diff.toAdd - should be added in result.toRemove
// If they are in diff.toRemove - should throw an error
// Otherwise - should be ignored
for outpoint, utxoEntry := range d.toRemove {
if !diff.toAdd.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
result.toRemove.add(outpoint, utxoEntry)
}
if diffEntry, ok := diff.toRemove.get(outpoint); ok {
// An exception is made for entries with unequal blue scores
// as long as the appropriate entry exists in either d.toAdd
// or diff.toAdd.
// These are just "updates" to accepted blue score
if diffEntry.blockBlueScore != utxoEntry.blockBlueScore &&
d.toAdd.containsWithBlueScore(outpoint, diffEntry.blockBlueScore) {
continue
}
return nil, ruleError(ErrWithDiff, "WithDiff: outpoint both in d.toRemove and in other.toRemove")
}
}
// All transactions in diff.toAdd:
// If they are not in d.toRemove - should be added in result.toAdd
for outpoint, utxoEntry := range diff.toAdd {
if !d.toRemove.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
result.toAdd.add(outpoint, utxoEntry)
}
}
// All transactions in diff.toRemove:
// If they are not in d.toAdd - should be added in result.toRemove
for outpoint, utxoEntry := range diff.toRemove {
if !d.toAdd.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
result.toRemove.add(outpoint, utxoEntry)
}
}
// Apply diff.diffMultiset to d.diffMultiset
if d.useMultiset {
result.diffMultiset = d.diffMultiset.Union(diff.diffMultiset)
}
return &result, nil
return clone, nil
}
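A small sketch of the table's cancellation rule (op and entry are hypothetical values):
d := NewUTXODiff()
_ = d.AddEntry(op, entry)        // d.toAdd: {op}
other := NewUTXODiff()
_ = other.RemoveEntry(op, entry) // other.toRemove: {op}
combined, _ := d.WithDiff(other)
// combined is empty: op in d.toAdd meets op in other.toRemove, so nothing is emitted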
// clone returns a clone of this utxoDiff
func (d *UTXODiff) clone() *UTXODiff {
clone := &UTXODiff{
toAdd: d.toAdd.clone(),
toRemove: d.toRemove.clone(),
useMultiset: d.useMultiset,
}
if d.useMultiset {
clone.diffMultiset = d.diffMultiset.Clone()
toAdd: d.toAdd.clone(),
toRemove: d.toRemove.clone(),
}
return clone
}
@@ -475,14 +361,6 @@ func (d *UTXODiff) AddEntry(outpoint wire.Outpoint, entry *UTXOEntry) error {
} else {
d.toAdd.add(outpoint, entry)
}
if d.useMultiset {
newMs, err := addUTXOToMultiset(d.diffMultiset, entry, &outpoint)
if err != nil {
return err
}
d.diffMultiset = newMs
}
return nil
}
@@ -498,21 +376,10 @@ func (d *UTXODiff) RemoveEntry(outpoint wire.Outpoint, entry *UTXOEntry) error {
} else {
d.toRemove.add(outpoint, entry)
}
if d.useMultiset {
newMs, err := removeUTXOFromMultiset(d.diffMultiset, entry, &outpoint)
if err != nil {
return err
}
d.diffMultiset = newMs
}
return nil
}
func (d UTXODiff) String() string {
if d.useMultiset {
return fmt.Sprintf("toAdd: %s; toRemove: %s, Multiset-Hash: %s", d.toAdd, d.toRemove, d.diffMultiset.Hash())
}
return fmt.Sprintf("toAdd: %s; toRemove: %s", d.toAdd, d.toRemove)
}
@@ -532,97 +399,27 @@ type UTXOSet interface {
fmt.Stringer
diffFrom(other UTXOSet) (*UTXODiff, error)
WithDiff(utxoDiff *UTXODiff) (UTXOSet, error)
diffFromTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error)
diffFromAcceptedTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error)
AddTx(tx *wire.MsgTx, blockBlueScore uint64) (ok bool, err error)
clone() UTXOSet
Get(outpoint wire.Outpoint) (*UTXOEntry, bool)
Multiset() *ecc.Multiset
WithTransactions(transactions []*wire.MsgTx, blockBlueScore uint64, ignoreDoubleSpends bool) (UTXOSet, error)
}
// diffFromTx is a common implementation of diffFromTx that works
// for both diff-based and full UTXO sets.
// It returns a diff that is equivalent to the provided transaction,
// or an error if the provided transaction is not valid in the context of this UTXOSet
func diffFromTx(u UTXOSet, tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
diff := NewUTXODiff()
isCoinbase := tx.IsCoinBase()
if !isCoinbase {
for _, txIn := range tx.TxIn {
if entry, ok := u.Get(txIn.PreviousOutpoint); ok {
err := diff.RemoveEntry(txIn.PreviousOutpoint, entry)
if err != nil {
return nil, err
}
} else {
return nil, ruleError(ErrMissingTxOut, fmt.Sprintf(
"Transaction %s is invalid because spends outpoint %s that is not in utxo set",
tx.TxID(), txIn.PreviousOutpoint))
}
}
}
for i, txOut := range tx.TxOut {
entry := NewUTXOEntry(txOut, isCoinbase, acceptingBlueScore)
outpoint := *wire.NewOutpoint(tx.TxID(), uint32(i))
err := diff.AddEntry(outpoint, entry)
if err != nil {
return nil, err
}
}
return diff, nil
}
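Concretely, for the helper above: a transaction spending prevOut and creating a single output yields (hypothetical names):
// diff.toRemove: { prevOut: existingEntry }
// diff.toAdd:    { (tx.TxID(), 0): NewUTXOEntry(tx.TxOut[0], isCoinbase, acceptingBlueScore) }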
// diffFromAcceptedTx is a common implementation of diffFromAcceptedTx that works
// for both diff-based and full UTXO sets.
// Returns a diff that replaces an entry's blockBlueScore with the given acceptingBlueScore.
// Returns an error if the provided transaction's entry is not valid in the context
// of this UTXOSet.
func diffFromAcceptedTx(u UTXOSet, tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
diff := NewUTXODiff()
isCoinbase := tx.IsCoinBase()
for i, txOut := range tx.TxOut {
// Fetch any unaccepted transaction
existingOutpoint := *wire.NewOutpoint(tx.TxID(), uint32(i))
existingEntry, ok := u.Get(existingOutpoint)
if !ok {
return nil, errors.Errorf("cannot accept outpoint %s because it doesn't exist in the given UTXO", existingOutpoint)
}
// Remove unaccepted entries
err := diff.RemoveEntry(existingOutpoint, existingEntry)
if err != nil {
return nil, err
}
// Add new entries with their accepting blue score
newEntry := NewUTXOEntry(txOut, isCoinbase, acceptingBlueScore)
err = diff.AddEntry(existingOutpoint, newEntry)
if err != nil {
return nil, err
}
}
return diff, nil
}
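And for diffFromAcceptedTx, each of the transaction's own outpoints is swapped for a re-scored copy (a sketch):
// diff.toRemove: { (tx.TxID(), i): unacceptedEntry }
// diff.toAdd:    { (tx.TxID(), i): NewUTXOEntry(tx.TxOut[i], isCoinbase, acceptingBlueScore) }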
// FullUTXOSet represents a full list of transaction outputs and their values
type FullUTXOSet struct {
utxoCollection
UTXOMultiset *ecc.Multiset
}
// NewFullUTXOSet creates a new utxoSet with full list of transaction outputs and their values
func NewFullUTXOSet() *FullUTXOSet {
return &FullUTXOSet{
utxoCollection: utxoCollection{},
UTXOMultiset: ecc.NewMultiset(ecc.S256()),
}
}
// newFullUTXOSetFromUTXOCollection converts a utxoCollection to a FullUTXOSet
func newFullUTXOSetFromUTXOCollection(collection utxoCollection) (*FullUTXOSet, error) {
var err error
multiset := ecc.NewMultiset(ecc.S256())
multiset := secp256k1.NewMultiset()
for outpoint, utxoEntry := range collection {
multiset, err = addUTXOToMultiset(multiset, utxoEntry, &outpoint)
if err != nil {
@@ -631,7 +428,6 @@ func newFullUTXOSetFromUTXOCollection(collection utxoCollection) (*FullUTXOSet,
}
return &FullUTXOSet{
utxoCollection: collection,
UTXOMultiset: multiset,
}, nil
}
@@ -668,33 +464,19 @@ func (fus *FullUTXOSet) AddTx(tx *wire.MsgTx, blueScore uint64) (isAccepted bool
}
for _, txIn := range tx.TxIn {
outpoint := *wire.NewOutpoint(&txIn.PreviousOutpoint.TxID, txIn.PreviousOutpoint.Index)
err := fus.removeAndUpdateMultiset(outpoint)
if err != nil {
return false, err
}
fus.remove(txIn.PreviousOutpoint)
}
}
for i, txOut := range tx.TxOut {
outpoint := *wire.NewOutpoint(tx.TxID(), uint32(i))
entry := NewUTXOEntry(txOut, isCoinbase, blueScore)
err := fus.addAndUpdateMultiset(outpoint, entry)
if err != nil {
return false, err
}
fus.add(outpoint, entry)
}
return true, nil
}
// diffFromTx returns a diff that is equivalent to provided transaction,
// or an error if provided transaction is not valid in the context of this UTXOSet
func (fus *FullUTXOSet) diffFromTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
return diffFromTx(fus, tx, acceptingBlueScore)
}
func (fus *FullUTXOSet) containsInputs(tx *wire.MsgTx) bool {
for _, txIn := range tx.TxIn {
outpoint := *wire.NewOutpoint(&txIn.PreviousOutpoint.TxID, txIn.PreviousOutpoint.Index)
@@ -706,13 +488,9 @@ func (fus *FullUTXOSet) containsInputs(tx *wire.MsgTx) bool {
return true
}
func (fus *FullUTXOSet) diffFromAcceptedTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
return diffFromAcceptedTx(fus, tx, acceptingBlueScore)
}
// clone returns a clone of this utxoSet
func (fus *FullUTXOSet) clone() UTXOSet {
return &FullUTXOSet{utxoCollection: fus.utxoCollection.clone(), UTXOMultiset: fus.UTXOMultiset.Clone()}
return &FullUTXOSet{utxoCollection: fus.utxoCollection.clone()}
}
// Get returns the UTXOEntry associated with the given Outpoint, and a boolean indicating if such entry was found
@@ -721,55 +499,6 @@ func (fus *FullUTXOSet) Get(outpoint wire.Outpoint) (*UTXOEntry, bool) {
return utxoEntry, ok
}
// Multiset returns the ecmh-Multiset of this utxoSet
func (fus *FullUTXOSet) Multiset() *ecc.Multiset {
return fus.UTXOMultiset
}
// addAndUpdateMultiset adds a UTXOEntry to this utxoSet and updates its multiset accordingly
func (fus *FullUTXOSet) addAndUpdateMultiset(outpoint wire.Outpoint, entry *UTXOEntry) error {
fus.add(outpoint, entry)
newMs, err := addUTXOToMultiset(fus.UTXOMultiset, entry, &outpoint)
if err != nil {
return err
}
fus.UTXOMultiset = newMs
return nil
}
// removeAndUpdateMultiset removes a UTXOEntry from this utxoSet and updates its multiset accordingly
func (fus *FullUTXOSet) removeAndUpdateMultiset(outpoint wire.Outpoint) error {
entry, ok := fus.Get(outpoint)
if !ok {
return errors.Errorf("Couldn't find outpoint %s", outpoint)
}
fus.remove(outpoint)
var err error
newMs, err := removeUTXOFromMultiset(fus.UTXOMultiset, entry, &outpoint)
if err != nil {
return err
}
fus.UTXOMultiset = newMs
return nil
}
// WithTransactions returns a new UTXO Set with the added transactions.
//
// This function MUST be called with the DAG lock held.
func (fus *FullUTXOSet) WithTransactions(transactions []*wire.MsgTx, blockBlueScore uint64, ignoreDoubleSpends bool) (UTXOSet, error) {
diffSet := NewDiffUTXOSet(fus, NewUTXODiff())
for _, tx := range transactions {
isAccepted, err := diffSet.AddTx(tx, blockBlueScore)
if err != nil {
return nil, err
}
if !ignoreDoubleSpends && !isAccepted {
return nil, errors.Errorf("Transaction %s is not valid with the current UTXO set", tx.TxID())
}
}
return UTXOSet(diffSet), nil
}
// DiffUTXOSet represents a utxoSet with a base fullUTXOSet and a UTXODiff
type DiffUTXOSet struct {
base *FullUTXOSet
@@ -830,12 +559,11 @@ func (dus *DiffUTXOSet) AddTx(tx *wire.MsgTx, blockBlueScore uint64) (bool, erro
func (dus *DiffUTXOSet) appendTx(tx *wire.MsgTx, blockBlueScore uint64, isCoinbase bool) error {
if !isCoinbase {
for _, txIn := range tx.TxIn {
outpoint := *wire.NewOutpoint(&txIn.PreviousOutpoint.TxID, txIn.PreviousOutpoint.Index)
entry, ok := dus.Get(outpoint)
entry, ok := dus.Get(txIn.PreviousOutpoint)
if !ok {
return errors.Errorf("Couldn't find entry for outpoint %s", outpoint)
return errors.Errorf("Couldn't find entry for outpoint %s", txIn.PreviousOutpoint)
}
err := dus.UTXODiff.RemoveEntry(outpoint, entry)
err := dus.UTXODiff.RemoveEntry(txIn.PreviousOutpoint, entry)
if err != nil {
return err
}
@@ -881,31 +609,12 @@ func (dus *DiffUTXOSet) meldToBase() error {
for outpoint, utxoEntry := range dus.UTXODiff.toAdd {
dus.base.add(outpoint, utxoEntry)
}
if dus.UTXODiff.useMultiset {
dus.base.UTXOMultiset = dus.base.UTXOMultiset.Union(dus.UTXODiff.diffMultiset)
}
if dus.UTXODiff.useMultiset {
dus.UTXODiff = NewUTXODiff()
} else {
dus.UTXODiff = NewUTXODiffWithoutMultiset()
}
dus.UTXODiff = NewUTXODiff()
return nil
}
// diffFromTx returns a diff that is equivalent to provided transaction,
// or an error if provided transaction is not valid in the context of this UTXOSet
func (dus *DiffUTXOSet) diffFromTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
return diffFromTx(dus, tx, acceptingBlueScore)
}
func (dus *DiffUTXOSet) diffFromAcceptedTx(tx *wire.MsgTx, acceptingBlueScore uint64) (*UTXODiff, error) {
return diffFromAcceptedTx(dus, tx, acceptingBlueScore)
}
func (dus *DiffUTXOSet) String() string {
return fmt.Sprintf("{Base: %s, To Add: %s, To Remove: %s, Multiset-Hash:%s}", dus.base, dus.UTXODiff.toAdd, dus.UTXODiff.toRemove, dus.Multiset().Hash())
return fmt.Sprintf("{Base: %s, To Add: %s, To Remove: %s}", dus.base, dus.UTXODiff.toAdd, dus.UTXODiff.toRemove)
}
// clone returns a clone of this UTXO Set
@@ -913,6 +622,12 @@ func (dus *DiffUTXOSet) clone() UTXOSet {
return NewDiffUTXOSet(dus.base.clone().(*FullUTXOSet), dus.UTXODiff.clone())
}
// cloneWithoutBase returns a *DiffUTXOSet with same
// base as this *DiffUTXOSet and a cloned diff.
func (dus *DiffUTXOSet) cloneWithoutBase() UTXOSet {
return NewDiffUTXOSet(dus.base, dus.UTXODiff.clone())
}
// Get returns the UTXOEntry associated with provided outpoint in this UTXOSet.
// Returns false in second output if this UTXOEntry was not found
func (dus *DiffUTXOSet) Get(outpoint wire.Outpoint) (*UTXOEntry, bool) {
@@ -930,42 +645,3 @@ func (dus *DiffUTXOSet) Get(outpoint wire.Outpoint) (*UTXOEntry, bool) {
txOut, ok := dus.UTXODiff.toAdd.get(outpoint)
return txOut, ok
}
// Multiset returns the ecmh-Multiset of this utxoSet
func (dus *DiffUTXOSet) Multiset() *ecc.Multiset {
return dus.base.UTXOMultiset.Union(dus.UTXODiff.diffMultiset)
}
// WithTransactions returns a new UTXO Set with the added transactions.
//
// If dus.UTXODiff.useMultiset is true, this function MUST be
// called with the DAG lock held.
func (dus *DiffUTXOSet) WithTransactions(transactions []*wire.MsgTx, blockBlueScore uint64, ignoreDoubleSpends bool) (UTXOSet, error) {
diffSet := NewDiffUTXOSet(dus.base, dus.UTXODiff.clone())
for _, tx := range transactions {
isAccepted, err := diffSet.AddTx(tx, blockBlueScore)
if err != nil {
return nil, err
}
if !ignoreDoubleSpends && !isAccepted {
return nil, errors.Errorf("Transaction %s is not valid with the current UTXO set", tx.TxID())
}
}
return UTXOSet(diffSet), nil
}
func addUTXOToMultiset(ms *ecc.Multiset, entry *UTXOEntry, outpoint *wire.Outpoint) (*ecc.Multiset, error) {
utxoMS, err := utxoMultiset(entry, outpoint)
if err != nil {
return nil, err
}
return ms.Union(utxoMS), nil
}
func removeUTXOFromMultiset(ms *ecc.Multiset, entry *UTXOEntry, outpoint *wire.Outpoint) (*ecc.Multiset, error) {
utxoMS, err := utxoMultiset(entry, outpoint)
if err != nil {
return nil, err
}
return ms.Subtract(utxoMS), nil
}


@@ -7,7 +7,6 @@ import (
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
@@ -80,49 +79,40 @@ func TestUTXODiff(t *testing.T) {
utxoEntry0 := NewUTXOEntry(&wire.TxOut{ScriptPubKey: []byte{}, Value: 10}, true, 0)
utxoEntry1 := NewUTXOEntry(&wire.TxOut{ScriptPubKey: []byte{}, Value: 20}, false, 1)
for i := 0; i < 2; i++ {
withMultiset := i == 0
// Test utxoDiff creation
var diff *UTXODiff
if withMultiset {
diff = NewUTXODiff()
} else {
diff = NewUTXODiffWithoutMultiset()
}
if len(diff.toAdd) != 0 || len(diff.toRemove) != 0 {
t.Errorf("new diff is not empty")
}
// Test utxoDiff creation
err := diff.AddEntry(outpoint0, utxoEntry0)
if err != nil {
t.Fatalf("error adding entry to utxo diff: %s", err)
}
diff := NewUTXODiff()
err = diff.RemoveEntry(outpoint1, utxoEntry1)
if err != nil {
t.Fatalf("error adding entry to utxo diff: %s", err)
}
if len(diff.toAdd) != 0 || len(diff.toRemove) != 0 {
t.Errorf("new diff is not empty")
}
// Test utxoDiff cloning
clonedDiff := diff.clone()
if clonedDiff == diff {
t.Errorf("cloned diff is reference-equal to the original")
}
if !reflect.DeepEqual(clonedDiff, diff) {
t.Errorf("cloned diff not equal to the original"+
"Original: \"%v\", cloned: \"%v\".", diff, clonedDiff)
}
err := diff.AddEntry(outpoint0, utxoEntry0)
if err != nil {
t.Fatalf("error adding entry to utxo diff: %s", err)
}
// Test utxoDiff string representation
expectedDiffString := "toAdd: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ]; toRemove: [ (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20, blueScore: 1 ]"
if withMultiset {
expectedDiffString = "toAdd: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ]; toRemove: [ (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20, blueScore: 1 ], Multiset-Hash: 7cb61e48005b0c817211d04589d719bff87d86a6a6ce2454515f57265382ded7"
}
diffString := clonedDiff.String()
if diffString != expectedDiffString {
t.Errorf("unexpected diff string. "+
"Expected: \"%s\", got: \"%s\".", expectedDiffString, diffString)
}
err = diff.RemoveEntry(outpoint1, utxoEntry1)
if err != nil {
t.Fatalf("error adding entry to utxo diff: %s", err)
}
// Test utxoDiff cloning
clonedDiff := diff.clone()
if clonedDiff == diff {
t.Errorf("cloned diff is reference-equal to the original")
}
if !reflect.DeepEqual(clonedDiff, diff) {
t.Errorf("cloned diff not equal to the original"+
"Original: \"%v\", cloned: \"%v\".", diff, clonedDiff)
}
// Test utxoDiff string representation
expectedDiffString := "toAdd: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ]; toRemove: [ (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20, blueScore: 1 ]"
diffString := clonedDiff.String()
if diffString != expectedDiffString {
t.Errorf("unexpected diff string. "+
"Expected: \"%s\", got: \"%s\".", expectedDiffString, diffString)
}
}
@@ -137,7 +127,7 @@ func TestUTXODiffRules(t *testing.T) {
// For each of the following test cases, we will:
// this.diffFrom(other) and compare it to expectedDiffFromResult
// this.WithDiff(other) and compare it to expectedWithDiffResult
// this.WithDiffInPlace(other) and compare it to expectedWithDiffResult
// this.withDiffInPlace(other) and compare it to expectedWithDiffResult
//
// Note: an expected nil result means that we expect the respective operation to fail
// See the following spreadsheet for a summary of all test-cases:
@@ -542,157 +532,101 @@ func TestUTXODiffRules(t *testing.T) {
}
for _, test := range tests {
this := addMultisetToDiff(t, test.this)
other := addMultisetToDiff(t, test.other)
expectedDiffFromResult := addMultisetToDiff(t, test.expectedDiffFromResult)
expectedWithDiffResult := addMultisetToDiff(t, test.expectedWithDiffResult)
// diffFrom from this to other
diffResult, err := this.diffFrom(other)
// diffFrom from test.this to test.other
diffResult, err := test.this.diffFrom(test.other)
// Test whether diffFrom returned an error
isDiffFromOk := err == nil
expectedIsDiffFromOk := expectedDiffFromResult != nil
expectedIsDiffFromOk := test.expectedDiffFromResult != nil
if isDiffFromOk != expectedIsDiffFromOk {
t.Errorf("unexpected diffFrom error in test \"%s\". "+
"Expected: \"%t\", got: \"%t\".", test.name, expectedIsDiffFromOk, isDiffFromOk)
}
// If not error, test the diffFrom result
if isDiffFromOk && !expectedDiffFromResult.equal(diffResult) {
if isDiffFromOk && !test.expectedDiffFromResult.equal(diffResult) {
t.Errorf("unexpected diffFrom result in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, expectedDiffFromResult, diffResult)
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedDiffFromResult, diffResult)
}
// Make sure that WithDiff after diffFrom results in the original other
// Make sure that WithDiff after diffFrom results in the original test.other
if isDiffFromOk {
otherResult, err := this.WithDiff(diffResult)
otherResult, err := test.this.WithDiff(diffResult)
if err != nil {
t.Errorf("WithDiff unexpectedly failed in test \"%s\": %s", test.name, err)
}
if !other.equal(otherResult) {
if !test.other.equal(otherResult) {
t.Errorf("unexpected WithDiff result in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, other, otherResult)
"Expected: \"%v\", got: \"%v\".", test.name, test.other, otherResult)
}
}
// WithDiff from this to other
withDiffResult, err := this.WithDiff(other)
// WithDiff from test.this to test.other
withDiffResult, err := test.this.WithDiff(test.other)
// Test whether WithDiff returned an error
isWithDiffOk := err == nil
expectedIsWithDiffOk := expectedWithDiffResult != nil
expectedIsWithDiffOk := test.expectedWithDiffResult != nil
if isWithDiffOk != expectedIsWithDiffOk {
t.Errorf("unexpected WithDiff error in test \"%s\". "+
"Expected: \"%t\", got: \"%t\".", test.name, expectedIsWithDiffOk, isWithDiffOk)
}
// If not error, test the WithDiff result
if isWithDiffOk && !withDiffResult.equal(expectedWithDiffResult) {
if isWithDiffOk && !withDiffResult.equal(test.expectedWithDiffResult) {
t.Errorf("unexpected WithDiff result in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, expectedWithDiffResult, withDiffResult)
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedWithDiffResult, withDiffResult)
}
// Repeat WithDiff check this time using WithDiffInPlace
thisClone := this.clone()
err = thisClone.WithDiffInPlace(other)
// Repeat the WithDiff check, this time using withDiffInPlace
thisClone := test.this.clone()
err = thisClone.withDiffInPlace(test.other)
// Test whether WithDiffInPlace returned an error
// Test whether withDiffInPlace returned an error
isWithDiffInPlaceOk := err == nil
expectedIsWithDiffInPlaceOk := expectedWithDiffResult != nil
expectedIsWithDiffInPlaceOk := test.expectedWithDiffResult != nil
if isWithDiffInPlaceOk != expectedIsWithDiffInPlaceOk {
t.Errorf("unexpected WithDiffInPlace error in test \"%s\". "+
t.Errorf("unexpected withDiffInPlace error in test \"%s\". "+
"Expected: \"%t\", got: \"%t\".", test.name, expectedIsWithDiffInPlaceOk, isWithDiffInPlaceOk)
}
// If not error, test the WithDiffInPlace result
if isWithDiffInPlaceOk && !thisClone.equal(expectedWithDiffResult) {
t.Errorf("unexpected WithDiffInPlace result in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, expectedWithDiffResult, thisClone)
// If not error, test the withDiffInPlace result
if isWithDiffInPlaceOk && !thisClone.equal(test.expectedWithDiffResult) {
t.Errorf("unexpected withDiffInPlace result in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedWithDiffResult, thisClone)
}
// Make sure that diffFrom after WithDiff results in the original other
// Make sure that diffFrom after WithDiff results in the original test.other
if isWithDiffOk {
otherResult, err := this.diffFrom(withDiffResult)
otherResult, err := test.this.diffFrom(withDiffResult)
if err != nil {
t.Errorf("diffFrom unexpectedly failed in test \"%s\": %s", test.name, err)
}
if !other.equal(otherResult) {
if !test.other.equal(otherResult) {
t.Errorf("unexpected diffFrom result in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, other, otherResult)
"Expected: \"%v\", got: \"%v\".", test.name, test.other, otherResult)
}
}
}
}
func areMultisetsEqual(a *ecc.Multiset, b *ecc.Multiset) bool {
aX, aY := a.Point()
bX, bY := b.Point()
return aX.Cmp(bX) == 0 && aY.Cmp(bY) == 0
}
func (d *UTXODiff) equal(other *UTXODiff) bool {
if d == nil || other == nil {
return d == other
}
return reflect.DeepEqual(d.toAdd, other.toAdd) &&
reflect.DeepEqual(d.toRemove, other.toRemove) &&
areMultisetsEqual(d.diffMultiset, other.diffMultiset)
reflect.DeepEqual(d.toRemove, other.toRemove)
}
func (fus *FullUTXOSet) equal(other *FullUTXOSet) bool {
return reflect.DeepEqual(fus.utxoCollection, other.utxoCollection) &&
areMultisetsEqual(fus.UTXOMultiset, other.UTXOMultiset)
return reflect.DeepEqual(fus.utxoCollection, other.utxoCollection)
}
func (dus *DiffUTXOSet) equal(other *DiffUTXOSet) bool {
return dus.base.equal(other.base) && dus.UTXODiff.equal(other.UTXODiff)
}
func addMultisetToDiff(t *testing.T, diff *UTXODiff) *UTXODiff {
if diff == nil {
return nil
}
diffWithMs := NewUTXODiff()
for outpoint, entry := range diff.toAdd {
err := diffWithMs.AddEntry(outpoint, entry)
if err != nil {
t.Fatalf("Error with diffWithMs.AddEntry: %s", err)
}
}
for outpoint, entry := range diff.toRemove {
err := diffWithMs.RemoveEntry(outpoint, entry)
if err != nil {
t.Fatalf("Error with diffWithMs.removeEntry: %s", err)
}
}
return diffWithMs
}
func addMultisetToFullUTXOSet(t *testing.T, fus *FullUTXOSet) *FullUTXOSet {
if fus == nil {
return nil
}
fusWithMs := NewFullUTXOSet()
for outpoint, entry := range fus.utxoCollection {
err := fusWithMs.addAndUpdateMultiset(outpoint, entry)
if err != nil {
t.Fatalf("Error with diffWithMs.AddEntry: %s", err)
}
}
return fusWithMs
}
func addMultisetToDiffUTXOSet(t *testing.T, diffSet *DiffUTXOSet) *DiffUTXOSet {
if diffSet == nil {
return nil
}
diffWithMs := addMultisetToDiff(t, diffSet.UTXODiff)
baseWithMs := addMultisetToFullUTXOSet(t, diffSet.base)
return NewDiffUTXOSet(baseWithMs, diffWithMs)
}
// TestFullUTXOSet makes sure that fullUTXOSet is working as expected.
func TestFullUTXOSet(t *testing.T) {
txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
@@ -703,10 +637,10 @@ func TestFullUTXOSet(t *testing.T) {
txOut1 := &wire.TxOut{ScriptPubKey: []byte{}, Value: 20}
utxoEntry0 := NewUTXOEntry(txOut0, true, 0)
utxoEntry1 := NewUTXOEntry(txOut1, false, 1)
diff := addMultisetToDiff(t, &UTXODiff{
diff := &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry0},
toRemove: utxoCollection{outpoint1: utxoEntry1},
})
}
// Test fullUTXOSet creation
emptySet := NewFullUTXOSet()
@@ -735,7 +669,7 @@ func TestFullUTXOSet(t *testing.T) {
} else if isAccepted {
t.Errorf("addTx unexpectedly succeeded")
}
emptySet = addMultisetToFullUTXOSet(t, &FullUTXOSet{utxoCollection: utxoCollection{outpoint0: utxoEntry0}})
emptySet = &FullUTXOSet{utxoCollection: utxoCollection{outpoint0: utxoEntry0}}
if isAccepted, err := emptySet.AddTx(transaction0, 0); err != nil {
t.Errorf("addTx unexpectedly failed. Error: %s", err)
} else if !isAccepted {
@@ -767,10 +701,10 @@ func TestDiffUTXOSet(t *testing.T) {
txOut1 := &wire.TxOut{ScriptPubKey: []byte{}, Value: 20}
utxoEntry0 := NewUTXOEntry(txOut0, true, 0)
utxoEntry1 := NewUTXOEntry(txOut1, false, 1)
diff := addMultisetToDiff(t, &UTXODiff{
diff := &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry0},
toRemove: utxoCollection{outpoint1: utxoEntry1},
})
}
// Test diffUTXOSet creation
emptySet := NewDiffUTXOSet(NewFullUTXOSet(), NewUTXODiff())
@@ -828,7 +762,7 @@ func TestDiffUTXOSet(t *testing.T) {
toRemove: utxoCollection{},
},
},
expectedString: "{Base: [ ], To Add: [ ], To Remove: [ ], Multiset-Hash:0000000000000000000000000000000000000000000000000000000000000000}",
expectedString: "{Base: [ ], To Add: [ ], To Remove: [ ]}",
expectedCollection: utxoCollection{},
},
{
@@ -847,7 +781,7 @@ func TestDiffUTXOSet(t *testing.T) {
toRemove: utxoCollection{},
},
},
expectedString: "{Base: [ ], To Add: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Remove: [ ], Multiset-Hash:da4768bd0359c3426268d6707c1fc17a68c45ef1ea734331b07568418234487f}",
expectedString: "{Base: [ ], To Add: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Remove: [ ]}",
expectedCollection: utxoCollection{outpoint0: utxoEntry0},
},
{
@@ -860,7 +794,7 @@ func TestDiffUTXOSet(t *testing.T) {
},
},
expectedMeldSet: nil,
expectedString: "{Base: [ ], To Add: [ ], To Remove: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], Multiset-Hash:046242cb1bb1e6d3fd91d0f181e1b2d4a597ac57fa2584fc3c2eb0e0f46c9369}",
expectedString: "{Base: [ ], To Add: [ ], To Remove: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ]}",
expectedCollection: utxoCollection{},
expectedMeldToBaseError: "Couldn't remove outpoint 0000000000000000000000000000000000000000000000000000000000000000:0 because it doesn't exist in the DiffUTXOSet base",
},
@@ -885,7 +819,7 @@ func TestDiffUTXOSet(t *testing.T) {
toRemove: utxoCollection{},
},
},
expectedString: "{Base: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Add: [ (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20, blueScore: 1 ], To Remove: [ ], Multiset-Hash:556cc61fd4d7e74d7807ca2298c5320375a6a20310a18920e54667220924baff}",
expectedString: "{Base: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Add: [ (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20, blueScore: 1 ], To Remove: [ ]}",
expectedCollection: utxoCollection{
outpoint0: utxoEntry0,
outpoint1: utxoEntry1,
@@ -909,24 +843,21 @@ func TestDiffUTXOSet(t *testing.T) {
toRemove: utxoCollection{},
},
},
expectedString: "{Base: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Add: [ ], To Remove: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], Multiset-Hash:0000000000000000000000000000000000000000000000000000000000000000}",
expectedString: "{Base: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Add: [ ], To Remove: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ]}",
expectedCollection: utxoCollection{},
},
}
for _, test := range tests {
diffSet := addMultisetToDiffUTXOSet(t, test.diffSet)
expectedMeldSet := addMultisetToDiffUTXOSet(t, test.expectedMeldSet)
// Test string representation
setString := diffSet.String()
setString := test.diffSet.String()
if setString != test.expectedString {
t.Errorf("unexpected string in test \"%s\". "+
"Expected: \"%s\", got: \"%s\".", test.name, test.expectedString, setString)
}
// Test meldToBase
meldSet := diffSet.clone().(*DiffUTXOSet)
meldSet := test.diffSet.clone().(*DiffUTXOSet)
err := meldSet.meldToBase()
errString := ""
if err != nil {
@@ -938,27 +869,27 @@ func TestDiffUTXOSet(t *testing.T) {
if err != nil {
continue
}
if !meldSet.equal(expectedMeldSet) {
if !meldSet.equal(test.expectedMeldSet) {
t.Errorf("unexpected melded set in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, expectedMeldSet, meldSet)
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedMeldSet, meldSet)
}
// Test collection
setCollection, err := diffSet.collection()
setCollection, err := test.diffSet.collection()
if err != nil {
t.Errorf("Error getting diffSet collection: %s", err)
t.Errorf("Error getting test.diffSet collection: %s", err)
} else if !reflect.DeepEqual(setCollection, test.expectedCollection) {
t.Errorf("unexpected set collection in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedCollection, setCollection)
}
// Test cloning
clonedSet := diffSet.clone().(*DiffUTXOSet)
if !reflect.DeepEqual(clonedSet, diffSet) {
clonedSet := test.diffSet.clone().(*DiffUTXOSet)
if !reflect.DeepEqual(clonedSet, test.diffSet) {
t.Errorf("unexpected set clone in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, diffSet, clonedSet)
"Expected: \"%v\", got: \"%v\".", test.name, test.diffSet, clonedSet)
}
if clonedSet == diffSet {
if clonedSet == test.diffSet {
t.Errorf("cloned set is reference-equal to the original")
}
}
@@ -1159,10 +1090,7 @@ func TestDiffUTXOSet_addTx(t *testing.T) {
testLoop:
for _, test := range tests {
startSet := addMultisetToDiffUTXOSet(t, test.startSet)
expectedSet := addMultisetToDiffUTXOSet(t, test.expectedSet)
diffSet := startSet.clone()
diffSet := test.startSet.clone()
// Apply all transactions to diffSet, in order, with the initial block height startHeight
for i, transaction := range test.toAdd {
@@ -1174,89 +1102,14 @@ testLoop:
}
}
// Make sure that the result diffSet equals to the expectedSet
if !diffSet.(*DiffUTXOSet).equal(expectedSet) {
// Make sure that the resulting diffSet equals test.expectedSet
if !diffSet.(*DiffUTXOSet).equal(test.expectedSet) {
t.Errorf("unexpected diffSet in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, expectedSet, diffSet)
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedSet, diffSet)
}
}
}
func TestDiffFromTx(t *testing.T) {
fus := addMultisetToFullUTXOSet(t, &FullUTXOSet{
utxoCollection: utxoCollection{},
})
txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
txIn0 := &wire.TxIn{SignatureScript: []byte{}, PreviousOutpoint: wire.Outpoint{TxID: *txID0, Index: math.MaxUint32}, Sequence: 0}
txOut0 := &wire.TxOut{ScriptPubKey: []byte{0}, Value: 10}
cbTx := wire.NewSubnetworkMsgTx(1, []*wire.TxIn{txIn0}, []*wire.TxOut{txOut0}, subnetworkid.SubnetworkIDCoinbase, 0, nil)
if isAccepted, err := fus.AddTx(cbTx, 1); err != nil {
t.Fatalf("AddTx unexpectedly failed. Error: %s", err)
} else if !isAccepted {
t.Fatalf("AddTx unexpectedly didn't add tx %s", cbTx.TxID())
}
acceptingBlueScore := uint64(2)
cbOutpoint := wire.Outpoint{TxID: *cbTx.TxID(), Index: 0}
txIns := []*wire.TxIn{{
PreviousOutpoint: cbOutpoint,
SignatureScript: nil,
Sequence: wire.MaxTxInSequenceNum,
}}
txOuts := []*wire.TxOut{{
ScriptPubKey: OpTrueScript,
Value: uint64(1),
}}
tx := wire.NewNativeMsgTx(wire.TxVersion, txIns, txOuts)
diff, err := fus.diffFromTx(tx, acceptingBlueScore)
if err != nil {
t.Errorf("diffFromTx: %v", err)
}
if !reflect.DeepEqual(diff.toAdd, utxoCollection{
wire.Outpoint{TxID: *tx.TxID(), Index: 0}: NewUTXOEntry(tx.TxOut[0], false, 2),
}) {
t.Errorf("diff.toAdd doesn't have the expected values")
}
if !reflect.DeepEqual(diff.toRemove, utxoCollection{
wire.Outpoint{TxID: *cbTx.TxID(), Index: 0}: NewUTXOEntry(cbTx.TxOut[0], true, 1),
}) {
t.Errorf("diff.toRemove doesn't have the expected values")
}
// Test that we get an error if we don't have the outpoint inside the utxo set
invalidTxIns := []*wire.TxIn{{
PreviousOutpoint: wire.Outpoint{TxID: daghash.TxID{}, Index: 0},
SignatureScript: nil,
Sequence: wire.MaxTxInSequenceNum,
}}
invalidTxOuts := []*wire.TxOut{{
ScriptPubKey: OpTrueScript,
Value: uint64(1),
}}
invalidTx := wire.NewNativeMsgTx(wire.TxVersion, invalidTxIns, invalidTxOuts)
_, err = fus.diffFromTx(invalidTx, acceptingBlueScore)
if err == nil {
t.Errorf("diffFromTx: expected an error but got <nil>")
}
// Test that we get an error if the outpoint is inside diffUTXOSet's toRemove
diff2 := addMultisetToDiff(t, &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
})
dus := NewDiffUTXOSet(fus, diff2)
if isAccepted, err := dus.AddTx(tx, 2); err != nil {
t.Fatalf("AddTx unexpectedly failed. Error: %s", err)
} else if !isAccepted {
t.Fatalf("AddTx unexpectedly didn't add tx %s", tx.TxID())
}
_, err = dus.diffFromTx(tx, acceptingBlueScore)
if err == nil {
t.Errorf("diffFromTx: expected an error but got <nil>")
}
}
// collection returns a collection of all UTXOs in this set
func (fus *FullUTXOSet) collection() utxoCollection {
return fus.utxoCollection.clone()
@@ -1321,7 +1174,6 @@ func TestUTXOSetAddEntry(t *testing.T) {
}
for _, test := range tests {
expectedUTXODiff := addMultisetToDiff(t, test.expectedUTXODiff)
err := utxoDiff.AddEntry(*test.outpointToAdd, test.utxoEntryToAdd)
errString := ""
if err != nil {
@@ -1330,9 +1182,9 @@ func TestUTXOSetAddEntry(t *testing.T) {
if errString != test.expectedError {
t.Fatalf("utxoDiff.AddEntry: unexpected err in test \"%s\". Expected: %s but got: %s", test.name, test.expectedError, err)
}
if err == nil && !utxoDiff.equal(expectedUTXODiff) {
if err == nil && !utxoDiff.equal(test.expectedUTXODiff) {
t.Fatalf("utxoDiff.AddEntry: unexpected utxoDiff in test \"%s\". "+
"Expected: %v, got: %v", test.name, expectedUTXODiff, utxoDiff)
"Expected: %v, got: %v", test.name, test.expectedUTXODiff, utxoDiff)
}
}
}


@@ -435,7 +435,7 @@ func (dag *BlockDAG) checkBlockHeaderSanity(header *wire.BlockHeader, flags Beha
// the duration of time that should be waited before the block becomes valid.
// This check needs to be last as it does not return an error but rather marks the
// header as delayed (and valid).
maxTimestamp := dag.AdjustedTime().Add(time.Second *
maxTimestamp := dag.Now().Add(time.Second *
time.Duration(int64(dag.TimestampDeviationTolerance)*dag.targetTimePerBlock))
if header.Timestamp.After(maxTimestamp) {
return header.Timestamp.Sub(maxTimestamp), nil
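A worked instance of this rule (the numbers are illustrative, not from the diff):
// TimestampDeviationTolerance = 132, targetTimePerBlock = 1 second
// maxTimestamp = dag.Now() + 132s
// a header stamped dag.Now() + 142s is delayed by 10s rather than rejected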
@@ -545,6 +545,20 @@ func (dag *BlockDAG) checkBlockSanity(block *util.Block, flags BehaviorFlags) (t
existingTxIDs[*id] = struct{}{}
}
// Check for double spends among transactions within the same block.
usedOutpoints := make(map[wire.Outpoint]*daghash.TxID)
for _, tx := range transactions {
for _, txIn := range tx.MsgTx().TxIn {
if spendingTxID, exists := usedOutpoints[txIn.PreviousOutpoint]; exists {
str := fmt.Sprintf("transaction %s spends "+
"outpoint %s that was already spent by "+
"transaction %s in this block", tx.ID(), txIn.PreviousOutpoint, spendingTxID)
return 0, ruleError(ErrDoubleSpendInSameBlock, str)
}
usedOutpoints[txIn.PreviousOutpoint] = tx.ID()
}
}
return delay, nil
}
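A sketch of the check above in action (txA and txB are hypothetical transactions sharing an input P):
// iteration over txA: usedOutpoints[P] = txA.ID()
// iteration over txB: P already in usedOutpoints ->
//   ruleError(ErrDoubleSpendInSameBlock, "... txB spends outpoint P already spent by txA ...")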
@@ -838,6 +852,11 @@ func (dag *BlockDAG) checkConnectToPastUTXO(block *blockNode, pastUTXO UTXOSet,
return nil, err
}
err = checkDoubleSpendsWithBlockPast(pastUTXO, transactions)
if err != nil {
return nil, err
}
if err := validateBlockMass(pastUTXO, transactions); err != nil {
return nil, err
}
@@ -913,7 +932,7 @@ func (dag *BlockDAG) checkConnectToPastUTXO(block *blockNode, pastUTXO UTXOSet,
// Now that the inexpensive checks are done and have passed, verify the
// transactions are actually allowed to spend the coins by running the
// expensive ECDSA signature check scripts. Doing this last helps
// expensive SCHNORR signature check scripts. Doing this last helps
// prevent CPU exhaustion attacks.
err := checkBlockScripts(block, pastUTXO, transactions, scriptFlags, dag.sigCache)
if err != nil {


@@ -5,12 +5,13 @@
package blockdag
import (
"github.com/pkg/errors"
"math"
"path/filepath"
"testing"
"time"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
@@ -69,7 +70,7 @@ func TestSequenceLocksActive(t *testing.T) {
// ensure it fails.
func TestCheckConnectBlockTemplate(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("checkconnectblocktemplate", Config{
dag, teardownFunc, err := DAGSetup("checkconnectblocktemplate", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
@@ -161,7 +162,7 @@ func TestCheckConnectBlockTemplate(t *testing.T) {
// as expected.
func TestCheckBlockSanity(t *testing.T) {
// Create a new database and dag instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestCheckBlockSanity", Config{
dag, teardownFunc, err := DAGSetup("TestCheckBlockSanity", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
@@ -169,6 +170,7 @@ func TestCheckBlockSanity(t *testing.T) {
return
}
defer teardownFunc()
dag.timeSource = newFakeTimeSource(time.Now())
block := util.NewBlock(&Block100000)
if len(block.Transactions()) < 3 {
@@ -191,7 +193,8 @@ func TestCheckBlockSanity(t *testing.T) {
if !errors.As(err, &ruleErr) {
t.Errorf("CheckBlockSanity: wrong error returned, expect RuleError, got %T", err)
} else if ruleErr.ErrorCode != ErrTransactionsNotSorted {
t.Errorf("CheckBlockSanity: wrong error returned, expect ErrTransactionsNotSorted, got %v, err %s", ruleErr.ErrorCode, err)
t.Errorf("CheckBlockSanity: wrong error returned, expect ErrTransactionsNotSorted, got"+
" %v, err %s", ruleErr.ErrorCode, err)
}
if delay != 0 {
t.Errorf("CheckBlockSanity: unexpected return %s delay", delay)
@@ -492,8 +495,8 @@ func TestCheckBlockSanity(t *testing.T) {
blockInTheFuture := Block100000
expectedDelay := 10 * time.Second
now := time.Unix(time.Now().Unix(), 0)
blockInTheFuture.Header.Timestamp = now.Add(time.Duration(dag.TimestampDeviationTolerance)*time.Second + expectedDelay)
deviationTolerance := time.Duration(dag.TimestampDeviationTolerance*uint64(dag.targetTimePerBlock)) * time.Second
blockInTheFuture.Header.Timestamp = dag.Now().Add(deviationTolerance + expectedDelay)
delay, err = dag.checkBlockSanity(util.NewBlock(&blockInTheFuture), BFNoPoWCheck)
if err != nil {
t.Errorf("CheckBlockSanity: %v", err)
@@ -559,7 +562,7 @@ func TestPastMedianTime(t *testing.T) {
func TestValidateParents(t *testing.T) {
// Create a new database and dag instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestCheckBlockSanity", Config{
dag, teardownFunc, err := DAGSetup("TestCheckBlockSanity", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
@@ -568,9 +571,9 @@ func TestValidateParents(t *testing.T) {
}
defer teardownFunc()
a := prepareAndProcessBlock(t, dag, dag.dagParams.GenesisBlock)
b := prepareAndProcessBlock(t, dag, a)
c := prepareAndProcessBlock(t, dag, dag.dagParams.GenesisBlock)
a := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.dagParams.GenesisBlock)
b := prepareAndProcessBlockByParentMsgBlocks(t, dag, a)
c := prepareAndProcessBlockByParentMsgBlocks(t, dag, dag.dagParams.GenesisBlock)
aNode := nodeByMsgBlock(t, dag, a)
bNode := nodeByMsgBlock(t, dag, b)


@@ -35,7 +35,7 @@ func TestVirtualBlock(t *testing.T) {
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimnetParams
params.K = 1
dag, teardownFunc, err := DAGSetup("TestVirtualBlock", Config{
dag, teardownFunc, err := DAGSetup("TestVirtualBlock", true, Config{
DAGParams: &params,
})
if err != nil {
@@ -97,7 +97,7 @@ func TestVirtualBlock(t *testing.T) {
tipsToSet: []*blockNode{},
tipsToAdd: []*blockNode{node0, node1, node2, node3, node4, node5, node6},
expectedTips: blockSetFromSlice(node2, node5, node6),
expectedSelectedParent: node5,
expectedSelectedParent: node6,
},
}
@@ -134,7 +134,7 @@ func TestSelectedPath(t *testing.T) {
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimnetParams
params.K = 1
dag, teardownFunc, err := DAGSetup("TestSelectedPath", Config{
dag, teardownFunc, err := DAGSetup("TestSelectedPath", true, Config{
DAGParams: &params,
})
if err != nil {
@@ -222,7 +222,7 @@ func TestChainUpdates(t *testing.T) {
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimnetParams
params.K = 1
dag, teardownFunc, err := DAGSetup("TestChainUpdates", Config{
dag, teardownFunc, err := DAGSetup("TestChainUpdates", true, Config{
DAGParams: &params,
})
if err != nil {


@@ -5,12 +5,9 @@
package main
import (
"github.com/pkg/errors"
"os"
"path/filepath"
"runtime"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/limits"
"github.com/kaspanet/kaspad/logs"
"github.com/kaspanet/kaspad/util/panics"
@@ -27,39 +24,6 @@ var (
spawn func(func())
)
// loadBlockDB opens the block database and returns a handle to it.
func loadBlockDB() (database.DB, error) {
// The database name is based on the database type.
dbName := blockDBNamePrefix + "_" + cfg.DBType
dbPath := filepath.Join(cfg.DataDir, dbName)
log.Infof("Loading block database from '%s'", dbPath)
db, err := database.Open(cfg.DBType, dbPath, ActiveConfig().NetParams().Net)
if err != nil {
// Return the error if it's not because the database doesn't
// exist.
var dbErr database.Error
if ok := errors.As(err, &dbErr); !ok || dbErr.ErrorCode !=
database.ErrDbDoesNotExist {
return nil, err
}
// Create the db if it does not exist.
err = os.MkdirAll(cfg.DataDir, 0700)
if err != nil {
return nil, err
}
db, err = database.Create(cfg.DBType, dbPath, ActiveConfig().NetParams().Net)
if err != nil {
return nil, err
}
}
log.Info("Block database loaded")
return db, nil
}
// realMain is the real main function for the utility. It is necessary to work
// around the fact that deferred functions do not run when os.Exit() is called.
func realMain() error {
@@ -76,14 +40,6 @@ func realMain() error {
log = backendLogger.Logger("MAIN")
spawn = panics.GoroutineWrapperFunc(log)
// Load the block database.
db, err := loadBlockDB()
if err != nil {
log.Errorf("Failed to load database: %s", err)
return err
}
defer db.Close()
fi, err := os.Open(cfg.InFile)
if err != nil {
log.Errorf("Failed to open file %s: %s", cfg.InFile, err)
@@ -94,7 +50,7 @@ func realMain() error {
// Create a block importer for the database and input file and start it.
// The done channel returned from start will contain an error if
// anything went wrong.
importer, err := newBlockImporter(db, fi)
importer, err := newBlockImporter(fi)
if err != nil {
log.Errorf("Failed create block importer: %s", err)
return err
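
The realMain indirection kept here is the usual Go workaround for os.Exit skipping deferred functions: all cleanup sits behind an error return, and only a thin main decides the exit code. A minimal sketch of the pattern (the input file name is illustrative):

package main

import (
    "fmt"
    "os"
)

func realMain() error {
    f, err := os.Open("input.dat") // illustrative path
    if err != nil {
        return err
    }
    defer f.Close() // still runs on every return path below
    // ... do the real work ...
    return nil
}

func main() {
    if err := realMain(); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1) // safe: realMain's deferred cleanup has already run
    }
}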


@@ -6,20 +6,15 @@ package main
import (
"fmt"
flags "github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
"os"
"path/filepath"
"strings"
flags "github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/database"
_ "github.com/kaspanet/kaspad/database/ffldb"
"github.com/kaspanet/kaspad/util"
)
const (
defaultDBType = "ffldb"
defaultDataFile = "bootstrap.dat"
defaultProgress = 10
)
@@ -27,7 +22,6 @@ const (
var (
kaspadHomeDir = util.AppDataDir("kaspad", false)
defaultDataDir = filepath.Join(kaspadHomeDir, "data")
knownDbTypes = database.SupportedDrivers()
activeConfig *ConfigFlags
)
@@ -41,7 +35,6 @@ func ActiveConfig() *ConfigFlags {
// See loadConfig for details on the configuration load process.
type ConfigFlags struct {
DataDir string `short:"b" long:"datadir" description:"Location of the kaspad data directory"`
DBType string `long:"dbtype" description:"Database backend to use for the Block DAG"`
InFile string `short:"i" long:"infile" description:"File containing the block(s)"`
Progress int `short:"p" long:"progress" description:"Show a progress message each time this number of seconds have passed -- Use 0 to disable progress announcements"`
AcceptanceIndex bool `long:"acceptanceindex" description:"Maintain a full hash-based acceptance index which makes the getChainFromBlock RPC available"`
@@ -58,23 +51,11 @@ func fileExists(name string) bool {
return true
}
// validDbType returns whether or not dbType is a supported database type.
func validDbType(dbType string) bool {
for _, knownType := range knownDbTypes {
if dbType == knownType {
return true
}
}
return false
}
// loadConfig initializes and parses the config using command line options.
func loadConfig() (*ConfigFlags, []string, error) {
// Default config.
activeConfig = &ConfigFlags{
DataDir: defaultDataDir,
DBType: defaultDBType,
InFile: defaultDataFile,
Progress: defaultProgress,
}
@@ -95,16 +76,6 @@ func loadConfig() (*ConfigFlags, []string, error) {
return nil, nil, err
}
// Validate database type.
if !validDbType(activeConfig.DBType) {
str := "%s: The specified database type [%s] is invalid -- " +
"supported types %s"
err := errors.Errorf(str, "loadConfig", activeConfig.DBType, strings.Join(knownDbTypes, ", "))
fmt.Fprintln(os.Stderr, err)
parser.WriteHelp(os.Stderr)
return nil, nil, err
}
// Append the network type to the data directory so it is "namespaced"
// per network. In addition to the block database, there are other
// pieces of data that are saved to disk such as address manager state.


@@ -13,7 +13,6 @@ import (
"time"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
)
@@ -28,7 +27,6 @@ type importResults struct {
// blockImporter houses information about an ongoing import from a block data
// file to the block database.
type blockImporter struct {
db database.DB
dag *blockdag.BlockDAG
r io.ReadSeeker
processQueue chan []byte
@@ -287,7 +285,7 @@ func (bi *blockImporter) Import() chan *importResults {
// newBlockImporter returns a new importer for the provided file reader seeker
// and database.
func newBlockImporter(db database.DB, r io.ReadSeeker) (*blockImporter, error) {
func newBlockImporter(r io.ReadSeeker) (*blockImporter, error) {
// Create the acceptance index if needed.
var indexes []indexers.Indexer
if cfg.AcceptanceIndex {
@@ -302,9 +300,8 @@ func newBlockImporter(db database.DB, r io.ReadSeeker) (*blockImporter, error) {
}
dag, err := blockdag.New(&blockdag.Config{
DB: db,
DAGParams: ActiveConfig().NetParams(),
TimeSource: blockdag.NewMedianTime(),
TimeSource: blockdag.NewTimeSource(),
IndexManager: indexManager,
})
if err != nil {
@@ -312,7 +309,6 @@ func newBlockImporter(db database.DB, r io.ReadSeeker) (*blockImporter, error) {
}
return &blockImporter{
db: db,
r: r,
processQueue: make(chan []byte, 2),
doneChan: make(chan bool),


@@ -2,11 +2,13 @@ package main
import (
"fmt"
"github.com/kaspanet/kaspad/config"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
@@ -28,15 +30,17 @@ var (
)
type configFlags struct {
ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"`
RPCUser string `short:"u" long:"rpcuser" description:"RPC username"`
RPCPassword string `short:"P" long:"rpcpass" default-mask:"-" description:"RPC password"`
RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
RPCCert string `short:"c" long:"rpccert" description:"RPC server certificate chain for validation"`
DisableTLS bool `long:"notls" description:"Disable TLS"`
Verbose bool `long:"verbose" short:"v" description:"Enable logging of RPC requests"`
NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine. If omitted, will mine until the process is interrupted."`
BlockDelay uint64 `long:"block-delay" description:"Delay for block submission (in milliseconds). This is used only for testing purposes."`
ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"`
RPCUser string `short:"u" long:"rpcuser" description:"RPC username"`
RPCPassword string `short:"P" long:"rpcpass" default-mask:"-" description:"RPC password"`
RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
RPCCert string `short:"c" long:"rpccert" description:"RPC server certificate chain for validation"`
DisableTLS bool `long:"notls" description:"Disable TLS"`
Verbose bool `long:"verbose" short:"v" description:"Enable logging of RPC requests"`
NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine. If omitted, will mine until the process is interrupted."`
BlockDelay uint64 `long:"block-delay" description:"Delay for block submission (in milliseconds). This is used only for testing purposes."`
MineWhenNotSynced bool `long:"mine-when-not-synced" description:"Mine even if the node is not synced with the rest of the network."`
Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
config.NetworkFlags
}
@@ -72,12 +76,19 @@ func parseConfig() (*configFlags, error) {
}
if cfg.RPCCert == "" && !cfg.DisableTLS {
return nil, errors.New("--notls has to be disabled if --cert is used")
return nil, errors.New("either --notls or --rpccert must be specified")
}
if cfg.RPCCert != "" && cfg.DisableTLS {
return nil, errors.New("--rpccert should be omitted if --notls is used")
}
if cfg.Profile != "" {
profilePort, err := strconv.Atoi(cfg.Profile)
if err != nil || profilePort < 1024 || profilePort > 65535 {
return nil, errors.New("The profile port must be between 1024 and 65535")
}
}
initLog(defaultLogFile, defaultErrLogFile)
return cfg, nil


@@ -1,5 +1,5 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.13-alpine AS build
FROM golang:1.14-alpine AS build
RUN mkdir -p /go/src/github.com/kaspanet/kaspad
@@ -20,7 +20,7 @@ WORKDIR /go/src/github.com/kaspanet/kaspad/cmd/kaspaminer
RUN GOFMT_RESULT=`go fmt ./...`; echo $GOFMT_RESULT; test -z "$GOFMT_RESULT"
RUN go vet ./...
RUN golint -set_exit_status ./...
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o kaspaminer .
RUN GOOS=linux go build -a -installsuffix cgo -o kaspaminer .
# --- multistage docker build: stage #2: runtime image
FROM alpine


@@ -2,13 +2,17 @@ package main
import (
"fmt"
"github.com/kaspanet/kaspad/version"
"os"
"github.com/kaspanet/kaspad/version"
"github.com/pkg/errors"
_ "net/http/pprof"
"github.com/kaspanet/kaspad/signal"
"github.com/kaspanet/kaspad/util/panics"
"github.com/kaspanet/kaspad/util/profiling"
)
func main() {
@@ -28,6 +32,11 @@ func main() {
enableRPCLogging()
}
// Enable http profiling server if requested.
if cfg.Profile != "" {
profiling.Start(cfg.Profile, log)
}
client, err := connectToServer(cfg)
if err != nil {
panic(errors.Wrap(err, "Error connecting to the RPC server"))
@@ -36,7 +45,7 @@ func main() {
doneChan := make(chan struct{})
spawn(func() {
err = mineLoop(client, cfg.NumberOfBlocks, cfg.BlockDelay)
err = mineLoop(client, cfg.NumberOfBlocks, cfg.BlockDelay, cfg.MineWhenNotSynced)
if err != nil {
panic(errors.Errorf("Error in mine loop: %s", err))
}


@@ -25,7 +25,7 @@ var hashesTried uint64
const logHashRateInterval = 10 * time.Second
func mineLoop(client *minerClient, numberOfBlocks uint64, blockDelay uint64) error {
func mineLoop(client *minerClient, numberOfBlocks uint64, blockDelay uint64, mineWhenNotSynced bool) error {
errChan := make(chan error)
templateStopChan := make(chan struct{})
@@ -35,7 +35,7 @@ func mineLoop(client *minerClient, numberOfBlocks uint64, blockDelay uint64) err
wg := sync.WaitGroup{}
for i := uint64(0); numberOfBlocks == 0 || i < numberOfBlocks; i++ {
foundBlock := make(chan *util.Block)
mineNextBlock(client, foundBlock, templateStopChan, errChan)
mineNextBlock(client, foundBlock, mineWhenNotSynced, templateStopChan, errChan)
block := <-foundBlock
templateStopChan <- struct{}{}
wg.Add(1)
@@ -80,13 +80,15 @@ func logHashRate() {
})
}
func mineNextBlock(client *minerClient, foundBlock chan *util.Block, templateStopChan chan struct{}, errChan chan error) {
func mineNextBlock(client *minerClient, foundBlock chan *util.Block, mineWhenNotSynced bool,
templateStopChan chan struct{}, errChan chan error) {
newTemplateChan := make(chan *rpcmodel.GetBlockTemplateResult)
spawn(func() {
templatesLoop(client, newTemplateChan, errChan, templateStopChan)
})
spawn(func() {
solveLoop(newTemplateChan, foundBlock, errChan)
solveLoop(newTemplateChan, foundBlock, mineWhenNotSynced, errChan)
})
}
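
mineNextBlock wires templatesLoop and solveLoop together through channels; the key idea, visible in solveLoop below, is that every new template closes the stop channel of the previous solving goroutine so stale work ends promptly. A generic, self-contained sketch of that cancellation pattern (names are illustrative, not kaspaminer's actual ones):

package main

import (
    "fmt"
    "sync"
)

func main() {
    var wg sync.WaitGroup
    var stopOldTemplateSolving chan struct{}
    for template := 1; template <= 3; template++ {
        if stopOldTemplateSolving != nil {
            close(stopOldTemplateSolving) // abandon the previous template
        }
        stopOldTemplateSolving = make(chan struct{})
        wg.Add(1)
        go func(template int, stop chan struct{}) {
            defer wg.Done()
            <-stop // a real solver would race this against finding a block
            fmt.Println("stopped solving template", template)
        }(template, stopOldTemplateSolving)
    }
    close(stopOldTemplateSolving) // release the last solver as well
    wg.Wait()
}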
@@ -207,12 +209,23 @@ func getBlockTemplate(client *minerClient, longPollID string) (*rpcmodel.GetBloc
return client.GetBlockTemplate([]string{"coinbasetxn"}, longPollID)
}
func solveLoop(newTemplateChan chan *rpcmodel.GetBlockTemplateResult, foundBlock chan *util.Block, errChan chan error) {
func solveLoop(newTemplateChan chan *rpcmodel.GetBlockTemplateResult, foundBlock chan *util.Block,
mineWhenNotSynced bool, errChan chan error) {
var stopOldTemplateSolving chan struct{}
for template := range newTemplateChan {
if stopOldTemplateSolving != nil {
close(stopOldTemplateSolving)
}
if !template.IsSynced {
if !mineWhenNotSynced {
errChan <- errors.Errorf("got template with isSynced=false")
return
}
log.Warnf("Got template with isSynced=false")
}
stopOldTemplateSolving = make(chan struct{})
block, err := parseBlock(template)
if err != nil {


@@ -4,7 +4,7 @@ import (
"bytes"
"encoding/hex"
"fmt"
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
@@ -28,7 +28,11 @@ func main() {
printErrorAndExit(err, "Failed to decode transaction")
}
scriptPubKey, err := createScriptPubKey(privateKey.PubKey())
pubkey, err := privateKey.SchnorrPublicKey()
if err != nil {
printErrorAndExit(err, "Failed to generate a public key")
}
scriptPubKey, err := createScriptPubKey(pubkey)
if err != nil {
printErrorAndExit(err, "Failed to create scriptPubKey")
}
@@ -46,10 +50,12 @@ func main() {
fmt.Printf("Signed Transaction (hex): %s\n\n", serializedTransaction)
}
func parsePrivateKey(privateKeyHex string) (*ecc.PrivateKey, error) {
func parsePrivateKey(privateKeyHex string) (*secp256k1.PrivateKey, error) {
privateKeyBytes, err := hex.DecodeString(privateKeyHex)
privateKey, _ := ecc.PrivKeyFromBytes(ecc.S256(), privateKeyBytes)
return privateKey, err
if err != nil {
return nil, errors.Errorf("'%s' isn't a valid hex. err: '%s' ", privateKeyHex, err)
}
return secp256k1.DeserializePrivateKeyFromSlice(privateKeyBytes)
}
func parseTransaction(transactionHex string) (*wire.MsgTx, error) {
@@ -62,8 +68,12 @@ func parseTransaction(transactionHex string) (*wire.MsgTx, error) {
return &transaction, err
}
func createScriptPubKey(publicKey *ecc.PublicKey) ([]byte, error) {
p2pkhAddress, err := util.NewAddressPubKeyHashFromPublicKey(publicKey.SerializeCompressed(), ActiveConfig().NetParams().Prefix)
func createScriptPubKey(publicKey *secp256k1.SchnorrPublicKey) ([]byte, error) {
serializedKey, err := publicKey.SerializeCompressed()
if err != nil {
return nil, err
}
p2pkhAddress, err := util.NewAddressPubKeyHashFromPublicKey(serializedKey, ActiveConfig().NetParams().Prefix)
if err != nil {
return nil, err
}
@@ -71,7 +81,7 @@ func createScriptPubKey(publicKey *ecc.PublicKey) ([]byte, error) {
return scriptPubKey, err
}
func signTransaction(transaction *wire.MsgTx, privateKey *ecc.PrivateKey, scriptPubKey []byte) error {
func signTransaction(transaction *wire.MsgTx, privateKey *secp256k1.PrivateKey, scriptPubKey []byte) error {
for i, transactionInput := range transaction.TxIn {
signatureScript, err := txscript.SignatureScript(transaction, i, scriptPubKey, txscript.SigHashAll, privateKey, true)
if err != nil {
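
Taken together, the new signtx flow is: deserialize the private key, derive its Schnorr public key, and serialize that key for address construction. A runnable sketch using only the go-secp256k1 calls shown above (the key bytes are a dummy value, not a real key):

package main

import (
    "encoding/hex"
    "fmt"
    "strings"

    "github.com/kaspanet/go-secp256k1"
)

func main() {
    // Dummy 32-byte scalar for illustration only.
    privateKeyBytes, err := hex.DecodeString(strings.Repeat("11", 32))
    if err != nil {
        panic(err)
    }
    privateKey, err := secp256k1.DeserializePrivateKeyFromSlice(privateKeyBytes)
    if err != nil {
        panic(err)
    }
    pubkey, err := privateKey.SchnorrPublicKey()
    if err != nil {
        panic(err)
    }
    serializedKey, err := pubkey.SerializeCompressed()
    if err != nil {
        panic(err)
    }
    fmt.Printf("compressed Schnorr public key: %x\n", serializedKey)
}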


@@ -22,7 +22,6 @@ import (
"github.com/btcsuite/go-socks/socks"
"github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/logger"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/network"
@@ -46,7 +45,6 @@ const (
defaultMaxRPCClients = 10
defaultMaxRPCWebsockets = 25
defaultMaxRPCConcurrentReqs = 20
defaultDbType = "ffldb"
defaultBlockMaxMass = 10000000
blockMaxMassMin = 1000
blockMaxMassMax = 10000000
@@ -65,7 +63,6 @@ var (
defaultConfigFile = filepath.Join(DefaultHomeDir, defaultConfigFilename)
defaultDataDir = filepath.Join(DefaultHomeDir, defaultDataDirname)
knownDbTypes = database.SupportedDrivers()
defaultRPCKeyFile = filepath.Join(DefaultHomeDir, "rpc.key")
defaultRPCCertFile = filepath.Join(DefaultHomeDir, "rpc.cert")
defaultLogDir = filepath.Join(DefaultHomeDir, defaultLogDirname)
@@ -168,17 +165,6 @@ func cleanAndExpandPath(path string) string {
return filepath.Clean(os.ExpandEnv(path))
}
// validDbType returns whether or not dbType is a supported database type.
func validDbType(dbType string) bool {
for _, knownType := range knownDbTypes {
if dbType == knownType {
return true
}
}
return false
}
// newConfigParser returns a new command line flags parser.
func newConfigParser(cfgFlags *Flags, so *serviceOptions, options flags.Options) *flags.Parser {
parser := flags.NewParser(cfgFlags, options)
@@ -235,7 +221,6 @@ func loadConfig() (*Config, []string, error) {
RPCMaxConcurrentReqs: defaultMaxRPCConcurrentReqs,
DataDir: defaultDataDir,
LogDir: defaultLogDir,
DbType: defaultDbType,
RPCKey: defaultRPCKeyFile,
RPCCert: defaultRPCCertFile,
BlockMaxMass: defaultBlockMaxMass,
@@ -424,16 +409,6 @@ func loadConfig() (*Config, []string, error) {
return nil, nil, err
}
// Validate database type.
if !validDbType(activeConfig.DbType) {
str := "%s: The specified database type [%s] is invalid -- " +
"supported types %s"
err := errors.Errorf(str, funcName, activeConfig.DbType, knownDbTypes)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// Validate profile port number
if activeConfig.Profile != "" {
profilePort, err := strconv.Atoi(activeConfig.Profile)


@@ -7,6 +7,9 @@ package connmgr
import (
nativeerrors "errors"
"fmt"
"github.com/kaspanet/kaspad/addrmgr"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/wire"
"net"
"sync"
"sync/atomic"
@@ -30,10 +33,6 @@ var (
// defaultRetryDuration is the default duration of time for retrying
// persistent connections.
defaultRetryDuration = time.Second * 5
// defaultTargetOutbound is the default number of outbound connections to
// maintain.
defaultTargetOutbound = uint32(8)
)
var (
@@ -54,6 +53,9 @@ var (
// ErrPeerNotFound is an error that is thrown if the peer was not found.
ErrPeerNotFound = errors.New("peer not found")
// ErrAddressManagerNil is used to indicate that Address Manager cannot be nil in the configuration.
ErrAddressManagerNil = errors.New("Config: Address manager cannot be nil")
)
// ConnState represents the state of the requested connection.
@@ -77,7 +79,7 @@ type ConnReq struct {
// The following variables must only be used atomically.
id uint64
Addr net.Addr
Addr *net.TCPAddr
Permanent bool
conn net.Conn
@@ -151,13 +153,15 @@ type Config struct {
// connection is established.
OnConnection func(*ConnReq, net.Conn)
// OnConnectionFailed is a callback that is fired when a new outbound
// connection has failed to be established.
OnConnectionFailed func(*ConnReq)
// OnDisconnection is a callback that is fired when an outbound
// connection is disconnected.
OnDisconnection func(*ConnReq)
// GetNewAddress is a way to get an address to make a network connection
// to. If nil, no new connections will be made automatically.
GetNewAddress func() (net.Addr, error)
AddrManager *addrmgr.AddrManager
// Dial connects to the address on the named network. It cannot be nil.
Dial func(net.Addr) (net.Conn, error)
@@ -197,7 +201,9 @@ type ConnManager struct {
start int32
stop int32
newConnReqMtx sync.Mutex
addressMtx sync.Mutex
usedOutboundGroups map[string]int64
usedAddresses map[string]struct{}
cfg Config
wg sync.WaitGroup
@@ -233,9 +239,12 @@ func (cm *ConnManager) handleFailedConn(c *ConnReq, err error) {
log.Debugf("Retrying further connections to %s every %s", c, d)
}
spawnAfter(d, func() {
cm.Connect(c)
cm.connect(c)
})
} else if cm.cfg.GetNewAddress != nil {
} else {
if c.Addr != nil {
cm.releaseAddress(c.Addr)
}
cm.failedAttempts++
if cm.failedAttempts >= maxFailedAttempts {
if shouldWriteLog {
@@ -250,6 +259,43 @@ func (cm *ConnManager) handleFailedConn(c *ConnReq, err error) {
}
}
func (cm *ConnManager) releaseAddress(addr *net.TCPAddr) {
cm.addressMtx.Lock()
defer cm.addressMtx.Unlock()
groupKey := usedOutboundGroupsKey(addr)
cm.usedOutboundGroups[groupKey]--
if cm.usedOutboundGroups[groupKey] < 0 {
panic(fmt.Errorf("cm.usedOutboundGroups[%s] has a negative value of %d. This should never happen", groupKey, cm.usedOutboundGroups[groupKey]))
}
delete(cm.usedAddresses, usedAddressesKey(addr))
}
func (cm *ConnManager) markAddressAsUsed(addr *net.TCPAddr) {
cm.usedOutboundGroups[usedOutboundGroupsKey(addr)]++
cm.usedAddresses[usedAddressesKey(addr)] = struct{}{}
}
func (cm *ConnManager) isOutboundGroupUsed(addr *net.TCPAddr) bool {
_, ok := cm.usedOutboundGroups[usedOutboundGroupsKey(addr)]
return ok
}
func (cm *ConnManager) isAddressUsed(addr *net.TCPAddr) bool {
_, ok := cm.usedAddresses[usedAddressesKey(addr)]
return ok
}
func usedOutboundGroupsKey(addr *net.TCPAddr) string {
// A fake service flag is used since it doesn't affect the group key.
na := wire.NewNetAddress(addr, wire.SFNodeNetwork)
return addrmgr.GroupKey(na)
}
func usedAddressesKey(addr *net.TCPAddr) string {
return addr.String()
}
// throttledError defines an error type whose logs get throttled. This is to
// prevent flooding the logs with identical errors.
type throttledError error
@@ -388,21 +434,16 @@ out:
continue
}
// Otherwise, we will attempt a reconnection if
// we do not have enough peers, or if this is a
// persistent peer. The connection request is
// re-added to the pending map, so that
// subsequent processing of connections and
// failures do not ignore the request.
if uint32(len(conns)) < cm.cfg.TargetOutbound ||
connReq.Permanent {
connReq.updateState(ConnPending)
log.Debugf("Reconnecting to %s",
connReq)
pending[msg.id] = connReq
cm.handleFailedConn(connReq, nil)
}
// Otherwise, we will attempt a reconnection.
// The connection request is re-added to the
// pending map, so that subsequent processing
// of connections and failures do not ignore
// the request.
connReq.updateState(ConnPending)
log.Debugf("Reconnecting to %s",
connReq)
pending[msg.id] = connReq
cm.handleFailedConn(connReq, nil)
case handleFailed:
connReq := msg.c
@@ -419,6 +460,10 @@ out:
connReq, msg.err)
}
cm.handleFailedConn(connReq, msg.err)
if cm.cfg.OnConnectionFailed != nil {
cm.cfg.OnConnectionFailed(connReq)
}
}
case <-cm.quit:
@@ -440,14 +485,9 @@ func (cm *ConnManager) NotifyConnectionRequestComplete() {
// NewConnReq creates a new connection request and connects to the
// corresponding address.
func (cm *ConnManager) NewConnReq() {
cm.newConnReqMtx.Lock()
defer cm.newConnReqMtx.Unlock()
if atomic.LoadInt32(&cm.stop) != 0 {
return
}
if cm.cfg.GetNewAddress == nil {
return
}
c := &ConnReq{}
atomic.StoreUint64(&c.id, atomic.AddUint64(&cm.connReqCount, 1))
@@ -470,8 +510,7 @@ func (cm *ConnManager) NewConnReq() {
case <-cm.quit:
return
}
addr, err := cm.cfg.GetNewAddress()
err := cm.associateAddressToConnReq(c)
if err != nil {
select {
case cm.requests <- handleFailed{c, err}:
@@ -480,17 +519,52 @@ func (cm *ConnManager) NewConnReq() {
return
}
c.Addr = addr
cm.connect(c)
}
cm.Connect(c)
func (cm *ConnManager) associateAddressToConnReq(c *ConnReq) error {
cm.addressMtx.Lock()
defer cm.addressMtx.Unlock()
addr, err := cm.getNewAddress()
if err != nil {
return err
}
cm.markAddressAsUsed(addr)
c.Addr = addr
return nil
}
// Connect assigns an id and dials a connection to the address of the
// connection request.
func (cm *ConnManager) Connect(c *ConnReq) {
func (cm *ConnManager) Connect(c *ConnReq) error {
err := func() error {
cm.addressMtx.Lock()
defer cm.addressMtx.Unlock()
if cm.isAddressUsed(c.Addr) {
return fmt.Errorf("address %s is already in use", c.Addr)
}
cm.markAddressAsUsed(c.Addr)
return nil
}()
if err != nil {
return err
}
cm.connect(c)
return nil
}
// connect assigns an id and dials a connection to the address of the
// connection request. This function assumes that the connection address
// has already been checked and marked as used.
func (cm *ConnManager) connect(c *ConnReq) {
if atomic.LoadInt32(&cm.stop) != 0 {
return
}
if atomic.LoadUint64(&c.id) == 0 {
atomic.StoreUint64(&c.id, atomic.AddUint64(&cm.connReqCount, 1))
@@ -637,23 +711,69 @@ func (cm *ConnManager) Stop() {
log.Trace("Connection manager stopped")
}
func (cm *ConnManager) getNewAddress() (*net.TCPAddr, error) {
for tries := 0; tries < 100; tries++ {
addr := cm.cfg.AddrManager.GetAddress()
if addr == nil {
break
}
// Check if there's already a connection to the same address.
netAddr := addr.NetAddress().TCPAddress()
if cm.isAddressUsed(netAddr) {
continue
}
// Address will not be invalid, local or unroutable
// because addrmanager rejects those on addition.
// Just check that we don't already have an address
// in the same group so that we are not connecting
// to the same network segment at the expense of
// others.
//
// Networks that accept unroutable connections are exempt
// from this rule, since they're meant to run within a
// private subnet, like 10.0.0.0/16.
if !config.ActiveConfig().NetParams().AcceptUnroutable && cm.isOutboundGroupUsed(netAddr) {
continue
}
// only allow recent nodes (10mins) after we failed 30
// times
if tries < 30 && time.Since(addr.LastAttempt()) < 10*time.Minute {
continue
}
// allow nondefault ports after 50 failed tries.
if tries < 50 && fmt.Sprintf("%d", netAddr.Port) !=
config.ActiveConfig().NetParams().DefaultPort {
continue
}
return netAddr, nil
}
return nil, ErrNoAddress
}
// New returns a new connection manager.
// Use Start to start connecting to the network.
func New(cfg *Config) (*ConnManager, error) {
if cfg.Dial == nil {
return nil, ErrDialNil
return nil, errors.WithStack(ErrDialNil)
}
if cfg.AddrManager == nil {
return nil, errors.WithStack(ErrAddressManagerNil)
}
// Default to sane values
if cfg.RetryDuration <= 0 {
cfg.RetryDuration = defaultRetryDuration
}
if cfg.TargetOutbound == 0 {
cfg.TargetOutbound = defaultTargetOutbound
}
cm := ConnManager{
cfg: *cfg, // Copy so caller can't mutate
requests: make(chan interface{}),
quit: make(chan struct{}),
cfg: *cfg, // Copy so caller can't mutate
requests: make(chan interface{}),
quit: make(chan struct{}),
usedAddresses: make(map[string]struct{}),
usedOutboundGroups: make(map[string]int64),
}
return &cm, nil
}
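
The usedAddresses and usedOutboundGroups maps gate outbound connections on both the exact address and its CIDR group, via the GroupKey computation in usedOutboundGroupsKey. A rough sketch of why two addresses like 173.190.115.66 and 173.190.115.67 (used in the tests below) collide on the group check:

package main

import (
    "fmt"
    "net"

    "github.com/kaspanet/kaspad/addrmgr"
    "github.com/kaspanet/kaspad/wire"
)

// groupKey mirrors usedOutboundGroupsKey: the service flag is a fake
// value because it does not affect the group computation.
func groupKey(addr *net.TCPAddr) string {
    na := wire.NewNetAddress(addr, wire.SFNodeNetwork)
    return addrmgr.GroupKey(na)
}

func main() {
    a := &net.TCPAddr{IP: net.ParseIP("173.190.115.66"), Port: 16511}
    b := &net.TCPAddr{IP: net.ParseIP("173.190.115.67"), Port: 16511}
    // Same group, so once one address is in use the other is skipped by
    // getNewAddress (unless the network accepts unroutable addresses).
    fmt.Println(groupKey(a), groupKey(b), groupKey(a) == groupKey(b))
}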


@@ -5,9 +5,15 @@
package connmgr
import (
"fmt"
"github.com/kaspanet/kaspad/addrmgr"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/pkg/errors"
"io"
"io/ioutil"
"net"
"os"
"sync/atomic"
"testing"
"time"
@@ -70,13 +76,28 @@ func mockDialer(addr net.Addr) (net.Conn, error) {
// TestNewConfig tests that new ConnManager config is validated as expected.
func TestNewConfig(t *testing.T) {
restoreConfig := overrideActiveConfig()
defer restoreConfig()
_, err := New(&Config{})
if err == nil {
t.Fatalf("New expected error: 'Dial can't be nil', got nil")
if !errors.Is(err, ErrDialNil) {
t.Fatalf("New expected error: %s, got %s", ErrDialNil, err)
}
_, err = New(&Config{
Dial: mockDialer,
})
if !errors.Is(err, ErrAddressManagerNil) {
t.Fatalf("New expected error: %s, got %s", ErrAddressManagerNil, err)
}
amgr, teardown := addressManagerForTest(t, "TestNewConfig", 10)
defer teardown()
_, err = New(&Config{
Dial: mockDialer,
AddrManager: amgr,
})
if err != nil {
t.Fatalf("New unexpected error: %v", err)
}
@@ -85,17 +106,19 @@ func TestNewConfig(t *testing.T) {
// TestStartStop tests that the connection manager starts and stops as
// expected.
func TestStartStop(t *testing.T) {
restoreConfig := overrideActiveConfig()
defer restoreConfig()
connected := make(chan *ConnReq)
disconnected := make(chan *ConnReq)
amgr, teardown := addressManagerForTest(t, "TestStartStop", 10)
defer teardown()
cmgr, err := New(&Config{
TargetOutbound: 1,
GetNewAddress: func() (net.Addr, error) {
return &net.TCPAddr{
IP: net.ParseIP("127.0.0.1"),
Port: 18555,
}, nil
},
Dial: mockDialer,
AddrManager: amgr,
Dial: mockDialer,
OnConnection: func(c *ConnReq, conn net.Conn) {
connected <- c
},
@@ -104,7 +127,7 @@ func TestStartStop(t *testing.T) {
},
})
if err != nil {
t.Fatalf("New error: %v", err)
t.Fatalf("unexpected error from New: %s", err)
}
cmgr.Start()
gotConnReq := <-connected
@@ -119,7 +142,10 @@ func TestStartStop(t *testing.T) {
},
Permanent: true,
}
cmgr.Connect(cr)
err = cmgr.Connect(cr)
if err != nil {
t.Fatalf("Connect error: %s", err)
}
if cr.ID() != 0 {
t.Fatalf("start/stop: got id: %v, want: 0", cr.ID())
}
@@ -133,21 +159,78 @@ func TestStartStop(t *testing.T) {
}
}
func overrideActiveConfig() func() {
originalActiveCfg := config.ActiveConfig()
config.SetActiveConfig(&config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimnetParams},
},
})
return func() {
// Give some extra time to all open NewConnReq goroutines
// to finish before restoring the active config to prevent
// potential panics.
time.Sleep(10 * time.Millisecond)
config.SetActiveConfig(originalActiveCfg)
}
}
func addressManagerForTest(t *testing.T, testName string, numAddresses uint8) (*addrmgr.AddrManager, func()) {
amgr, teardown := createEmptyAddressManagerForTest(t, testName)
for i := uint8(0); i < numAddresses; i++ {
ip := fmt.Sprintf("173.%d.115.66:16511", i)
err := amgr.AddAddressByIP(ip, nil)
if err != nil {
t.Fatalf("AddAddressByIP unexpectedly failed to add IP %s: %s", ip, err)
}
}
return amgr, teardown
}
func createEmptyAddressManagerForTest(t *testing.T, testName string) (*addrmgr.AddrManager, func()) {
path, err := ioutil.TempDir("", fmt.Sprintf("%s-addressmanager", testName))
if err != nil {
t.Fatalf("createEmptyAddressManagerForTest: TempDir unexpectedly "+
"failed: %s", err)
}
return addrmgr.New(path, nil, nil), func() {
// Wait for the connection manager to finish
time.Sleep(10 * time.Millisecond)
err := os.RemoveAll(path)
if err != nil {
t.Fatalf("couldn't remove path %s", path)
}
}
}
// TestConnectMode tests that the connection manager works in the connect mode.
//
// In connect mode, automatic connections are disabled, so we test that
// requests using Connect are handled and that no other connections are made.
func TestConnectMode(t *testing.T) {
restoreConfig := overrideActiveConfig()
defer restoreConfig()
connected := make(chan *ConnReq)
amgr, teardown := addressManagerForTest(t, "TestConnectMode", 10)
defer teardown()
cmgr, err := New(&Config{
TargetOutbound: 2,
TargetOutbound: 0,
Dial: mockDialer,
OnConnection: func(c *ConnReq, conn net.Conn) {
connected <- c
},
AddrManager: amgr,
})
if err != nil {
t.Fatalf("New error: %v", err)
t.Fatalf("unexpected error from New: %s", err)
}
cr := &ConnReq{
Addr: &net.TCPAddr{
@@ -176,6 +259,7 @@ func TestConnectMode(t *testing.T) {
break
}
cmgr.Stop()
cmgr.Wait()
}
// TestTargetOutbound tests the target number of outbound connections.
@@ -183,23 +267,26 @@ func TestConnectMode(t *testing.T) {
// We wait until all connections are established, then test that they are the
// only connections made.
func TestTargetOutbound(t *testing.T) {
targetOutbound := uint32(10)
restoreConfig := overrideActiveConfig()
defer restoreConfig()
const numAddressesInAddressManager = 10
targetOutbound := uint32(numAddressesInAddressManager - 2)
connected := make(chan *ConnReq)
amgr, teardown := addressManagerForTest(t, "TestTargetOutbound", 10)
defer teardown()
cmgr, err := New(&Config{
TargetOutbound: targetOutbound,
Dial: mockDialer,
GetNewAddress: func() (net.Addr, error) {
return &net.TCPAddr{
IP: net.ParseIP("127.0.0.1"),
Port: 18555,
}, nil
},
AddrManager: amgr,
OnConnection: func(c *ConnReq, conn net.Conn) {
connected <- c
},
})
if err != nil {
t.Fatalf("New error: %v", err)
t.Fatalf("unexpected error from New: %s", err)
}
cmgr.Start()
for i := uint32(0); i < targetOutbound; i++ {
@@ -213,6 +300,146 @@ func TestTargetOutbound(t *testing.T) {
break
}
cmgr.Stop()
cmgr.Wait()
}
// TestDuplicateOutboundConnections tests that connection requests cannot use an already used address.
// It does so by creating one connection request for each address in the address manager, so that
// the next connection request will have to fail because no unused address will be available.
func TestDuplicateOutboundConnections(t *testing.T) {
restoreConfig := overrideActiveConfig()
defer restoreConfig()
const numAddressesInAddressManager = 10
targetOutbound := uint32(numAddressesInAddressManager - 1)
connected := make(chan struct{})
failedConnections := make(chan struct{})
amgr, teardown := addressManagerForTest(t, "TestDuplicateOutboundConnections", 10)
defer teardown()
cmgr, err := New(&Config{
TargetOutbound: targetOutbound,
Dial: mockDialer,
AddrManager: amgr,
OnConnection: func(c *ConnReq, conn net.Conn) {
connected <- struct{}{}
},
OnConnectionFailed: func(_ *ConnReq) {
failedConnections <- struct{}{}
},
})
if err != nil {
t.Fatalf("unexpected error from New: %s", err)
}
cmgr.Start()
for i := uint32(0); i < targetOutbound; i++ {
<-connected
}
time.Sleep(time.Millisecond)
// Here we check that a manual connection request beyond the target outbound connection count
// doesn't fail, so we know that when a later connection request does fail, the reason is an
// address-related issue.
cmgr.NewConnReq()
select {
case <-connected:
break
case <-time.After(time.Millisecond):
t.Fatalf("connection request unexpectedly didn't connect")
}
select {
case <-failedConnections:
t.Fatalf("a connection request unexpectedly failed")
case <-time.After(time.Millisecond):
break
}
// After we created numAddressesInAddressManager connection requests, this request should fail
// because there aren't any more available addresses.
cmgr.NewConnReq()
select {
case <-connected:
t.Fatalf("connection request unexpectedly succeeded")
case <-time.After(time.Millisecond):
t.Fatalf("connection request didn't fail as expected")
case <-failedConnections:
break
}
cmgr.Stop()
cmgr.Wait()
}
// TestSameOutboundGroupConnections tests that connection requests cannot use an address with an already used
// address CIDR group.
// It does so by creating an address manager with only two addresses that both belong to the same
// CIDR group, and checking that the second connection request fails.
func TestSameOutboundGroupConnections(t *testing.T) {
restoreConfig := overrideActiveConfig()
defer restoreConfig()
amgr, teardown := createEmptyAddressManagerForTest(t, "TestSameOutboundGroupConnections")
defer teardown()
err := amgr.AddAddressByIP("173.190.115.66:16511", nil)
if err != nil {
t.Fatalf("AddAddressByIP unexpectedly failed: %s", err)
}
err = amgr.AddAddressByIP("173.190.115.67:16511", nil)
if err != nil {
t.Fatalf("AddAddressByIP unexpectedly failed: %s", err)
}
connected := make(chan struct{})
failedConnections := make(chan struct{})
cmgr, err := New(&Config{
TargetOutbound: 0,
Dial: mockDialer,
AddrManager: amgr,
OnConnection: func(c *ConnReq, conn net.Conn) {
connected <- struct{}{}
},
OnConnectionFailed: func(_ *ConnReq) {
failedConnections <- struct{}{}
},
})
if err != nil {
t.Fatalf("unexpected error from New: %s", err)
}
cmgr.Start()
cmgr.NewConnReq()
select {
case <-connected:
break
case <-time.After(time.Millisecond):
t.Fatalf("connection request unexpectedly didn't connect")
}
select {
case <-failedConnections:
t.Fatalf("a connection request unexpectedly failed")
case <-time.After(time.Millisecond):
break
}
cmgr.NewConnReq()
select {
case <-connected:
t.Fatalf("connection request unexpectedly succeeded")
case <-time.After(time.Millisecond):
t.Fatalf("connection request didn't fail as expected")
case <-failedConnections:
break
}
cmgr.Stop()
cmgr.Wait()
}
// TestRetryPermanent tests that permanent connection requests are retried.
@@ -220,11 +447,18 @@ func TestTargetOutbound(t *testing.T) {
// We make a permanent connection request using Connect, disconnect it using
// Disconnect and we wait for it to be connected back.
func TestRetryPermanent(t *testing.T) {
restoreConfig := overrideActiveConfig()
defer restoreConfig()
connected := make(chan *ConnReq)
disconnected := make(chan *ConnReq)
amgr, teardown := addressManagerForTest(t, "TestRetryPermanent", 10)
defer teardown()
cmgr, err := New(&Config{
RetryDuration: time.Millisecond,
TargetOutbound: 1,
TargetOutbound: 0,
Dial: mockDialer,
OnConnection: func(c *ConnReq, conn net.Conn) {
connected <- c
@@ -232,9 +466,10 @@ func TestRetryPermanent(t *testing.T) {
OnDisconnection: func(c *ConnReq) {
disconnected <- c
},
AddrManager: amgr,
})
if err != nil {
t.Fatalf("New error: %v", err)
t.Fatalf("unexpected error from New: %s", err)
}
cr := &ConnReq{
@@ -289,6 +524,9 @@ func TestRetryPermanent(t *testing.T) {
cmgr.Remove(cr.ID())
gotConnReq = <-disconnected
// Wait for status to be updated
time.Sleep(10 * time.Millisecond)
wantID = cr.ID()
gotID = gotConnReq.ID()
if gotID != wantID {
@@ -300,6 +538,7 @@ func TestRetryPermanent(t *testing.T) {
t.Fatalf("retry: %v - want state %v, got state %v", cr.Addr, wantState, gotState)
}
cmgr.Stop()
cmgr.Wait()
}
// TestMaxRetryDuration tests the maximum retry duration.
@@ -307,6 +546,9 @@ func TestRetryPermanent(t *testing.T) {
// We have a timed dialer which initially returns err but after RetryDuration
// hits maxRetryDuration returns a mock conn.
func TestMaxRetryDuration(t *testing.T) {
restoreConfig := overrideActiveConfig()
defer restoreConfig()
networkUp := make(chan struct{})
time.AfterFunc(5*time.Millisecond, func() {
close(networkUp)
@@ -320,6 +562,9 @@ func TestMaxRetryDuration(t *testing.T) {
}
}
amgr, teardown := addressManagerForTest(t, "TestMaxRetryDuration", 10)
defer teardown()
connected := make(chan *ConnReq)
cmgr, err := New(&Config{
RetryDuration: time.Millisecond,
@@ -328,9 +573,10 @@ func TestMaxRetryDuration(t *testing.T) {
OnConnection: func(c *ConnReq, conn net.Conn) {
connected <- c
},
AddrManager: amgr,
})
if err != nil {
t.Fatalf("New error: %v", err)
t.Fatalf("unexpected error from New: %s", err)
}
cr := &ConnReq{
@@ -350,35 +596,40 @@ func TestMaxRetryDuration(t *testing.T) {
case <-time.Tick(100 * time.Millisecond):
t.Fatalf("max retry duration: connection timeout")
}
cmgr.Stop()
cmgr.Wait()
}
// TestNetworkFailure tests that the connection manager handles a network
// failure gracefully.
func TestNetworkFailure(t *testing.T) {
restoreConfig := overrideActiveConfig()
defer restoreConfig()
var dials uint32
errDialer := func(net net.Addr) (net.Conn, error) {
atomic.AddUint32(&dials, 1)
return nil, errors.New("network down")
}
amgr, teardown := addressManagerForTest(t, "TestNetworkFailure", 10)
defer teardown()
cmgr, err := New(&Config{
TargetOutbound: 5,
RetryDuration: 5 * time.Millisecond,
Dial: errDialer,
GetNewAddress: func() (net.Addr, error) {
return &net.TCPAddr{
IP: net.ParseIP("127.0.0.1"),
Port: 18555,
}, nil
},
AddrManager: amgr,
OnConnection: func(c *ConnReq, conn net.Conn) {
t.Fatalf("network failure: got unexpected connection - %v", c.Addr)
},
})
if err != nil {
t.Fatalf("New error: %v", err)
t.Fatalf("unexpected error from New: %s", err)
}
cmgr.Start()
time.AfterFunc(10*time.Millisecond, cmgr.Stop)
time.Sleep(10 * time.Millisecond)
cmgr.Stop()
cmgr.Wait()
wantMaxDials := uint32(75)
if atomic.LoadUint32(&dials) > wantMaxDials {
@@ -394,17 +645,25 @@ func TestNetworkFailure(t *testing.T) {
// err so that the handler assumes that the conn manager is stopped and ignores
// the failure.
func TestStopFailed(t *testing.T) {
restoreConfig := overrideActiveConfig()
defer restoreConfig()
done := make(chan struct{}, 1)
waitDialer := func(addr net.Addr) (net.Conn, error) {
done <- struct{}{}
time.Sleep(time.Millisecond)
return nil, errors.New("network down")
}
amgr, teardown := addressManagerForTest(t, "TestStopFailed", 10)
defer teardown()
cmgr, err := New(&Config{
Dial: waitDialer,
Dial: waitDialer,
AddrManager: amgr,
})
if err != nil {
t.Fatalf("New error: %v", err)
t.Fatalf("unexpected error from New: %s", err)
}
cmgr.Start()
go func() {
@@ -428,6 +687,9 @@ func TestStopFailed(t *testing.T) {
// TestRemovePendingConnection tests that it's possible to cancel a pending
// connection, removing its internal state from the ConnMgr.
func TestRemovePendingConnection(t *testing.T) {
restoreConfig := overrideActiveConfig()
defer restoreConfig()
// Create a ConnMgr instance with an instance of a dialer that'll never
// succeed.
wait := make(chan struct{})
@@ -435,11 +697,16 @@ func TestRemovePendingConnection(t *testing.T) {
<-wait
return nil, errors.Errorf("error")
}
amgr, teardown := addressManagerForTest(t, "TestRemovePendingConnection", 10)
defer teardown()
cmgr, err := New(&Config{
Dial: indefiniteDialer,
Dial: indefiniteDialer,
AddrManager: amgr,
})
if err != nil {
t.Fatalf("New error: %v", err)
t.Fatalf("unexpected error from New: %s", err)
}
cmgr.Start()
@@ -474,12 +741,16 @@ func TestRemovePendingConnection(t *testing.T) {
close(wait)
cmgr.Stop()
cmgr.Wait()
}
// TestCancelIgnoreDelayedConnection tests that a canceled connection request will
// not execute the on connection callback, even if an outstanding retry
// succeeds.
func TestCancelIgnoreDelayedConnection(t *testing.T) {
restoreConfig := overrideActiveConfig()
defer restoreConfig()
retryTimeout := 10 * time.Millisecond
// Setup a dialer that will continue to return an error until the
@@ -497,18 +768,22 @@ func TestCancelIgnoreDelayedConnection(t *testing.T) {
}
connected := make(chan *ConnReq)
amgr, teardown := addressManagerForTest(t, "TestCancelIgnoreDelayedConnection", 10)
defer teardown()
cmgr, err := New(&Config{
Dial: failingDialer,
RetryDuration: retryTimeout,
OnConnection: func(c *ConnReq, conn net.Conn) {
connected <- c
},
AddrManager: amgr,
})
if err != nil {
t.Fatalf("New error: %v", err)
t.Fatalf("unexpected error from New: %s", err)
}
cmgr.Start()
defer cmgr.Stop()
// Establish a connection request to a random IP we've chosen.
cr := &ConnReq{
@@ -552,7 +827,8 @@ func TestCancelIgnoreDelayedConnection(t *testing.T) {
t.Fatalf("on-connect should not be called for canceled req")
case <-time.After(5 * retryTimeout):
}
cmgr.Stop()
cmgr.Wait()
}
// mockListener implements the net.Listener interface and is used to test
@@ -617,21 +893,29 @@ func newMockListener(localAddr string) *mockListener {
// TestListeners ensures providing listeners to the connection manager along
// with an accept callback works properly.
func TestListeners(t *testing.T) {
restoreConfig := overrideActiveConfig()
defer restoreConfig()
// Setup a connection manager with a couple of mock listeners that
// notify a channel when they receive mock connections.
receivedConns := make(chan net.Conn)
listener1 := newMockListener("127.0.0.1:16111")
listener2 := newMockListener("127.0.0.1:9333")
listeners := []net.Listener{listener1, listener2}
amgr, teardown := addressManagerForTest(t, "TestListeners", 10)
defer teardown()
cmgr, err := New(&Config{
Listeners: listeners,
OnAccept: func(conn net.Conn) {
receivedConns <- conn
},
Dial: mockDialer,
Dial: mockDialer,
AddrManager: amgr,
})
if err != nil {
t.Fatalf("New error: %v", err)
t.Fatalf("unexpected error from New: %s", err)
}
cmgr.Start()


@@ -13,7 +13,6 @@ import (
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/hdkeychain"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
@@ -49,7 +48,7 @@ var (
)
const (
ghostdagK = 10
ghostdagK = 15
difficultyAdjustmentWindowSize = 2640
timestampDeviationTolerance = 132
finalityDuration = 24 * time.Hour
@@ -177,13 +176,6 @@ type Params struct {
// Address encoding magics
PrivateKeyID byte // First byte of a WIF private key
// BIP32 hierarchical deterministic extended key magics
HDKeyIDPair hdkeychain.HDKeyIDPair
// BIP44 coin type used in the hierarchical deterministic path for
// address generation.
HDCoinType uint32
}
// NormalizeRPCServerAddress returns addr with the current network default
@@ -238,13 +230,6 @@ var MainnetParams = Params{
// Address encoding magics
PrivateKeyID: 0x80, // starts with 5 (uncompressed) or K (compressed)
// BIP32 hierarchical deterministic extended key magics
HDKeyIDPair: hdkeychain.HDKeyPairMainnet,
// BIP44 coin type used in the hierarchical deterministic path for
// address generation.
HDCoinType: 0,
}
// RegressionNetParams defines the network parameters for the regression test
@@ -295,13 +280,6 @@ var RegressionNetParams = Params{
// Address encoding magics
PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed)
// BIP32 hierarchical deterministic extended key magics
HDKeyIDPair: hdkeychain.HDKeyPairRegressionNet,
// BIP44 coin type used in the hierarchical deterministic path for
// address generation.
HDCoinType: 1,
}
// TestnetParams defines the network parameters for the test Kaspa network.
@@ -350,13 +328,6 @@ var TestnetParams = Params{
// Address encoding magics
PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed)
// BIP32 hierarchical deterministic extended key magics
HDKeyIDPair: hdkeychain.HDKeyPairTestnet,
// BIP44 coin type used in the hierarchical deterministic path for
// address generation.
HDCoinType: 1,
}
// SimnetParams defines the network parameters for the simulation test Kaspa
@@ -409,13 +380,6 @@ var SimnetParams = Params{
PrivateKeyID: 0x64, // starts with 4 (uncompressed) or F (compressed)
// Human-readable part for Bech32 encoded addresses
Prefix: util.Bech32PrefixKaspaSim,
// BIP32 hierarchical deterministic extended key magics
HDKeyIDPair: hdkeychain.HDKeyPairSimnet,
// BIP44 coin type used in the hierarchical deterministic path for
// address generation.
HDCoinType: 115, // ASCII for s
}
// DevnetParams defines the network parameters for the development Kaspa network.
@@ -464,13 +428,6 @@ var DevnetParams = Params{
// Address encoding magics
PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed)
// BIP32 hierarchical deterministic extended key magics
HDKeyIDPair: hdkeychain.HDKeyPairDevnet,
// BIP44 coin type used in the hierarchical deterministic path for
// address generation.
HDCoinType: 1,
}
var (


@@ -1,9 +1,6 @@
package dagconfig_test
import (
"bytes"
"github.com/kaspanet/kaspad/util/hdkeychain"
"reflect"
"testing"
. "github.com/kaspanet/kaspad/dagconfig"
@@ -15,10 +12,6 @@ import (
var mockNetParams = Params{
Name: "mocknet",
Net: 1<<32 - 1,
HDKeyIDPair: hdkeychain.HDKeyIDPair{
PrivateKeyID: [4]byte{0x01, 0x02, 0x03, 0x04},
PublicKeyID: [4]byte{0x05, 0x06, 0x07, 0x08},
},
}
func TestRegister(t *testing.T) {
@@ -27,16 +20,10 @@ func TestRegister(t *testing.T) {
params *Params
err error
}
type hdTest struct {
priv []byte
want []byte
err error
}
tests := []struct {
name string
register []registerTest
hdMagics []hdTest
}{
{
name: "default networks",
@@ -62,40 +49,6 @@ func TestRegister(t *testing.T) {
err: ErrDuplicateNet,
},
},
hdMagics: []hdTest{
{
priv: MainnetParams.HDKeyIDPair.PrivateKeyID[:],
want: MainnetParams.HDKeyIDPair.PublicKeyID[:],
err: nil,
},
{
priv: TestnetParams.HDKeyIDPair.PrivateKeyID[:],
want: TestnetParams.HDKeyIDPair.PublicKeyID[:],
err: nil,
},
{
priv: RegressionNetParams.HDKeyIDPair.PrivateKeyID[:],
want: RegressionNetParams.HDKeyIDPair.PublicKeyID[:],
err: nil,
},
{
priv: SimnetParams.HDKeyIDPair.PrivateKeyID[:],
want: SimnetParams.HDKeyIDPair.PublicKeyID[:],
err: nil,
},
{
priv: mockNetParams.HDKeyIDPair.PrivateKeyID[:],
err: hdkeychain.ErrUnknownHDKeyID,
},
{
priv: []byte{0xff, 0xff, 0xff, 0xff},
err: hdkeychain.ErrUnknownHDKeyID,
},
{
priv: []byte{0xff},
err: hdkeychain.ErrUnknownHDKeyID,
},
},
},
{
name: "register mocknet",
@@ -106,13 +59,6 @@ func TestRegister(t *testing.T) {
err: nil,
},
},
hdMagics: []hdTest{
{
priv: mockNetParams.HDKeyIDPair.PrivateKeyID[:],
want: mockNetParams.HDKeyIDPair.PublicKeyID[:],
err: nil,
},
},
},
{
name: "more duplicates",
@@ -143,41 +89,6 @@ func TestRegister(t *testing.T) {
err: ErrDuplicateNet,
},
},
hdMagics: []hdTest{
{
priv: MainnetParams.HDKeyIDPair.PrivateKeyID[:],
want: MainnetParams.HDKeyIDPair.PublicKeyID[:],
err: nil,
},
{
priv: TestnetParams.HDKeyIDPair.PrivateKeyID[:],
want: TestnetParams.HDKeyIDPair.PublicKeyID[:],
err: nil,
},
{
priv: RegressionNetParams.HDKeyIDPair.PrivateKeyID[:],
want: RegressionNetParams.HDKeyIDPair.PublicKeyID[:],
err: nil,
},
{
priv: SimnetParams.HDKeyIDPair.PrivateKeyID[:],
want: SimnetParams.HDKeyIDPair.PublicKeyID[:],
err: nil,
},
{
priv: mockNetParams.HDKeyIDPair.PrivateKeyID[:],
want: mockNetParams.HDKeyIDPair.PublicKeyID[:],
err: nil,
},
{
priv: []byte{0xff, 0xff, 0xff, 0xff},
err: hdkeychain.ErrUnknownHDKeyID,
},
{
priv: []byte{0xff},
err: hdkeychain.ErrUnknownHDKeyID,
},
},
},
}
@@ -185,25 +96,10 @@ func TestRegister(t *testing.T) {
for _, regtest := range test.register {
err := Register(regtest.params)
// HDKeyIDPairs must be registered separately
hdkeychain.RegisterHDKeyIDPair(regtest.params.HDKeyIDPair)
if err != regtest.err {
t.Errorf("%s:%s: Registered network with unexpected error: got %v expected %v",
test.name, regtest.name, err, regtest.err)
}
}
for i, magTest := range test.hdMagics {
pubKey, err := hdkeychain.HDPrivateKeyToPublicKeyID(magTest.priv[:])
if !reflect.DeepEqual(err, magTest.err) {
t.Errorf("%s: HD magic %d mismatched error: got %v expected %v ",
test.name, i, err, magTest.err)
continue
}
if magTest.err == nil && !bytes.Equal(pubKey, magTest.want[:]) {
t.Errorf("%s: HD magic %d private and public mismatch: got %v expected %v ",
test.name, i, pubKey, magTest.want[:])
}
}
}
}


@@ -4,29 +4,35 @@ database
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad/database)
Package database provides a block and metadata storage database.
Package database provides a database for kaspad.
Please note that this package is intended to enable kaspad to support different
database backends and is not something that a client can directly access as only
one entity can have the database open at a time (for most database backends),
and that entity will be kaspad.
Overview
--------
This package provides a database layer to store and retrieve data in a simple
and efficient manner.
When a client wants programmatic access to the data provided by kaspad, they'll
likely want to use the [rpcclient](https://github.com/kaspanet/kaspad/tree/master/rpcclient)
package which makes use of the [JSON-RPC API](https://github.com/kaspanet/kaspad/tree/master/docs/json_rpc_api.md).
The current backend is ffldb, which makes use of leveldb, flat files, and strict
checksums in key areas to ensure data integrity.
The default backend, ffldb, has a strong focus on speed, efficiency, and
robustness. It makes use of leveldb for the metadata, flat files for block
storage, and strict checksums in key areas to ensure data integrity.
Implementors of additional backends are required to implement the following interfaces:
## Feature Overview
DataAccessor
------------
This defines the common interface by which data gets accessed in a generic kaspad
database. Both the Database and the Transaction interfaces (see below) implement it.
- Key/value metadata store
- Kaspa block storage
- Efficient retrieval of block headers and regions (transactions, scripts, etc)
- Read-only and read-write transactions with both manual and managed modes
- Nested buckets
- Iteration support including cursors with seek capability
- Supports registration of backend databases
- Comprehensive test coverage
Database
--------
This defines the interface of a database that can begin transactions and close itself.
Transaction
-----------
This defines the interface of a generic kaspad database transaction.
Note: Transactions provide data consistency over the state of the database as it was
when the transaction started. There is NO guarantee that if one puts data into the
transaction then it will be available to get within the same transaction.
Cursor
------
This iterates over database entries given some bucket.
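
A rough sketch of how the interfaces described above fit together; the method names (Put, Get, Begin, Commit, Rollback) are illustrative assumptions, not the package's actual signatures:

package dbsketch

// DataAccessor is the common surface shared by Database and Transaction.
type DataAccessor interface {
    Put(key, value []byte) error
    Get(key []byte) ([]byte, error)
}

// Database can begin transactions and close itself.
type Database interface {
    DataAccessor
    Begin() (Transaction, error)
    Close() error
}

// Transaction is a generic database transaction.
type Transaction interface {
    DataAccessor
    Commit() error
    Rollback() error
}

// writeThenRead illustrates the consistency caveat above: the Get is
// NOT guaranteed to observe the Put made within the same transaction.
func writeThenRead(db Database) ([]byte, error) {
    tx, err := db.Begin()
    if err != nil {
        return nil, err
    }
    if err := tx.Put([]byte("key"), []byte("value")); err != nil {
        tx.Rollback()
        return nil, err
    }
    value, err := tx.Get([]byte("key")) // may not see the Put yet
    if err != nil {
        tx.Rollback()
        return nil, err
    }
    return value, tx.Commit()
}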


@@ -1,62 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"encoding/hex"
"github.com/pkg/errors"
"time"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/daghash"
)
// fetchBlockCmd defines the configuration options for the fetchblock command.
type fetchBlockCmd struct{}
var (
// fetchBlockCfg defines the configuration options for the command.
fetchBlockCfg = fetchBlockCmd{}
)
// Execute is the main entry point for the command. It's invoked by the parser.
func (cmd *fetchBlockCmd) Execute(args []string) error {
// Setup the global config options and ensure they are valid.
if err := setupGlobalConfig(); err != nil {
return err
}
if len(args) < 1 {
return errors.New("required block hash parameter not specified")
}
blockHash, err := daghash.NewHashFromStr(args[0])
if err != nil {
return err
}
// Load the block database.
db, err := loadBlockDB()
if err != nil {
return err
}
defer db.Close()
return db.View(func(dbTx database.Tx) error {
log.Infof("Fetching block %s", blockHash)
startTime := time.Now()
blockBytes, err := dbTx.FetchBlock(blockHash)
if err != nil {
return err
}
log.Infof("Loaded block in %s", time.Since(startTime))
log.Infof("Block Hex: %s", hex.EncodeToString(blockBytes))
return nil
})
}
// Usage overrides the usage display for the command.
func (cmd *fetchBlockCmd) Usage() string {
return "<block-hash>"
}


@@ -1,90 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"encoding/hex"
"github.com/pkg/errors"
"strconv"
"time"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/daghash"
)
// blockRegionCmd defines the configuration options for the fetchblockregion
// command.
type blockRegionCmd struct{}
var (
// blockRegionCfg defines the configuration options for the command.
blockRegionCfg = blockRegionCmd{}
)
// Execute is the main entry point for the command. It's invoked by the parser.
func (cmd *blockRegionCmd) Execute(args []string) error {
// Setup the global config options and ensure they are valid.
if err := setupGlobalConfig(); err != nil {
return err
}
// Ensure expected arguments.
if len(args) < 1 {
return errors.New("required block hash parameter not specified")
}
if len(args) < 2 {
return errors.New("required start offset parameter not " +
"specified")
}
if len(args) < 3 {
return errors.New("required region length parameter not " +
"specified")
}
// Parse arguments.
blockHash, err := daghash.NewHashFromStr(args[0])
if err != nil {
return err
}
startOffset, err := strconv.ParseUint(args[1], 10, 32)
if err != nil {
return err
}
regionLen, err := strconv.ParseUint(args[2], 10, 32)
if err != nil {
return err
}
// Load the block database.
db, err := loadBlockDB()
if err != nil {
return err
}
defer db.Close()
return db.View(func(dbTx database.Tx) error {
log.Infof("Fetching block region %s<%d:%d>", blockHash,
startOffset, startOffset+regionLen-1)
region := database.BlockRegion{
Hash: blockHash,
Offset: uint32(startOffset),
Len: uint32(regionLen),
}
startTime := time.Now()
regionBytes, err := dbTx.FetchBlockRegion(&region)
if err != nil {
return err
}
log.Infof("Loaded block region in %s", time.Since(startTime))
log.Infof("Double Hash: %s", daghash.DoubleHashH(regionBytes))
log.Infof("Region Hex: %s", hex.EncodeToString(regionBytes))
return nil
})
}
// Usage overrides the usage display for the command.
func (cmd *blockRegionCmd) Usage() string {
return "<block-hash> <start-offset> <length-of-region>"
}


@@ -1,111 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"github.com/pkg/errors"
"os"
"path/filepath"
"strings"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
_ "github.com/kaspanet/kaspad/database/ffldb"
"github.com/kaspanet/kaspad/util"
)
var (
kaspadHomeDir = util.AppDataDir("kaspad", false)
knownDbTypes = database.SupportedDrivers()
activeNetParams = &dagconfig.MainnetParams
// Default global config.
cfg = &config{
DataDir: filepath.Join(kaspadHomeDir, "data"),
DbType: "ffldb",
}
)
// config defines the global configuration options.
type config struct {
DataDir string `short:"b" long:"datadir" description:"Location of the kaspad data directory"`
DbType string `long:"dbtype" description:"Database backend to use for the Block DAG"`
Testnet bool `long:"testnet" description:"Use the test network"`
RegressionTest bool `long:"regtest" description:"Use the regression test network"`
Simnet bool `long:"simnet" description:"Use the simulation test network"`
Devnet bool `long:"devnet" description:"Use the development test network"`
}
// fileExists reports whether the named file or directory exists.
func fileExists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}
// validDbType returns whether or not dbType is a supported database type.
func validDbType(dbType string) bool {
for _, knownType := range knownDbTypes {
if dbType == knownType {
return true
}
}
return false
}
// setupGlobalConfig examines the global configuration options for any conditions
// which are invalid and performs any additional setup necessary after the
// initial parse.
func setupGlobalConfig() error {
// Multiple networks can't be selected simultaneously.
// Count number of network flags passed; assign active network params
// while we're at it
numNets := 0
if cfg.Testnet {
numNets++
activeNetParams = &dagconfig.TestnetParams
}
if cfg.RegressionTest {
numNets++
activeNetParams = &dagconfig.RegressionNetParams
}
if cfg.Simnet {
numNets++
activeNetParams = &dagconfig.SimnetParams
}
if cfg.Devnet {
numNets++
activeNetParams = &dagconfig.DevnetParams
}
if numNets > 1 {
return errors.New("The testnet, regtest, simnet and devnet params " +
"can't be used together -- choose one of the four")
}
if numNets == 0 {
return errors.New("Mainnet has not launched yet, use --testnet to run in testnet mode")
}
// Validate database type.
if !validDbType(cfg.DbType) {
str := "The specified database type [%s] is invalid -- " +
"supported types: %s"
return errors.Errorf(str, cfg.DbType, strings.Join(knownDbTypes, ", "))
}
// Append the network type to the data directory so it is "namespaced"
// per network. In addition to the block database, there are other
// pieces of data that are saved to disk such as address manager state.
// All data is specific to a network, so namespacing the data directory
// means each individual piece of serialized data does not have to
// worry about changing names per network and such.
cfg.DataDir = filepath.Join(cfg.DataDir, activeNetParams.Name)
return nil
}


@@ -1,113 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"github.com/kaspanet/kaspad/util/panics"
"github.com/pkg/errors"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/logger"
"github.com/kaspanet/kaspad/logs"
)
const (
// blockDbNamePrefix is the prefix for the kaspad block database.
blockDbNamePrefix = "blocks"
)
var (
log *logs.Logger
spawn func(func())
shutdownChannel = make(chan error)
)
// loadBlockDB opens the block database and returns a handle to it.
func loadBlockDB() (database.DB, error) {
// The database name is based on the database type.
dbName := blockDbNamePrefix + "_" + cfg.DbType
dbPath := filepath.Join(cfg.DataDir, dbName)
log.Infof("Loading block database from '%s'", dbPath)
db, err := database.Open(cfg.DbType, dbPath, activeNetParams.Net)
if err != nil {
// Return the error if it's not because the database doesn't
// exist.
var dbErr database.Error
if ok := errors.As(err, &dbErr); !ok || dbErr.ErrorCode !=
database.ErrDbDoesNotExist {
return nil, err
}
// Create the db if it does not exist.
err = os.MkdirAll(cfg.DataDir, 0700)
if err != nil {
return nil, err
}
db, err = database.Create(cfg.DbType, dbPath, activeNetParams.Net)
if err != nil {
return nil, err
}
}
log.Info("Block database loaded")
return db, nil
}
// realMain is the real main function for the utility. It is necessary to work
// around the fact that deferred functions do not run when os.Exit() is called.
func realMain() error {
// Setup logging.
backendLogger := logs.NewBackend()
defer os.Stdout.Sync()
log = backendLogger.Logger("MAIN")
spawn = panics.GoroutineWrapperFunc(log)
dbLog, _ := logger.Get(logger.SubsystemTags.KSDB)
dbLog.SetLevel(logs.LevelDebug)
// Setup the parser options and commands.
appName := filepath.Base(os.Args[0])
appName = strings.TrimSuffix(appName, filepath.Ext(appName))
parserFlags := flags.Options(flags.HelpFlag | flags.PassDoubleDash)
parser := flags.NewNamedParser(appName, parserFlags)
parser.AddGroup("Global Options", "", cfg)
parser.AddCommand("fetchblock",
"Fetch the specific block hash from the database", "",
&fetchBlockCfg)
parser.AddCommand("fetchblockregion",
"Fetch the specified block region from the database", "",
&blockRegionCfg)
// Parse command line and invoke the Execute function for the specified
// command.
if _, err := parser.Parse(); err != nil {
var flagsErr *flags.Error
if ok := errors.As(err, &flagsErr); ok && flagsErr.Type == flags.ErrHelp {
parser.WriteHelp(os.Stderr)
} else {
log.Error(err)
}
return err
}
return nil
}
func main() {
// Use all processor cores.
runtime.GOMAXPROCS(runtime.NumCPU())
// Work around defer not working after os.Exit()
if err := realMain(); err != nil {
os.Exit(1)
}
}


@@ -1,82 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"os"
"os/signal"
)
// interruptChannel is used to receive SIGINT (Ctrl+C) signals.
var interruptChannel chan os.Signal
// addHandlerChannel is used to add an interrupt handler to the list of handlers
// to be invoked on SIGINT (Ctrl+C) signals.
var addHandlerChannel = make(chan func())
// mainInterruptHandler listens for SIGINT (Ctrl+C) signals on the
// interruptChannel and invokes the registered interruptCallbacks accordingly.
// It also listens for callback registration. It must be run as a goroutine.
func mainInterruptHandler() {
// interruptCallbacks is a list of callbacks to invoke when a
// SIGINT (Ctrl+C) is received.
var interruptCallbacks []func()
// isShutdown is a flag which is used to indicate whether or not
// the shutdown signal has already been received and hence any future
// attempts to add a new interrupt handler should invoke them
// immediately.
var isShutdown bool
for {
select {
case <-interruptChannel:
// Ignore more than one shutdown signal.
if isShutdown {
log.Infof("Received SIGINT (Ctrl+C). " +
"Already shutting down...")
continue
}
isShutdown = true
log.Infof("Received SIGINT (Ctrl+C). Shutting down...")
// Run handlers in LIFO order.
for i := range interruptCallbacks {
idx := len(interruptCallbacks) - 1 - i
callback := interruptCallbacks[idx]
callback()
}
// Signal the main goroutine to shutdown.
spawn(func() {
shutdownChannel <- nil
})
case handler := <-addHandlerChannel:
// The shutdown signal has already been received, so
// just invoke any new handlers immediately.
if isShutdown {
handler()
}
interruptCallbacks = append(interruptCallbacks, handler)
}
}
}
// addInterruptHandler adds a handler to call when a SIGINT (Ctrl+C) is
// received.
func addInterruptHandler(handler func()) {
// Create the channel and start the main interrupt handler which invokes
// all other callbacks and exits if not already done.
if interruptChannel == nil {
interruptChannel = make(chan os.Signal, 1)
signal.Notify(interruptChannel, os.Interrupt)
spawn(mainInterruptHandler)
}
addHandlerChannel <- handler
}

84
database/common_test.go Normal file

@@ -0,0 +1,84 @@
package database_test
import (
"fmt"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/database/ffldb"
"io/ioutil"
"testing"
)
type databasePrepareFunc func(t *testing.T, testName string) (db database.Database, name string, teardownFunc func())
// databasePrepareFuncs is a set of functions, in which each function
// prepares a separate database type for testing.
// See testForAllDatabaseTypes for further details.
var databasePrepareFuncs = []databasePrepareFunc{
prepareFFLDBForTest,
}
func prepareFFLDBForTest(t *testing.T, testName string) (db database.Database, name string, teardownFunc func()) {
// Create a temp db to run tests against
path, err := ioutil.TempDir("", testName)
if err != nil {
t.Fatalf("%s: TempDir unexpectedly "+
"failed: %s", testName, err)
}
db, err = ffldb.Open(path)
if err != nil {
t.Fatalf("%s: Open unexpectedly "+
"failed: %s", testName, err)
}
teardownFunc = func() {
err = db.Close()
if err != nil {
t.Fatalf("%s: Close unexpectedly "+
"failed: %s", testName, err)
}
}
return db, "ffldb", teardownFunc
}
// testForAllDatabaseTypes runs the given testFunc for every database
// type defined in databasePrepareFuncs. This is to make sure that
// all supported database types adhere to the assumptions defined in
// the interfaces in this package.
func testForAllDatabaseTypes(t *testing.T, testName string,
testFunc func(t *testing.T, db database.Database, testName string)) {
for _, prepareDatabase := range databasePrepareFuncs {
func() {
db, dbType, teardownFunc := prepareDatabase(t, testName)
defer teardownFunc()
testName := fmt.Sprintf("%s: %s", dbType, testName)
testFunc(t, db, testName)
}()
}
}
type keyValuePair struct {
key *database.Key
value []byte
}
func populateDatabaseForTest(t *testing.T, db database.Database, testName string) []keyValuePair {
// Prepare a list of key/value pairs
entries := make([]keyValuePair, 10)
for i := 0; i < 10; i++ {
key := database.MakeBucket().Key([]byte(fmt.Sprintf("key%d", i)))
value := []byte("value")
entries[i] = keyValuePair{key: key, value: value}
}
// Put the pairs into the database
for _, entry := range entries {
err := db.Put(entry.key, entry.value)
if err != nil {
t.Fatalf("%s: Put unexpectedly "+
"failed: %s", testName, err)
}
}
return entries
}
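For orientation, a new test plugs into this harness as follows; a minimal sketch, with TestExample/testExample as hypothetical names:
```Go
func TestExample(t *testing.T) {
	testForAllDatabaseTypes(t, "TestExample", testExample)
}

func testExample(t *testing.T, db database.Database, testName string) {
	// The body runs once per database type, each time against a
	// freshly prepared database that is torn down afterwards.
	entries := populateDatabaseForTest(t, db, testName)
	if len(entries) == 0 {
		t.Fatalf("%s: expected a populated database", testName)
	}
}
```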

30
database/cursor.go Normal file

@@ -0,0 +1,30 @@
package database
// Cursor iterates over database entries given some bucket.
type Cursor interface {
// Next moves the iterator to the next key/value pair. It returns false once
// the iterator is exhausted. Panics if the cursor is closed.
Next() bool
// First moves the iterator to the first key/value pair. It returns false if
// such a pair does not exist. Panics if the cursor is closed.
First() bool
// Seek moves the iterator to the first key/value pair whose key is greater
// than or equal to the given key. It returns ErrNotFound if such a pair does
// not exist.
Seek(key *Key) error
// Key returns the key of the current key/value pair, or ErrNotFound if done.
// The caller should not modify the contents of the returned key, and
// its contents may change on the next call to Next.
Key() (*Key, error)
// Value returns the value of the current key/value pair, or ErrNotFound if done.
// The caller should not modify the contents of the returned slice, and its
// contents may change on the next call to Next.
Value() ([]byte, error)
// Close releases associated resources.
Close() error
}
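As a usage sketch against this interface (assuming a database.Database value, the MakeBucket helper introduced elsewhere in this change, and the usual fmt import):
```Go
// iterateBucket walks every key/value pair in the given bucket.
// A minimal sketch, not canonical usage.
func iterateBucket(db database.Database, bucket *database.Bucket) error {
	cursor, err := db.Cursor(bucket)
	if err != nil {
		return err
	}
	defer cursor.Close()
	// Next returns false once the iterator is exhausted.
	for cursor.Next() {
		key, err := cursor.Key()
		if err != nil {
			return err
		}
		value, err := cursor.Value()
		if err != nil {
			return err
		}
		fmt.Printf("%v => %x\n", key, value)
	}
	return nil
}
```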

345
database/cursor_test.go Normal file

@@ -0,0 +1,345 @@
// All tests within this file should call testForAllDatabaseTypes
// over the actual test. This is to make sure that all supported
// database types adhere to the assumptions defined in the
// interfaces in this package.
package database_test
import (
"bytes"
"fmt"
"github.com/kaspanet/kaspad/database"
"reflect"
"strings"
"testing"
)
func prepareCursorForTest(t *testing.T, db database.Database, testName string) database.Cursor {
cursor, err := db.Cursor(database.MakeBucket())
if err != nil {
t.Fatalf("%s: Cursor unexpectedly "+
"failed: %s", testName, err)
}
return cursor
}
func recoverFromClosedCursorPanic(t *testing.T, testName string) {
panicErr := recover()
if panicErr == nil {
t.Fatalf("%s: cursor unexpectedly "+
"didn't panic after being closed", testName)
}
expectedPanicErr := "closed cursor"
if !strings.Contains(fmt.Sprintf("%v", panicErr), expectedPanicErr) {
t.Fatalf("%s: cursor panicked "+
"with wrong message. Want: %v, got: %s",
testName, expectedPanicErr, panicErr)
}
}
func TestCursorNext(t *testing.T) {
testForAllDatabaseTypes(t, "TestCursorNext", testCursorNext)
}
func testCursorNext(t *testing.T, db database.Database, testName string) {
entries := populateDatabaseForTest(t, db, testName)
cursor := prepareCursorForTest(t, db, testName)
// Make sure that all the entries exist in the cursor, in their
// correct order
for _, entry := range entries {
hasNext := cursor.Next()
if !hasNext {
t.Fatalf("%s: cursor unexpectedly "+
"done", testName)
}
cursorKey, err := cursor.Key()
if err != nil {
t.Fatalf("%s: Key unexpectedly "+
"failed: %s", testName, err)
}
if !reflect.DeepEqual(cursorKey, entry.key) {
t.Fatalf("%s: Cursor returned "+
"wrong key. Want: %s, got: %s", testName, entry.key, cursorKey)
}
cursorValue, err := cursor.Value()
if err != nil {
t.Fatalf("%s: Value unexpectedly "+
"failed: %s", testName, err)
}
if !bytes.Equal(cursorValue, entry.value) {
t.Fatalf("%s: Cursor returned "+
"wrong value. Want: %s, got: %s", testName, entry.value, cursorValue)
}
}
// The cursor should now be exhausted. Make sure Next now
// returns false
hasNext := cursor.Next()
if hasNext {
t.Fatalf("%s: cursor unexpectedly "+
"not done", testName)
}
// Rewind the cursor and close it
cursor.First()
err := cursor.Close()
if err != nil {
t.Fatalf("%s: Close unexpectedly "+
"failed: %s", testName, err)
}
// Call Next on the cursor. This time it should panic
// because it's closed.
func() {
defer recoverFromClosedCursorPanic(t, testName)
cursor.Next()
}()
}
func TestCursorFirst(t *testing.T) {
testForAllDatabaseTypes(t, "TestCursorFirst", testCursorFirst)
}
func testCursorFirst(t *testing.T, db database.Database, testName string) {
entries := populateDatabaseForTest(t, db, testName)
cursor := prepareCursorForTest(t, db, testName)
// Make sure that First returns true when the cursor is not empty
exists := cursor.First()
if !exists {
t.Fatalf("%s: Cursor unexpectedly "+
"returned false", testName)
}
// Make sure that the first key and value are as expected
firstEntryKey := entries[0].key
firstCursorKey, err := cursor.Key()
if err != nil {
t.Fatalf("%s: Key unexpectedly "+
"failed: %s", testName, err)
}
if !reflect.DeepEqual(firstCursorKey, firstEntryKey) {
t.Fatalf("%s: Cursor returned "+
"wrong key. Want: %s, got: %s", testName, firstEntryKey, firstCursorKey)
}
firstEntryValue := entries[0].value
firstCursorValue, err := cursor.Value()
if err != nil {
t.Fatalf("%s: Value unexpectedly "+
"failed: %s", testName, err)
}
if !bytes.Equal(firstCursorValue, firstEntryValue) {
t.Fatalf("%s: Cursor returned "+
"wrong value. Want: %s, got: %s", testName, firstEntryValue, firstCursorValue)
}
// Exhaust the cursor
for cursor.Next() {
// Do nothing
}
// Call first again and make sure it still returns true
exists = cursor.First()
if !exists {
t.Fatalf("%s: First unexpectedly "+
"returned false", testName)
}
// Call next and make sure it returns true as well
exists = cursor.Next()
if !exists {
t.Fatalf("%s: Next unexpectedly "+
"returned false", testName)
}
// Remove all the entries from the database
for _, entry := range entries {
err := db.Delete(entry.key)
if err != nil {
t.Fatalf("%s: Delete unexpectedly "+
"failed: %s", testName, err)
}
}
// Create a new cursor over an empty dataset
cursor = prepareCursorForTest(t, db, testName)
// Make sure that First returns false when the cursor is empty
exists = cursor.First()
if exists {
t.Fatalf("%s: Cursor unexpectedly "+
"returned true", testName)
}
}
func TestCursorSeek(t *testing.T) {
testForAllDatabaseTypes(t, "TestCursorSeek", testCursorSeek)
}
func testCursorSeek(t *testing.T, db database.Database, testName string) {
entries := populateDatabaseForTest(t, db, testName)
cursor := prepareCursorForTest(t, db, testName)
// Seek to the fourth entry and make sure it exists
fourthEntry := entries[3]
err := cursor.Seek(fourthEntry.key)
if err != nil {
t.Fatalf("%s: Cursor unexpectedly "+
"failed: %s", testName, err)
}
// Make sure that the key and value are as expected
fourthEntryKey := entries[3].key
fourthCursorKey, err := cursor.Key()
if err != nil {
t.Fatalf("%s: Key unexpectedly "+
"failed: %s", testName, err)
}
if !reflect.DeepEqual(fourthCursorKey, fourthEntryKey) {
t.Fatalf("%s: Cursor returned "+
"wrong key. Want: %s, got: %s", testName, fourthEntryKey, fourthCursorKey)
}
fourthEntryValue := entries[3].value
fourthCursorValue, err := cursor.Value()
if err != nil {
t.Fatalf("%s: Value unexpectedly "+
"failed: %s", testName, err)
}
if !bytes.Equal(fourthCursorValue, fourthEntryValue) {
t.Fatalf("%s: Cursor returned "+
"wrong value. Want: %s, got: %s", testName, fourthEntryValue, fourthCursorValue)
}
// Call Next and make sure that we are now on the fifth entry
exists := cursor.Next()
if !exists {
t.Fatalf("%s: Next unexpectedly "+
"returned false", testName)
}
fifthEntryKey := entries[4].key
fifthCursorKey, err := cursor.Key()
if err != nil {
t.Fatalf("%s: Key unexpectedly "+
"failed: %s", testName, err)
}
if !reflect.DeepEqual(fifthCursorKey, fifthEntryKey) {
t.Fatalf("%s: Cursor returned "+
"wrong key. Want: %s, got: %s", testName, fifthEntryKey, fifthCursorKey)
}
fifthEntryValue := entries[4].value
fifthCursorValue, err := cursor.Value()
if err != nil {
t.Fatalf("%s: Value unexpectedly "+
"failed: %s", testName, err)
}
if !bytes.Equal(fifthCursorValue, fifthEntryValue) {
t.Fatalf("%s: Cursor returned "+
"wrong value. Want: %s, got: %s", testName, fifthEntryValue, fifthCursorValue)
}
// Seek to a value that doesn't exist and make sure that
// the returned error is ErrNotFound
err = cursor.Seek(database.MakeBucket().Key([]byte("doesn't exist")))
if err == nil {
t.Fatalf("%s: Seek unexpectedly "+
"succeeded", testName)
}
if !database.IsNotFoundError(err) {
t.Fatalf("%s: Seek returned "+
"wrong error: %s", testName, err)
}
}
func TestCursorCloseErrors(t *testing.T) {
testForAllDatabaseTypes(t, "TestCursorCloseErrors", testCursorCloseErrors)
}
func testCursorCloseErrors(t *testing.T, db database.Database, testName string) {
populateDatabaseForTest(t, db, testName)
cursor := prepareCursorForTest(t, db, testName)
// Close the cursor
err := cursor.Close()
if err != nil {
t.Fatalf("%s: Close "+
"unexpectedly failed: %s", testName, err)
}
tests := []struct {
name string
function func() error
}{
{
name: "Seek",
function: func() error {
return cursor.Seek(database.MakeBucket().Key([]byte{}))
},
},
{
name: "Key",
function: func() error {
_, err := cursor.Key()
return err
},
},
{
name: "Value",
function: func() error {
_, err := cursor.Value()
return err
},
},
{
name: "Close",
function: func() error {
return cursor.Close()
},
},
}
for _, test := range tests {
expectedErrContainsString := "closed cursor"
// Make sure that the test function returns a "closed cursor" error
err = test.function()
if err == nil {
t.Fatalf("%s: %s "+
"unexpectedly succeeded", testName, test.name)
}
if !strings.Contains(err.Error(), expectedErrContainsString) {
t.Fatalf("%s: %s "+
"returned wrong error. Want: %s, got: %s",
testName, test.name, expectedErrContainsString, err)
}
}
}
func TestCursorCloseFirstAndNext(t *testing.T) {
testForAllDatabaseTypes(t, "TestCursorCloseFirstAndNext", testCursorCloseFirstAndNext)
}
func testCursorCloseFirstAndNext(t *testing.T, db database.Database, testName string) {
populateDatabaseForTest(t, db, testName)
cursor := prepareCursorForTest(t, db, testName)
// Close the cursor
err := cursor.Close()
if err != nil {
t.Fatalf("%s: Close "+
"unexpectedly failed: %s", testName, err)
}
// We expect First to panic
func() {
defer recoverFromClosedCursorPanic(t, testName)
cursor.First()
}()
// We expect Next to panic
func() {
defer recoverFromClosedCursorPanic(t, testName)
cursor.Next()
}()
}

36
database/dataaccessor.go Normal file

@@ -0,0 +1,36 @@
package database
// DataAccessor defines the common interface by which data gets
// accessed in a generic kaspad database.
type DataAccessor interface {
// Put sets the value for the given key. It overwrites
// any previous value for that key.
Put(key *Key, value []byte) error
// Get gets the value for the given key. It returns
// ErrNotFound if the given key does not exist.
Get(key *Key) ([]byte, error)
// Has returns true if the database contains the
// given key.
Has(key *Key) (bool, error)
// Delete deletes the value for the given key. Will not
// return an error if the key doesn't exist.
Delete(key *Key) error
// AppendToStore appends the given data to the store
// defined by storeName. This function returns a serialized
// location handle that's meant to be stored and later used
// when querying the data that has just now been inserted.
AppendToStore(storeName string, data []byte) ([]byte, error)
// RetrieveFromStore retrieves data from the store defined by
// storeName using the given serialized location handle. It
// returns ErrNotFound if the location does not exist. See
// AppendToStore for further details.
RetrieveFromStore(storeName string, location []byte) ([]byte, error)
// Cursor begins a new cursor over the given bucket.
Cursor(bucket *Bucket) (Cursor, error)
}
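AppendToStore and RetrieveFromStore are easiest to see as a round trip. A minimal sketch; the "blocks" store name is illustrative only:
```Go
// storeRoundTrip appends data to a flat-file store and reads it
// back through the opaque location handle it gets in return.
func storeRoundTrip(db database.Database, data []byte) ([]byte, error) {
	// The location handle is meant to be persisted (for example,
	// under a key in the database) and used in later queries.
	location, err := db.AppendToStore("blocks", data)
	if err != nil {
		return nil, err
	}
	return db.RetrieveFromStore("blocks", location)
}
```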

19
database/database.go Normal file

@@ -0,0 +1,19 @@
package database
// Database defines the interface of a database that can begin
// transactions and close itself.
//
// Important: This is not part of the DataAccessor interface
// because the Transaction interface includes it. Were we to
// merge Database with DataAccessor, implementors of the
// Transaction interface would be forced to implement methods
// such as Begin and Close, which is undesirable.
type Database interface {
DataAccessor
// Begin begins a new database transaction.
Begin() (Transaction, error)
// Close closes the database.
Close() error
}
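A sketch of explicit transaction use. Transaction embeds DataAccessor, but its other methods are not shown in this hunk, so the Rollback and Commit calls below are assumptions about its shape:
```Go
// putInTransaction writes a single key/value pair inside an
// explicit transaction. Rollback and Commit are assumed methods.
func putInTransaction(db database.Database, key *database.Key, value []byte) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	if err := tx.Put(key, value); err != nil {
		// Best-effort cleanup; the rollback error is ignored here.
		tx.Rollback()
		return err
	}
	// Note: per the package documentation, a value put within a
	// transaction is not guaranteed to be readable through Get
	// within that same transaction.
	return tx.Commit()
}
```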

207
database/database_test.go Normal file

@@ -0,0 +1,207 @@
// All tests within this file should call testForAllDatabaseTypes
// over the actual test. This is to make sure that all supported
// database types adhere to the assumptions defined in the
// interfaces in this package.
package database_test
import (
"bytes"
"github.com/kaspanet/kaspad/database"
"testing"
)
func TestDatabasePut(t *testing.T) {
testForAllDatabaseTypes(t, "TestDatabasePut", testDatabasePut)
}
func testDatabasePut(t *testing.T, db database.Database, testName string) {
// Put value1 into the database
key := database.MakeBucket().Key([]byte("key"))
value1 := []byte("value1")
err := db.Put(key, value1)
if err != nil {
t.Fatalf("%s: Put "+
"unexpectedly failed: %s", testName, err)
}
// Make sure that the returned value is value1
returnedValue, err := db.Get(key)
if err != nil {
t.Fatalf("%s: Get "+
"unexpectedly failed: %s", testName, err)
}
if !bytes.Equal(returnedValue, value1) {
t.Fatalf("%s: Get "+
"returned wrong value. Want: %s, got: %s",
testName, string(value1), string(returnedValue))
}
// Put value2 into the database with the same key
value2 := []byte("value2")
err = db.Put(key, value2)
if err != nil {
t.Fatalf("%s: Put "+
"unexpectedly failed: %s", testName, err)
}
// Make sure that the returned value is value2
returnedValue, err = db.Get(key)
if err != nil {
t.Fatalf("%s: Get "+
"unexpectedly failed: %s", testName, err)
}
if !bytes.Equal(returnedValue, value2) {
t.Fatalf("%s: Get "+
"returned wrong value. Want: %s, got: %s",
testName, string(value2), string(returnedValue))
}
}
func TestDatabaseGet(t *testing.T) {
testForAllDatabaseTypes(t, "TestDatabaseGet", testDatabaseGet)
}
func testDatabaseGet(t *testing.T, db database.Database, testName string) {
// Put a value into the database
key := database.MakeBucket().Key([]byte("key"))
value := []byte("value")
err := db.Put(key, value)
if err != nil {
t.Fatalf("%s: Put "+
"unexpectedly failed: %s", testName, err)
}
// Get the value back and make sure it's the same one
returnedValue, err := db.Get(key)
if err != nil {
t.Fatalf("%s: Get "+
"unexpectedly failed: %s", testName, err)
}
if !bytes.Equal(returnedValue, value) {
t.Fatalf("%s: Get "+
"returned wrong value. Want: %s, got: %s",
testName, string(value), string(returnedValue))
}
// Try getting a non-existent value and make sure
// the returned error is ErrNotFound
_, err = db.Get(database.MakeBucket().Key([]byte("doesn't exist")))
if err == nil {
t.Fatalf("%s: Get "+
"unexpectedly succeeded", testName)
}
if !database.IsNotFoundError(err) {
t.Fatalf("%s: Get "+
"returned wrong error: %s", testName, err)
}
}
func TestDatabaseHas(t *testing.T) {
testForAllDatabaseTypes(t, "TestDatabaseHas", testDatabaseHas)
}
func testDatabaseHas(t *testing.T, db database.Database, testName string) {
// Put a value into the database
key := database.MakeBucket().Key([]byte("key"))
value := []byte("value")
err := db.Put(key, value)
if err != nil {
t.Fatalf("%s: Put "+
"unexpectedly failed: %s", testName, err)
}
// Make sure that Has returns true for the value we just put
exists, err := db.Has(key)
if err != nil {
t.Fatalf("%s: Has "+
"unexpectedly failed: %s", testName, err)
}
if !exists {
t.Fatalf("%s: Has "+
"unexpectedly returned that the value does not exist", testName)
}
// Make sure that Has returns false for a non-existent value
exists, err = db.Has(database.MakeBucket().Key([]byte("doesn't exist")))
if err != nil {
t.Fatalf("%s: Has "+
"unexpectedly failed: %s", testName, err)
}
if exists {
t.Fatalf("%s: Has "+
"unexpectedly returned that the value exists", testName)
}
}
func TestDatabaseDelete(t *testing.T) {
testForAllDatabaseTypes(t, "TestDatabaseDelete", testDatabaseDelete)
}
func testDatabaseDelete(t *testing.T, db database.Database, testName string) {
// Put a value into the database
key := database.MakeBucket().Key([]byte("key"))
value := []byte("value")
err := db.Put(key, value)
if err != nil {
t.Fatalf("%s: Put "+
"unexpectedly failed: %s", testName, err)
}
// Delete the value
err = db.Delete(key)
if err != nil {
t.Fatalf("%s: Delete "+
"unexpectedly failed: %s", testName, err)
}
// Make sure that Has returns false for the deleted value
exists, err := db.Has(key)
if err != nil {
t.Fatalf("%s: Has "+
"unexpectedly failed: %s", testName, err)
}
if exists {
t.Fatalf("%s: Has "+
"unexpectedly returned that the value exists", testName)
}
}
func TestDatabaseAppendToStoreAndRetrieveFromStore(t *testing.T) {
testForAllDatabaseTypes(t, "TestDatabaseAppendToStoreAndRetrieveFromStore", testDatabaseAppendToStoreAndRetrieveFromStore)
}
func testDatabaseAppendToStoreAndRetrieveFromStore(t *testing.T, db database.Database, testName string) {
// Append some data into the store
storeName := "store"
data := []byte("data")
location, err := db.AppendToStore(storeName, data)
if err != nil {
t.Fatalf("%s: AppendToStore "+
"unexpectedly failed: %s", testName, err)
}
// Retrieve the data and make sure it's equal to what was appended
retrievedData, err := db.RetrieveFromStore(storeName, location)
if err != nil {
t.Fatalf("%s: RetrieveFromStore "+
"unexpectedly failed: %s", testName, err)
}
if !bytes.Equal(retrievedData, data) {
t.Fatalf("%s: RetrieveFromStore "+
"returned unexpected data. Want: %s, got: %s",
testName, string(data), string(retrievedData))
}
// Make sure that an invalid location returns ErrNotFound
fakeLocation := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}
_, err = db.RetrieveFromStore(storeName, fakeLocation)
if err == nil {
t.Fatalf("%s: RetrieveFromStore "+
"unexpectedly succeeded", testName)
}
if !database.IsNotFoundError(err) {
t.Fatalf("%s: RetrieveFromStore "+
"returned wrong error: %s", testName, err)
}
}


@@ -1,85 +1,34 @@
 /*
-Package database provides a block and metadata storage database.
+Package database provides a database for kaspad.
 Overview
-This package provides a database layer to store and retrieve this data in a
-simple and efficient manner.
+This package provides a database layer to store and retrieve data in a simple
+and efficient manner.
-The default backend, ffldb, has a strong focus on speed, efficiency, and
-robustness. It makes use of leveldb for the metadata, flat files for block
-storage, and strict checksums in key areas to ensure data integrity.
+The current backend is ffldb, which makes use of leveldb, flat files, and strict
+checksums in key areas to ensure data integrity.
-A quick overview of the features the database provides is as follows:
+Implementors of additional backends are required to implement the following interfaces:
- - Key/value metadata store
- - Kaspa block storage
- - Efficient retrieval of block headers and regions (transactions, scripts, etc)
- - Read-only and read-write transactions with both manual and managed modes
- - Nested buckets
- - Supports registration of backend databases
- - Comprehensive test coverage
+DataAccessor
+This defines the common interface by which data gets accessed in a generic kaspad
+database. Both the Database and the Transaction interfaces (see below) implement it.
 Database
-The main entry point is the DB interface. It exposes functionality for
-transactional-based access and storage of metadata and block data. It is
-obtained via the Create and Open functions which take a database type string
-that identifies the specific database driver (backend) to use as well as
-arguments specific to the specified driver.
+This defines the interface of a database that can begin transactions and close itself.
-The interface provides facilities for obtaining transactions (the Tx interface)
-that are the basis of all database reads and writes. Unlike some database
-interfaces that support reading and writing without transactions, this interface
-requires transactions even when only reading or writing a single key.
+Transaction
-The Begin function provides an unmanaged transaction while the View and Update
-functions provide a managed transaction. These are described in more detail
-below.
+This defines the interface of a generic kaspad database transaction.
+Note: transactions provide data consistency over the state of the database as it was
+when the transaction started. There is NO guarantee that if one puts data into the
+transaction then it will be available to get within the same transaction.
-Transactions
+Cursor
-The Tx interface provides facilities for rolling back or committing changes that
-took place while the transaction was active. It also provides the root metadata
-bucket under which all keys, values, and nested buckets are stored. A
-transaction can either be read-only or read-write and managed or unmanaged.
-Managed versus Unmanaged Transactions
-A managed transaction is one where the caller provides a function to execute
-within the context of the transaction and the commit or rollback is handled
-automatically depending on whether or not the provided function returns an
-error. Attempting to manually call Rollback or Commit on the managed
-transaction will result in a panic.
-An unmanaged transaction, on the other hand, requires the caller to manually
-call Commit or Rollback when they are finished with it. Leaving transactions
-open for long periods of time can have several adverse effects, so it is
-recommended that managed transactions are used instead.
-Buckets
-The Bucket interface provides the ability to manipulate key/value pairs and
-nested buckets as well as iterate through them.
-The Get, Put, and Delete functions work with key/value pairs, while the Bucket,
-CreateBucket, CreateBucketIfNotExists, and DeleteBucket functions work with
-buckets. The ForEach function allows the caller to provide a function to be
-called with each key/value pair and nested bucket in the current bucket.
-Metadata Bucket
-As discussed above, all of the functions which are used to manipulate key/value
-pairs and nested buckets exist on the Bucket interface. The root metadata
-bucket is the upper-most bucket in which data is stored and is created at the
-same time as the database. Use the Metadata function on the Tx interface
-to retrieve it.
-Nested Buckets
-The CreateBucket and CreateBucketIfNotExists functions on the Bucket interface
-provide the ability to create an arbitrary number of nested buckets. It is
-a good idea to avoid a lot of buckets with little data in them as it could lead
-to poor page utilization depending on the specific driver in use.
+This iterates over database entries given some bucket.
 */
 package database


@@ -1,84 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package database
import (
"fmt"
)
// Driver defines a structure for backend drivers to use when registering
// themselves as a backend that implements the DB interface.
type Driver struct {
// DbType is the identifier used to uniquely identify a specific
// database driver. There can be only one driver with the same name.
DbType string
// Create is the function that will be invoked with all user-specified
// arguments to create the database. This function must return
// ErrDbExists if the database already exists.
Create func(args ...interface{}) (DB, error)
// Open is the function that will be invoked with all user-specified
// arguments to open the database. This function must return
// ErrDbDoesNotExist if the database has not already been created.
Open func(args ...interface{}) (DB, error)
}
// driverList holds all of the registered database backends.
var drivers = make(map[string]*Driver)
// RegisterDriver adds a backend database driver to available interfaces.
// ErrDbTypeRegistered will be returned if the database type for the driver has
// already been registered.
func RegisterDriver(driver Driver) error {
if _, exists := drivers[driver.DbType]; exists {
str := fmt.Sprintf("driver %q is already registered",
driver.DbType)
return makeError(ErrDbTypeRegistered, str, nil)
}
drivers[driver.DbType] = &driver
return nil
}
// SupportedDrivers returns a slice of strings that represent the database
// drivers that have been registered and are therefore supported.
func SupportedDrivers() []string {
supportedDBs := make([]string, 0, len(drivers))
for _, drv := range drivers {
supportedDBs = append(supportedDBs, drv.DbType)
}
return supportedDBs
}
// Create initializes and opens a database for the specified type. The
// arguments are specific to the database type driver. See the documentation
// for the database driver for further details.
//
// ErrDbUnknownType will be returned if the database type is not registered.
func Create(dbType string, args ...interface{}) (DB, error) {
drv, exists := drivers[dbType]
if !exists {
str := fmt.Sprintf("driver %q is not registered", dbType)
return nil, makeError(ErrDbUnknownType, str, nil)
}
return drv.Create(args...)
}
// Open opens an existing database for the specified type. The arguments are
// specific to the database type driver. See the documentation for the database
// driver for further details.
//
// ErrDbUnknownType will be returned if the database type is not registered.
func Open(dbType string, args ...interface{}) (DB, error) {
drv, exists := drivers[dbType]
if !exists {
str := fmt.Sprintf("driver %q is not registered", dbType)
return nil, makeError(ErrDbUnknownType, str, nil)
}
return drv.Open(args...)
}


@@ -1,128 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package database_test
import (
"github.com/pkg/errors"
"testing"
"github.com/kaspanet/kaspad/database"
_ "github.com/kaspanet/kaspad/database/ffldb"
)
// checkDbError ensures the passed error is a database.Error with an error code
// that matches the passed error code.
func checkDbError(t *testing.T, testName string, gotErr error, wantErrCode database.ErrorCode) bool {
dbErr, ok := gotErr.(database.Error)
if !ok {
t.Errorf("%s: unexpected error type - got %T, want %T",
testName, gotErr, database.Error{})
return false
}
if dbErr.ErrorCode != wantErrCode {
t.Errorf("%s: unexpected error code - got %s (%s), want %s",
testName, dbErr.ErrorCode, dbErr.Description,
wantErrCode)
return false
}
return true
}
// TestAddDuplicateDriver ensures that adding a duplicate driver does not
// overwrite an existing one.
func TestAddDuplicateDriver(t *testing.T) {
supportedDrivers := database.SupportedDrivers()
if len(supportedDrivers) == 0 {
t.Errorf("no backends to test")
return
}
dbType := supportedDrivers[0]
// bogusCreateDB is a function which acts as a bogus create and open
// driver function and intentionally returns a failure that can be
// detected if the interface allows a duplicate driver to overwrite an
// existing one.
bogusCreateDB := func(args ...interface{}) (database.DB, error) {
return nil, errors.Errorf("duplicate driver allowed for database "+
"type [%v]", dbType)
}
// Create a driver that tries to replace an existing one. Set its
// create and open functions to a function that causes a test failure if
// they are invoked.
driver := database.Driver{
DbType: dbType,
Create: bogusCreateDB,
Open: bogusCreateDB,
}
testName := "duplicate driver registration"
err := database.RegisterDriver(driver)
if !checkDbError(t, testName, err, database.ErrDbTypeRegistered) {
return
}
}
// TestCreateOpenFail ensures that errors which occur while opening or closing
// a database are handled properly.
func TestCreateOpenFail(t *testing.T) {
// bogusCreateDB is a function which acts as a bogus create and open
// driver function that intentionally returns a failure which can be
// detected.
dbType := "createopenfail"
openError := errors.Errorf("failed to create or open database for "+
"database type [%v]", dbType)
bogusCreateDB := func(args ...interface{}) (database.DB, error) {
return nil, openError
}
// Create and add driver that intentionally fails when created or opened
// to ensure errors on database open and create are handled properly.
driver := database.Driver{
DbType: dbType,
Create: bogusCreateDB,
Open: bogusCreateDB,
}
database.RegisterDriver(driver)
// Ensure creating a database with the new type fails with the expected
// error.
_, err := database.Create(dbType)
if err != openError {
t.Errorf("expected error not received - got: %v, want %v", err,
openError)
return
}
// Ensure opening a database with the new type fails with the expected
// error.
_, err = database.Open(dbType)
if err != openError {
t.Errorf("expected error not received - got: %v, want %v", err,
openError)
return
}
}
// TestCreateOpenUnsupported ensures that attempting to create or open an
// unsupported database type is handled properly.
func TestCreateOpenUnsupported(t *testing.T) {
// Ensure creating a database with an unsupported type fails with the
// expected error.
testName := "create with unsupported database type"
dbType := "unsupported"
_, err := database.Create(dbType)
if !checkDbError(t, testName, err, database.ErrDbUnknownType) {
return
}
// Ensure opening a database with an unsupported type fails with the
// expected error.
testName = "open with unsupported database type"
_, err = database.Open(dbType)
if !checkDbError(t, testName, err, database.ErrDbUnknownType) {
return
}
}


@@ -1,211 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package database
import (
"fmt"
"github.com/pkg/errors"
)
// ErrorCode identifies a kind of error.
type ErrorCode int
// These constants are used to identify a specific database Error.
const (
// **************************************
// Errors related to driver registration.
// **************************************
// ErrDbTypeRegistered indicates two different database drivers
// attempt to register with the same database type.
ErrDbTypeRegistered ErrorCode = iota
// *************************************
// Errors related to database functions.
// *************************************
// ErrDbUnknownType indicates there is no driver registered for
// the specified database type.
ErrDbUnknownType
// ErrDbDoesNotExist indicates open is called for a database that
// does not exist.
ErrDbDoesNotExist
// ErrDbExists indicates create is called for a database that
// already exists.
ErrDbExists
// ErrDbNotOpen indicates a database instance is accessed before
// it is opened or after it is closed.
ErrDbNotOpen
// ErrDbAlreadyOpen indicates open was called on a database that
// is already open.
ErrDbAlreadyOpen
// ErrInvalid indicates the specified database is not valid.
ErrInvalid
// ErrCorruption indicates a checksum failure occurred which invariably
// means the database is corrupt.
ErrCorruption
// ****************************************
// Errors related to database transactions.
// ****************************************
// ErrTxClosed indicates an attempt was made to commit or rollback a
// transaction that has already had one of those operations performed.
ErrTxClosed
// ErrTxNotWritable indicates an operation that requires write access to
// the database was attempted against a read-only transaction.
ErrTxNotWritable
// **************************************
// Errors related to metadata operations.
// **************************************
// ErrBucketNotFound indicates an attempt to access a bucket that has
// not been created yet.
ErrBucketNotFound
// ErrBucketExists indicates an attempt to create a bucket that already
// exists.
ErrBucketExists
// ErrBucketNameRequired indicates an attempt to create a bucket with a
// blank name.
ErrBucketNameRequired
// ErrKeyRequired indicates an attempt to insert a zero-length key.
ErrKeyRequired
// ErrKeyTooLarge indicates an attempt to insert a key that is larger
// than the max allowed key size. The max key size depends on the
// specific backend driver being used. As a general rule, key sizes
// should be relatively small, so this should rarely be an issue.
ErrKeyTooLarge
// ErrValueTooLarge indicates an attempt to insert a value that is larger
// than the max allowed value size. The max value size depends on the
// specific backend driver being used.
ErrValueTooLarge
// ErrIncompatibleValue indicates the value in question is invalid for
// the specific requested operation. For example, trying to create or
// delete a bucket with an existing non-bucket key, attempting to create
// or delete a non-bucket key with an existing bucket key, or trying to
// delete a value via a cursor when it points to a nested bucket.
ErrIncompatibleValue
// ***************************************
// Errors related to block I/O operations.
// ***************************************
// ErrBlockNotFound indicates a block with the provided hash does not
// exist in the database.
ErrBlockNotFound
// ErrBlockExists indicates a block with the provided hash already
// exists in the database.
ErrBlockExists
// ErrBlockRegionInvalid indicates a region that exceeds the bounds of
// the specified block was requested. When the hash provided by the
// region does not correspond to an existing block, the error will be
// ErrBlockNotFound instead.
ErrBlockRegionInvalid
// ***********************************
// Support for driver-specific errors.
// ***********************************
// ErrDriverSpecific indicates the Err field is a driver-specific error.
// This provides a mechanism for drivers to plug-in their own custom
// errors for any situations which aren't already covered by the error
// codes provided by this package.
ErrDriverSpecific
// numErrorCodes is the maximum error code number used in tests.
numErrorCodes
)
// Map of ErrorCode values back to their constant names for pretty printing.
var errorCodeStrings = map[ErrorCode]string{
ErrDbTypeRegistered: "ErrDbTypeRegistered",
ErrDbUnknownType: "ErrDbUnknownType",
ErrDbDoesNotExist: "ErrDbDoesNotExist",
ErrDbExists: "ErrDbExists",
ErrDbNotOpen: "ErrDbNotOpen",
ErrDbAlreadyOpen: "ErrDbAlreadyOpen",
ErrInvalid: "ErrInvalid",
ErrCorruption: "ErrCorruption",
ErrTxClosed: "ErrTxClosed",
ErrTxNotWritable: "ErrTxNotWritable",
ErrBucketNotFound: "ErrBucketNotFound",
ErrBucketExists: "ErrBucketExists",
ErrBucketNameRequired: "ErrBucketNameRequired",
ErrKeyRequired: "ErrKeyRequired",
ErrKeyTooLarge: "ErrKeyTooLarge",
ErrValueTooLarge: "ErrValueTooLarge",
ErrIncompatibleValue: "ErrIncompatibleValue",
ErrBlockNotFound: "ErrBlockNotFound",
ErrBlockExists: "ErrBlockExists",
ErrBlockRegionInvalid: "ErrBlockRegionInvalid",
ErrDriverSpecific: "ErrDriverSpecific",
}
// String returns the ErrorCode as a human-readable name.
func (e ErrorCode) String() string {
if s := errorCodeStrings[e]; s != "" {
return s
}
return fmt.Sprintf("Unknown ErrorCode (%d)", int(e))
}
// Error provides a single type for errors that can happen during database
// operation. It is used to indicate several types of failures including errors
// with caller requests such as specifying invalid block regions or attempting
// to access data against closed database transactions, driver errors, errors
// retrieving data, and errors communicating with database servers.
//
// The caller can use type assertions to determine if an error is an Error and
// access the ErrorCode field to ascertain the specific reason for the failure.
//
// The ErrDriverSpecific error code will also have the Err field set with the
// underlying error. Depending on the backend driver, the Err field might be
// set to the underlying error for other error codes as well.
type Error struct {
ErrorCode ErrorCode // Describes the kind of error
Description string // Human readable description of the issue
Err error // Underlying error
}
// Error satisfies the error interface and prints human-readable errors.
func (e Error) Error() string {
if e.Err != nil {
return e.Description + ": " + e.Err.Error()
}
return e.Description
}
// makeError creates an Error given a set of arguments. The error code must
// be one of the error codes provided by this package.
func makeError(c ErrorCode, desc string, err error) Error {
return Error{ErrorCode: c, Description: desc, Err: err}
}
// IsErrorCode returns whether or not the provided error is a database error
// with the provided error code.
func IsErrorCode(err error, c ErrorCode) bool {
var errError Error
if ok := errors.As(err, &errError); ok {
return errError.ErrorCode == c
}
return false
}


@@ -1,118 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package database
import (
"github.com/pkg/errors"
"testing"
)
// TestErrorCodeStringer tests the stringized output for the ErrorCode type.
func TestErrorCodeStringer(t *testing.T) {
tests := []struct {
in ErrorCode
want string
}{
{ErrDbTypeRegistered, "ErrDbTypeRegistered"},
{ErrDbUnknownType, "ErrDbUnknownType"},
{ErrDbDoesNotExist, "ErrDbDoesNotExist"},
{ErrDbExists, "ErrDbExists"},
{ErrDbNotOpen, "ErrDbNotOpen"},
{ErrDbAlreadyOpen, "ErrDbAlreadyOpen"},
{ErrInvalid, "ErrInvalid"},
{ErrCorruption, "ErrCorruption"},
{ErrTxClosed, "ErrTxClosed"},
{ErrTxNotWritable, "ErrTxNotWritable"},
{ErrBucketNotFound, "ErrBucketNotFound"},
{ErrBucketExists, "ErrBucketExists"},
{ErrBucketNameRequired, "ErrBucketNameRequired"},
{ErrKeyRequired, "ErrKeyRequired"},
{ErrKeyTooLarge, "ErrKeyTooLarge"},
{ErrValueTooLarge, "ErrValueTooLarge"},
{ErrIncompatibleValue, "ErrIncompatibleValue"},
{ErrBlockNotFound, "ErrBlockNotFound"},
{ErrBlockExists, "ErrBlockExists"},
{ErrBlockRegionInvalid, "ErrBlockRegionInvalid"},
{ErrDriverSpecific, "ErrDriverSpecific"},
{0xffff, "Unknown ErrorCode (65535)"},
}
// Detect additional error codes that don't have the stringer added.
if len(tests)-1 != int(TstNumErrorCodes) {
t.Errorf("It appears an error code was added without adding " +
"an associated stringer test")
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
result := test.in.String()
if result != test.want {
t.Errorf("String #%d\ngot: %s\nwant: %s", i, result,
test.want)
continue
}
}
}
// TestError tests the error output for the Error type.
func TestError(t *testing.T) {
t.Parallel()
tests := []struct {
in Error
want string
}{
{
Error{Description: "some error"},
"some error",
},
{
Error{Description: "human-readable error"},
"human-readable error",
},
{
Error{
ErrorCode: ErrDriverSpecific,
Description: "some error",
Err: errors.New("driver-specific error"),
},
"some error: driver-specific error",
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
result := test.in.Error()
if result != test.want {
t.Errorf("Error #%d\n got: %s want: %s", i, result,
test.want)
continue
}
}
}
func TestIsErrorCode(t *testing.T) {
dummyError := errors.New("")
tests := []struct {
err error
code ErrorCode
expectedResult bool
}{
{makeError(ErrBucketExists, "", dummyError), ErrBucketExists, true},
{makeError(ErrBucketExists, "", dummyError), ErrBlockExists, false},
{dummyError, ErrBlockExists, false},
{nil, ErrBlockExists, false},
}
for i, test := range tests {
actualResult := IsErrorCode(test.err, test.code)
if test.expectedResult != actualResult {
t.Errorf("TestIsErrorCode: %d: Expected: %t, but got: %t",
i, test.expectedResult, actualResult)
}
}
}

12
database/errors.go Normal file

@@ -0,0 +1,12 @@
package database
import "errors"
// ErrNotFound denotes that the requested item was not
// found in the database.
var ErrNotFound = errors.New("not found")
// IsNotFoundError checks whether an error is an ErrNotFound.
func IsNotFoundError(err error) bool {
return errors.Is(err, ErrNotFound)
}
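Callers are meant to branch on IsNotFoundError, which matches wrapped errors via errors.Is, rather than compare against ErrNotFound directly; a sketch:
```Go
// getOrDefault returns the stored value for key, or defaultValue
// when the key is absent. A sketch of the intended error handling.
func getOrDefault(db database.Database, key *database.Key, defaultValue []byte) ([]byte, error) {
	value, err := db.Get(key)
	if database.IsNotFoundError(err) {
		// Absence is expected here; fall back to the default.
		return defaultValue, nil
	}
	if err != nil {
		return nil, err
	}
	return value, nil
}
```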


@@ -1,180 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package database_test
import (
"bytes"
"fmt"
"github.com/pkg/errors"
"os"
"path/filepath"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
_ "github.com/kaspanet/kaspad/database/ffldb"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
)
// This example demonstrates creating a new database.
func ExampleCreate() {
// This example assumes the ffldb driver is imported.
//
// import (
// "github.com/kaspanet/kaspad/database"
// _ "github.com/kaspanet/kaspad/database/ffldb"
// )
// Create a database and schedule it to be closed and removed on exit.
// Typically you wouldn't want to remove the database right away like
// this, nor put it in the temp directory, but it's done here to ensure
// the example cleans up after itself.
dbPath := filepath.Join(os.TempDir(), "examplecreate")
db, err := database.Create("ffldb", dbPath, wire.Mainnet)
if err != nil {
fmt.Println(err)
return
}
defer os.RemoveAll(dbPath)
defer db.Close()
// Output:
}
// This example demonstrates creating a new database and using a managed
// read-write transaction to store and retrieve metadata.
func Example_basicUsage() {
// This example assumes the ffldb driver is imported.
//
// import (
// "github.com/kaspanet/kaspad/database"
// _ "github.com/kaspanet/kaspad/database/ffldb"
// )
// Create a database and schedule it to be closed and removed on exit.
// Typically you wouldn't want to remove the database right away like
// this, nor put it in the temp directory, but it's done here to ensure
// the example cleans up after itself.
dbPath := filepath.Join(os.TempDir(), "exampleusage")
// ensure that DB does not exist before test starts
os.RemoveAll(dbPath)
db, err := database.Create("ffldb", dbPath, wire.Mainnet)
if err != nil {
fmt.Println(err)
return
}
defer os.RemoveAll(dbPath)
defer db.Close()
// Use the Update function of the database to perform a managed
// read-write transaction. The transaction will automatically be rolled
// back if the supplied inner function returns a non-nil error.
err = db.Update(func(dbTx database.Tx) error {
// Store a key/value pair directly in the metadata bucket.
// Typically a nested bucket would be used for a given feature,
// but this example is using the metadata bucket directly for
// simplicity.
key := []byte("mykey")
value := []byte("myvalue")
if err := dbTx.Metadata().Put(key, value); err != nil {
return err
}
// Read the key back and ensure it matches.
if !bytes.Equal(dbTx.Metadata().Get(key), value) {
return errors.Errorf("unexpected value for key '%s'", key)
}
// Create a new nested bucket under the metadata bucket.
nestedBucketKey := []byte("mybucket")
nestedBucket, err := dbTx.Metadata().CreateBucket(nestedBucketKey)
if err != nil {
return err
}
// The key from above that was set in the metadata bucket does
// not exist in this new nested bucket.
if nestedBucket.Get(key) != nil {
return errors.Errorf("key '%s' is not expected nil", key)
}
return nil
})
if err != nil {
fmt.Println(err)
return
}
// Output:
}
// This example demonstrates creating a new database, using a managed read-write
// transaction to store a block, and using a managed read-only transaction to
// fetch the block.
func Example_blockStorageAndRetrieval() {
// This example assumes the ffldb driver is imported.
//
// import (
// "github.com/kaspanet/kaspad/database"
// _ "github.com/kaspanet/kaspad/database/ffldb"
// )
// Create a database and schedule it to be closed and removed on exit.
// Typically you wouldn't want to remove the database right away like
// this, nor put it in the temp directory, but it's done here to ensure
// the example cleans up after itself.
dbPath := filepath.Join(os.TempDir(), "exampleblkstorage")
db, err := database.Create("ffldb", dbPath, wire.Mainnet)
if err != nil {
fmt.Println(err)
return
}
defer os.RemoveAll(dbPath)
defer db.Close()
// Use the Update function of the database to perform a managed
// read-write transaction and store the genesis block in the database as
// an example.
err = db.Update(func(dbTx database.Tx) error {
genesisBlock := dagconfig.MainnetParams.GenesisBlock
return dbTx.StoreBlock(util.NewBlock(genesisBlock))
})
if err != nil {
fmt.Println(err)
return
}
// Use the View function of the database to perform a managed read-only
// transaction and fetch the block stored above.
var loadedBlockBytes []byte
err = db.View(func(dbTx database.Tx) error {
genesisHash := dagconfig.MainnetParams.GenesisHash
blockBytes, err := dbTx.FetchBlock(genesisHash)
if err != nil {
return err
}
// As documented, all data fetched from the database is only
// valid during a database transaction in order to support
// zero-copy backends. Thus, make a copy of the data so it
// can be used outside of the transaction.
loadedBlockBytes = make([]byte, len(blockBytes))
copy(loadedBlockBytes, blockBytes)
return nil
})
if err != nil {
fmt.Println(err)
return
}
// Typically at this point, the block could be deserialized via the
// wire.MsgBlock.Deserialize function or used in its serialized form
// depending on need. However, for this example, just display the
// number of serialized bytes to show it was loaded as expected.
fmt.Printf("Serialized block size: %d bytes\n", len(loadedBlockBytes))
// Output:
// Serialized block size: 280 bytes
}


@@ -1,17 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
/*
This test file is part of the database package rather than the
database_test package so it can bridge access to the internals to properly test
cases which are either not possible or can't reliably be tested via the public
interface. The functions, constants, and variables are only exported while the
tests are being run.
*/
package database
// TstNumErrorCodes makes the internal numErrorCodes parameter available to the
// test package.
const TstNumErrorCodes = numErrorCodes


@@ -1,34 +0,0 @@
ffldb
=====
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
[![GoDoc](https://godoc.org/github.com/kaspanet/kaspad/database/ffldb?status.png)](http://godoc.org/github.com/kaspanet/kaspad/database/ffldb)
Package ffldb implements a driver for the database package that uses leveldb for
the backing metadata and flat files for block storage.
This driver is the recommended driver for use with kaspad. It makes use of leveldb
for the metadata, flat files for block storage, and checksums in key areas to
ensure data integrity.
## Usage
This package is a driver for the database package and provides the "ffldb"
database type. The Open and Create functions take the database path as a
string and the block network.
```Go
db, err := database.Open("ffldb", "path/to/database", wire.Mainnet)
if err != nil {
// Handle error
}
```
```Go
db, err := database.Create("ffldb", "path/to/database", wire.Mainnet)
if err != nil {
// Handle error
}
```


@@ -1,97 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package ffldb
import (
"os"
"path/filepath"
"testing"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
)
// BenchmarkBlockHeader benchmarks how long it takes to load the mainnet genesis
// block header.
func BenchmarkBlockHeader(b *testing.B) {
// Start by creating a new database and populating it with the mainnet
// genesis block.
dbPath := filepath.Join(os.TempDir(), "ffldb-benchblkhdr")
_ = os.RemoveAll(dbPath)
db, err := database.Create("ffldb", dbPath, blockDataNet)
if err != nil {
b.Fatal(err)
}
defer os.RemoveAll(dbPath)
defer db.Close()
err = db.Update(func(dbTx database.Tx) error {
block := util.NewBlock(dagconfig.MainnetParams.GenesisBlock)
return dbTx.StoreBlock(block)
})
if err != nil {
b.Fatal(err)
}
b.ReportAllocs()
b.ResetTimer()
err = db.View(func(dbTx database.Tx) error {
blockHash := dagconfig.MainnetParams.GenesisHash
for i := 0; i < b.N; i++ {
_, err := dbTx.FetchBlockHeader(blockHash)
if err != nil {
return err
}
}
return nil
})
if err != nil {
b.Fatal(err)
}
// Don't benchmark teardown.
b.StopTimer()
}
// BenchmarkBlock benchmarks how long it takes to load the mainnet genesis
// block.
func BenchmarkBlock(b *testing.B) {
// Start by creating a new database and populating it with the mainnet
// genesis block.
dbPath := filepath.Join(os.TempDir(), "ffldb-benchblk")
_ = os.RemoveAll(dbPath)
db, err := database.Create("ffldb", dbPath, blockDataNet)
if err != nil {
b.Fatal(err)
}
defer os.RemoveAll(dbPath)
defer db.Close()
err = db.Update(func(dbTx database.Tx) error {
block := util.NewBlock(dagconfig.MainnetParams.GenesisBlock)
return dbTx.StoreBlock(block)
})
if err != nil {
b.Fatal(err)
}
b.ReportAllocs()
b.ResetTimer()
err = db.View(func(dbTx database.Tx) error {
blockHash := dagconfig.MainnetParams.GenesisHash
for i := 0; i < b.N; i++ {
_, err := dbTx.FetchBlock(blockHash)
if err != nil {
return err
}
}
return nil
})
if err != nil {
b.Fatal(err)
}
// Don't benchmark teardown.
b.StopTimer()
}


@@ -1,765 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
// This file contains the implementation functions for reading, writing, and
// otherwise working with the flat files that house the actual blocks.
package ffldb
import (
"container/list"
"encoding/binary"
"fmt"
"github.com/pkg/errors"
"hash/crc32"
"io"
"os"
"path/filepath"
"sync"
"syscall"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
const (
// maxOpenFiles is the max number of open files to maintain in the
// open blocks cache. Note that this does not include the current
// write file, so there will typically be one more than this value open.
maxOpenFiles = 25
// maxBlockFileSize is the maximum size for each file used to store
// blocks.
//
// NOTE: The current code uses uint32 for all offsets, so this value
// must be less than 2^32 (4 GiB). This is also why it's a typed
// constant.
maxBlockFileSize uint32 = 512 * 1024 * 1024 // 512 MiB
)
var (
// castagnoli houses the Castagnoli polynomial used for CRC-32 checksums.
castagnoli = crc32.MakeTable(crc32.Castagnoli)
)
// filer is an interface which acts very similarly to an *os.File and is typically
// implemented by it. It exists so the test code can provide mock files for
// properly testing corruption and file system issues.
type filer interface {
io.Closer
io.WriterAt
io.ReaderAt
Truncate(size int64) error
Sync() error
}
// lockableFile represents a block file on disk that has been opened for either
// read or read/write access. It also contains a read-write mutex to support
// multiple concurrent readers.
type lockableFile struct {
sync.RWMutex
file filer
}
// writeCursor represents the current file and offset of the block file on disk
// for performing all writes. It also contains a read-write mutex to support
// multiple concurrent readers which can reuse the file handle.
type writeCursor struct {
sync.RWMutex
// curFile is the current block file that will be appended to when
// writing new blocks.
curFile *lockableFile
// curFileNum is the current block file number and is used to allow
// readers to use the same open file handle.
curFileNum uint32
// curOffset is the offset in the current write block file where the
// next new block will be written.
curOffset uint32
}
// blockStore houses information used to handle reading and writing blocks (and
// part of blocks) into flat files with support for multiple concurrent readers.
type blockStore struct {
// network is the specific network to use in the flat files for each
// block.
network wire.KaspaNet
// basePath is the base path used for the flat block files and metadata.
basePath string
// maxBlockFileSize is the maximum size for each file used to store
// blocks. It is defined on the store so the whitebox tests can
// override the value.
maxBlockFileSize uint32
// maxOpenFiles is the max number of open files to maintain in the
// open blocks cache. Note that this does not include the current
// write file, so there will typically be one more than this value open.
// It is defined on the store so the whitebox tests can override the value.
maxOpenFiles int
// The following fields are related to the flat files which hold the
// actual blocks. The number of open files is limited by maxOpenFiles.
//
// obfMutex protects concurrent access to the openBlockFiles map. It is
// a RWMutex so multiple readers can simultaneously access open files.
//
// openBlockFiles houses the open file handles for existing block files
// which have been opened read-only along with an individual RWMutex.
// This scheme allows multiple concurrent readers to the same file while
// preventing the file from being closed out from under them.
//
// lruMutex protects concurrent access to the least recently used list
// and lookup map.
//
// openBlocksLRU tracks how the open files are referenced by pushing the
// most recently used files to the front of the list thereby trickling
// the least recently used files to the end of the list. When a file needs
// to be closed due to exceeding the max number of allowed open
// files, the one at the end of the list is closed.
//
// fileNumToLRUElem is a mapping between a specific block file number
// and the associated list element on the least recently used list.
//
// Thus, with the combination of these fields, the database supports
// concurrent non-blocking reads across multiple and individual files
// along with intelligently limiting the number of open file handles by
// closing the least recently used files as needed.
//
// NOTE: The locking order used throughout is well-defined and MUST be
// followed. Failure to do so could lead to deadlocks. In particular,
// the locking order is as follows:
// 1) obfMutex
// 2) lruMutex
// 3) writeCursor mutex
// 4) specific file mutexes
//
// None of the mutexes are required to be locked at the same time, and
// often aren't. However, if they are to be locked simultaneously, they
// MUST be locked in the order previously specified.
//
// Due to the high performance and multi-read concurrency requirements,
// write locks should only be held for the minimum time necessary.
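// For example (illustrative, not part of the original file): code that
// must hold both the open block files mutex and the LRU mutex locks them
// in the documented order:
//
//	s.obfMutex.Lock()
//	s.lruMutex.Lock()
//	// ... work with openBlockFiles, openBlocksLRU, fileNumToLRUElem ...
//	s.lruMutex.Unlock()
//	s.obfMutex.Unlock()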
obfMutex sync.RWMutex
lruMutex sync.Mutex
openBlocksLRU *list.List // Contains uint32 block file numbers.
fileNumToLRUElem map[uint32]*list.Element
openBlockFiles map[uint32]*lockableFile
// writeCursor houses the state for the current file and location that
// new blocks are written to.
writeCursor *writeCursor
// These functions are set to openFile, openWriteFile, and deleteFile by
// default, but are exposed here to allow the whitebox tests to replace
// them when working with mock files.
openFileFunc func(fileNum uint32) (*lockableFile, error)
openWriteFileFunc func(fileNum uint32) (filer, error)
deleteFileFunc func(fileNum uint32) error
}
// blockLocation identifies a particular block file and location.
type blockLocation struct {
blockFileNum uint32
fileOffset uint32
blockLen uint32
}
// deserializeBlockLoc deserializes the passed serialized block location
// information. This is data stored into the block index metadata for each
// block. The serialized data passed to this function MUST be at least
// blockLocSize bytes or it will panic. The error check is avoided here because
// this information will always be coming from the block index which includes a
// checksum to detect corruption. Thus it is safe to use this unchecked here.
func deserializeBlockLoc(serializedLoc []byte) blockLocation {
// The serialized block location format is:
//
// [0:4] Block file (4 bytes)
// [4:8] File offset (4 bytes)
// [8:12] Block length (4 bytes)
return blockLocation{
blockFileNum: byteOrder.Uint32(serializedLoc[0:4]),
fileOffset: byteOrder.Uint32(serializedLoc[4:8]),
blockLen: byteOrder.Uint32(serializedLoc[8:12]),
}
}
// serializeBlockLoc returns the serialization of the passed block location.
// This is data to be stored into the block index metadata for each block.
func serializeBlockLoc(loc blockLocation) []byte {
// The serialized block location format is:
//
// [0:4] Block file (4 bytes)
// [4:8] File offset (4 bytes)
// [8:12] Block length (4 bytes)
var serializedData [12]byte
byteOrder.PutUint32(serializedData[0:4], loc.blockFileNum)
byteOrder.PutUint32(serializedData[4:8], loc.fileOffset)
byteOrder.PutUint32(serializedData[8:12], loc.blockLen)
return serializedData[:]
}
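// Illustrative note (not part of the original file): the two helpers above
// are inverses, so a round-trip through them preserves the location. A quick
// sanity check might look like:
//
//	loc := blockLocation{blockFileNum: 2, fileOffset: 4096, blockLen: 292}
//	if deserializeBlockLoc(serializeBlockLoc(loc)) != loc {
//		panic("blockLocation round-trip mismatch")
//	}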
// blockFilePath returns the file path for the provided block file number.
func blockFilePath(dbPath string, fileNum uint32) string {
// Choose 9 digits of precision for the filenames. 9 digits provide
// 10^9 files @ 512MiB each, for a total of ~476.84PiB.
fileName := fmt.Sprintf("%09d.fdb", fileNum)
return filepath.Join(dbPath, fileName)
}
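// For example (illustrative): on a Unix-like system,
// blockFilePath("path/to/database", 0) returns
// "path/to/database/000000000.fdb".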
// openWriteFile returns a file handle for the passed flat file number in
// read/write mode. The file will be created if needed. It is typically used
// for the current file that will have all new data appended. Unlike openFile,
// this function does not keep track of the open file and it is not subject to
// the maxOpenFiles limit.
func (s *blockStore) openWriteFile(fileNum uint32) (filer, error) {
// The current block file needs to be read-write so it is possible to
// append to it. Also, it shouldn't be subject to the least recently
// used file tracking.
filePath := blockFilePath(s.basePath, fileNum)
file, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE, 0666)
if err != nil {
str := fmt.Sprintf("failed to open file %q: %s", filePath, err)
return nil, makeDbErr(database.ErrDriverSpecific, str, err)
}
return file, nil
}
// openFile returns a read-only file handle for the passed flat file number.
// The function also keeps track of the open files, performs least recently
// used tracking, and limits the number of open files to maxOpenFiles by closing
// the least recently used file as needed.
//
// This function MUST be called with the overall files mutex (s.obfMutex) locked
// for WRITES.
func (s *blockStore) openFile(fileNum uint32) (*lockableFile, error) {
// Open the appropriate file as read-only.
filePath := blockFilePath(s.basePath, fileNum)
file, err := os.Open(filePath)
if err != nil {
return nil, makeDbErr(database.ErrDriverSpecific, err.Error(),
err)
}
blockFile := &lockableFile{file: file}
// Close the least recently used file if opening this file pushes the
// number of open files over the max allowed. This is not done until
// after the file is opened so that, if the open fails, there is no
// need to close any files.
//
// A write lock is required on the LRU list here to protect against
// modifications happening as already open files are read from and
// shuffled to the front of the list.
//
// Also, add the file that was just opened to the front of the least
// recently used list to indicate it is the most recently used file and
// therefore should be closed last.
s.lruMutex.Lock()
lruList := s.openBlocksLRU
if lruList.Len() >= s.maxOpenFiles {
lruFileNum := lruList.Remove(lruList.Back()).(uint32)
oldBlockFile := s.openBlockFiles[lruFileNum]
// Close the old file under the write lock for the file in case
// any readers are currently reading from it so it's not closed
// out from under them.
oldBlockFile.Lock()
_ = oldBlockFile.file.Close()
oldBlockFile.Unlock()
delete(s.openBlockFiles, lruFileNum)
delete(s.fileNumToLRUElem, lruFileNum)
}
s.fileNumToLRUElem[fileNum] = lruList.PushFront(fileNum)
s.lruMutex.Unlock()
// Store a reference to it in the open block files map.
s.openBlockFiles[fileNum] = blockFile
return blockFile, nil
}
// deleteFile removes the block file for the passed flat file number. The file
// must already be closed and it is the responsibility of the caller to do any
// other state cleanup necessary.
func (s *blockStore) deleteFile(fileNum uint32) error {
filePath := blockFilePath(s.basePath, fileNum)
if err := os.Remove(filePath); err != nil {
return makeDbErr(database.ErrDriverSpecific, err.Error(), err)
}
return nil
}
// blockFile attempts to return an existing file handle for the passed flat file
// number if it is already open as well as marking it as most recently used. It
// will also open the file when it's not already open subject to the rules
// described in openFile.
//
// NOTE: The returned block file will already have the read lock acquired and
// the caller MUST call .RUnlock() to release it once it has finished all read
// operations. This is necessary because otherwise it would be possible for a
// separate goroutine to close the file after it is returned from here, but
// before the caller has acquired a read lock.
func (s *blockStore) blockFile(fileNum uint32) (*lockableFile, error) {
// When the requested block file is open for writes, return it.
wc := s.writeCursor
wc.RLock()
if fileNum == wc.curFileNum && wc.curFile.file != nil {
obf := wc.curFile
obf.RLock()
wc.RUnlock()
return obf, nil
}
wc.RUnlock()
// Try to return an open file under the overall files read lock.
s.obfMutex.RLock()
if obf, ok := s.openBlockFiles[fileNum]; ok {
s.lruMutex.Lock()
s.openBlocksLRU.MoveToFront(s.fileNumToLRUElem[fileNum])
s.lruMutex.Unlock()
obf.RLock()
s.obfMutex.RUnlock()
return obf, nil
}
s.obfMutex.RUnlock()
// Since the file isn't open already, need to check the open block files
// map again under write lock in case multiple readers got here and a
// separate one is already opening the file.
s.obfMutex.Lock()
if obf, ok := s.openBlockFiles[fileNum]; ok {
obf.RLock()
s.obfMutex.Unlock()
return obf, nil
}
// The file isn't open, so open it while potentially closing the least
// recently used one as needed.
obf, err := s.openFileFunc(fileNum)
if err != nil {
s.obfMutex.Unlock()
return nil, err
}
obf.RLock()
s.obfMutex.Unlock()
return obf, nil
}
// writeData is a helper function for writeBlock which writes the provided data
// at the current write offset and updates the write cursor accordingly. The
// field name parameter is only used when there is an error to provide a nicer
// error message.
//
// The write cursor will be advanced by the number of bytes actually
// written even in the event of failure.
//
// NOTE: This function MUST be called with the write cursor current file lock
// held and must only be called during a write transaction so it is effectively
// locked for writes. Also, the write cursor current file must NOT be nil.
func (s *blockStore) writeData(data []byte, fieldName string) error {
wc := s.writeCursor
n, err := wc.curFile.file.WriteAt(data, int64(wc.curOffset))
wc.curOffset += uint32(n)
if err != nil {
var pathErr *os.PathError
if ok := errors.As(err, &pathErr); ok && pathErr.Err == syscall.ENOSPC {
log.Errorf("No space left on the hard disk, exiting...")
os.Exit(1)
}
str := fmt.Sprintf("failed to write %s to file %d at "+
"offset %d: %s", fieldName, wc.curFileNum,
wc.curOffset-uint32(n), err)
return makeDbErr(database.ErrDriverSpecific, str, err)
}
return nil
}
// writeBlock appends the specified raw block bytes to the store's write cursor
// location and increments it accordingly. When the block would exceed the max
// file size for the current flat file, this function will close the current
// file, create the next file, update the write cursor, and write the block to
// the new file.
//
// The write cursor will also be advanced by the number of bytes actually
// written even in the event of failure.
//
// Format: <network><block length><serialized block><checksum>
func (s *blockStore) writeBlock(rawBlock []byte) (blockLocation, error) {
// Compute how many bytes will be written.
// 4 bytes each for block network + 4 bytes for block length +
// length of raw block + 4 bytes for checksum.
blockLen := uint32(len(rawBlock))
fullLen := blockLen + 12
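// For example (illustrative): a 280-byte serialized block occupies
// 4 (network) + 4 (length) + 280 (block) + 4 (checksum) = 292 bytes
// in the flat file.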
// Move to the next block file if adding the new block would exceed the
// max allowed size for the current block file. Also detect overflow
// to be paranoid; even though it isn't currently possible, numbers
// might change in the future to make it possible.
//
// NOTE: The writeCursor.curOffset field isn't protected by the mutex
// since it's only read/changed during this function which can only be
// called during a write transaction, of which there can be only one at
// a time.
wc := s.writeCursor
finalOffset := wc.curOffset + fullLen
if finalOffset < wc.curOffset || finalOffset > s.maxBlockFileSize {
// This is done under the write cursor lock since the curFileNum
// field is accessed elsewhere by readers.
//
// Close the current write file to force a read-only reopen
// with LRU tracking. The close is done under the write lock
// for the file to prevent it from being closed out from under
// any readers currently reading from it.
wc.Lock()
wc.curFile.Lock()
if wc.curFile.file != nil {
_ = wc.curFile.file.Close()
wc.curFile.file = nil
}
wc.curFile.Unlock()
// Start writes into next file.
wc.curFileNum++
wc.curOffset = 0
wc.Unlock()
}
// All writes are done under the write lock for the file to ensure any
// readers are finished and blocked first.
wc.curFile.Lock()
defer wc.curFile.Unlock()
// Open the current file if needed. This will typically only be the
// case when moving to the next file to write to or on initial database
// load. However, it might also be the case if rollbacks happened after
// file writes started during a transaction commit.
if wc.curFile.file == nil {
file, err := s.openWriteFileFunc(wc.curFileNum)
if err != nil {
return blockLocation{}, err
}
wc.curFile.file = file
}
// Kaspa network.
origOffset := wc.curOffset
hasher := crc32.New(castagnoli)
var scratch [4]byte
byteOrder.PutUint32(scratch[:], uint32(s.network))
if err := s.writeData(scratch[:], "network"); err != nil {
return blockLocation{}, err
}
_, _ = hasher.Write(scratch[:])
// Block length.
byteOrder.PutUint32(scratch[:], blockLen)
if err := s.writeData(scratch[:], "block length"); err != nil {
return blockLocation{}, err
}
_, _ = hasher.Write(scratch[:])
// Serialized block.
if err := s.writeData(rawBlock[:], "block"); err != nil {
return blockLocation{}, err
}
_, _ = hasher.Write(rawBlock)
// Castagnoli CRC-32 as a checksum of all the previous.
if err := s.writeData(hasher.Sum(nil), "checksum"); err != nil {
return blockLocation{}, err
}
loc := blockLocation{
blockFileNum: wc.curFileNum,
fileOffset: origOffset,
blockLen: fullLen,
}
return loc, nil
}
// readBlock reads the specified block record and returns the serialized block.
// It ensures the integrity of the block data by checking that the serialized
// network matches the current network associated with the block store and
// comparing the calculated checksum against the one stored in the flat file.
// This function also automatically handles all file management such as opening
// and closing files as necessary to stay within the maximum allowed open files
// limit.
//
// Returns ErrDriverSpecific if the data fails to read for any reason and
// ErrCorruption if the checksum of the read data doesn't match the checksum
// read from the file.
//
// Format: <network><block length><serialized block><checksum>
func (s *blockStore) readBlock(hash *daghash.Hash, loc blockLocation) ([]byte, error) {
// Get the referenced block file handle opening the file as needed. The
// function also handles closing files as needed to avoid going over the
// max allowed open files.
blockFile, err := s.blockFile(loc.blockFileNum)
if err != nil {
return nil, err
}
serializedData := make([]byte, loc.blockLen)
n, err := blockFile.file.ReadAt(serializedData, int64(loc.fileOffset))
blockFile.RUnlock()
if err != nil {
str := fmt.Sprintf("failed to read block %s from file %d, "+
"offset %d: %s", hash, loc.blockFileNum, loc.fileOffset,
err)
return nil, makeDbErr(database.ErrDriverSpecific, str, err)
}
// Calculate the checksum of the read data and ensure it matches the
// serialized checksum. This will detect any data corruption in the
// flat file without having to do much more expensive merkle root
// calculations on the loaded block.
serializedChecksum := binary.BigEndian.Uint32(serializedData[n-4:])
calculatedChecksum := crc32.Checksum(serializedData[:n-4], castagnoli)
if serializedChecksum != calculatedChecksum {
str := fmt.Sprintf("block data for block %s checksum "+
"does not match - got %x, want %x", hash,
calculatedChecksum, serializedChecksum)
return nil, makeDbErr(database.ErrCorruption, str, nil)
}
// The network associated with the block must match the current active
// network, otherwise somebody probably put the block files for the
// wrong network in the directory.
serializedNet := byteOrder.Uint32(serializedData[:4])
if serializedNet != uint32(s.network) {
str := fmt.Sprintf("block data for block %s is for the "+
"wrong network - got %d, want %d", hash, serializedNet,
uint32(s.network))
return nil, makeDbErr(database.ErrDriverSpecific, str, nil)
}
// The raw block excludes the network, length of the block, and
// checksum.
return serializedData[8 : n-4], nil
}
// readBlockRegion reads the specified amount of data at the provided offset for
// a given block location. The offset is relative to the start of the
// serialized block (as opposed to the beginning of the block record). This
// function automatically handles all file management such as opening and
// closing files as necessary to stay within the maximum allowed open files
// limit.
//
// Returns ErrDriverSpecific if the data fails to read for any reason.
func (s *blockStore) readBlockRegion(loc blockLocation, offset, numBytes uint32) ([]byte, error) {
// Get the referenced block file handle opening the file as needed. The
// function also handles closing files as needed to avoid going over the
// max allowed open files.
blockFile, err := s.blockFile(loc.blockFileNum)
if err != nil {
return nil, err
}
// Regions are offsets into the actual block; however, the serialized
// data for a block includes an initial 4 bytes for network + 4 bytes
// for block length. Thus, add 8 bytes to adjust.
readOffset := loc.fileOffset + 8 + offset
serializedData := make([]byte, numBytes)
_, err = blockFile.file.ReadAt(serializedData, int64(readOffset))
blockFile.RUnlock()
if err != nil {
str := fmt.Sprintf("failed to read region from block file %d, "+
"offset %d, len %d: %s", loc.blockFileNum, readOffset,
numBytes, err)
return nil, makeDbErr(database.ErrDriverSpecific, str, err)
}
return serializedData, nil
}
// syncBlocks performs a file system sync on the flat file associated with the
// store's current write cursor. It is safe to call even when there is not a
// current write file in which case it will have no effect.
//
// This is used when flushing cached metadata updates to disk to ensure all the
// block data is fully written before updating the metadata. This ensures the
// metadata and block data can be properly reconciled in failure scenarios.
func (s *blockStore) syncBlocks() error {
wc := s.writeCursor
wc.RLock()
defer wc.RUnlock()
// Nothing to do if there is no current file associated with the write
// cursor.
wc.curFile.RLock()
defer wc.curFile.RUnlock()
if wc.curFile.file == nil {
return nil
}
// Sync the file to disk.
if err := wc.curFile.file.Sync(); err != nil {
str := fmt.Sprintf("failed to sync file %d: %s", wc.curFileNum,
err)
return makeDbErr(database.ErrDriverSpecific, str, err)
}
return nil
}
// handleRollback rolls the block files on disk back to the provided file number
// and offset. This involves potentially deleting and truncating the files that
// were partially written.
//
// There are effectively two scenarios to consider here:
// 1) Transient write failures from which recovery is possible
// 2) More permanent failures such as hard disk death and/or removal
//
// In either case, the write cursor will be repositioned to the old block file
// offset regardless of any other errors that occur while attempting to undo
// writes.
//
// For the first scenario, this will lead to any data which failed to be undone
// being overwritten and thus behaves as desired as the system continues to run.
//
// For the second scenario, the metadata which stores the current write cursor
// position within the block files will not have been updated yet and thus if
// the system eventually recovers (perhaps the hard drive is reconnected), it
// will also lead to any data which failed to be undone being overwritten and
// thus behaves as desired.
//
// Therefore, any errors are simply logged at a warning level rather than being
// returned since there is nothing more that could be done about it anyways.
func (s *blockStore) handleRollback(oldBlockFileNum, oldBlockOffset uint32) {
// Grab the write cursor mutex since it is modified throughout this
// function.
wc := s.writeCursor
wc.Lock()
defer wc.Unlock()
// Nothing to do if the rollback point is the same as the current write
// cursor.
if wc.curFileNum == oldBlockFileNum && wc.curOffset == oldBlockOffset {
return
}
// Regardless of any failures that happen below, reposition the write
// cursor to the old block file and offset.
defer func() {
wc.curFileNum = oldBlockFileNum
wc.curOffset = oldBlockOffset
}()
log.Debugf("ROLLBACK: Rolling back to file %d, offset %d",
oldBlockFileNum, oldBlockOffset)
// Close the current write file if it needs to be deleted. Then delete
// all files that are newer than the provided rollback file while
// also moving the write cursor file backwards accordingly.
if wc.curFileNum > oldBlockFileNum {
wc.curFile.Lock()
if wc.curFile.file != nil {
_ = wc.curFile.file.Close()
wc.curFile.file = nil
}
wc.curFile.Unlock()
}
for ; wc.curFileNum > oldBlockFileNum; wc.curFileNum-- {
if err := s.deleteFileFunc(wc.curFileNum); err != nil {
log.Warnf("ROLLBACK: Failed to delete block file "+
"number %d: %s", wc.curFileNum, err)
return
}
}
// Open the file for the current write cursor if needed.
wc.curFile.Lock()
if wc.curFile.file == nil {
obf, err := s.openWriteFileFunc(wc.curFileNum)
if err != nil {
wc.curFile.Unlock()
log.Warnf("ROLLBACK: %s", err)
return
}
wc.curFile.file = obf
}
// Truncate the file to the provided rollback offset.
if err := wc.curFile.file.Truncate(int64(oldBlockOffset)); err != nil {
wc.curFile.Unlock()
log.Warnf("ROLLBACK: Failed to truncate file %d: %s",
wc.curFileNum, err)
return
}
// Sync the file to disk.
err := wc.curFile.file.Sync()
wc.curFile.Unlock()
if err != nil {
log.Warnf("ROLLBACK: Failed to sync file %d: %s",
wc.curFileNum, err)
return
}
}
// scanBlockFiles searches the database directory for all flat block files to
// find the end of the most recent file. This position is considered the
// current write cursor which is also stored in the metadata. Thus, it is used
// to detect unexpected shutdowns in the middle of writes so the block files
// can be reconciled.
func scanBlockFiles(dbPath string) (int, uint32) {
lastFile := -1
fileLen := uint32(0)
for i := 0; ; i++ {
filePath := blockFilePath(dbPath, uint32(i))
st, err := os.Stat(filePath)
if err != nil {
break
}
lastFile = i
fileLen = uint32(st.Size())
}
log.Tracef("Scan found latest block file #%d with length %d", lastFile,
fileLen)
return lastFile, fileLen
}
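// Illustrative note (not part of the original file): when no block files
// exist yet, scanBlockFiles returns (-1, 0), which newBlockStore below maps
// to file number 0 at offset 0.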
// newBlockStore returns a new block store with the current block file number
// and offset set and all fields initialized.
func newBlockStore(basePath string, network wire.KaspaNet) *blockStore {
// Look for the end of the latest block file to determine what the
// write cursor position is from the viewpoint of the block files on
// disk.
fileNum, fileOff := scanBlockFiles(basePath)
if fileNum == -1 {
fileNum = 0
fileOff = 0
}
store := &blockStore{
network: network,
basePath: basePath,
maxBlockFileSize: maxBlockFileSize,
maxOpenFiles: maxOpenFiles,
openBlockFiles: make(map[uint32]*lockableFile),
openBlocksLRU: list.New(),
fileNumToLRUElem: make(map[uint32]*list.Element),
writeCursor: &writeCursor{
curFile: &lockableFile{},
curFileNum: uint32(fileNum),
curOffset: fileOff,
},
}
store.openFileFunc = store.openFile
store.openWriteFileFunc = store.openWriteFile
store.deleteFileFunc = store.deleteFile
return store
}


@@ -1,108 +0,0 @@
package ffldb
import (
"os"
"testing"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
func TestDeleteFile(t *testing.T) {
testBlock := util.NewBlock(wire.NewMsgBlock(
wire.NewBlockHeader(1, []*daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, 0, 0)))
tests := []struct {
fileNum uint32
expectedErr bool
}{
{0, false},
{1, true},
}
for _, test := range tests {
func() {
pdb := newTestDb("TestDeleteFile", t)
defer func() {
if !pdb.closed {
pdb.Close()
}
}()
err := pdb.Update(func(dbTx database.Tx) error {
return dbTx.StoreBlock(testBlock)
})
if err != nil {
t.Fatalf("TestDeleteFile: Error storing block: %s", err)
}
err = pdb.Close()
if err != nil {
t.Fatalf("TestDeleteFile: Error closing file before deletion: %s", err)
}
err = pdb.store.deleteFile(test.fileNum)
if (err != nil) != test.expectedErr {
t.Errorf("TestDeleteFile: %d: Expected error status: %t, but got: %t",
test.fileNum, test.expectedErr, (err != nil))
}
if err == nil {
filePath := blockFilePath(pdb.store.basePath, test.fileNum)
if _, err := os.Stat(filePath); !os.IsNotExist(err) {
t.Errorf("TestDeleteFile: %d: File %s still exists", test.fileNum, filePath)
}
}
}()
}
}
// TestHandleRollbackErrors tests all error-cases in *blockStore.handleRollback().
// The non-error-cases are tested in the more general tests.
// Since handleRollback just logs errors, this test simply causes all error-cases to be hit,
// makes sure no panic occurs, and ensures the writeCursor was updated correctly.
func TestHandleRollbackErrors(t *testing.T) {
testBlock := util.NewBlock(wire.NewMsgBlock(
wire.NewBlockHeader(1, []*daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, 0, 0)))
testBlockSize := uint32(testBlock.MsgBlock().SerializeSize())
tests := []struct {
name string
fileNum uint32
offset uint32
}{
// offset should be size of block + 12 bytes for block network, length, and checksum
{"Nothing to rollback", 1, testBlockSize + 12},
}
for _, test := range tests {
func() {
pdb := newTestDb("TestHandleRollbackErrors", t)
defer pdb.Close()
// Set maxBlockFileSize to testBlockSize so that writeCursor.curFileNum increments
pdb.store.maxBlockFileSize = testBlockSize
err := pdb.Update(func(dbTx database.Tx) error {
return dbTx.StoreBlock(testBlock)
})
if err != nil {
t.Fatalf("TestHandleRollbackErrors: %s: Error adding test block to database: %s", test.name, err)
}
pdb.store.handleRollback(test.fileNum, test.offset)
if pdb.store.writeCursor.curFileNum != test.fileNum {
t.Errorf("TestHandleRollbackErrors: %s: Expected fileNum: %d, but got: %d",
test.name, test.fileNum, pdb.store.writeCursor.curFileNum)
}
if pdb.store.writeCursor.curOffset != test.offset {
t.Errorf("TestHandleRollbackErrors: %s: offset fileNum: %d, but got: %d",
test.name, test.offset, pdb.store.writeCursor.curOffset)
}
}()
}
}


@@ -1,43 +0,0 @@
package ffldb
import (
"os"
"path"
"path/filepath"
"testing"
"github.com/kaspanet/kaspad/wire"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/opt"
)
func newTestDb(testName string, t *testing.T) *db {
dbPath := path.Join(os.TempDir(), "db_test", testName)
err := os.RemoveAll(dbPath)
if err != nil && !os.IsNotExist(err) {
t.Fatalf("%s: Error deleting database folder before starting: %s", testName, err)
}
network := wire.Simnet
opts := opt.Options{
ErrorIfExist: true,
Strict: opt.DefaultStrict,
Compression: opt.NoCompression,
Filter: filter.NewBloomFilter(10),
}
metadataDbPath := filepath.Join(dbPath, metadataDbName)
ldb, err := leveldb.OpenFile(metadataDbPath, &opts)
if err != nil {
t.Errorf("%s: Error opening metadataDbPath: %s", testName, err)
}
err = initDB(ldb)
if err != nil {
t.Errorf("%s: Error initializing metadata Db: %s", testName, err)
}
store := newBlockStore(dbPath, network)
cache := newDbCache(ldb, store, defaultCacheSize, defaultFlushSecs)
return &db{store: store, cache: cache}
}

File diff suppressed because it is too large


@@ -1,658 +0,0 @@
package ffldb
import (
"bytes"
"testing"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
// TestCursorDeleteErrors tests all error-cases in *cursor.Delete().
// The non-error-cases are tested in the more general tests.
func TestCursorDeleteErrors(t *testing.T) {
pdb := newTestDb("TestCursorDeleteErrors", t)
nestedBucket := []byte("nestedBucket")
key := []byte("key")
value := []byte("value")
err := pdb.Update(func(dbTx database.Tx) error {
metadata := dbTx.Metadata()
_, err := metadata.CreateBucket(nestedBucket)
if err != nil {
return err
}
return metadata.Put(key, value)
})
if err != nil {
t.Fatalf("TestCursorDeleteErrors: Error setting up test-database: %s", err)
}
// Check for error when attempted to delete a bucket
err = pdb.Update(func(dbTx database.Tx) error {
cursor := dbTx.Metadata().Cursor()
found := false
for ok := cursor.First(); ok; ok = cursor.Next() {
if bytes.Equal(cursor.Key(), nestedBucket) {
found = true
break
}
}
if !found {
t.Errorf("TestCursorDeleteErrors: Key '%s' not found", string(nestedBucket))
}
err := cursor.Delete()
if !database.IsErrorCode(err, database.ErrIncompatibleValue) {
t.Errorf("TestCursorDeleteErrors: Expected error of type ErrIncompatibleValue, "+
"when deleting bucket, but got %v", err)
}
return nil
})
if err != nil {
t.Fatalf("TestCursorDeleteErrors: Unexpected error from pdb.Update "+
"when attempting to delete bucket: %s", err)
}
// Check for error when transaction is not writable
err = pdb.View(func(dbTx database.Tx) error {
cursor := dbTx.Metadata().Cursor()
if !cursor.First() {
t.Fatal("TestCursorDeleteErrors: Nothing in cursor when testing for delete in " +
"non-writable transaction")
}
err := cursor.Delete()
if !database.IsErrorCode(err, database.ErrTxNotWritable) {
t.Errorf("TestCursorDeleteErrors: Expected error of type ErrTxNotWritable "+
"when calling .Delete() on non-writable transaction, but got '%v' instead", err)
}
return nil
})
if err != nil {
t.Fatalf("TestCursorDeleteErrors: Unexpected error from pdb.Update "+
"when attempting to delete on non-writable transaction: %s", err)
}
// Check for error when cursor was exhausted
err = pdb.Update(func(dbTx database.Tx) error {
cursor := dbTx.Metadata().Cursor()
for ok := cursor.First(); ok; ok = cursor.Next() {
}
err := cursor.Delete()
if !database.IsErrorCode(err, database.ErrIncompatibleValue) {
t.Errorf("TestCursorDeleteErrors: Expected error of type ErrIncompatibleValue "+
"when calling .Delete() on exhausted cursor, but got '%v' instead", err)
}
return nil
})
if err != nil {
t.Fatalf("TestCursorDeleteErrors: Unexpected error from pdb.Update "+
"when attempting to delete on exhausted cursor: %s", err)
}
// Check for error when transaction is closed
tx, err := pdb.Begin(true)
if err != nil {
t.Fatalf("TestCursorDeleteErrors: Error in pdb.Begin(): %s", err)
}
cursor := tx.Metadata().Cursor()
err = tx.Commit()
if err != nil {
t.Fatalf("TestCursorDeleteErrors: Error in tx.Commit(): %s", err)
}
err = cursor.Delete()
if !database.IsErrorCode(err, database.ErrTxClosed) {
t.Errorf("TestCursorDeleteErrors: Expected error of type ErrTxClosed "+
"when calling .Delete() on with closed transaction, but got '%s' instead", err)
}
}
func TestSkipPendingUpdates(t *testing.T) {
pdb := newTestDb("TestSkipPendingUpdates", t)
defer pdb.Close()
value := []byte("value")
// Add numbered prefixes to keys so that they are in expected order, and before any other keys
firstKey := []byte("1 - first")
toDeleteKey := []byte("2 - toDelete")
toUpdateKey := []byte("3 - toUpdate")
secondKey := []byte("4 - second")
// create initial metadata for test
err := pdb.Update(func(dbTx database.Tx) error {
metadata := dbTx.Metadata()
if err := metadata.Put(firstKey, value); err != nil {
return err
}
if err := metadata.Put(toDeleteKey, value); err != nil {
return err
}
if err := metadata.Put(toUpdateKey, value); err != nil {
return err
}
if err := metadata.Put(secondKey, value); err != nil {
return err
}
return nil
})
if err != nil {
t.Fatalf("TestSkipPendingUpdates: Error adding to metadata: %s", err)
}
// test skips
err = pdb.Update(func(dbTx database.Tx) error {
metadata := dbTx.Metadata()
if err := metadata.Delete(toDeleteKey); err != nil {
return err
}
if err := metadata.Put(toUpdateKey, value); err != nil {
return err
}
cursor := metadata.Cursor().(*cursor)
dbIter := cursor.dbIter
// Check that first is ok
dbIter.First()
expectedKey := bucketizedKey(metadataBucketID, firstKey)
if !bytes.Equal(dbIter.Key(), expectedKey) {
t.Errorf("TestSkipPendingUpdates: 1: key expected to be %v but is %v", expectedKey, dbIter.Key())
}
// Go to the next key, which is toDelete
dbIter.Next()
expectedKey = bucketizedKey(metadataBucketID, toDeleteKey)
if !bytes.Equal(dbIter.Key(), expectedKey) {
t.Errorf("TestSkipPendingUpdates: 2: key expected to be %s but is %s", expectedKey, dbIter.Key())
}
// at this point toDeleteKey and toUpdateKey should be skipped
cursor.skipPendingUpdates(true)
expectedKey = bucketizedKey(metadataBucketID, secondKey)
if !bytes.Equal(dbIter.Key(), expectedKey) {
t.Errorf("TestSkipPendingUpdates: 3: key expected to be %s but is %s", expectedKey, dbIter.Key())
}
// now traverse backwards - should get toUpdate
dbIter.Prev()
expectedKey = bucketizedKey(metadataBucketID, toUpdateKey)
if !bytes.Equal(dbIter.Key(), expectedKey) {
t.Errorf("TestSkipPendingUpdates: 4: key expected to be %s but is %s", expectedKey, dbIter.Key())
}
// at this point toUpdateKey and toDeleteKey should be skipped
cursor.skipPendingUpdates(false)
expectedKey = bucketizedKey(metadataBucketID, firstKey)
if !bytes.Equal(dbIter.Key(), expectedKey) {
t.Errorf("TestSkipPendingUpdates: 5: key expected to be %s but is %s", expectedKey, dbIter.Key())
}
return nil
})
if err != nil {
t.Fatalf("TestSkipPendingUpdates: Error running main part of test: %s", err)
}
}
// TestCursor tests various edge-cases in cursor that were not hit by the more general tests
func TestCursor(t *testing.T) {
pdb := newTestDb("TestCursor", t)
defer pdb.Close()
value := []byte("value")
// Add numbered prefixes to keys so that they are in expected order, and before any other keys
firstKey := []byte("1 - first")
toDeleteKey := []byte("2 - toDelete")
toUpdateKey := []byte("3 - toUpdate")
secondKey := []byte("4 - second")
// create initial metadata for test
err := pdb.Update(func(dbTx database.Tx) error {
metadata := dbTx.Metadata()
if err := metadata.Put(firstKey, value); err != nil {
return err
}
if err := metadata.Put(toDeleteKey, value); err != nil {
return err
}
if err := metadata.Put(toUpdateKey, value); err != nil {
return err
}
if err := metadata.Put(secondKey, value); err != nil {
return err
}
return nil
})
if err != nil {
t.Fatalf("Error adding to metadata: %s", err)
}
// run the actual tests
err = pdb.Update(func(dbTx database.Tx) error {
metadata := dbTx.Metadata()
if err := metadata.Delete(toDeleteKey); err != nil {
return err
}
if err := metadata.Put(toUpdateKey, value); err != nil {
return err
}
cursor := metadata.Cursor().(*cursor)
// Check prev when currentIter == nil
if ok := cursor.Prev(); ok {
t.Error("1: .Prev() should have returned false, but have returned true")
}
// Same thing for .Next()
for ok := cursor.First(); ok; ok = cursor.Next() {
}
if ok := cursor.Next(); ok {
t.Error("2: .Next() should have returned false, but have returned true")
}
// Check that Key(), rawKey(), Value(), and rawValue() all return nil when currentIter == nil
if key := cursor.Key(); key != nil {
t.Errorf("3: .Key() should have returned nil, but have returned '%s' instead", key)
}
if key := cursor.rawKey(); key != nil {
t.Errorf("4: .rawKey() should have returned nil, but have returned '%s' instead", key)
}
if value := cursor.Value(); value != nil {
t.Errorf("5: .Value() should have returned nil, but have returned '%s' instead", value)
}
if value := cursor.rawValue(); value != nil {
t.Errorf("6: .rawValue() should have returned nil, but have returned '%s' instead", value)
}
// Check rawValue in normal operation
cursor.First()
if rawValue := cursor.rawValue(); !bytes.Equal(rawValue, value) {
t.Errorf("7: rawValue should have returned '%s' but have returned '%s' instead", value, rawValue)
}
return nil
})
if err != nil {
t.Fatalf("Error running the actual tests: %s", err)
}
}
// TestCreateBucketErrors tests all error-cases in *bucket.CreateBucket().
// The non-error-cases are tested in the more general tests.
func TestCreateBucketErrors(t *testing.T) {
testKey := []byte("key")
tests := []struct {
name string
key []byte
isWritable bool
isClosed bool
expectedErr database.ErrorCode
}{
{"empty key", []byte{}, true, false, database.ErrBucketNameRequired},
{"transaction is closed", testKey, true, true, database.ErrTxClosed},
{"transaction is not writable", testKey, false, false, database.ErrTxNotWritable},
{"key already exists", blockIdxBucketName, true, false, database.ErrBucketExists},
}
for _, test := range tests {
func() {
pdb := newTestDb("TestCreateBucketErrors", t)
defer pdb.Close()
tx, err := pdb.Begin(test.isWritable)
if err != nil {
t.Fatalf("TestCreateBucketErrors: %s: error from pdb.Begin: %s", test.name, err)
}
defer tx.Commit()
if test.isClosed {
err = tx.Commit()
if err != nil {
t.Fatalf("TestCreateBucketErrors: %s: error from tx.Commit: %s", test.name, err)
}
}
metadata := tx.Metadata()
_, err = metadata.CreateBucket(test.key)
if !database.IsErrorCode(err, test.expectedErr) {
t.Errorf("TestCreateBucketErrors: %s: Expected error of type %d "+
"but got '%v'", test.name, test.expectedErr, err)
}
}()
}
}
// TestPutErrors tests all error-cases in *bucket.Put().
// The non-error-cases are tested in the more general tests.
func TestPutErrors(t *testing.T) {
testKey := []byte("key")
testValue := []byte("value")
tests := []struct {
name string
key []byte
isWritable bool
isClosed bool
expectedErr database.ErrorCode
}{
{"empty key", []byte{}, true, false, database.ErrKeyRequired},
{"transaction is closed", testKey, true, true, database.ErrTxClosed},
{"transaction is not writable", testKey, false, false, database.ErrTxNotWritable},
}
for _, test := range tests {
func() {
pdb := newTestDb("TestPutErrors", t)
defer pdb.Close()
tx, err := pdb.Begin(test.isWritable)
if err != nil {
t.Fatalf("TestPutErrors: %s: error from pdb.Begin: %s", test.name, err)
}
defer tx.Commit()
if test.isClosed {
err = tx.Commit()
if err != nil {
t.Fatalf("TestPutErrors: %s: error from tx.Commit: %s", test.name, err)
}
}
metadata := tx.Metadata()
err = metadata.Put(test.key, testValue)
if !database.IsErrorCode(err, test.expectedErr) {
t.Errorf("TestPutErrors: %s: Expected error of type %d "+
"but got '%v'", test.name, test.expectedErr, err)
}
}()
}
}
// TestGetErrors tests all error-cases in *bucket.Get().
// The non-error-cases are tested in the more general tests.
func TestGetErrors(t *testing.T) {
testKey := []byte("key")
tests := []struct {
name string
key []byte
isClosed bool
}{
{"empty key", []byte{}, false},
{"transaction is closed", testKey, true},
}
for _, test := range tests {
func() {
pdb := newTestDb("TestGetErrors", t)
defer pdb.Close()
tx, err := pdb.Begin(false)
if err != nil {
t.Fatalf("TestGetErrors: %s: error from pdb.Begin: %s", test.name, err)
}
defer tx.Rollback()
if test.isClosed {
err = tx.Rollback()
if err != nil {
t.Fatalf("TestGetErrors: %s: error from tx.Commit: %s", test.name, err)
}
}
metadata := tx.Metadata()
if result := metadata.Get(test.key); result != nil {
t.Errorf("TestGetErrors: %s: Expected to return nil, but got %v", test.name, result)
}
}()
}
}
// TestDeleteErrors tests all error-cases in *bucket.Delete().
// The non-error-cases are tested in the more general tests.
func TestDeleteErrors(t *testing.T) {
testKey := []byte("key")
tests := []struct {
name string
key []byte
isWritable bool
isClosed bool
expectedErr database.ErrorCode
}{
{"empty key", []byte{}, true, false, database.ErrKeyRequired},
{"transaction is closed", testKey, true, true, database.ErrTxClosed},
{"transaction is not writable", testKey, false, false, database.ErrTxNotWritable},
}
for _, test := range tests {
func() {
pdb := newTestDb("TestDeleteErrors", t)
defer pdb.Close()
tx, err := pdb.Begin(test.isWritable)
if err != nil {
t.Fatalf("TestDeleteErrors: %s: error from pdb.Begin: %s", test.name, err)
}
defer tx.Commit()
if test.isClosed {
err = tx.Commit()
if err != nil {
t.Fatalf("TestDeleteErrors: %s: error from tx.Commit: %s", test.name, err)
}
}
metadata := tx.Metadata()
err = metadata.Delete(test.key)
if !database.IsErrorCode(err, test.expectedErr) {
t.Errorf("TestDeleteErrors: %s: Expected error of type %d "+
"but got '%v'", test.name, test.expectedErr, err)
}
}()
}
}
func TestForEachBucket(t *testing.T) {
pdb := newTestDb("TestForEachBucket", t)
// set-up test
testKey := []byte("key")
testValue := []byte("value")
bucketKeys := [][]byte{{1}, {2}, {3}}
err := pdb.Update(func(dbTx database.Tx) error {
metadata := dbTx.Metadata()
for _, bucketKey := range bucketKeys {
bucket, err := metadata.CreateBucket(bucketKey)
if err != nil {
return err
}
err = bucket.Put(testKey, testValue)
if err != nil {
return err
}
}
return nil
})
if err != nil {
t.Fatalf("TestForEachBucket: Error setting up test-database: %s", err)
}
// actual test
err = pdb.View(func(dbTx database.Tx) error {
i := 0
metadata := dbTx.Metadata()
err := metadata.ForEachBucket(func(bucketKey []byte) error {
if i >= len(bucketKeys) { // in case there are any other buckets in metadata
return nil
}
expectedBucketKey := bucketKeys[i]
if !bytes.Equal(expectedBucketKey, bucketKey) {
t.Errorf("TestForEachBucket: %d: Expected bucket key: %v, but got: %v",
i, expectedBucketKey, bucketKey)
return nil
}
bucket := metadata.Bucket(bucketKey)
if bucket == nil {
t.Errorf("TestForEachBucket: %d: Bucket is nil", i)
return nil
}
value := bucket.Get(testKey)
if !bytes.Equal(testValue, value) {
t.Errorf("TestForEachBucket: %d: Expected value: %s, but got: %s",
i, testValue, value)
return nil
}
i++
return nil
})
return err
})
if err != nil {
t.Fatalf("TestForEachBucket: Error running actual tests: %s", err)
}
}
// TestStoreBlockErrors tests all error-cases in *tx.StoreBlock().
// The non-error-cases are tested in the more general tests.
func TestStoreBlockErrors(t *testing.T) {
testBlock := util.NewBlock(wire.NewMsgBlock(wire.NewBlockHeader(1, []*daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, 0, 0)))
tests := []struct {
name string
isWritable bool
isClosed bool
expectedErr database.ErrorCode
}{
{"transaction is closed", true, true, database.ErrTxClosed},
{"transaction is not writable", false, false, database.ErrTxNotWritable},
}
for _, test := range tests {
func() {
pdb := newTestDb("TestStoreBlockErrors", t)
defer pdb.Close()
tx, err := pdb.Begin(test.isWritable)
if err != nil {
t.Fatalf("TestStoreBlockErrors: %s: error from pdb.Begin: %s", test.name, err)
}
defer tx.Commit()
if test.isClosed {
err = tx.Commit()
if err != nil {
t.Fatalf("TestStoreBlockErrors: %s: error from tx.Commit: %s", test.name, err)
}
}
err = tx.StoreBlock(testBlock)
if !database.IsErrorCode(err, test.expectedErr) {
t.Errorf("TestStoreBlockErrors: %s: Expected error of type %d "+
"but got '%v'", test.name, test.expectedErr, err)
}
}()
}
}
// TestDeleteDoubleNestedBucket tests what happens when bucket.DeleteBucket()
// is invoked on a bucket that contains a nested bucket.
func TestDeleteDoubleNestedBucket(t *testing.T) {
pdb := newTestDb("TestDeleteDoubleNestedBucket", t)
defer pdb.Close()
firstKey := []byte("first")
secondKey := []byte("second")
key := []byte("key")
value := []byte("value")
var rawKey, rawSecondKey []byte
// Test setup
err := pdb.Update(func(dbTx database.Tx) error {
metadata := dbTx.Metadata()
firstBucket, err := metadata.CreateBucket(firstKey)
if err != nil {
return errors.Errorf("Error creating first bucket: %s", err)
}
secondBucket, err := firstBucket.CreateBucket(secondKey)
if err != nil {
return errors.Errorf("Error creating second bucket: %s", err)
}
if err := secondBucket.Put(key, value); err != nil {
return err
}
// extract rawKey from cursor and make sure it's in raw database
c := secondBucket.Cursor()
for ok := c.First(); ok && !bytes.Equal(c.Key(), key); ok = c.Next() {
}
if !bytes.Equal(c.Key(), key) {
return errors.Errorf("Couldn't find key to extract rawKey")
}
rawKey = c.(*cursor).rawKey()
if dbTx.(*transaction).fetchKey(rawKey) == nil {
return errors.Errorf("rawKey not found")
}
// extract rawSecondKey from cursor and make sure it's in raw database
c = firstBucket.Cursor()
for ok := c.First(); ok && !bytes.Equal(c.Key(), secondKey); ok = c.Next() {
}
if !bytes.Equal(c.Key(), secondKey) {
return errors.Errorf("Couldn't find secondKey to extract rawSecondKey")
}
rawSecondKey = c.(*cursor).rawKey()
if dbTx.(*transaction).fetchKey(rawSecondKey) == nil {
return errors.Errorf("rawSecondKey not found for some reason")
}
return nil
})
if err != nil {
t.Fatalf("TestDeleteDoubleNestedBucket: Error in test setup pdb.Update: %s", err)
}
// Actual test
err = pdb.Update(func(dbTx database.Tx) error {
metadata := dbTx.Metadata()
err := metadata.DeleteBucket(firstKey)
if err != nil {
return err
}
if dbTx.(*transaction).fetchKey(rawSecondKey) != nil {
t.Error("TestDeleteDoubleNestedBucket: secondBucket was not deleted")
}
if dbTx.(*transaction).fetchKey(rawKey) != nil {
t.Error("TestDeleteDoubleNestedBucket: value inside secondBucket was not deleted")
}
return nil
})
if err != nil {
t.Fatalf("TestDeleteDoubleNestedBucket: Error in actual test pdb.Update: %s", err)
}
}

Some files were not shown because too many files have changed in this diff.