Compare commits


335 Commits

Author SHA1 Message Date
Ori Newman
1f04f30ea7 [NOD-1273] Order parents in PrepareBlockForTest (#872) 2020-08-17 14:26:56 +03:00
Ori Newman
3e4e8d8b6b Merge remote-tracking branch 'origin/v0.6.2-dev' into v0.6.3-dev 2020-08-16 18:13:46 +03:00
Ori Newman
31c0399484 Update to version 0.6.4 2020-08-16 17:55:49 +03:00
Ori Newman
f2a3ccd9ab [NOD-1271] Move version package to the top level (#871)
* [NOD-1271] Move version package to the top level

* [NOD-1271] Fix imports
2020-08-16 17:16:11 +03:00
Ori Newman
31b5cd8d28 Fix merge errors from v0.6.2-rc2 to v0.6.3-dev 2020-08-16 15:35:47 +03:00
Ori Newman
96bd1fa99b [NOD-1262] Add network name to MinimalNetAdapter handshake (#867) 2020-08-16 15:30:04 +03:00
Svarog
48d498e820 [NOD-1259] Do not panic on non-protocol errors from RPC (#863)
* [NOD-1259] All rule-errors should be protocol-errors

* [NOD-1259] Handle submitting of coinbase transactions properly

* Revert "[NOD-1259] All rule-errors should be protocol-errors"

This reverts commit 2fd30c1856.

* [NOD-1259] Don't panic on non-protocol errors in ProtocolManager.AddTransaction/AddBlock

* [NOD-1259] Implement subnetworkid.IsBuiltInOrNative and use where appropriate
2020-08-16 15:29:23 +03:00
Ori Newman
32c5cfeaf5 [NOD-1204] Add timestamp and message number to domain messages (#854) 2020-08-16 15:26:02 +03:00
stasatdaglabs
d55f4e8164 [NOD-1220] Add network string field to Version message (#852)
* [NOD-1220] Add network name to the version message.

* [NOD-1220] Ban peers from the wrong network.

* [NOD-1220] Add the network parameter to protowire.

* [NOD-1220] Add "kaspa-" to network names.
2020-08-16 15:25:25 +03:00
stasatdaglabs
1927e81202 [NOD-1129] Fix NewBlockTemplate creating incestuous blocks (#870)
* [NOD-1129] Implement TestIncestousNewBlockTemplate.

* [NOD-1129] Add some debug logs to TestIncestousNewBlockTemplate.

* [NOD-1129] Fix merge errors.

* [NOD-1129] Narrow down on the failure.

* [NOD-1129] Fix bad initial value for child.interval in reachabilityTreeNode.addChild.

* [NOD-1129] Rewrite the test to be specific to reachability.
2020-08-16 13:14:44 +03:00
stasatdaglabs
8a4ece1101 [NOD-1223] Reorganize project (#868)
* [NOD-1223] Move all network stuff into a new network package.

* [NOD-1223] Delete the unused package testutil.

* [NOD-1223] Move infrastructure stuff into a new infrastructure package.

* [NOD-1223] Move domain stuff into a new domain package.
2020-08-13 17:27:25 +03:00
Elichai Turkel
0bf1052abf [NOD-1101] Hash data without serializing into a buffer first (#779)
* Add Hash Writers

* Add the hash writers to the tests

* Add the DoubleHash Writer to the benchmarks

* Remove buffers from hashing by using the Hash Writer

* Replace empty slice with nil in mempool test payload
2020-08-13 15:40:54 +03:00
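
A minimal sketch of the hash-writer idea behind this commit: because Go's hash.Hash implements io.Writer, serialization code can write straight into the hasher instead of into an intermediate buffer. The serialize function and fields below are illustrative, not kaspad's actual types.

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/binary"
	"fmt"
	"io"
)

// serialize writes a toy message into w. Because hash.Hash implements
// io.Writer, the same serialization code can feed a buffer or a hasher.
func serialize(w io.Writer, version uint32, payload []byte) error {
	if err := binary.Write(w, binary.LittleEndian, version); err != nil {
		return err
	}
	_, err := w.Write(payload)
	return err
}

func main() {
	// Before: serialize into an intermediate buffer, then hash its bytes.
	var buf bytes.Buffer
	_ = serialize(&buf, 1, []byte("payload"))
	viaBuffer := sha256.Sum256(buf.Bytes())

	// After: write directly into the hasher; no intermediate allocation.
	hasher := sha256.New()
	_ = serialize(hasher, 1, []byte("payload"))
	viaWriter := hasher.Sum(nil)

	fmt.Println(bytes.Equal(viaBuffer[:], viaWriter)) // true
}
```
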
Ori Newman
2af03c1ccf [NOD-1207] Send reject messages (#855)
* [NOD-1207] Send reject messages

* [NOD-1207] Empty outgoing route before disconnecting

* [NOD-1207] Renumber fields in RejectMessage

* [NOD-1207] Use more accurate log messages

* [NOD-1207] Call registerRejectsFlow

* [NOD-1207] Panic if outgoingRoute.Enqueue returns unexpected error

* [NOD-1207] Fix comment and rename variables

* [NOD-1207] Fix comment

* [NOD-1207] add baseMessage to MsgReject

* [NOD-1207] Fix comments and add block hash to error if it's rejected
2020-08-13 15:32:41 +03:00
Ori Newman
a2aa58c8a4 [NOD-1201] Panic if callbacks are not set (#856)
* [NOD-1201] Panic if necessary callbacks are not set in gRPCConnection and gRPCServer

* [NOD-1201] Fix comment and change return order

* [NOD-1201] Return nil instead of error on gRPCServer.Start

* [NOD-1201] Fix typo
2020-08-13 15:21:52 +03:00
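
A rough illustration of the fail-fast check this commit adds: if a required callback was never set, starting the component panics immediately rather than silently dropping events later. The struct and field names here are hypothetical.

```go
package main

import "errors"

type grpcServer struct {
	onConnectedHandler func(address string) // must be set before start
}

// start panics when a mandatory callback is missing, turning a wiring
// mistake into an immediate, debuggable failure at startup.
func (s *grpcServer) start() {
	if s.onConnectedHandler == nil {
		panic(errors.New("onConnectedHandler must be set before start is called"))
	}
	// ... normal startup would continue here ...
}

func main() {
	s := &grpcServer{}
	s.onConnectedHandler = func(address string) { /* handle the new connection */ }
	s.start()
}
```
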
oudeis
7e74fc0b2b [NOD-1248] netadapter unit test (#865)
* [NOD-1246/NOD-1248] Add unit test for NetAdapter

* [NOD-1246/NOD-1248] Do not ignore OK

* [NOD-1248] Lint code

- Move `t *testing.T` to be first parameter in test-helper function
- Rename `getRouterInitializer` to `routerInitializerForTest`
- Make test data constants

Co-authored-by: Yaroslav Reshetnyk <yaroslav.r@it-dimension.com>
2020-08-13 15:07:20 +03:00
stasatdaglabs
0653e59e16 [NOD-1190] Refactor process.go (#858)
* [NOD-1190] Move non-processBlock stuff out of process.go.

* [NOD-1190] Move everything out of accept.go.

* [NOD-1190] Move all processBlock functions to process.go.

* [NOD-1190] Move orphan stuff to orphan.go.

* [NOD-1190] Remove thresholdstate stuff.

* [NOD-1190] Move isSynced to sync_rate.go.

* [NOD-1190] Move delayed block stuff to delayed_blocks.go.

* [NOD-1190] Rename orphans.go to orphaned_blocks.go.

* [NOD-1190] Move non-BlockDAG structs out of dag.go.

* [NOD-1190] Remove unused fields.

* [NOD-1190] Fixup BlockDAG.New a bit.

* [NOD-1190] Move sequence lock stuff to sequence_lock.go

* [NOD-1190] Move some multiset stuff out of dag.go.

* [NOD-1190] Move finality stuff out of dag.go.

* [NOD-1190] Move blocklocator stuff out of dag.go.

* [NOD-1190] Move confirmation stuff out of dag.go.

* [NOD-1190] Move utxo and selected parent chain stuff out of dag.go.

* [NOD-1190] Move BlockDAG lock functions to the beginning of dag.go.

* [NOD-1190] Move verifyAndBuildUTXO out of process.go.

* [NOD-1190] Extract handleProcessBlockError to a function.

* [NOD-1190] Remove daglock unlock in notifyBlockAccepted.

* [NOD-1190] Extract checkDuplicateBlock to a method.

* [NOD-1190] Fix merge errors.

* [NOD-1190] Remove unused parameter from CalcSequenceLock.

* [NOD-1190] Extract processBlock contents into functions.

* [NOD-1190] Fix parent delayed blocks not marking their children as delayed

* [NOD-1190] Fix TestProcessDelayedBlocks.

* [NOD-1190] Extract stuff in maybeAcceptBlock to separate functions.

* [NOD-1190] Rename handleProcessBlockError to handleConnectBlockError.

* [NOD-1190] Remove some comments.

* [NOD-1190] Use lowercase in error messages.

* [NOD-1190] Rename createNewBlockNode to createBlockNodeFromBlock.

* [NOD-1190] Rename orphaned_blocks.go to orpan_blocks.go.

* [NOD-1190] Extract validateUTXOCommitment to a separate function.

* [NOD-1190] Fix a bug in validateUTXOCommitment.

* [NOD-1190] Rename checkBlockTxsFinalized to checkBlockTransactionsFinalized.

* [NOD-1190] Add a comment over createBlockNodeFromBlock.

* [NOD-1190] Fold validateAllTxsFinalized into checkBlockTransactionsFinalized.

* [NOD-1190] Return parents from checkBlockParents.

* [NOD-1190] Remove the processBlock prefix from the functions that had it.

* [NOD-1190] Begin extracting functions out of checkTransactionSanity.

* [NOD-1190] Finish extracting functions out of checkTransactionSanity.

* [NOD-1190] Remove an unused parameter.

* [NOD-1190] Fix merge errors.

* [NOD-1190] Added an explanation as to why we change the nonce in TestProcessDelayedBlocks.

* [NOD-1190] Fix a comment.

* [NOD-1190] Fix a comment.

* [NOD-1190] Fix a typo.

* [NOD-1190] Replace checkBlockParents with handleLookupParentNodesError.
2020-08-13 13:33:43 +03:00
oudeis
32463ce906 [NOD-1247] Add check for routerInitializer presence (#864)
Co-authored-by: Yaroslav Reshetnyk <yaroslav.r@it-dimension.com>
2020-08-13 12:04:43 +03:00
stasatdaglabs
23a3594c18 [NOD-1233] Go over all TODO(libp2p)s and either fix them or create tickets for them (#860)
* [NOD-1233] Remove HandleNewBlockOld.

* [NOD-1233] Make ErrRouteClosed not a protocol error.

* [NOD-1233] Fix ambiguous comments.

* [NOD-1233] Remove a no-longer-relevant comment.

* [NOD-1233] Remove some of the TODOs.

* [NOD-1233] Replace fakeSourceAddress with a real sourceAddress.

* [NOD-1233] Remove a no-longer-relevant TODO.

* [NOD-1233] Remove TODO from handleGetNetTotals.

* [NOD-1233] Remove a no-longer-relevant TODO.

* [NOD-1233] Disconnect if connected to wrong partial/full type.

* [NOD-1233] Get rid of mempool tags.

* [NOD-1233] Remove TODOs.

* [NOD-1233] Simplify a test.

* [NOD-1190] Remove getNetTotals.
2020-08-13 09:41:02 +03:00
Ori Newman
ffe153efa7 [NOD-1262] Add network name to MinimalNetAdapter handshake (#867) 2020-08-12 17:55:58 +03:00
Ori Newman
ca3172dad0 [NOD-1239] Delete app.WaitForShutdown() (#866) 2020-08-12 16:38:52 +03:00
Ori Newman
22dc3f998f [NOD-1256] Optimize PrepareBlockForTest (#861)
* [NOD-1256] Optimize PrepareBlockForTest

* [NOD-1256] Remove redundant comment
2020-08-12 16:25:42 +03:00
Svarog
91f4ed9825 [NOD-1259] Do not panic on non-protocol errors from RPC (#863)
* [NOD-1259] All rule-errors should be protocol-errors

* [NOD-1259] Handle submitting of coinbase transactions properly

* Revert "[NOD-1259] All rule-errors should be protocol-errors"

This reverts commit 2fd30c1856.

* [NOD-1259] Don't panic on non-protocol errors in ProtocolManager.AddTransaction/AddBlock

* [NOD-1259] Implement subnetworkid.IsBuiltInOrNative and use where appropriate
2020-08-12 12:29:58 +03:00
Ori Newman
aa9556aa59 [NOD-1257] Disable difficulty adjustment on simnet (#862)
* [NOD-1257] Disable difficulty adjustment on simnet

* [NOD-1257] Explicitly set DisableDifficultyAdjustment everywhere
2020-08-12 12:24:37 +03:00
stasatdaglabs
91f0fe5740 [NOD-1238] Fix acceptance index never being initialized. (#859) 2020-08-11 16:53:34 +03:00
Ori Newman
b0fecc9f87 [NOD-1204] Add timestamp and message number to domain messages (#854) 2020-08-10 12:55:24 +03:00
stasatdaglabs
53cccd405f [NOD-1220] Add network string field to Version message (#852)
* [NOD-1220] Add network name to the version message.

* [NOD-1220] Ban peers from the wrong network.

* [NOD-1220] Add the network parameter to protowire.

* [NOD-1220] Add "kaspa-" to network names.
2020-08-09 18:11:13 +03:00
stasatdaglabs
5b84184921 [NOD-1221] Explicitly add a maximum message size in gRPC (#851)
* [NOD-1221] Explicitly add a maximum message size in gRPC.

* [NOD-1221] Limit sent message size and print a debug log on start.
2020-08-09 17:56:26 +03:00
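
For reference, setting explicit gRPC message-size limits looks roughly like this; the 10 MB constant is an assumption, not necessarily the value kaspad uses.

```go
package main

import "google.golang.org/grpc"

// maxMessageSize is an assumed limit; kaspad's actual constant may differ.
const maxMessageSize = 10 * 1024 * 1024

func newServer() *grpc.Server {
	// Limit both received and sent message sizes explicitly instead of
	// relying on gRPC's defaults (receive defaults to only 4 MB).
	return grpc.NewServer(
		grpc.MaxRecvMsgSize(maxMessageSize),
		grpc.MaxSendMsgSize(maxMessageSize),
	)
}

func main() {
	_ = newServer()
}
```
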
Yuval Shaul
af1df425a2 update to version v0.6.2 2020-08-09 15:16:21 +03:00
Ori Newman
8e170cf327 [NOD-1225] Rename wire to domainmessage and get rid of InvType (#853)
* [NOD-1225] Rename wire to domainmessage

* [NOD-1225] Get rid of references to package wire in the code, and get rid of InvType
2020-08-09 12:39:15 +03:00
stasatdaglabs
b55cfee8c8 [NOD-1229] Fix node crashing if AntiPastHashesBetween lowHash or highHash are not found in the DAG (#849)
* [NOD-1229] Fix node crashing if AntiPastHashesBetween lowHash or highHash are not found in the DAG

* [NOD-1229] Rename InvalidParameterError to ErrInvalidParameter.

* [NOD-1229] Lowercasify errors.
2020-08-09 09:36:29 +03:00
stasatdaglabs
420c3d4258 [NOD-1222] Turn on gzip in gRPC. (#850) 2020-08-06 17:28:40 +03:00
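
Turning on gzip for a gRPC client is a small change once the compressor is registered; a sketch, with a placeholder address:

```go
package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding/gzip" // importing this registers the "gzip" compressor
)

func dial(address string) (*grpc.ClientConn, error) {
	// Compress every outgoing message on this connection with gzip.
	// Servers accept any compressor that has been registered.
	return grpc.Dial(address,
		grpc.WithInsecure(),
		grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)),
	)
}

func main() {
	conn, err := dial("localhost:16110")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```
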
Mike Zak
b92943a98c Update to version v0.6.1 2020-08-06 15:16:05 +03:00
Mike Zak
e1318aa326 [NOD-1208] Labels should be lower case 2020-08-06 11:34:22 +03:00
Mike Zak
2bd4a71913 [NOD-1208] Continue the correct loop 2020-08-06 11:11:44 +03:00
Mike Zak
5b206f4c9d [NOD-1208] Use lower case for errors + omit hard-coded numbers 2020-08-06 10:17:20 +03:00
Mike Zak
3f969a2921 [NOD-1208] Add comment to handlePingPong to explain it's one-sided. 2020-08-06 10:10:31 +03:00
Mike Zak
90be14fd57 [NOD-1208] Added comment explaining why Version.Address is optional 2020-08-06 10:00:25 +03:00
Mike Zak
1a5d9fc65c [NOD-1208] Renamed NetAdapterMock to standalone.MinimalNetAdapter 2020-08-05 15:59:23 +03:00
Mike Zak
ec03a094e5 [NOD-1208] Use ID netAdapter generates in netAdapterMock handshake 2020-08-05 15:50:46 +03:00
Mike Zak
9d60bb1ee7 [NOD-1208] Use netConnection.Disconnect in Routes.Disconnect 2020-08-05 15:46:32 +03:00
Mike Zak
cd10de2dce [NOD-1208] Add NetAdapterMock 2020-08-05 10:40:33 +03:00
Mike Zak
658fb08c02 [NOD-1208] Do not ban if DisableBanning was turned on 2020-08-05 10:40:33 +03:00
Mike Zak
3b40488877 [NOD-1208] Allow sending Version message without an address 2020-08-05 10:40:33 +03:00
Svarog
d3d0ad0cf3 [NOD-1226] Properly handle ErrIsClosed during handshake (#843)
* [NOD-1226] Consider ErrIsClosed as protocol error during handshake

* Revert "[NOD-1226] Consider ErrIsClosed as protocol error during handshake"

This reverts commit 74bb07b6cd.

* [NOD-1226] handle errors separately for handshake

* [NOD-1226] Wrap ErrRouteClosed as protocol error in handshake

* [NOD-1226] return if ErrRouteClosed

* [NOD-1226] Use atomic

* [NOD-1226] Don't wrap with protocol error

* [NOD-1226] Fix comment

* [NOD-1226] Fix comment
2020-08-05 10:37:19 +03:00
Svarog
473cc37a75 [NOD-1224] Fix duplicate connections and duplicate blocks bugs (#842)
* [NOD-1224] Make block already existing a ruleError

* [NOD-1224] Remove block from pendingBlocks list only after it was processed

* [NOD-1224] AddToPeers should have a Write Lock, not Read Lock

* [NOD-1224] Check for unrequested before processing
2020-08-04 11:07:21 +03:00
Svarog
966cba4a4e [NOD-1218] KaspadMessage_Pong.toWireMessage should return MsgPong, not MsgPing (#841) 2020-08-03 18:18:06 +03:00
Svarog
da90755530 [NOD-1215] Added integration test for address exchange (#839)
* [NOD-1215] Added integration test for address exchange

* [NOD-1215] Fix error message
2020-08-03 10:34:21 +03:00
Svarog
fa58623815 [NOD-1214] Added test for 64 incoming connections to single node (#838)
* [NOD-1214] Added test for 64 incoming connections to single node

* [NOD-1214] Expand comments, and a small rename

* [NOD-1214] Make sure no bully reports blockAdded twice
2020-08-03 10:30:38 +03:00
Svarog
26af4da507 [NOD-1217] MethodUsageText should take a write lock, to protect methodToInfo (#840) 2020-08-03 10:29:04 +03:00
Svarog
b527470153 [NOD-1211] Transaction relay integration test + fixes to flow (#836)
* [NOD-1162] Separate kaspad into its own package, so that I can use it from the integration test

* [NOD-1162] Begin integration tests

* [NOD-1162] [FIX] Assign cfg to RPCServer

* [NOD-1162] Basic integration test ready

* [NOD-1162] Wait for connection for real

* [NOD-1162] [FIX] Connection manager should run the moment it adds a request

* [NOD-1162] Make connect something that can be invoked in the middle of a test

* [NOD-1162] Complete first integration test

* [NOD-1162] Undo refactor error

* [NOD-1162] Rename Kaspad to App

* [NOD-1162] Convert checking connection to polling

* [NOD-1162] [FIX] Set peerID on handshake

* [NOD-1162] [FIX] Broadcast should send to outgoing route, not incoming

* [NOD-1162] [FIX] Add CmdInvRelayBlock to MakeEmptyMessage

* [NOD-1162] [FIX] Initialize Hash before decoding MsgInvRelayBlock

* [NOD-1162] [FIX] Invert condition

* [NOD-1162] [FIX] Fixes to encoding of MsgGetRelayBlocks

* [NOD-1162] [FIX] Add MsgGetRelayBlocks to MakeEmptyMessage

* [NOD-1162] [FIX] Connection manager should run the moment it adds a request

* [NOD-1162] [FIX] Set peerID on handshake

* [NOD-1162] [FIX] Broadcast should send to outgoing route, not incoming

* [NOD-1162] [FIX] Add CmdInvRelayBlock to MakeEmptyMessage

* [NOD-1162] [FIX] Initialize Hash before decoding MsgInvRelayBlock

* [NOD-1162] [FIX] Invert condition

* [NOD-1162] [FIX] Fixes to encoding of MsgGetRelayBlocks

* [NOD-1162] [FIX] Add MsgGetRelayBlocks to MakeEmptyMessage

* [NOD-1162] Add comment

* [NOD-1162] Added support for 3 nodes and clients in integration tests

* [NOD-1162] Add third node to integration test

* [NOD-1192] Use lock-less functions in TxPool.HandleNewBlock

* [NOD-1192] Broadcast transactions only if there's more than 0

* [NOD-1162] Removed double waitTillNextIteration

* [NOD-1192] Rename: broadcastTransactions -> broadcastTransactionsAfterBlockAdded

* [NOD-1162] Call NotifyBlocks on client3 as well

* [NOD-1162] ErrTimeout and ErrRouteClosed should be ProtocolErrors

* [NOD-1162] Added comment and removed redundant type PeerAddedCallback

* [NOD-1162] Revert overly eager rename

* [NOD-1162] Move DisableTLS to common config + minimize call for ioutil.TempDir()

* [NOD-1162] Add some clarifications in code

* [NOD-1193] Skip closed connections in NetAdapter.Broadcast

* [NOD-1193] Make sure to protect connectionsToRouters from concurrent access

* [NOD-1162] Add _test to all files in integration package

* [NOD-1162] Introduced appHarness to better encapsulate a single node

* [NOD-1162] Removed onChainChanged handler

* [NOD-1162] Remove redundant closure

* [NOD-1162] Correctly mark integration_test config as Simnet

* [NOD-1162] Rename app.ID -> app.P2PNodeID

* [NOD-1162] Move TestIntegrationBasicSync to basic_sync_test.go

* [NOD-1210] Made it possible to set up any number of harnesses needed

* [NOD-1210] Rename appHarness1/2 to incoming/outgoing in connect function

* [NOD-1210] Add the 117-incoming-connections integration test

* [NOD-1210] Delete 117-incoming-connections test because it opens too many files

* [NOD-1210] Added function to notify of blocks conveniently

* [NOD-1210] Added function to mine a block from-A-to-Z

* [NOD-1210] Added IBD integration test

* [NOD-1210] Finish test for IBD and fix bug where requestSelectedTipsIfRequired ran in handshake's goroutine

* [NOD-1210] Set log level to debug

* [NOD-1211] Add test for transaction relay

* [NOD-1211] Fix incorrect comparison in KaspadMessage_RequestTransactions.fromWireMessage

* [NOD-1211] Return ok instead of err from FetchTxDesc and FetchTransaction

* [NOD-1211] Added MsgTransactionNotFound type

* [NOD-1211] Added HandleRequestedTransactions flow

* [NOD-1211] Wait for blocks to be accepted before moving forward

* [NOD-1211] Rename CmdNotFound to CmdTransactionNotFound

* [NOD-1211] Rename: requestAndSolveTemplate -> mineNextBlock

* [NOD-1211] Renamed incoming/outgoing to appHarness1/appHarness2 in isConnected

* [NOD-1211] Move check of Hash == nil to outside wireHashToProto

* [NOD-1211] Instantiate payloadHash before *x
2020-08-02 16:11:16 +03:00
Ori Newman
e70561141d [NOD-1212] Request IBD blocks in batches (#835)
* [NOD-1212] Request IBD blocks in batches

* [NOD-1212] Fix condition

* [NOD-1212] Remove redundant functions

* [NOD-1212] gofmt

* [NOD-1212] Fix condition

* [NOD-1212] Fix off by one error and add messages to messages.proto

* [NOD-1212] Refactor downloadBlocks

* [NOD-1212] Fix comment

* [NOD-1212] Return DefaultTimeout to original value
2020-08-02 13:46:07 +03:00
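
The batching idea, sketched with hypothetical names: request missing hashes in fixed-size chunks instead of one request per block, clamping the final (possibly short) batch — the kind of off-by-one this commit's history mentions.

```go
package main

import "fmt"

// ibdBatchSize is an assumed batch size; the real constant may differ.
const ibdBatchSize = 64

// downloadInBatches requests hashes in fixed-size chunks. The end-of-slice
// clamp handles the final short batch without running past the slice.
func downloadInBatches(hashes []string, request func(batch []string)) {
	for offset := 0; offset < len(hashes); offset += ibdBatchSize {
		end := offset + ibdBatchSize
		if end > len(hashes) {
			end = len(hashes)
		}
		request(hashes[offset:end])
	}
}

func main() {
	hashes := make([]string, 150)
	for i := range hashes {
		hashes[i] = fmt.Sprintf("hash-%d", i)
	}
	downloadInBatches(hashes, func(batch []string) {
		fmt.Printf("requesting %d blocks\n", len(batch)) // 64, 64, 22
	})
}
```
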
Ori Newman
20b547984e [NOD-1191] Fix erroneous condition (#837)
* [NOD-1191] Convert wire protocol to 100% protobuf

* [NOD-1191] Simplify wire interface and remove redundant messages

* [NOD-1191] Map all proto to wire conversions

* [NOD-1203] Create netadapter outside of protocol manager

* [NOD-1191] Fix nil errors

* [NOD-1191] Fix comments

* [NOD-1191] Add converter interface

* [NOD-1191] Add missing GetBlockLocator message

* [NOD-1191] Change message names that start with 'get' to 'request'

* [NOD-1191] Change message commands values

* [NOD-1191] Remove redundant methods

* [NOD-1191] Rename message constructors

* [NOD-1191] Change message commands to use iota

* [NOD-1191] Add missing outputs to protobuf conversion

* [NOD-1191] Make block header a required field

* [NOD-1191] Rename variables

* [NOD-1212] Fix test names

* [NOD-1191] Rename flow names

* [NOD-1191] Fix infinite loop

* [NOD-1191] Fix wrong condition
2020-08-02 11:10:56 +03:00
Svarog
16a658a5be [NOD-1210] Add integration test for IBD and fix bug where requestSelectedTipsIfRequired ran in handshake's goroutine (#834)
* [NOD-1162] Separate kaspad into its own package, so that I can use it from the integration test

* [NOD-1162] Begin integration tests

* [NOD-1162] [FIX] Assign cfg to RPCServer

* [NOD-1162] Basic integration test ready

* [NOD-1162] Wait for connection for real

* [NOD-1162] [FIX] Connection manager should run the moment it adds a request

* [NOD-1162] Make connect something that can be invoked in the middle of a test

* [NOD-1162] Complete first integration test

* [NOD-1162] Undo refactor error

* [NOD-1162] Rename Kaspad to App

* [NOD-1162] Convert checking connection to polling

* [NOD-1162] [FIX] Set peerID on handshake

* [NOD-1162] [FIX] Broadcast should send to outgoing route, not incoming

* [NOD-1162] [FIX] Add CmdInvRelayBlock to MakeEmptyMessage

* [NOD-1162] [FIX] Initialize Hash before decoding MsgInvRelayBlock

* [NOD-1162] [FIX] Invert condition

* [NOD-1162] [FIX] Fixes to encoding of MsgGetRelayBlocks

* [NOD-1162] [FIX] Add MsgGetRelayBlocks to MakeEmptyMessage

* [NOD-1162] [FIX] Connection manager should run the moment it adds a request

* [NOD-1162] [FIX] Set peerID on handshake

* [NOD-1162] [FIX] Broadcast should send to outgoing route, not incoming

* [NOD-1162] [FIX] Add CmdInvRelayBlock to MakeEmptyMessage

* [NOD-1162] [FIX] Initialize Hash before decoding MsgInvRelayBlock

* [NOD-1162] [FIX] Invert condition

* [NOD-1162] [FIX] Fixes to encoding of MsgGetRelayBlocks

* [NOD-1162] [FIX] Add MsgGetRelayBlocks to MakeEmptyMessage

* [NOD-1162] Add comment

* [NOD-1162] Added support for 3 nodes and clients in integration tests

* [NOD-1162] Add third node to integration test

* [NOD-1192] Use lock-less functions in TxPool.HandleNewBlock

* [NOD-1192] Broadcast transactions only if there's more than 0

* [NOD-1162] Removed double waitTillNextIteration

* [NOD-1192] Rename: broadcastTransactions -> broadcastTransactionsAfterBlockAdded

* [NOD-1162] Call NotifyBlocks on client3 as well

* [NOD-1162] ErrTimeout and ErrRouteClosed should be ProtocolErrors

* [NOD-1162] Added comment and removed redundant type PeerAddedCallback

* [NOD-1162] Revert overly eager rename

* [NOD-1162] Move DisableTLS to common config + minimize call for ioutil.TempDir()

* [NOD-1162] Add some clarifications in code

* [NOD-1193] Skip closed connections in NetAdapter.Broadcast

* [NOD-1193] Make sure to protect connectionsToRouters from concurrent access

* [NOD-1162] Add _test to all files in integration package

* [NOD-1162] Introduced appHarness to better encapsulate a single node

* [NOD-1162] Removed onChainChanged handler

* [NOD-1162] Remove redundant closure

* [NOD-1162] Correctly mark integration_test config as Simnet

* [NOD-1162] Rename app.ID -> app.P2PNodeID

* [NOD-1162] Move TestIntegrationBasicSync to basic_sync_test.go

* [NOD-1210] Made it possible to set up any number of harnesses needed

* [NOD-1210] Rename appHarness1/2 to incoming/outgoing in connect function

* [NOD-1210] Add the 117-incoming-connections integration test

* [NOD-1210] Delete 117-incoming-connections test because it opens too many files

* [NOD-1210] Added function to notify of blocks conveniently

* [NOD-1210] Added function to mine a block from-A-to-Z

* [NOD-1210] Added IBD integration test

* [NOD-1210] Finish test for IBD and fix bug where requestSelectedTipsIfRequired ran in handshake's goroutine

* [NOD-1210] Set log level to debug

* [NOD-1210] A bunch of renamings
2020-08-02 09:42:27 +03:00
Ori Newman
42e50e6dc2 [NOD-1191] Convert wire protocol to proto (#831)
* [NOD-1191] Convert wire protocol to 100% protobuf

* [NOD-1191] Simplify wire interface and remove redundant messages

* [NOD-1191] Map all proto to wire conversions

* [NOD-1203] Create netadapter outside of protocol manager

* [NOD-1191] Fix nil errors

* [NOD-1191] Fix comments

* [NOD-1191] Add converter interface

* [NOD-1191] Add missing GetBlockLocator message

* [NOD-1191] Change message names that start with 'get' to 'request'

* [NOD-1191] Change message commands values

* [NOD-1191] Remove redundant methods

* [NOD-1191] Rename message constructors

* [NOD-1191] Change message commands to use iota

* [NOD-1191] Add missing outputs to protobuf conversion

* [NOD-1191] Make block header a required field

* [NOD-1191] Rename variables

* [NOD-1212] Fix test names

* [NOD-1191] Rename flow names

* [NOD-1191] Fix infinite loop
2020-07-30 18:19:55 +03:00
Svarog
3d942ce355 [NOD-1162] Integration test (#822)
* [NOD-1162] Separate kaspad into its own package, so that I can use it from the integration test

* [NOD-1162] Begin integration tests

* [NOD-1162] [FIX] Assign cfg to RPCServer

* [NOD-1162] Basic integration test ready

* [NOD-1162] Wait for connection for real

* [NOD-1162] [FIX] Connection manager should run the moment it adds a request

* [NOD-1162] Make connect something that can be invoked in the middle of a test

* [NOD-1162] Complete first integration test

* [NOD-1162] Undo refactor error

* [NOD-1162] Rename Kaspad to App

* [NOD-1162] Convert checking connection to polling

* [NOD-1162] [FIX] Set peerID on handshake

* [NOD-1162] [FIX] Broadcast should send to outgoing route, not incoming

* [NOD-1162] [FIX] Add CmdInvRelayBlock to MakeEmptyMessage

* [NOD-1162] [FIX] Initialize Hash before decoding MsgInvRelayBlock

* [NOD-1162] [FIX] Invert condition

* [NOD-1162] [FIX] Fixes to encoding of MsgGetRelayBlocks

* [NOD-1162] [FIX] Add MsgGetRelayBlocks to MakeEmptyMessage

* [NOD-1162] [FIX] Connection manager should run the moment it adds a request

* [NOD-1162] [FIX] Set peerID on handshake

* [NOD-1162] [FIX] Broadcast should send to outgoing route, not incoming

* [NOD-1162] [FIX] Add CmdInvRelayBlock to MakeEmptyMessage

* [NOD-1162] [FIX] Initialize Hash before decoding MsgInvRelayBlock

* [NOD-1162] [FIX] Invert condition

* [NOD-1162] [FIX] Fixes to encoding of MsgGetRelayBlocks

* [NOD-1162] [FIX] Add MsgGetRelayBlocks to MakeEmptyMessage

* [NOD-1162] Add comment

* [NOD-1162] Added support for 3 nodes and clients in integration tests

* [NOD-1162] Add third node to integration test

* [NOD-1192] Use lock-less functions in TxPool.HandleNewBlock

* [NOD-1192] Broadcast transactions only if there's more than 0

* [NOD-1162] Removed double waitTillNextIteration

* [NOD-1192] Rename: broadcastTransactions -> broadcastTransactionsAfterBlockAdded

* [NOD-1162] Call NotifyBlocks on client3 as well

* [NOD-1162] ErrTimeout and ErrRouteClosed should be ProtocolErrors

* [NOD-1162] Added comment and removed redundant type PeerAddedCallback

* [NOD-1162] Revert overly eager rename

* [NOD-1162] Move DisableTLS to common config + minimize call for ioutil.TempDir()

* [NOD-1162] Add some clarifications in code

* [NOD-1193] Skip closed connections in NetAdapter.Broadcast

* [NOD-1193] Make sure to protect connectionsToRouters from concurrent access

* [NOD-1162] Add _test to all files in integration package

* [NOD-1162] Introduced appHarness to better encapsulate a single node

* [NOD-1162] Removed onChainChanged handler

* [NOD-1162] Remove redundant closure

* [NOD-1162] Correctly mark integration_test config as Simnet

* [NOD-1162] Rename app.ID -> app.P2PNodeID
2020-07-30 10:47:56 +03:00
Ori Newman
94f617b06a [NOD-1206] Call peer.StartIBD in new goroutine (#833) 2020-07-29 12:03:59 +03:00
Svarog
211c4d05e8 [NOD-1120] Separate registration of routes, and the starting of flows (#832)
* [NOD-1120] Separate flow registration and running

* [NOD-1120] Extract executeFunc to separate function

* [NOD-1120] Move the registration of flows out of goroutine

* [NOD-1120] Return after handleError

* [NOD-1120] Rename: addXXXFlow -> registerXXXFlow

* Rename: stop -> errChan

* [NOD-1120] Fix name of goroutine
2020-07-29 12:02:04 +03:00
Ori Newman
a9f3bdf4ab [NOD-1203] Create netadapter outside of protocol manager (#830) 2020-07-29 10:17:13 +03:00
Svarog
2303aecab4 [NOD-1198] Make router a property of netConnection, and remove map from connection to router in netAdapter (#829)
* [NOD-1198] Make router a property of netConnection, and remove map from connection to router in netAdapter

* [NOD-1198] Moved all router logic from netAdapter to netConnection

* [NOD-1198] Move disconnect to NetConnection

* [NOD-1198] Unexport netConnection.start

* [NOD-1198] Remove error from Disconnect functions

* [NOD-1198] Make sure OnDisconnectedHandler doesn't run when it shouldn't
2020-07-28 11:27:48 +03:00
Svarog
7655841e9f [NOD-1194] Make error handling more centralized, and ignore ErrRouteClosed (#828)
* [NOD-1194] Make error handling more centralized, and ignore ErrRouteClosed

* [NOD-1194] Ignore ErrRouteClosed in connection_loops as well

* [NOD-1194] Enhance comment

* [NOD-1194] Return after any HandleError

* [NOD-1194] Rephrased comment
2020-07-27 15:07:28 +03:00
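
One way the centralized handling might look, with errRouteClosed standing in for the router package's sentinel (introduced in the NOD-1170 commit further down this log): a single funnel ignores closed-route errors as an orderly shutdown and escalates everything else.

```go
package main

import (
	"errors"
	"fmt"
)

// errRouteClosed stands in for the router package's sentinel error.
var errRouteClosed = errors.New("route is closed")

// handleError is the single funnel for flow errors: a closed route means
// an orderly shutdown and is ignored; anything else is escalated.
func handleError(err error, onUnexpected func(error)) {
	if err == nil || errors.Is(err, errRouteClosed) {
		return
	}
	onUnexpected(err)
}

func main() {
	handleError(errRouteClosed, func(err error) { panic(err) }) // silently ignored
	handleError(errors.New("bad message"), func(err error) {
		fmt.Printf("disconnecting peer: %s\n", err)
	})
}
```
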
stasatdaglabs
c4bbcf9de6 [NOD-1181] Mark banned peers in address manager and persist bans to disk (#826)
* [NOD-1079] Fix block rejects over "Already have block" (#783)

* [NOD-1079] Return regular error instead of ruleError on already-have-block in ProcessBlock.

* [NOD-1079] Fix bad implementation of IsSelectedTipKnown.

* [NOD-1079] In shouldQueryPeerSelectedTips use selected DAG tip timestamp instead of past median time.

* [NOD-1079] Remove redundant (and possibly buggy) clearing of sm.requestedBlocks.

* [NOD-684] change simnet block rate to block per ms (#782)

* [NOD-684] Get rid of dag.targetTimePerBlock and use finality duration in dag params

* [NOD-684] Fix regtest genesis block

* [NOD-684] Set simnet's TargetTimePerBlock to 1ms

* [NOD-684] Shorten simnet finality duration

* [NOD-684] Change isDAGCurrentMaxDiff to be expressed as a number of blocks

* [NOD-684] Fix NextBlockMinimumTime to add one millisecond after past median time

* [NOD-1004] Make AddrManager.getAddress use only 1 loop to check all address chances and pick one of them (#741)

* [NOD-1004] Remove code duplication in Good().

* [NOD-1004] Remove some more code duplication in Good().

* [NOD-1004] Remove some more code duplication in Good().

* [NOD-1004] Remove code duplication in GetAddress().

* [NOD-1004] Remove code duplication in updateAddress.

* [NOD-1004] Remove some more code duplication in updateAddress.

* [NOD-1004] Remove redundant check in expireNew.

* [NOD-1004] Remove superfluous existence check from updateAddress.

* [NOD-1004] Make triedBucket use a slice instead of a list.

* [NOD-1004] Remove code duplication in getAddress.

* [NOD-1004] Remove infinite loops from getAddress.

* [NOD-1004] Made impossible branch panic.

* [NOD-1004] Remove a mystery comment.

* [NOD-1004] Remove an unnecessary sort.

* [NOD-1004] Make AddressKey a type alias.

* [NOD-1004] Added comment for AddressKey

* [NOD-1004] Fix merge errors.

* [NOD-1004] Fix merge errors.

* [NOD-1004] Do some renaming.

* [NOD-1004] Do some more renaming.

* [NOD-1004] Rename AddrManager to AddressManager.

* [NOD-1004] Rename AddrManager to AddressManager.

* [NOD-1004] Do some more renaming.

* [NOD-1004] Rename bucket to addressBucketArray.

* [NOD-1004] Fix a comment.

* [NOD-1004] Rename na to netAddress.

* [NOD-1004] Bring back an existence check.

* [NOD-1004] Fix an error message.

* [NOD-1004] Fix a comment.

* [NOD-1004] Use a boolean instead of -1.

* [NOD-1004] Use a boolean instead of -1 in another place.

Co-authored-by: Mike Zak <feanorr@gmail.com>

* Fix merge errors.

* [NOD-1181] Move isBanned logic into addressManager.

* [NOD-1181] Persist bans to disk.

* [NOD-1181] Add comments.

* [NOD-1181] Add an additional exit condition to the connection loop.

* [NOD-1181] Add a TODO.

* [NOD-1181] Wrap not-found errors in addressManager.

* [NOD-1181] Fix a comment.

* [NOD-1181] Rename banned to isBanned.

* [NOD-1181] Fix bad error handling in routerInitializer.

* [NOD-1181] Remove a TODO.

Co-authored-by: Ori Newman <orinewman1@gmail.com>
Co-authored-by: Mike Zak <feanorr@gmail.com>
2020-07-27 14:45:18 +03:00
stasatdaglabs
0cec1ce23e [NOD-1189] Exit early if we've filtered out all the hashes in block relay. (#827) 2020-07-27 12:23:43 +03:00
Svarog
089fe828aa [NOD-1193] Skip closed connections in NetAdapter.Broadcast (#825)
* [NOD-1193] Skip closed connections in NetAdapter.Broadcast

* [NOD-1193] Make sure to protect connectionsToRouters from concurrent access
2020-07-27 10:32:07 +03:00
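
Roughly what both fixes look like together, using invented types: a read-lock guards the connections map against concurrent modification, and closed connections are skipped instead of written to.

```go
package main

import (
	"fmt"
	"sync"
)

type connection struct{ closed bool }

func (c *connection) isConnected() bool { return !c.closed }

type netAdapter struct {
	mutex                sync.RWMutex
	connectionsToRouters map[*connection]chan string
}

// broadcast sends message to every live connection. The read-lock keeps
// connectionsToRouters safe against concurrent adds/removes, and closed
// connections are simply skipped rather than written to.
func (na *netAdapter) broadcast(message string) {
	na.mutex.RLock()
	defer na.mutex.RUnlock()
	for conn, router := range na.connectionsToRouters {
		if !conn.isConnected() {
			continue // peer dropped since being added; skip it
		}
		router <- message
	}
}

func main() {
	live, dead := &connection{}, &connection{closed: true}
	na := &netAdapter{connectionsToRouters: map[*connection]chan string{
		live: make(chan string, 1),
		dead: make(chan string, 1),
	}}
	na.broadcast("inv")
	fmt.Println(len(na.connectionsToRouters[live]), len(na.connectionsToRouters[dead])) // 1 0
}
```
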
stasatdaglabs
24a09fb3df Merge 0.6.0-dev into 0.6.0-libp2p (#824)
* [NOD-1079] Fix block rejects over "Already have block" (#783)

* [NOD-1079] Return regular error instead of ruleError on already-have-block in ProcessBlock.

* [NOD-1079] Fix bad implementation of IsSelectedTipKnown.

* [NOD-1079] In shouldQueryPeerSelectedTips use selected DAG tip timestamp instead of past median time.

* [NOD-1079] Remove redundant (and possibly buggy) clearing of sm.requestedBlocks.

* [NOD-684] change simnet block rate to block per ms (#782)

* [NOD-684] Get rid of dag.targetTimePerBlock and use finality duration in dag params

* [NOD-684] Fix regtest genesis block

* [NOD-684] Set simnet's TargetTimePerBlock to 1ms

* [NOD-684] Shorten simnet finality duration

* [NOD-684] Change isDAGCurrentMaxDiff to be expressed as a number of blocks

* [NOD-684] Fix NextBlockMinimumTime to add one millisecond after past median time

* [NOD-1004] Make AddrManager.getAddress use only 1 loop to check all address chances and pick one of them (#741)

* [NOD-1004] Remove code duplication in Good().

* [NOD-1004] Remove some more code duplication in Good().

* [NOD-1004] Remove some more code duplication in Good().

* [NOD-1004] Remove code duplication in GetAddress().

* [NOD-1004] Remove code duplication in updateAddress.

* [NOD-1004] Remove some more code duplication in updateAddress.

* [NOD-1004] Remove redundant check in expireNew.

* [NOD-1004] Remove superfluous existence check from updateAddress.

* [NOD-1004] Make triedBucket use a slice instead of a list.

* [NOD-1004] Remove code duplication in getAddress.

* [NOD-1004] Remove infinite loops from getAddress.

* [NOD-1004] Made impossible branch panic.

* [NOD-1004] Remove a mystery comment.

* [NOD-1004] Remove an unnecessary sort.

* [NOD-1004] Make AddressKey a type alias.

* [NOD-1004] Added comment for AddressKey

* [NOD-1004] Fix merge errors.

* [NOD-1004] Fix merge errors.

* [NOD-1004] Do some renaming.

* [NOD-1004] Do some more renaming.

* [NOD-1004] Rename AddrManager to AddressManager.

* [NOD-1004] Rename AddrManager to AddressManager.

* [NOD-1004] Do some more renaming.

* [NOD-1004] Rename bucket to addressBucketArray.

* [NOD-1004] Fix a comment.

* [NOD-1004] Rename na to netAddress.

* [NOD-1004] Bring back an existence check.

* [NOD-1004] Fix an error message.

* [NOD-1004] Fix a comment.

* [NOD-1004] Use a boolean instead of -1.

* [NOD-1004] Use a boolean instead of -1 in another place.

Co-authored-by: Mike Zak <feanorr@gmail.com>

* Fix merge errors.

Co-authored-by: Ori Newman <orinewman1@gmail.com>
Co-authored-by: Mike Zak <feanorr@gmail.com>
2020-07-26 15:23:18 +03:00
Svarog
b2901454d6 [NOD-1192] Use lock-less functions in TxPool.HandleNewBlock (#823)
* [NOD-1192] Use lock-less functions in TxPool.HandleNewBlock

* [NOD-1192] Broadcast transactions only if there's more than 0

* [NOD-1192] Rename: broadcastTransactions -> broadcastTransactionsAfterBlockAdded
2020-07-26 14:42:59 +03:00
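
The locking discipline behind this commit, in sketch form with illustrative names: exported methods acquire the mutex and delegate to unexported lock-less helpers, so a composite operation like HandleNewBlock can take the lock once without deadlocking on a re-entrant acquire.

```go
package main

import (
	"fmt"
	"sync"
)

type txPool struct {
	mtx  sync.Mutex
	pool map[string]bool
}

// RemoveTransaction is the exported, locking entry point.
func (tp *txPool) RemoveTransaction(txID string) {
	tp.mtx.Lock()
	defer tp.mtx.Unlock()
	tp.removeTransaction(txID)
}

// removeTransaction assumes the caller already holds tp.mtx.
func (tp *txPool) removeTransaction(txID string) {
	delete(tp.pool, txID)
}

// HandleNewBlock takes the mutex once and then calls only the lock-less
// helpers, avoiding a deadlock on Go's non-reentrant sync.Mutex.
func (tp *txPool) HandleNewBlock(txIDs []string) {
	tp.mtx.Lock()
	defer tp.mtx.Unlock()
	for _, txID := range txIDs {
		tp.removeTransaction(txID)
	}
}

func main() {
	tp := &txPool{pool: map[string]bool{"a": true, "b": true}}
	tp.HandleNewBlock([]string{"a", "b"})
	fmt.Println(len(tp.pool)) // 0
}
```
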
stasatdaglabs
6cf589dc9b [NOD-1145] Normalize panics in flows (#819)
* [NOD-1145] Remove panics from regular flows.

* [NOD-1145] Remove panics from the handshake flow.

* [NOD-1045] Fix merge errors.

* [NOD-1045] Remove a comment.

* [NOD-1045] Handle errors properly in AddTransaction and AddBlock.

* [NOD-1045] Remove a comment.

* [NOD-1045] Wrap ErrPeerWithSameIDExists with ProtocolError.

* [NOD-1145] Add TODOs.
2020-07-26 13:46:59 +03:00
stasatdaglabs
683ceda3a7 [NOD-1152] Move banning from netAdapter to connectionManager (#820)
* [NOD-1152] Move banning out of netadapter.

* [NOD-1152] Add a comment.

* [NOD-1152] Fix a comment.
2020-07-26 13:42:48 +03:00
Svarog
6a18b56587 [NOD-1162] Fixes from integration test (#821)
* [NOD-1162] [FIX] Connection manager should run the moment it adds a request

* [NOD-1162] [FIX] Set peerID on handshake

* [NOD-1162] [FIX] Broadcast should send to outgoing route, not incoming

* [NOD-1162] [FIX] Add CmdInvRelayBlock to MakeEmptyMessage

* [NOD-1162] [FIX] Initialize Hash before decoding MsgInvRelayBlock

* [NOD-1162] [FIX] Invert condition

* [NOD-1162] [FIX] Fixes to encoding of MsgGetRelayBlocks

* [NOD-1162] [FIX] Add MsgGetRelayBlocks to MakeEmptyMessage

* [NOD-1162] Add comment
2020-07-26 11:44:16 +03:00
Ori Newman
2c9e5be816 [NOD-1177] In handleGetConnectedPeerInfo, populate the missing result fields (#818) 2020-07-23 13:35:30 +03:00
stasatdaglabs
5d5a0ef335 [NOD-1153] Remove redundant maps from NetAdapter (#817)
* [NOD-1153] Remove redundant maps from NetAdapter.

* [NOD-1153] Fix a comment.

* [NOD-1153] Fix a comment.
2020-07-23 12:07:53 +03:00
Ori Newman
428f16ffef [NOD-1185] Broadcast blocks submitted through RPC (#816)
* [NOD-1185] Broadcast blocks submitted through RPC

* [NOD-1185] Send inv instead of block

* [NOD-1185] Fix go.sum and go.mod
2020-07-23 11:50:44 +03:00
stasatdaglabs
f93e54b63c [NOD-1184] Protect incomingRoutes from concurrent read/write. (#815) 2020-07-23 11:17:52 +03:00
Ori Newman
c30b350e8e [NOD-1183] Fix rpc server config (#814) 2020-07-23 10:49:46 +03:00
stasatdaglabs
8fdb5aa024 [NOD-1123] Implement banning (#812)
* [NOD-1123] Bubble bad-message errors up to the protocol level.

* [NOD-1123] Implement Banning.

* [NOD-1123] Properly use &stopped.

* [NOD-1123] Ban by IP rather than IP and port.

* [NOD-1123] Don't initiate connections to banned peers.

* [NOD-1123] Fix infinite loop in checkOutgoingConnections.

* [NOD-1123] Fix bannedAddresses key.

* [NOD-1123] Rename onBadMessageHandler to onInvalidMessageHandler.
2020-07-23 10:47:28 +03:00
Ori Newman
83a3c30d01 [NOD-1176] Implement a struct for each flow to share flow data (#811)
* [NOD-1176] Implement a struct for each flow to share flow data

* [NOD-1178] Add empty contexts to flow structs for consistency
2020-07-22 15:12:54 +03:00
stasatdaglabs
63646c8c92 [NOD-1175] Implement AddBlock (#809)
* [NOD-1175] Get rid of something weird.

* [NOD-1175] Implement AddBlock.

* [NOD-1175] Implement BFDisallowOrphans.

* [NOD-1175] Pass flags into AddBlock.

* [NOD-1175] Remove isOrphan and isDelayed handling from AddBlock.

* [NOD-1175] Use default return values in error.

* [NOD-1175] Bring back a comment.

* [NOD-1175] Add ErrOrphanBlockIsNotAllowed to errorCodeStrings.
2020-07-22 13:47:38 +03:00
stasatdaglabs
097e7ab42a [NOD-1155] Always use NetConnection for disconnection. (#810) 2020-07-22 12:18:08 +03:00
stasatdaglabs
3d45c8de50 [NOD-1130] Integrate RPC with the new architecture (#807)
* [NOD-1130] Delete rpcadapters.go.

* [NOD-1130] Delete p2p. Move rpc to top level.

* [NOD-1130] Remove DAGParams from rpcserverConfig.

* [NOD-1130] Remove rpcserverPeer, rpcserverConnManager, rpcserverSyncManager, and rpcserverConfig.

* [NOD-1130] Remove wallet RPC commands.

* [NOD-1130] Remove wallet RPC commands.

* [NOD-1130] Remove connmgr and peer.

* [NOD-1130] Move rpcmodel into rpc.

* [NOD-1130] Implement ConnectionCount.

* [NOD-1130] Remove ping and node RPC commands.

* [NOD-1130] Dummify handleGetNetTotals.

* [NOD-1130] Add NetConnection to Peer.

* [NOD-1130] Fix merge errors.

* [NOD-1130] Implement Peers.

* [NOD-1130] Fix HandleGetConnectedPeerInfo.

* [NOD-1130] Fix SendRawTransaction.

* [NOD-1130] Rename addManualNode to connect and removeManualNode to disconnect.

* [NOD-1130] Add a stub for AddBlock.

* [NOD-1130] Fix tests.

* [NOD-1130] Replace half-baked contents of RemoveConnection with a stub.

* [NOD-1130] Fix merge errors.

* [NOD-1130] Make golint happy.

* [NOD-1130] Get rid of something weird.

* [NOD-1130] Rename minerClient back to client.

* [NOD-1130] Add a few fields to GetConnectedPeerInfoResult.

* [NOD-1130] Rename oneTry to isPermanent.

* [NOD-1130] Implement ConnectionCount in NetAdapter.

* [NOD-1130] Move RawMempoolVerbose out of mempool.

* [NOD-1130] Move isSynced into the mining package.

* [NOD-1130] Fix a compilation error.

* [NOD-1130] Make golint happy.

* [NOD-1130] Fix merge errors.
2020-07-22 10:26:39 +03:00
Ori Newman
8e1958c20b [NOD-1168] Add context interfaces for flows (#808)
* [NOD-1168] Add context interfaces to flows

* [NOD-1168] Move IBD state to protocol manager

* [NOD-1168] Move ready peers to protocol manager

* [NOD-1168] Add comments

* [NOD-1168] Separate context interfaces for send and receive pings

* [NOD-1168] Add protocol shared state to FlowContext

* [NOD-1168] Fix comment

* [NOD-1168] Rename Context->HandleHandshakeContext

* [NOD-1168] Initialize readyPeers and transactionsToRebroadcast

* [NOD-1168] Rename readyPeers -> peers
2020-07-21 18:02:33 +03:00
Ori Newman
3e6c1792ef [NOD-1170] Return a custom error when a route is closed (#805)
* [NOD-1170] Return a custom error when a route is closed

* [NOD-1170] Return ErrRouteClosed directly from route methods

* [NOD-1170] Fix comment location
2020-07-21 12:06:11 +03:00
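
A minimal sketch of a route that returns a dedicated sentinel after Close, so callers can tell an orderly shutdown apart from a genuine failure. The route type here is heavily simplified; kaspad's real router is more involved.

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// ErrRouteClosed is returned directly from route methods after Close,
// letting callers distinguish an orderly shutdown from a real failure.
var ErrRouteClosed = errors.New("route is closed")

type route struct {
	mutex  sync.Mutex
	ch     chan string
	closed bool
}

func newRoute() *route { return &route{ch: make(chan string, 1)} }

func (r *route) Enqueue(message string) error {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	if r.closed {
		return ErrRouteClosed
	}
	r.ch <- message // simplified: this sketch assumes buffer space
	return nil
}

func (r *route) Close() {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	r.closed = true
}

func main() {
	r := newRoute()
	r.Close()
	if err := r.Enqueue("ping"); errors.Is(err, ErrRouteClosed) {
		fmt.Println("route was closed; not a failure worth banning a peer over")
	}
}
```
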
Svarog
6b5b4bfb2a [NOD-1164] Remove the singleton from dbaccess, to enable multiple db connections in same run (#806)
* [NOD-1164] Defined DatabaseContext as the basic object of dbaccess

* [NOD-1164] Update everything to use databaseContext

* [NOD-1164] Fix tests

* [NOD-1164] Add comments

* [NOD-1164] Removed databaseContext from blockNode

* [NOD-1164] Enforce DatabaseContext != nil

* [NOD-1164] Remove redundant and wrong comment line
2020-07-21 12:02:44 +03:00
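
The shape of this change, sketched with placeholder types: dbaccess-style functions take a DatabaseContext parameter instead of consulting a package-level singleton, which is what allows several independent database connections in a single run (e.g. in integration tests).

```go
package main

import "fmt"

// DatabaseContext carries a database handle explicitly instead of living
// in a package-level singleton.
type DatabaseContext struct {
	name string // stand-in for a real database handle
}

func New(name string) *DatabaseContext {
	return &DatabaseContext{name: name}
}

// StoreBlock is an example accessor: every dbaccess-style function takes
// the context as a parameter rather than reaching for a global.
func StoreBlock(databaseContext *DatabaseContext, blockHash string) {
	fmt.Printf("storing %s in %s\n", blockHash, databaseContext.name)
}

func main() {
	db1 := New("node1-db")
	db2 := New("node2-db")
	StoreBlock(db1, "abc")
	StoreBlock(db2, "def") // a second connection in the same run
}
```
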
Ori Newman
b797436884 [NOD-1127] Implement transaction propagation (#803)
* [NOD-1128] Add all flows to a directory names flows

* [NOD-1128] Make everything in protocol package a manager method

* [NOD-1128] Add AddTransaction mechanism to protocol manager

* [NOD-1128] Add mempool related flows

* [NOD-1128] Add mempool related flows

* [NOD-1128] Add mempool related flows

* [NOD-1127] Fix router message types

* [NOD-1127] Inline updateQueues

* [NOD-1127] Rename acceptedTxs->transactionsAcceptedToMempool

* [NOD-1127] Add TODOs to notify transactions to RPC

* [NOD-1127] Fix comment

* [NOD-1127] Rename acceptedTxs->transactionsAcceptedToMempool

* [NOD-1127] Rename MsgTxInv->MsgInvTransaction

* [NOD-1127] Rename MsgTxInv.TXIDs->TxIDS

* [NOD-1127] Change flow name

* [NOD-1127] Call m.addTransactionRelayFlow

* [NOD-1127] Remove redundant line

* [NOD-1127] Use common.DefaultTimeout

* [NOD-1127] Return early if len(idsToRequest) == 0

* [NOD-1127] Add NewBlockHandler to IBD
2020-07-20 16:01:35 +03:00
Svarog
2de3c1d0d4 [NOD-1160] Convert *config.Config from singleton to an object that is being passed around (#802)
* [NOD-1160] remove activeConfig from config package + update main

* [NOD-1160] Update main and addrmanager

* [NOD-1160] Update netAdapater

* [NOD-1160] Update connmanager

* [NOD-1160] Fix connmgr package

* [NOD-1160] Fixed DNSSeed functions

* [NOD-1160] Fixed protocol package and subpackages

* [NOD-1160] Fix p2p package

* [NOD-1160] Fix rpc package

* [NOD-1160] Fix kaspad a final time

* [NOD-1160] Make dnsseed.SeedFromDNS callable outside kaspad

* [NOD-1160] Fix tests

* [NOD-1160] Pass cfg to kaspad

* [NOD-1160] Add comment and remove redundant object

* [NOD-1160] Fix typo
2020-07-20 14:33:35 +03:00
Ori Newman
7e81757e2f [NOD-1161] Name goroutines and log them by the name (#804)
* [NOD-1161] Name goroutines and log them by the name

* [NOD-1161] Fix some goroutine names
2020-07-20 13:00:23 +03:00
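
The pattern this commit describes, reduced to a sketch with a hypothetical spawn helper: every goroutine gets a human-readable name, so log lines and recovered panics can be attributed to a specific goroutine.

```go
package main

import (
	"log"
	"sync"
)

// spawn runs fn on a goroutine tagged with a human-readable name, so log
// lines and recovered panics can be attributed to a specific goroutine.
func spawn(name string, fn func()) {
	go func() {
		defer func() {
			if r := recover(); r != nil {
				log.Printf("goroutine %q panicked: %v", name, r)
			}
		}()
		log.Printf("goroutine %q started", name)
		fn()
		log.Printf("goroutine %q finished", name)
	}()
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	spawn("example-worker", func() {
		defer wg.Done()
		log.Println("doing work")
	})
	wg.Wait()
}
```
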
stasatdaglabs
4773f87875 [NOD-1125] Implement the IBD flow (#800)
* [NOD-1125] Write a skeleton for starting IBD.

* [NOD-1125] Add WaitForIBDStart to Peer.

* [NOD-1125] Move functions around.

* [NOD-1125] Fix merge errors.

* [NOD-1125] Fix a comment.

* [NOD-1125] Implement sendGetBlockLocator.

* [NOD-1125] Begin implementing findIBDLowHash.

* [NOD-1125] Finish implementing findIBDLowHash.

* [NOD-1125] Rename findIBDLowHash to findHighestSharedBlockHash.

* [NOD-1125] Implement downloadBlocks.

* [NOD-1125] Implement msgIBDBlock.

* [NOD-1125] Implement msgIBDBlock.

* [NOD-1125] Fix message types for HandleIBD.

* [NOD-1125] Write a skeleton for requesting selected tip hashes.

* [NOD-1125] Write a skeleton for the rest of the IBD requests.

* [NOD-1125] Implement HandleGetBlockLocator.

* [NOD-1125] Fix wrong timeout.

* [NOD-1125] Fix compilation error.

* [NOD-1125] Implement HandleGetBlocks.

* [NOD-1125] Fix compilation errors.

* [NOD-1125] Fix merge errors.

* [NOD-1125] Implement selectPeerForIBD.

* [NOD-1125] Implement RequestSelectedTip.

* [NOD-1125] Implement HandleGetSelectedTip.

* [NOD-1125] Make go lint happy.

* [NOD-1125] Add minGetSelectedTipInterval.

* [NOD-1125] Call StartIBDIfRequired where needed.

* [NOD-1125] Fix merge errors.

* [NOD-1125] Remove a redundant line.

* [NOD-1125] Rename shouldContinue to shouldStop.

* [NOD-1125] Lowercasify an error message.

* [NOD-1125] Shuffle statements around in findHighestSharedBlockHash.

* [NOD-1125] Rename hasRecentlyReceivedBlock to isDAGTimeCurrent.

* [NOD-1125] Scope minGetSelectedTipInterval.

* [NOD-1125] Handle an unhandled error.

* [NOD-1125] Use AddUint32 instead of LoadUint32 + StoreUint32.

* [NOD-1125] Use AddUint32 instead of LoadUint32 + StoreUint32.

* [NOD-1125] Use SwapUint32 instead of AddUint32.

* [NOD-1125] Remove error from requestSelectedTips.

* [NOD-1125] Actually stop IBD when it should stop.

* [NOD-1125] Actually stop RequestSelectedTip when it should stop.

* [NOD-1125] Don't ban peers that send us delayed blocks during IBD.

* [NOD-1125] Make unexpected message type messages nicer.

* [NOD-1125] Remove Peer.ready and make HandleHandshake return it to guarantee we never operate on a non-initialized peer.

* [NOD-1125] Remove errors associated with Peer.ready.

* [NOD-1125] Extract maxHashesInMsgIBDBlocks to a const.

* [NOD-1125] Move the ibd package into flows.

* [NOD-1125] Start IBD if required after getting an unknown block inv.

* [NOD-1125] Don't request blocks during relay if we're in the middle of IBD.

* [NOD-1125] Remove AddBlockLocatorHash.

* [NOD-1125] Extract runIBD to a separate function.

* [NOD-1125] Extract runSelectedTipRequest to a separate function.

* [NOD-1125] Remove EnqueueWithTimeout.

* [NOD-1125] Increase the capacity of the outgoingRoute.

* [NOD-1125] Fix some bad names.

* [NOD-1125] Fix a comment.

* [NOD-1125] Simplify a comment.

* [NOD-1125] Move WaitFor... functions into their respective run... functions.

* [NOD-1125] Return default values in case of error.

* [NOD-1125] Use CmdXXX in error messages.

* [NOD-1125] Use MaxInvPerMsg in outgoingRouteMaxMessages instead of MaxBlockLocatorsPerMsg.

* [NOD-1125] Fix a comment.

* [NOD-1125] Disconnect a peer that sends us a delayed block during IBD.

* [NOD-1125] Use StoreUint32 instead of SwapUint32.

* [NOD-1125] Add a comment.

* [NOD-1125] Don't ban peers that send us delayed blocks.
2020-07-20 12:52:23 +03:00
Svarog
aa5bc34280 [NOD-1148] P2P stabilization (#798)
* [NOD-1148] Add lock around route's close operation

* [NOD-1148] Added tracing of incoming and outgoing messages

* [NOD-1148] Cast to MsgPing should have been to MsgPong

* [NOD-1148] Check for NeedMoreAddresses before sending GetAddr message and invert condition
2020-07-19 14:57:34 +03:00
Svarog
b9a25c1141 [NOD-1163] Combine separated flows into single packages (#801)
* [NOD-1163] Combine separated flows into single packages

* [NOD-1163] Move handshake.go to handshake package

* [NOD-1163] Use single logger prefix for everything under protocol

* [NOD-1163] Add comment

* [NOD-1163] Fix refactor error
2020-07-19 11:24:25 +03:00
Svarog
b42b8b16fd [NOD-1120] Connection Manager (#796)
* [NOD-1120] Removed closure in NetAdapter.onConnectedHandler

* [NOD-1120] Implement all connection manager methods

* [NOD-1120] Integrated connmanager into kaspad + added call for dnsseeder

* [NOD-1120] Allow buffer to not be bytes.Buffer

* [NOD-1120] Added timeout to connect

* [NOD-1120] Don't enter the connections-to-add loop if none are needed

* [NOD-1120] Add call for addressManager.Good

* [NOD-1120] Minor bug fixes

* [NOD-1120] Remove errChan from grpcConnection

* [NOD-1120] Add comments to exported methods

* [NOD-1120] cancel the context for DialContext in gRPCServer.Connect

* [NOD-1120] Don't try to remove from connSet a connection that doesn't exist

* [NOD-1120] add ok bool to connectionSet.get

* [NOD-1120] Remove overuse of if-else in checkConnectionRequests

* [NOD-1120] Tidied up ConnectionManager

* [NOD-1120] Moved checkIncomingConnections to its own file

* [NOD-1120] cleanup in checkOutgoingConnections

* [NOD-1120] Cleanup in SeedDNS, and move call outside of connection manager

* [NOD-1120] Add check that both --connect and --addpeer aren't used

* [NOD-1120] Move dial timeout to constant

* [NOD-1120] Enhance comment

* [NOD-1120] Log connection failure out of initiateConnection

* [NOD-1148] Reshuffle checkRequestedConnections to make more sense

* [NOD-1120] Move continue to correct place + reshuffle logging code

* [NOD-1120] Don't expose server.Connection outside netAdapter - expose a wrapper instead

* [NOD-1120] Add comments

* [NOD-1120] Don't return the connection from netAdapter.Connect()

* [NOD-1120] Use .Address as key for connectionSet

* [NOD-1120] Fix minRetryDuration usage

* [NOD-1120] Remove the correct number of incoming connections

* [NOD-1120] Add comment

* [NOD-1120] Rename connSet -> incomingConnectionSet

* [NOD-1120] fix grammar
2020-07-16 17:15:58 +03:00
Ori Newman
e0aac68759 [NOD-1128] Convert message type to uint32 (#799)
* [NOD-1128] Change message command to uint32

* [NOD-1128] Don't use iota

* [NOD-1128] Remove redundant line
2020-07-16 17:11:05 +03:00
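
A small sketch of the resulting style, with invented command names: explicit uint32 values instead of iota keep the on-the-wire numbering stable even if someone later reorders the constants, which matters for a serialized network protocol.

```go
package main

import "fmt"

// MessageCommand identifies a wire message type. Explicit values, rather
// than iota, keep the numbering stable even if the constants are
// reordered in the source file.
type MessageCommand uint32

const (
	CmdVersion MessageCommand = 0
	CmdVerAck  MessageCommand = 1
	CmdPing    MessageCommand = 2
	CmdPong    MessageCommand = 3
)

func main() {
	fmt.Println(CmdPing) // 2
}
```
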
Ori Newman
9939671ccc [NOD-1147] Implement address exchange (#795)
* [NOD-1147] Implement address exchange

* [NOD-1147] Put placeholder for source address

* [NOD-1147] Fix tests

* [NOD-1147] Add comment

* [NOD-1147] Remove needAddresses from MsgGetAddr

* [NOD-1147] Use rand.Shuffle

* [NOD-1147] Remove redundant const

* [NOD-1147] Move defer to its correct place

* [NOD-1147] Fix typo

* [NOD-1147] Use EnqueueWithTimeout for outgoingRoute

* [NOD-1147] Rename MsgGetAddr->MsgGetAddresses

* [NOD-1147] Rename MsgGetAddr->MsgGetAddresses

* [NOD-1147] Rename MsgAddr->MsgAddresses

* [NOD-1147] Rename fakeSrcAddr->fakeSourceAddress

* [NOD-1147] Remove redundant files

* [NOD-1147] CmdAddr -> CmdAddress

* [NOD-1147] Rename addr to address in protocol package
2020-07-15 17:19:46 +03:00
Ori Newman
eaa8515442 [NOD-1150] Add in netadapter function to disconnect router (#797)
* [NOD-1150] Add in netadapter function to disconnect router

* [NOD-1150] Fix comment
2020-07-15 14:42:17 +03:00
Ori Newman
04b578cee1 [NOD-1137] Implement handshake protocol (#792)
* [NOD-1126] Implement block relay flow

* [NOD-1126] Implement block relay flow

* [NOD-1126] Add StartGetRelayBlocksListener

* [NOD-1126] Integrate with new interface

* [NOD-1126] Fix comments

* [NOD-1126] Refactor protocol.go

* [NOD-1126] Split long lines

* [NOD-1126] Fix comment

* [NOD-1126] move sharedRequestedBlocks to a separate file

* [NOD-1126] Fix error message

* [NOD-1126] Move handleInv to StartBlockRelay

* [NOD-1126] Create hashesQueueSet type

* [NOD-1126] Make deleteFromRequestedBlocks a method

* [NOD-1126] Fix comment

* [NOD-1126] Add block logger

* [NOD-1126] Rename advertisedProtoVer->advertisedProtocolVer

* [NOD-1126] Fix comment and an error message

* [NOD-1126] Remove redundant loop

* [NOD-1126] Move requestBlocks upper

* [NOD-1126] Remove existing blocks in requestedBlocks from hashesToRequest

* [NOD-1126] Change comment

* [NOD-1126] Rename stallResponseTimeout->timeout

* [NOD-1126] Use switch inside readMsgBlock

* [NOD-1126] Fix error message and remove redundant log

* [NOD-1126] Rename package names

* [NOD-1126] Fix comment

* [NOD-1126] Change file names

* [NOD-1126] Convert block to partial if needed

* [NOD-1126] Remove function redeclaration

* [NOD-1126] continue instead of return

* [NOD-1126] Rename LogBlockBlueScore->LogBlock

* [NOD-1126] Add minimum functions to utils

* [NOD-1126] Flip condition on readInv

* [NOD-1126] Rename utilMath->mathUtil

* [NOD-1126] Fix comment

* [NOD-1137] Implement handshake

* [NOD-1137] Replace version's nonce with ID

* [NOD-1137] Remove redundant function

* [NOD-1137] Move handshake to a separate file

* [NOD-1137] Add todo

* [NOD-1137] Replace peer internal id with global peer ID

* [NOD-1137] Add serializer/deserializer to ID

* [NOD-1137] Remove validation from AddUserAgent

* [NOD-1137] Add missing id package

* [NOD-1137] Rename variables

* [NOD-1137] Add comment

* [NOD-1137] Implement GetBestLocalAddress

* [NOD-1137] Implement TODOs

* [NOD-1137] Rename variables

* [NOD-1137] Move errors.Is inside err!=nil branch

* [NOD-1137] Fix erroneous condition on Dequeue

* [NOD-1137] Fix bug in GetReadyPeerIDs

* [NOD-1137] Handle external IP on GetBestLocalAddress

* [NOD-1137] Remove version and verack message types when handshake is over

* [NOD-1137] Add FromBytes to id package

* [NOD-1137] Add protocol error

* [NOD-1137] Add ErrTimeout

* [NOD-1137] Log error only if exists

* [NOD-1137] Replace idFromBytes->id.FromBytes

* [NOD-1137] Add comments

* [NOD-1137] Remove ErrTimeout

* [NOD-1137] Unremove ErrTimeout

* [NOD-1137] Change comment

* [NOD-1137] Use EnqueueWithTimeout everywhere in protocol
2020-07-14 17:20:29 +03:00
stasatdaglabs
f8e53d309c [NOD-1142] Implement EnqueueWithTimeout and DequeueWithTimeout (#794)
* [NOD-1142] Implement EnqueueWithTimeout and DequeueWithTimeout.

* [NOD-1142] Use DequeueWithTimeout in readMsgBlock.

* [NOD-1142] Add comments about the new methods.
2020-07-14 16:14:27 +03:00
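
A plausible implementation of the two methods, on a simplified route type: each wraps a channel operation in a select against time.After, returning a timeout error instead of blocking forever on a stalled peer. ErrTimeout here is a stand-in for whatever sentinel the codebase uses.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

var ErrTimeout = errors.New("timeout expired")

type route struct {
	ch chan string
}

// EnqueueWithTimeout gives up after the given duration instead of
// blocking forever on a stalled peer.
func (r *route) EnqueueWithTimeout(message string, timeout time.Duration) error {
	select {
	case r.ch <- message:
		return nil
	case <-time.After(timeout):
		return ErrTimeout
	}
}

// DequeueWithTimeout mirrors EnqueueWithTimeout on the receive side.
func (r *route) DequeueWithTimeout(timeout time.Duration) (string, error) {
	select {
	case message := <-r.ch:
		return message, nil
	case <-time.After(timeout):
		return "", ErrTimeout
	}
}

func main() {
	r := &route{ch: make(chan string, 1)}
	_ = r.EnqueueWithTimeout("ping", time.Second)
	message, _ := r.DequeueWithTimeout(time.Second)
	fmt.Println(message) // ping
	_, err := r.DequeueWithTimeout(10 * time.Millisecond)
	fmt.Println(err) // timeout expired
}
```
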
stasatdaglabs
6076309b3e [NOD-1142] Implement ping flow (#793)
* [NOD-1124] Move Router to the router package.

* [NOD-1124] Implement SetOnRouteCapacityReachedHandler.

* [NOD-1124] Use Routes instead of bare channels.

* [NOD-1124] Fix merge errors.

* [NOD-1124] Connect the Router to the Connection.

* [NOD-1124] Fix merge errors.

* [NOD-1124] Move some variables around.

* [NOD-1124] Fix unreachable code.

* [NOD-1124] Fix a variable name.

* [NOD-1142] Implement ping flows.

* [NOD-1142] Add ping flows to startFlows.

* [NOD-1142] Fix merge errors.

* [NOD-1142] Fix a typo.

* [NOD-1142] Add comments to exported functions.

* [NOD-1142] Fix bad flow name.

* [NOD-1142] Remove a redundant empty line.

* [NOD-1142] Fix a typo.

* [NOD-1142] Simplify for loop.

* [NOD-1142] Rename HandlePing to HandleIncomingPings and StartPingLoop to StartSendingPings.

* [NOD-1142] Fix no-longer-infinite loop.

* [NOD-1142] Represent ping duration as time.Duration instead of an int64.

* [NOD-1142] Rename HandleIncomingPings to ReceivePings and StartSendingPings to SendPings.

* [NOD-1142] Move pingInterval to within SendPings.

* [NOD-1142] Rephrase a comment.
2020-07-14 12:41:37 +03:00
stasatdaglabs
05db135d23 [NOD-1124] Implement the Flow thread model and architecture (#791)
* [NOD-1124] Move Router to the router package.

* [NOD-1124] Implement SetOnRouteCapacityReachedHandler.

* [NOD-1124] Use Routes instead of bare channels.

* [NOD-1124] Fix merge errors.

* [NOD-1124] Connect the Router to the Connection.

* [NOD-1124] Fix merge errors.

* [NOD-1124] Move some variables around.

* [NOD-1124] Fix unreachable code.

* [NOD-1124] Fix a variable name.

* [NOD-1124] Rename AddRoute to AddIncomingRoute.

* [NOD-1124] Rename SetRouter to Start.

* [NOD-1124] Make AddIncomingRoute create a Route by itself.

* [NOD-1124] Replace IncomingRoute with EnqueueIncomingMessage.

* [NOD-1124] Make Enqueue and Dequeue return isOpen instead of err.

* [NOD-1124] Remove writeDuringDisconnectLock.

* [NOD-1124] In sendLoop, move outgoingRoute to outside the loop.

* [NOD-1124] Start the connection loops only when Start is called.

* [NOD-1124] Replace OnIDReceivedHandler with AssociateRouterID.

* [NOD-1124] Add isOpen to Enqueue and Dequeue.

* [NOD-1124] Protect errChan from writing during disconnect.
2020-07-13 16:51:13 +03:00
Ori Newman
433cdb6006 [NOD-1126] implement block relay flow (#786)
* [NOD-1126] Implement block relay flow

* [NOD-1126] Add StartGetRelayBlocksListener

* [NOD-1126] Implement block relay flow

* [NOD-1126] Integrate with new interface

* [NOD-1126] Fix comments

* [NOD-1126] Refactor protocol.go

* [NOD-1126] Split long lines

* [NOD-1126] Fix comment

* [NOD-1126] move sharedRequestedBlocks to a separate file

* [NOD-1126] Fix error message

* [NOD-1126] Move handleInv to StartBlockRelay

* [NOD-1126] Create hashesQueueSet type

* [NOD-1126] Make deleteFromRequestedBlocks a method

* [NOD-1126] Fix comment

* [NOD-1126] Add block logger

* [NOD-1126] Rename advertisedProtoVer->advertisedProtocolVer

* [NOD-1126] Fix comment and an error message

* [NOD-1126] Remove redundant loop

* [NOD-1126] Move requestBlocks upper

* [NOD-1126] Remove existing blocks in requestedBlocks from hashesToRequest

* [NOD-1126] Change comment

* [NOD-1126] Rename stallResponseTimeout->timeout

* [NOD-1126] Use switch inside readMsgBlock

* [NOD-1126] Fix error message and remove redundant log

* [NOD-1126] Rename package names

* [NOD-1126] Fix comment

* [NOD-1126] Change file names

* [NOD-1126] Convert block to partial if needed

* [NOD-1126] Remove function redeclaration

* [NOD-1126] continue instead of return

* [NOD-1126] Rename LogBlockBlueScore->LogBlock

* [NOD-1126] Add minimum functions to utils

* [NOD-1126] Flip condition on readInv

* [NOD-1126] Rename utilMath->mathUtil

* [NOD-1126] Fix comment
2020-07-12 16:11:42 +03:00
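
The hashesQueueSet type mentioned above pairs a FIFO queue with a set, so the relay flow can preserve request order while testing membership in O(1). A sketch, with Hash standing in for kaspad's daghash.Hash:

```go
package blockrelay

// Hash is a stand-in for kaspad's daghash.Hash.
type Hash [32]byte

// hashesQueueSet pairs a FIFO queue of block hashes with a set for
// constant-time membership checks.
type hashesQueueSet struct {
	queue []Hash
	set   map[Hash]struct{}
}

func newHashesQueueSet() *hashesQueueSet {
	return &hashesQueueSet{set: make(map[Hash]struct{})}
}

// enqueueIfNotExists adds a hash only if it isn't already queued.
func (q *hashesQueueSet) enqueueIfNotExists(hash Hash) {
	if _, ok := q.set[hash]; ok {
		return
	}
	q.queue = append(q.queue, hash)
	q.set[hash] = struct{}{}
}

// dequeue removes and returns up to n hashes from the front of the queue.
func (q *hashesQueueSet) dequeue(n int) []Hash {
	if n > len(q.queue) {
		n = len(q.queue)
	}
	dequeued := q.queue[:n]
	q.queue = q.queue[n:]
	for _, hash := range dequeued {
		delete(q.set, hash)
	}
	return dequeued
}
```
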
Svarog
4a4dca1926 [NOD-1118] Implement gRPC basic connectivity (#790)
* [NOD-1118] Added protobufs for the MessageStream

* [NOD-1118] Implement some of the basic grpc methods

* [NOD-1118] Implemented gRPCConnection send and receive

* [NOD-1118] Implemented basic connection loops

* [NOD-1118] gRPC server implementation ready

* [NOD-1118] Add connection management

* [NOD-1118] Sort out the connection loops

* [NOD-1118] Add temporary testConnection

* [NOD-1118] Send to c.errChan whether error was received or not

* [NOD-1118] Call OnConnectHandler in time

* [NOD-1118] Handle closing connections properly

* [NOD-1118] Add comments to exported functions

* [NOD-1118] Call server.addConnection on newConnection

* [NOD-1118] Add a TODO comment

* [NOD-1118] Add a TODO comment

* [NOD-1118] Make connection a Stringer

* [NOD-1118] Made the connection loops 100% synchronous

* [NOD-1118] Make connection.isConnected uint32

* [NOD-1118] Move the Add/Remove connection from grpcConnection to register/unregister connection

* [NOD-1118] Convert error messages to lower case

* [NOD-1118] Remove protoc inline dependency

* [NOD-1118] Fix comment

* [NOD-1118] Exit if there was an error starting the protocol manager

* [NOD-1118] Fix error message

* [NOD-1118] Fixed a few comments

* [NOD-1118] Extract listenOn to a method

* [NOD-1118] Use !=0 for isConnected

* [NOD-1118] Refactor listenOn

* [NOD-1118] Add lock on channelWrites in gRPCConnection

* [NOD-1118] Rename channelWriteLock -> writeDuringDisconnectLock

* [NOD-1118] Reshuffle a comment

* [NOD-1118] Add a TODO comment
2020-07-12 15:22:49 +03:00
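
Keeping connection.isConnected as a uint32, as the bullets above describe, lets the state be read and flipped without a mutex via the atomic package, with the != 0 comparison convention adopted in the same PR. A minimal sketch (field and method names assumed):

```go
package grpcserver

import "sync/atomic"

// gRPCConnection sketch: connection state as a uint32 so it can be
// read and written atomically.
type gRPCConnection struct {
	isConnected uint32
}

func (c *gRPCConnection) connect() {
	atomic.StoreUint32(&c.isConnected, 1)
}

func (c *gRPCConnection) disconnect() {
	atomic.StoreUint32(&c.isConnected, 0)
}

// IsConnected uses != 0 rather than == 1, per the convention above.
func (c *gRPCConnection) IsConnected() bool {
	return atomic.LoadUint32(&c.isConnected) != 0
}
```
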
stasatdaglabs
6d591dde74 [NOD-1124] Implement the Flow thread model and architecture (#789)
* [NOD-1124] Rename Peer to Connection (because Peer is a business logic term)

* [NOD-1124] Implement Close for Router.

* [NOD-1124] Add SetPeerDisconnectedHandler.

* [NOD-1124] Remove mentions of "peer" from the netadapter package.

* [NOD-1124] Handle errors/stopping in netadapter.

* [NOD-1124] Remove netadapter.Connection.

* [NOD-1124] Add startSendLoop.

* [NOD-1124] Implement network IDs.

* [NOD-1124] Implement a map between IDs and routes.

* [NOD-1124] Implement Broadcast.

* [NOD-1124] Fix rename error.

* [NOD-1124] Fix copy+paste error.

* [NOD-1124] Change the type of NetAdapter.stop to uint32.

* [NOD-1124] If NetAdapter is stopped more than once, return an error.

* [NOD-1124] Add an error case to RouteInputMessage.

* [NOD-1124] Rename CreateID to NewID.

* [NOD-1124] Spawn from outside startReceiveLoop and startSendLoop.

* [NOD-1124] Fix a comment.

* [NOD-1124] Replace break with for condition.

* [NOD-1124] Don't disconnect from disconnected peers.

* [NOD-1124] Fix a for condition.

* [NOD-1124] Handle an error.
2020-07-09 09:34:28 +03:00
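
A sketch of the network-ID-to-route map and Broadcast described above: each router gets an ID during the handshake, the adapter maps IDs to outgoing routes, and Broadcast fans a message out over the map. The ID layout and names here are assumptions:

```go
package netadapter

import "sync"

// ID is a stand-in for the adapter's per-router network ID.
type ID [16]byte

// Route is a stand-in for a peer's outgoing message route.
type Route struct {
	channel chan interface{}
}

// NetAdapter keeps a map from IDs to routes, which is what makes
// broadcasting to all connected routers possible.
type NetAdapter struct {
	mutex       sync.RWMutex
	idsToRoutes map[ID]*Route
}

func NewNetAdapter() *NetAdapter {
	return &NetAdapter{idsToRoutes: make(map[ID]*Route)}
}

// AssociateRouterID registers a route under the ID received in the handshake.
func (na *NetAdapter) AssociateRouterID(route *Route, id ID) {
	na.mutex.Lock()
	defer na.mutex.Unlock()
	na.idsToRoutes[id] = route
}

// Broadcast enqueues the message on every known route.
func (na *NetAdapter) Broadcast(message interface{}) {
	na.mutex.RLock()
	defer na.mutex.RUnlock()
	for _, route := range na.idsToRoutes {
		route.channel <- message
	}
}
```
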
Svarog
8e624e057e [NOD-1134] Integrate Protocol into main (#788)
* [NOD-1134] Integrate Protocol into main

* [NOD-1134] Fix typo

* [NOD-1134] Added comments

* [NOD-1134] A series of renames to protocol

* [NOD-1134] Fix comment

* [NOD-1134] protocol.ProtocolManager -> Manager

* [NOD-1134] Update comment

* [NOD-1134] protocol.New() -> protocol.NewManager()
2020-07-08 11:52:53 +03:00
stasatdaglabs
eb2642ba90 [NOD-1124] Implement the Flow thread model and architecture (#787)
* [NOD-1124] Begin implementing netadapter.

* [NOD-1124] Implementing a stub gRPC server.

* [NOD-1124] Construct the server inside the netadapter.

* [NOD-1124] Rewrite protocol.go to fit with the new netAdapter model.

* [NOD-1124] Wrap a connection in Peer.

* [NOD-1124] Add a peerstate object.

* [NOD-1124] Remove the peerstate object.

* [NOD-1124] Remove router out of Peer.

* [NOD-1124] Tag a TODO.

* [NOD-1124] Return an error out of AddRoute if a route already exists for some message type.

* [NOD-1124] Rename the package grpc to grpcserver.

* [NOD-1124] Extracted newConnectionHandler into a type.

* [NOD-1124] Extract routerInitializer into a type.

* [NOD-1124] Panic/Add TODOs everywhere that isn't implemented.

* [NOD-1124] Improve the NetAdapter comment.

* [NOD-1124] Rename NewConnectionHandler to PeerConnectedHandler.

* [NOD-1124] Rename buildRouterInitializer to newRouterInitializer.

* [NOD-1124] Remove unreachable code.

* [NOD-1124] Make go vet happy.
2020-07-08 10:14:52 +03:00
Svarog
1a43cabfb9 [NOD-1119] Refactor main, and remove p2p layer from it (#785)
* [NOD-1119] Removed all p2p server from all the initialization of server

* [NOD-1119] Removed any calling for p2p server in main

* [NOD-1119] Simplified some functions to not take both dag and dagParams

* [NOD-1119] Simplify creation of mempool and rpc server

* [NOD-1119] Setup indexes in separate function

* [NOD-1119] Some cleanup in NewServer

* [NOD-1119] Fix mempool test

* [NOD-1119] Fix go format

* [NOD-1119] Unexport dag.timeSource

* [NOD-1119] Removed server package + renamed the Server object to Kaspad, and made it minimal

* [NOD-1119] Delete redundant functions

* Unexported kaspad and related methods

* [NOD-1119] Unexported newKaspad

* [NOD-1119] Revise comments and remove redundant function

* [NOD-1119] Make comments of unexported methods lower-case

* [NOD-1119] Some more refactoring in newKaspad
2020-07-06 18:00:28 +03:00
Ori Newman
580e37943b [NOD-1117] Write interfaces for P2P layer (#784)
* [NOD-1117] Write interfaces for P2P layer

* [NOD-1117] Add logs
2020-07-05 12:10:01 +03:00
Ori Newman
749775c7ea [NOD-1098] Change timestamp precision to one millisecond (#778)
* [NOD-1098] Change timestamps to be millisecond precision

* [NOD-1098] Change lock times to use milliseconds

* [NOD-1098] Use milliseconds precision everywhere

* [NOD-1098] Implement type mstime.Time

* [NOD-1098] Fix block 100000 timestamp

* [NOD-1098] Change orphan child to be one millisecond delay after its parent

* [NOD-1098] Remove test that checks if header timestamps have the right precision, and instead add tests for mstime, and fix genesis for testnet and devnet

* [NOD-1098] Fix comment

* [NOD-1098] Fix comment

* [NOD-1098] Fix testnet genesis

* [NOD-1098] Rename UnixMilli->UnixMilliseconds
2020-07-01 16:09:04 +03:00
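
The mstime.Time type introduced above wraps time.Time at one-millisecond precision, with UnixMilliseconds replacing the earlier UnixMilli name. Roughly along these lines (a sketch, not the package's exact API):

```go
package mstime

import "time"

// Time is a millisecond-precision wrapper around time.Time.
type Time struct {
	time time.Time
}

// Now returns the current time truncated to one-millisecond precision.
func Now() Time {
	return Time{time: time.Now().Truncate(time.Millisecond)}
}

// UnixMilliseconds returns milliseconds elapsed since January 1, 1970 UTC.
func (t Time) UnixMilliseconds() int64 {
	return t.time.UnixNano() / int64(time.Millisecond)
}

// FromUnixMilliseconds builds a Time from a millisecond timestamp.
func FromUnixMilliseconds(ms int64) Time {
	return Time{time: time.Unix(ms/1000, (ms%1000)*int64(time.Millisecond))}
}
```
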
Mike Zak
8ff8c30fb4 Merge remote-tracking branch 'origin/v0.5.0-dev' into v0.6.0-dev 2020-07-01 15:05:17 +03:00
stasatdaglabs
9893b7396c [NOD-1105] When recovering acceptance index, use a database transaction per block instead of for the entire recovery (#781)
* [NOD-1105] Don't use a database transaction when recovering acceptance index.

* Revert "[NOD-1105] Don't use a database transaction when recovering acceptance index."

This reverts commit da550f8e

* [NOD-1105] When recovering acceptance index, use a database transaction per block instead of for the entire recovery.
2020-07-01 13:43:51 +03:00
Svarog
8c90344f28 [NOD-1103] Fix testnetGenesisTxPayload with 8-byte blue-score (#780)
* [NOD-1103] Fix testnetGenesisTxPayload with 8-byte blue-score

* [NOD-1103] Fix genesis block bytes
2020-07-01 09:21:42 +03:00
Mike Zak
e4955729d2 Merge remote-tracking branch 'origin/v0.5.0-dev' into v0.6.0-dev 2020-06-30 08:48:31 +03:00
Mike Zak
8a7b0314e5 Merge branch 'v0.5.0-dev' of github.com:kaspanet/kaspad into v0.5.0-dev 2020-06-29 12:17:41 +03:00
Mike Zak
e87d00c9cf [NOD-1063] Fix a bug in which a block is pointing directly to a block in the selected parent chain below the reindex root
commit e303efef42
Author: stasatdaglabs <stas@daglabs.com>
Date:   Mon Jun 29 11:59:36 2020 +0300

    [NOD-1063] Rename a test.

commit bfecd57470
Author: stasatdaglabs <stas@daglabs.com>
Date:   Mon Jun 29 11:57:36 2020 +0300

    [NOD-1063] Fix a comment.

commit b969e5922d
Author: stasatdaglabs <stas@daglabs.com>
Date:   Sun Jun 28 18:14:44 2020 +0300

    [NOD-1063] Convert modifiedTreeNode to an out param.

commit 170f9872f4
Author: stasatdaglabs <stas@daglabs.com>
Date:   Sun Jun 28 17:05:01 2020 +0300

    [NOD-1063] Fix a bug in which a block is added to the selected parent chain below the reindex root.
2020-06-29 12:16:47 +03:00
stasatdaglabs
336347b3c5 [NOD-1063] Fix a bug in which a block is pointing directly to a block in the selected parent chain below the reindex root (#777)
* [NOD-1063] Fix a bug in which a block is added to the selected parent chain below the reindex root.

* [NOD-1063] Convert modifiedTreeNode to an out param.

* [NOD-1063] Fix a comment.

* [NOD-1063] Rename a test.
2020-06-29 12:13:51 +03:00
Mike Zak
15d0899406 Update to version v0.6.0 2020-06-29 09:17:45 +03:00
Mike Zak
ad096f9781 Update to version 0.5.0 2020-06-29 08:59:56 +03:00
Elichai Turkel
d3c6a3dffc [NOD-1093] Add hashMerkleRoot to GetBlockTemplateResult (#776)
* Add hashMerkleRoot field to GetBlockTemplateResult

* Use hashMerkleRoot from template instead of recalculating

* Move ParseBlock from kaspaminer into rpcclient

* Rename ParseBlock to ConvertGetBlockTemplateResultToBlock and wrap errors
2020-06-28 16:53:09 +03:00
stasatdaglabs
57b1653383 [NOD-1063] Optimize deep reachability tree insertions (#773)
* [NOD-1055] Give higher priority for requesting missing ancestors when sending a getdata message (#767)

* [NOD-1063] Remove the remainingInterval field.

* [NOD-1063] Add helper functions to reachabilityTreeNode.

* [NOD-1063] Add reachabilityReindexRoot.

* [NOD-1063] Start implementing findNextReachabilityReindexRoot.

* [NOD-1063] Implement findCommonAncestor.

* [NOD-1063] Implement findReachabilityTreeAncestorInChildren.

* [NOD-1063] Add reachabilityReindexWindow.

* [NOD-1063] Fix findReachabilityTreeAncestorInChildren.

* [NOD-1063] Remove BlockDAG reference in findReachabilityTreeAncestorInChildren.

* [NOD-1063] Extract updateReachabilityReindexRoot to a separate function.

* [NOD-1063] Add reachabilityReindexSlack.

* [NOD-1063] Implement splitReindexRootChildrenAroundChosen.

* [NOD-1063] Implement calcReachabilityTreeNodeSizes.

* [NOD-1063] Implement propagateChildIntervals.

* [NOD-1063] Extract tightenReachabilityTreeIntervalsBeforeChosenReindexRootChild and tightenReachabilityTreeIntervalsAfterChosenReindexRootChild to separate functions.

* [NOD-1063] Implement expandReachabilityTreeIntervalInChosenReindexRootChild.

* [NOD-1063] Finished implementing concentrateReachabilityTreeIntervalAroundReindexRootChild.

* [NOD-1063] Begin implementing reindexIntervalsBeforeReindexRoot.

* [NOD-1063] Implement top-level logic of reindexIntervalsBeforeReindexRoot.

* [NOD-1063] Implement reclaimIntervalBeforeChosenChild.

* [NOD-1063] Add a debug log for reindexIntervalsBeforeReindexRoot.

* [NOD-1063] Rename reindexIntervalsBeforeReindexRoot to reindexIntervalsEarlierThanReindexRoot.

* [NOD-1063] Implement reclaimIntervalAfterChosenChild.

* [NOD-1063] Add a debug log for updateReachabilityReindexRoot.

* [NOD-1063] Convert modifiedTreeNodes from slices to sets.

* [NOD-1063] Fix findCommonAncestor.

* [NOD-1063] Fix reindexIntervalsEarlierThanReindexRoot.

* [NOD-1063] Remove redundant nil conditions.

* [NOD-1063] Make map[*reachabilityTreeNode]struct{} into a type alias with a copyAllFrom method.

* [NOD-1063] Remove setInterval.

* [NOD-1063] Create a new struct to hold reachability stuff called reachabilityTree.

* [NOD-1063] Rename functions under reachabilityTree.

* [NOD-1063] Move reachabilityStore into reachabilityTree.

* [NOD-1063] Move the rest of the functions in reachability.go into the reachabilityTree struct.

* [NOD-1063] Update newReachabilityTree to take an instance of reachabilityStore.

* [NOD-1063] Fix merge errors.

* [NOD-1063] Fix merge errors.

* [NOD-1063] Pass a reference to the dag into reachabilityTree.

* [NOD-1063] Use Wrapf instead of Errorf.

* [NOD-1063] Merge assignments.

* [NOD-1063] Disambiguate a variable name.

* [NOD-1063] Add a test case for intervalBefore.

* [NOD-1063] Simplify splitChildrenAroundChosenChild.

* [NOD-1063] Fold temporary variables into newReachabilityInterval.

* [NOD-1063] Fold more temporary variables into newReachabilityInterval.

* [NOD-1063] Fix a bug in expandIntervalInReindexRootChosenChild.

* [NOD-1063] Remove blockNode from futureCoveringBlock.

* [NOD-1063] Get rid of futureCoveringBlock.

* [NOD-1063] Use findIndex directly in findAncestorAmongChildren.

* [NOD-1063] Make findIndex a bit nicer to use. Also rename it to findAncestorIndexOfNode.

* [NOD-1063] Rename childIntervalAllocationRange to intervalRangeForChildAllocation.

* [NOD-1063] Optimize findCommonAncestor.

* [NOD-1063] In reindexIntervalsBeforeChosenChild, use chosenChild.interval.start - 1 instead of childrenBeforeChosen[len(childrenBeforeChosen)-1].interval.end + 1.

* [NOD-1063] Rename reindexIntervalsBeforeChosenChild to reindexIntervalsBeforeNode.

* [NOD-1063] Add a comment explaining what "the chosen child" is.

* [NOD-1063] In concentrateIntervalAroundReindexRootChosenChild, rename modifiedTreeNodes to allModifiedTreeNodes.

* [NOD-1063] Extract propagateIntervals to a function.

* [NOD-1063] Extract interval "contains" logic to a separate function.

* [NOD-1063] Simplify "looping up" logic in reclaimIntervalXXXChosenChild.

* [NOD-1063] Add comments to reclaimIntervalXXXChosenChild.

* [NOD-1063] Rename copyAllFrom to addAll.

* [NOD-1063] Rename reachabilityStore (the variable) to just store.

* [NOD-1063] Fix an error message.

* [NOD-1063] Reword a comment.

* [NOD-1063] Don't return -1 from findAncestorIndexOfNode.

* [NOD-1063] Extract slackReachabilityIntervalForReclaiming to a constant.

* [NOD-1063] Add a missing condition.

* [NOD-1063] Call isAncestorOf directly in insertNode.

* [NOD-1063] Rename chosenReindexRootChild to reindexRootChosenChild.

* [NOD-1063] Rename treeNodeSet to orderedTreeNodeSet.

* [NOD-1063] Add a disclaimer to orderedTreeNodeSet.

* [NOD-1063] Implement StoreReachabilityReindexRoot and FetchReachabilityReindexRoot.

* [NOD-1063] Move storing the reindex root to within reachabilityTree.

* [NOD-1063] Remove isAncestorOf from reachabilityInterval.

* [NOD-1063] Add a comment about graph theory conventions.

* [NOD-1063] Fix tests.

* [NOD-1063] Change inclusion in isAncestorOf functions.

* [NOD-1063] Rename a test.

* [NOD-1063] Implement TestIsInFuture.

* [NOD-1063] Fix error messages in TestIsInFuture.

* [NOD-1063] Fix error messages in TestIsInFuture.

* [NOD-1063] Rename isInSelectedParentChain to isInSelectedParentChainOf.

* [NOD-1063] Rename isInFuture to isInPast.

* [NOD-1063] Expand on a comment.

* [NOD-1063] Rename modifiedTreeNodes.

* [NOD-1063] Implement test: TestReindexIntervalsEarlierThanReindexRoot.

* [NOD-1063] Implement test: TestUpdateReindexRoot.

* [NOD-1063] Explain a check.

* [NOD-1063] Use a method instead of calling reachabilityStore.loaded directly.

* [NOD-1063] Lowercased an error message.

* [NOD-1063] Fix failing test.

Co-authored-by: Ori Newman <orinewman1@gmail.com>
2020-06-28 14:27:01 +03:00
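
At the heart of the reindexing work above is the interval allocation scheme: each reachability tree node owns an integer interval, children receive disjoint sub-intervals of their parent's, and ancestry reduces to interval containment, so isAncestorOf runs in O(1) at any tree depth. A sketch of that core check (the real code also maintains the future-covering set mentioned above for non-tree edges):

```go
package reachability

// reachabilityInterval sketches the [start, end] range each tree node is
// allocated; children receive disjoint sub-ranges of their parent's range.
type reachabilityInterval struct {
	start uint64
	end   uint64
}

// contains reports whether other lies entirely within ri.
func (ri reachabilityInterval) contains(other reachabilityInterval) bool {
	return ri.start <= other.start && other.end <= ri.end
}

type reachabilityTreeNode struct {
	interval reachabilityInterval
}

// isAncestorOf reduces ancestry to interval containment. Per the commits
// above the check was made exclusive, so a node's own interval (identical
// bounds) does not count.
func (node *reachabilityTreeNode) isAncestorOf(other *reachabilityTreeNode) bool {
	return node.interval.contains(other.interval) &&
		node.interval != other.interval
}
```
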
Ori Newman
a86255ba51 [NOD-1088] Rename RejectReasion to RejectReason (#775) 2020-06-25 18:08:58 +03:00
Ori Newman
0a7a4ce7d6 [NOD-1085] Use all nonce space in kaspaminer (#774) 2020-06-24 11:12:57 +03:00
Ori Newman
4c3735a897 [NOD-906] Move transaction mass limit from CheckTransactionSanity to mempool (#772)
* [NOD-906] Move transaction mass limit from CheckTransactionSanity to mempool

* [NOD-906] Fix tests

* [NOD-906] Add spaces to comments
2020-06-22 17:20:17 +03:00
Ori Newman
22fd38c053 [NOD-1060] Don't sync from misbehaving peer (#768)
* [NOD-1038] Give higher priority for requesting missing ancestors when sending a getdata message (#767)

* [NOD-1060] Don't sync from peers that break the netsync protocol
2020-06-22 17:15:03 +03:00
Ori Newman
895f67a8d4 [NOD-1035] Use reachability to check finality (#763)
* [NOD-1034] Use reachability to check finality

* [NOD-1034] Add comments and rename variables

* [NOD-1034] Fix comments

* [NOD-1034] Rename checkFinalityRules->checkFinalityViolation

* [NOD-1034] Change isAncestorOf to be exclusive

* [NOD-1034] Make isAncestorOf exclusive and also more explicit, and add TestReachabilityTreeNodeIsAncestorOf
2020-06-22 17:14:59 +03:00
Svarog
56e807b663 [NOD-999] Set TargetOutbound=0 to prevent rogue connectionRequests except the one requested (#771) 2020-06-22 14:20:12 +03:00
Mike Zak
af64c7dc2d Merge remote-tracking branch 'origin/v0.4.1-dev' into v0.5.0-dev 2020-06-21 16:15:05 +03:00
Svarog
1e6458973b [NOD-1064] Don't send GetBlockInvsMsg with lowHash = nil (#769) 2020-06-21 09:09:07 +03:00
Ori Newman
7bf8bb5436 [NOD-1017] Move peers.json to db (#733)
* [NOD-1017] Move peers.json to db

* [NOD-1017] Fix tests

* [NOD-1017] Change comments and rename variables

* [NOD-1017] Separate to smaller functions

* [NOD-1017] Renames

* [NOD-1017] Name newAddrManagerForTest return params

* [NOD-1017] Fix handling of non existing peersState

* [NOD-1017] Add getPeersState rpc command

* [NOD-1017] Fix comment

* [NOD-1017] Split long line

* [NOD-1017] Rename getPeersState->getPeerAddresses

* [NOD-1017] Rename getPeerInfo->getConnectedPeerInfo
2020-06-18 12:12:49 +03:00
Svarog
1358911d95 [NOD-1064] Don't send GetBlockInvsMsg with lowHash = nil (#769) 2020-06-17 14:18:00 +03:00
Ori Newman
1271d2f113 [NOD-1038] Give higher priority for requesting missing ancestors when sending a getdata message (#767) 2020-06-17 11:52:10 +03:00
Ori Newman
bc0227b49b [NOD-1059] Always call sm.restartSyncIfNeeded() when getting selectedTip message (#766) 2020-06-16 16:51:38 +03:00
Ori Newman
dc643c2d76 [NOD-833] Remove getBlockTemplate capabilites and move mining address to getBlockTemplate (#762)
* [NOD-833] Remove getBlockTemplate capabilites and move mining address to getBlockTemplate

* [NOD-833] Fix tests

* [NOD-833] Break long lines
2020-06-16 11:01:06 +03:00
Ori Newman
0744e8ebc0 [NOD-1042] Ignore very high orphans (#761)
* [NOD-530] Remove coinbase inputs and add blue score to payload

* [NOD-1042] Ignore very high orphans

* [NOD-1042] Add ban score to an orphan with malformed blue score

* [NOD-1042] Fix log
2020-06-15 16:08:25 +03:00
Ori Newman
d4c9fdf6ac [NOD-614] Add ban score (#760)
* [NOD-614] Copy bitcoin-core ban score policy

* [NOD-614] Add ban score to disconnects

* [NOD-614] Fix wrong branch of AddBanScore

* [NOD-614] Add ban score on sending too many addresses

* [NOD-614] Add comments

* [NOD-614] Remove redundant reject messages

* [NOD-614] Fix log message

* [NOD-614] Ban every node that sends invalid invs

* [NOD-614] Make constants for ban scores
2020-06-15 12:12:38 +03:00
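
A simplified sketch of the ban-score accounting the NOD-614 commit above borrows from bitcoin-core: misbehavior adds to a peer's score, and crossing a threshold triggers a ban. The btcd-style original also decays its transient part over time, which this sketch omits; the threshold value is assumed:

```go
package peer

import "sync"

// banScore is a simplified dynamic ban score: a persistent part that only
// grows plus a transient part (undecayed here, unlike the real policy).
type banScore struct {
	mutex      sync.Mutex
	persistent uint32
	transient  uint32
}

const banThreshold = 100 // assumed threshold

// addBanScore raises the score and reports whether the peer should be banned.
func (s *banScore) addBanScore(persistent, transient uint32) (shouldBan bool) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	s.persistent += persistent
	s.transient += transient
	return s.persistent+s.transient >= banThreshold
}
```
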
stasatdaglabs
829979b6c7 [NOD-1007] Split checkBlockSanity subroutines (#743)
* [NOD-1007] Split checkBlockSanity subroutines.

* [NOD-1007] Put back the comments about performance.

* [NOD-1007] Make all the functions in checkBlockSanity take a *util.Block.

* [NOD-1007] Rename checkBlockTransactionsOrderedBySubnetwork to checkBlockTransactionOrder.

* [NOD-1007] Move a comment up a scope level.
2020-06-15 11:07:52 +03:00
Mike Zak
32cd29bf70 Merge remote-tracking branch 'origin/v0.4.1-dev' into v0.5.0-dev 2020-06-14 12:51:59 +03:00
stasatdaglabs
03cb6cbd4d [NOD-1048] Use a smaller writeBuffer and use disableSeeksCompaction directly. (#759) 2020-06-11 16:11:22 +03:00
Ori Newman
ba4a89488e [NOD-530] Remove coinbase inputs and add blue score to payload (#752)
* [NOD-530] Remove coinbase inputs and add blue score to payload

* [NOD-530] Fix comment

* [NOD-530] Change util.Block private fields comments
2020-06-11 15:54:11 +03:00
Ori Newman
b0d4a92e47 [NOD-1046] Delete redundant conversion from rule error (#755) 2020-06-11 12:19:49 +03:00
stasatdaglabs
3e5a840c5a [NOD-1052] Add a lock around clearOldEntries to protect against concurrent access of utxoDiffStore.loaded. (#758) 2020-06-11 11:56:25 +03:00
Ori Newman
d6d34238d2 [NOD-1049] Allow empty addr messages (#753) 2020-06-10 16:13:13 +03:00
Ori Newman
8bbced5925 [NOD-1051] Don't disconnect from sync peer if it sends an orphan (#757) 2020-06-10 16:05:48 +03:00
stasatdaglabs
20da1b9c9a [NOD-1048] Make leveldb compaction much less frequent (#756)
* [NOD-1048] Make leveldb compaction much less frequent. Also, allocate an entire gigabyte for leveldb's blockCache and writeBuffer.

* [NOD-1048] Implement changing the options for testing purposes.

* [NOD-1048] Rename originalOptions to originalLDBOptions.

* [NOD-1048] Add a comment.
2020-06-10 16:05:02 +03:00
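
The compaction tuning described above maps onto goleveldb's opt.Options. A sketch with illustrative sizes (the NOD-1048 follow-up above shrinks the writeBuffer again and sets disableSeeksCompaction directly, so the exact values varied):

```go
package ldb

import (
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

// ldbOptions enlarges the block cache and write buffer and disables
// seeks-triggered compaction, making compaction much less frequent.
// The sizes here are illustrative, not necessarily kaspad's.
var ldbOptions = opt.Options{
	BlockCacheCapacity:     512 * opt.MiB,
	WriteBuffer:            512 * opt.MiB,
	DisableSeeksCompaction: true,
}

func open(path string) (*leveldb.DB, error) {
	return leveldb.OpenFile(path, &ldbOptions)
}
```
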
Ori Newman
b6a6e577c4 [NOD-1013] Don't block handleBlockDAGNotification when calling peerNotifier (#749)
* [NOD-1013] Don't block handleBlockDAGNotification when calling peerNotifier

* [NOD-1013] Add comment
2020-06-09 12:12:18 +03:00
Mike Zak
84888221ae Merge remote-tracking branch 'origin/v0.4.1-dev' into v0.5.0-dev 2020-06-08 12:23:33 +03:00
stasatdaglabs
222477b33e [NOD-1040] Don't remove DAG tips from the diffStore's loaded set (#750)
* [NOD-1040] Don't remove DAG tips from the diffStore's loaded set

* [NOD-1040] Remove a debug log.
2020-06-08 12:14:58 +03:00
Mike Zak
4a50d94633 Update to v0.4.1 2020-06-07 17:54:30 +03:00
stasatdaglabs
b4dba782fb [NOD-1040] Increase maxBlueScoreDifferenceToKeepLoaded to 1500 (#746)
* [NOD-1040] Don't remove DAG tips from the diffStore's loaded set

* [NOD-1040] Fix TestClearOldEntries.

* Revert "[NOD-1040] Fix TestClearOldEntries."

This reverts commit e0705814

* Revert "[NOD-1040] Don't remove DAG tips from the diffStore's loaded set"

This reverts commit d3eba1c1

* [NOD-1040] Increase maxBlueScoreDifferenceToKeepLoaded to 1500.
2020-06-07 17:50:57 +03:00
stasatdaglabs
9c78a797e4 [NOD-1041] Call outboundPeerConnected and outboundPeerConnectionFailed directly instead of routing them through peerHandler (#748)
* [NOD-1041] Fix a deadlock between connHandler and peerHandler.

* [NOD-1041] Simplified the fix.
2020-06-07 16:35:48 +03:00
Ori Newman
35c733a4c1 [NOD-970] Add isSyncing flag (#747)
* [NOD-970] Add isSyncing flag

* [NOD-970] Rename shouldSendSelectedTip->peerShouldSendSelectedTip
2020-06-07 16:31:17 +03:00
Mike Zak
e5810d023e Merge remote-tracking branch 'origin/v0.4.1-dev' into v0.5.0-dev 2020-06-07 14:21:49 +03:00
stasatdaglabs
96930bd6ea [NOD-1039] Remove the call to SetGCPercent. (#745) 2020-06-07 09:19:28 +03:00
Mike Zak
e09ce32146 Merge remote-tracking branch 'origin/v0.4.1-dev' into v0.5.0-dev 2020-06-04 15:11:40 +03:00
stasatdaglabs
d15c009b3c [NOD-1030] Disconnect from syncPeers that send orphan blocks (#744)
* [NOD-1030] Disconnect from syncPeers that send orphan blocks.

* [NOD-1030] Remove debug log.

* [NOD-1030] Remove unnecessary call to stopSyncFromPeer.
2020-06-04 15:11:05 +03:00
stasatdaglabs
95c8b8e9d8 [NOD-1023] Rename isCurrent/current to isSynced/synced (#742)
* [NOD-1023] Rename BlockDAG.isCurrent to isSynced.

* [NOD-1023] Rename SyncManager.current to synced.

* [NOD-1023] Fix comments.
2020-06-03 16:04:14 +03:00
stasatdaglabs
2d798a5611 [NOD-1020] Do send addr response to getaddr messages even if there aren't any addresses to send. (#740) 2020-06-01 14:09:18 +03:00
stasatdaglabs
3a22249be9 [NOD-1012] Fix erroneous partial node check (#739)
* [NOD-1012] Fix bad partial node check.

* [NOD-1012] Fix unit tests.
2020-05-31 14:13:30 +03:00
stasatdaglabs
a4c1898624 [NOD-1012] Disable subnetworks (#731)
* [NOD-1012] Disallow non-native/coinbase transactions.

* [NOD-1012] Fix logic error.

* [NOD-1012] Fix/skip tests and remove --subnetwork.

* [NOD-1012] Disconnect from non-native peers.

* [NOD-1012] Don't skip subnetwork tests.

* [NOD-1012] Use EnableNonNativeSubnetworks in peer.go.

* [NOD-1012] Set EnableNonNativeSubnetworks = true in the tests that need them rather than by default in Simnet.
2020-05-31 10:50:46 +03:00
Ori Newman
672f02490a [NOD-763] Change genesis version (#737) 2020-05-28 09:55:59 +03:00
stasatdaglabs
fc00275d9c [NOD-553] Get rid of base58 (#735)
* [NOD-553] Get rid of wif.

* [NOD-553] Get rid of base58.
2020-05-27 17:37:03 +03:00
Ori Newman
6219b93430 [NOD-1018] Exit after 2 minutes if graceful shutdown fails (#732)
* [NOD-1018] Exit after 2 minutes if graceful shutdown fails

* [NOD-1018] Change time.Tick to time.After
2020-05-25 14:30:43 +03:00
Ori Newman
3a4571d671 [NOD-965] Make LookupNode return boolean (#729)
* [NOD-965] Make dag.index.LookupNode return false if node is not found

* [NOD-965] Rename blockDAG->dag

* [NOD-965] Remove irrelevant test

* [NOD-965] Use bi.index's ok in LookupNode
2020-05-25 12:51:30 +03:00
Ori Newman
96052ac69a [NOD-809] Change fee rate to fee per megagram (#730) 2020-05-24 16:59:37 +03:00
Ori Newman
6463a4b5d0 [NOD-1011] Don't cache isSynced on getBlockTemplate (#728) 2020-05-20 14:38:24 +03:00
Svarog
0ca127853d [NOD-974] UTXO-Commitments shouldn't include the new block's transactions (#727)
* [NOD-975] Don't include block transactions inside its UTXO commitment (#711)

* [NOD-975] Don't include block transactions inside its UTXO commitment.

* Revert "[NOD-975] Don't include block transactions inside its UTXO commitment."

This reverts commit b1a2ae66

* [NOD-975] Implement a (currently failing) TestUTXOCommitment.

* [NOD-975] Remove the block's own transactions from calcMultiset.

* [NOD-975] Simplify calcMultiset.

* [NOD-975] Add a comment on top of selectedParentMultiset.

* [NOD-975] Use pastUTXO instead of selectedParentUTXO in calcMultiset.

* [NOD-975] Use selected parent's pastUTXO instead of this block's pastUTXO in calcMultiset.

* [NOD-975] Extract selectedParentPastUTXO to a separate function.

* [NOD-975] Remove selectedParentUTXO from pastUTXO's return values.

* [NOD-975] Add txs to TestUTXOCommitment.

* [NOD-975] Remove debug code.

* [NOD-975] In pastUTXOMultiSet, copy the multiset to avoid modifying the original.

* [NOD-975] Add a test: TestPastUTXOMultiSet.

* [NOD-975] Improve TestPastUTXOMultiSet.

* [NOD-976] Implement tests for UTXO commitments (#715)

* [NOD-975] Don't include block transactions inside its UTXO commitment.

* Revert "[NOD-975] Don't include block transactions inside its UTXO commitment."

This reverts commit b1a2ae66

* [NOD-975] Implement a (currently failing) TestUTXOCommitment.

* [NOD-975] Remove the block's own transactions from calcMultiset.

* [NOD-975] Simplify calcMultiset.

* [NOD-975] Add a comment on top of selectedParentMultiset.

* [NOD-975] Use pastUTXO instead of selectedParentUTXO in calcMultiset.

* [NOD-975] Use selected parent's pastUTXO instead of this block's pastUTXO in calcMultiset.

* [NOD-975] Extract selectedParentPastUTXO to a separate function.

* [NOD-975] Remove selectedParentUTXO from pastUTXO's return values.

* [NOD-975] Add txs to TestUTXOCommitment.

* [NOD-976] Generate new blockDB blocks for tests.

* [NOD-976] Fix TestBlueBlockWindow.

* [NOD-976] Fix TestIsKnownBlock.

* [NOD-976] Fix TestGHOSTDAG.

* [NOD-976] Fix TestUTXOCommitment.

* [NOD-976] Remove kaka.

* [NOD-990] Save utxo diffs of past UTXO (#724)

* [NOD-990] Save UTXO diffs of past UTXO

* [NOD-990] Check for block double spends with its past instead of building its UTXO

* [NOD-990] Call resetExtraNonceForTest in TestUTXOCommitment

* [NOD-990] Remove redundant functions diffFromTx and diffFromAcceptedTx

* [NOD-990] Rename i->j to avoid confusion

* [NOD-990] Break long lines

* [NOD-990] Rename ErrDoubleSpendsWithBlockTransaction -> ErrDoubleSpendInSameBlock

* [NOD-990] Make ErrDoubleSpendInSameBlock more detailed

* [NOD-990] Add testProcessBlockRuleError

* [NOD-990] Fix comment

* [NOD-990] Add test for duplicate transactions on the same block

* [NOD-990] Use pkg/errors on panic

* [NOD-990] Make cloneWithoutBase method

* [NOD-990] Break long lines

* [NOD-990] Fix comment

* [NOD-990] Fix wrong variable names

* [NOD-990] Fix comment

* [NOD-974] Generate new test blocks.

* [NOD-974] Fix TestIsKnownBlock and TestGHOSTDAG.

* [NOD-974] Fix TestUTXOCommitment.

* [NOD-974] Fix comments

Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
Co-authored-by: stasatdaglabs <stas@daglabs.com>
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2020-05-20 12:43:52 +03:00
stasatdaglabs
b884ba128e [NOD-1008] In utxoDiffStore, keep diffData in memory for blocks whose blueScore is at least virtualBlueScore - X (#726)
* [NOD-1008] Use *blockNode as keys in utxoDiffStore.loaded and .dirty.

* [NOD-1008] Implement clearOldEntries.

* [NOD-1008] Increase maxBlueScoreDifferenceToKeepLoaded to 100.

* [NOD-1008] Fix a typo.

* [NOD-1008] Add clearOldEntries to saveChangesFromBlock.

* [NOD-1008] Begin implementing TestClearOldEntries.

* [NOD-1008] Finish implementing TestClearOldEntries.

* [NOD-1008] Fix a comment.

* [NOD-1008] Rename diffDataByHash to diffDataByBlockNode.

* [NOD-1008] Use dag.TipHashes instead of tracking tips manually.
2020-05-20 10:47:01 +03:00
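
A sketch of the clearOldEntries idea from the PR above: evict loaded diff data for blocks that have fallen more than a fixed blue-score distance behind the virtual block, while (per the NOD-1040 follow-ups above) never evicting the DAG's current tips. Types and names are illustrative stand-ins:

```go
package blockdag

// maxBlueScoreDifferenceToKeepLoaded is the eviction window; the PR above
// starts at 100 and a follow-up raises it to 1500.
const maxBlueScoreDifferenceToKeepLoaded = 100

type blockNode struct {
	blueScore uint64
}

type utxoDiffStore struct {
	loaded map[*blockNode]struct{}
}

func (store *utxoDiffStore) clearOldEntries(virtualBlueScore uint64,
	tips map[*blockNode]struct{}) {

	var minBlueScore uint64
	if virtualBlueScore > maxBlueScoreDifferenceToKeepLoaded {
		minBlueScore = virtualBlueScore - maxBlueScoreDifferenceToKeepLoaded
	}
	for node := range store.loaded {
		if _, isTip := tips[node]; isTip {
			continue // per NOD-1040 above: never evict the DAG's tips
		}
		if node.blueScore < minBlueScore {
			delete(store.loaded, node)
		}
	}
}
```
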
Svarog
fe25ea3d8c [NOD-1001] Make an error in Peer.start() stop the connection process from continuing. (#723)
* [NOD-1001] Move side-effects of connection out of OnVersion

* [NOD-1001] Make AssociateConnection synchronous

* [NOD-1001] Wait for 2 veracks in TestPeerListeners

* [NOD-1001] Made AssociateConnection return error

* [NOD-1001] Remove temporary logs

* [NOD-1001] Fix typos and find-and-replace errors

* [NOD-1001] Move example_test back out of peer package + fix some typos

* [NOD-1001] Use correct remote address in setupPeersWithConns and return to address string literals

* [NOD-1001] Use separate verack channels for inPeer and outPeer

* [NOD-1001] Make verack channels buffered

* [NOD-1001] Removed temporary sleep of 1 second

* [NOD-1001] Removed redundant //
2020-05-20 10:36:44 +03:00
stasatdaglabs
e0f587f599 [NOD-877] Separate UTXO header code to two fields in serialization: blue score and packed flags (#725)
* [NOD-877] In UTXOEntry serialization, extract packedFlags out to a separate Uint8.

* [NOD-877] Generate new test blocks.

* [NOD-877] Fix TestIsKnownBlock.

* [NOD-877] Fix TestBlueBlockWindow.

* [NOD-877] Fix TestUTXOSerialization and TestGHOSTDAG.

* [NOD-877] Fix TestVirtualBlock.
2020-05-19 17:56:07 +03:00
stasatdaglabs
e9e1ef4772 [NOD-1006] Make use of a pool to avoid excessive allocation of big.Ints (#722)
* [NOD-1006] Make CompactToBig take an out param so that we can reuse the same big.Int in averageTarget.

* [NOD-1006] Fix merge errors.

* [NOD-1006] Use CompactToBigWithDestination only in averageTarget.

* [NOD-1006] Fix refactor errors.

* [NOD-1006] Fix refactor errors.

* [NOD-1006] Optimize averageTarget with a big.Int pool.

* [NOD-1006] Defer releasing bigInts.

* [NOD-1006] Use a pool for requiredDifficulty as well.

* [NOD-1006] Move the big int pool to utils.

* [NOD-1006] Remove unnecessary line.
2020-05-19 16:29:21 +03:00
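
The big.Int pool moved into utils above can be sketched with sync.Pool: acquire a scratch big.Int, use it (e.g. for the CompactToBig result inside averageTarget), and release it instead of allocating a fresh one per header. A sketch of the pattern, not necessarily the package's exact API:

```go
package bigintpool

import (
	"math/big"
	"sync"
)

var pool = sync.Pool{
	New: func() interface{} { return new(big.Int) },
}

// Acquire returns a big.Int from the pool, reset to x.
func Acquire(x int64) *big.Int {
	result := pool.Get().(*big.Int)
	result.SetInt64(x)
	return result
}

// Release returns a big.Int to the pool for reuse.
func Release(toRelease *big.Int) {
	pool.Put(toRelease)
}
```

Call sites then pair each Acquire with a deferred Release, matching the "Defer releasing bigInts" commit above.
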
Svarog
eb8b841850 [NOD-1005] Use sm.isSynced to check whether should request blocks from invs (#721)
* [NOD-1005] Moved isSyncedForMining to netsync manager, and renamed to isSynced + removed isCurrent

* [NOD-1005] Use sm.isSynced to check whether should request blocks from invs

* [NOD-1005] Use private version of isSynced to avoid infinite loop

* [NOD-1005] Fix a few typos
2020-05-18 10:42:58 +03:00
Svarog
28681affda [NOD-994] Greatly increase the amount of logs kaspad keeps before rotating them away (#720)
* [NOD-994] Greatly increased the amount of logs kaspad keeps before rotating them away

* [NOD-994] Actually increase the log file

* [NOD-994] Update comments

* [NOD-994] Fix typo
2020-05-14 10:58:46 +03:00
Svarog
378f0b659a [NOD-993] Get rid of redundant error types + Use %+v when printing startup errors (#719)
* [NOD-993] Use %+v when printing errors

* [NOD-993] Get rid of AssertError

* [NOD-993] Made ruleError use github.com/pkg/errors

* [NOD-993] remove redundant TODO

* [NOD-993] remove redundant Comment

* [NOD-993] Removed DeploymentError
2020-05-13 17:27:53 +03:00
stasatdaglabs
35b943e04f [NOD-996] Disable kaspad logs in TestScripts (#718)
* [NOD-996] Disable kaspad logs in TestScripts.

* [NOD-996] Return the log level to its original state after TestScripts is done.
2020-05-13 15:57:30 +03:00
stasatdaglabs
65f75c17fc [NOD-982] Log message with level WARN when getting MsgReject (#717)
* [NOD-982] Log message with level WARN when getting MsgReject.

* [NOD-982] Fix wrong logLevel in Write and Writef.

* [NOD-982] Use Write and Writef inside Trace, Tracef, Debug, Debugf, etc...

* [NOD-982] Move peer message logging to a separate file.
2020-05-13 10:03:37 +03:00
stasatdaglabs
806eab817c [NOD-820] When the node isn't synced, make getBlockTemplate return a boolean isSynced instead of an error (#716)
* [NOD-820] Add IsSynced to GetBlockTemplateResult.

* [NOD-820] Add isSynced to the help file.

* [NOD-820] Add MineWhenNotSynced to the kaspaminer config.

* [NOD-820] Implement miner MineWhenNotSynced logic.

* [NOD-820] Fixed capitalization in an error message.
2020-05-12 15:08:24 +03:00
Ori Newman
585510d76c [NOD-847] Fix CIDR protection and prevent connecting to the same address twice (#714)
* [NOD-847] Fix CIDR protection and prevent connecting to the same address twice

* [NOD-847] Fix Tests

* [NOD-847] Add TestDuplicateOutboundConnections and TestSameOutboundGroupConnections

* [NOD-847] Fix TestRetryPermanent, TestNetworkFailure and wait 10 ms before restoring the previous active config

* [NOD-847] Add "is" before boolean methods

* [NOD-847] Fix Connect's lock

* [NOD-847] Make numAddressesInAddressManager an argument

* [NOD-847] Add teardown function for address manager

* [NOD-847] Add stack trace to ConnManager errors

* [NOD-847] Change emptyAddressManagerForTest->createEmptyAddressManagerForTest and fix typos

* [NOD-847] Fix wrong test name for addressManagerForTest

* [NOD-847] Change error message if New fails

* [NOD-847] Add new line on releaseAddress

* [NOD-847] Always try to reconnect on disconnect
2020-05-12 13:47:15 +03:00
Svarog
c8a381d5bb [NOD-981] Fixed error message when both --notls and --rpccert omitted (#713) 2020-05-06 13:05:48 +03:00
Ori Newman
3d04e6bded [NOD-943] Add acceptedBlockHashes to GetBlockVerboseResult (#708)
* [NOD-943] Add acceptedBlockHashes to GetBlockVerboseResult

* [NOD-943] Remove intermediate variables

* [NOD-943] Add block hash to error message

* [NOD-943] Change comment
2020-05-05 17:26:54 +03:00
Svarog
f8e851a6ed [NOD-968] Wrap all ldb errors with pkg/errors (#712) 2020-05-04 16:33:23 +03:00
stasatdaglabs
e70a615135 [NOD-872] Defer all currently undeferred unlocks in the database package (#706)
* [NOD-872] Defer unlocks in write.go.

* [NOD-872] Defer unlocks in rollback.go.

* [NOD-872] Defer unlocks in read.go.

* [NOD-872] Fix duplicate RUnlock.

* [NOD-872] Remove a redundant empty line.

* [NOD-872] Extract closeCurrentWriteCursorFile to a separate method.
2020-05-04 13:07:40 +03:00
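
The pattern NOD-872 applies throughout the database package, sketched on a made-up read method: one deferred unlock replaces explicit unlocks on every return path (the change above even flushed out a duplicate RUnlock):

```go
package ffldb

import "sync"

type flatFileLocation struct {
	fileNumber, offset uint32
}

type flatFileStore struct {
	lock sync.RWMutex
	data map[flatFileLocation][]byte
}

// read takes the lock once and relies on a single deferred unlock, which
// runs on every return path, including panics.
func (s *flatFileStore) read(location flatFileLocation) ([]byte, bool) {
	s.lock.RLock()
	defer s.lock.RUnlock()

	data, ok := s.data[location]
	return data, ok
}
```
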
Ori Newman
73ad0adf72 [NOD-913] Use sync rate in getBlockTemplate (#705)
* [NOD-913] Use sync rate in getBlockTemplate

* [NOD-913] Rename addBlockProcessTime->addBlockProcessTimestamp, maxDiff->maxTipAge

* [NOD-913] Pass maxDeviation as an argument

* [NOD-913] Change maxDeviation to +5%

* [NOD-913] Rename variables

* [NOD-913] Rename variables and functions and change comments

* [NOD-913] Split addBlockProcessingTimestamp
2020-05-04 09:09:23 +03:00
stasatdaglabs
5b74e51db1 [NOD-956] Increase K to 15. (#710) 2020-05-03 14:56:47 +03:00
stasatdaglabs
2e2492cc5d [NOD-849] Database tests (#695)
* [NOD-849] Cover ffldb/transaction with tests.

* [NOD-849] Cover cursor.go with tests.

* [NOD-849] Cover ldb/transaction with tests.

* [NOD-849] Cover location.go with tests.

* [NOD-849] Write TestFlatFileMultiFileRollback.

* [NOD-849] Fix merge errors.

* [NOD-849] Fix a comment.

* [NOD-849] Fix a comment.

* [NOD-849] Add a test that makes sure that files get deleted on rollback.

* [NOD-849] Add a test that makes sure that serializeLocation serializes to an expected value.

* [NOD-849] Improve TestFlatFileLocationDeserializationErrors.

* [NOD-849] Fix a copy+paste error.

* [NOD-849] Explain maxFileSize = 16.

* [NOD-849] Remove redundant RollbackUnlessClosed call.

* [NOD-849] Extract bucket to a variable in TestCursorSanity.

* [NOD-849] Rename TestKeyValueTransactionCommit to TestTransactionCommitForLevelDBMethods.

* [NOD-849] Extract prepareXXX into separate functions.

* [NOD-849] Simplify function calls in TestTransactionCloseErrors.

* [NOD-849] Extract validateCurrentCursorKeyAndValue to a separate function.

* [NOD-849] Add a comment over TestCursorSanity.

* [NOD-849] Add a comment over function in TestCursorCloseErrors.

* [NOD-849] Add a comment over function in TestTransactionCloseErrors.

* [NOD-849] Separate TestTransactionCloseErrors to TestTransactionCommitErrors and TestTransactionRollbackErrors.

* [NOD-849] Separate TestTransactionCloseErrors to TestTransactionCommitErrors and TestTransactionRollbackErrors.

* [NOD-849] Fix copy+paste error in comments.

* [NOD-849] Fix merge errors.

* [NOD-849] Merge TestTransactionCommitErrors and TestTransactionRollbackErrors into TestTransactionCloseErrors.

* [NOD-849] Move prepareDatabaseForTest into ffldb_test.go.

* [NOD-849] Add cursorKey to Value error messages in validateCurrentCursorKeyAndValue.
2020-05-03 12:19:09 +03:00
Ori Newman
2ef5c2cbac [NOD-915] Check if lockableFile underlying file is nil before closing it (#709) 2020-04-30 14:43:38 +03:00
Ori Newman
3c89e1f7b3 [NOD-952] Fix nil derefernce bug on outboundPeerConnectionFailed (#704) 2020-04-27 13:50:09 +03:00
stasatdaglabs
2910724b49 [NOD-934] Fix addresses not getting their retry attempt counter incremented if they fail to connect (#702)
* [NOD-934] Fix addresses not getting their retry attempt counter incremented if they fail to connect.

* [NOD-922] Inline parseNetAddress.

* [NOD-922] Fix debug logs.
2020-04-23 17:01:09 +03:00
stasatdaglabs
3af945692e [NOD-922] Panic from cursor Next and First (#703)
* [NOD-922] Panic in Cursor First and Next if the cursor is closed.

* [NOD-922] Fix broken tests.

* [NOD-922] Fix a comment.
2020-04-23 16:55:25 +03:00
stasatdaglabs
5fe9dae557 [NOD-863] Write interface tests for the new database (#697)
* [NOD-863] Write TestCursorNext.

* [NOD-863] Write TestCursorFirst.

* [NOD-863] Fix merge errors.

* [NOD-863] Add TestCursorSeek.

* [NOD-863] Add TestCursorCloseErrors.

* [NOD-863] Add TestCursorCloseFirstAndNext.

* [NOD-863] Add TestDataAccessorPut.

* [NOD-863] Add TestDataAccessorGet.

* [NOD-863] Add TestDataAccessorHas.

* [NOD-863] Add TestDatabaseDelete.

* [NOD-863] Add TestDatabaseAppendToStoreAndRetrieveFromStore.

* [NOD-863] Add TestTransactionAppendToStoreAndRetrieveFromStore.

* [NOD-863] Add TestTransactionDelete.

* [NOD-863] Add TestTransactionHas.

* [NOD-863] Add TestTransactionGet.

* [NOD-863] Add TestTransactionPut.

* [NOD-863] Move cursor tests to the bottom of interface_test.go.

* [NOD-863] Move interface_test.go to a database_test package.

* [NOD-863] Make each test in interface_test.go run for every database driver. Currently, only ffldb.

* [NOD-863] Make each cursor test in interface_test.go run for every database driver. Currently, only ffldb.

* [NOD-863] Split interface_test.go into separate files.

* [NOD-863] Rename interface_test.go to common_test.go.

* [NOD-863] Extract testForAllDatabaseTypes to a separate function.

* [NOD-863] Reorganize how test data gets added to the database.

* [NOD-863] Add explanations about testForAllDatabaseTypes.

* [NOD-863] Add tests that make sure that database changes don't affect previously opened transactions.

* [NOD-863] Extract databasePrepareFunc to a type alias.

* [NOD-863] Fix comments.

* [NOD-863] Add cursor exhaustion test to testCursorFirst.

* [NOD-863] Add cursor Next clause to testCursorSeek.

* [NOD-863] Add additional verification to testDatabasePut.

* [NOD-863] Add an additional verification to testTransactionGet.

* [NOD-863] Add TestTransactionCommit.

* [NOD-863] Add TestTransactionRollback.

* [NOD-863] Add TestTransactionRollbackUnlessClosed.

* [NOD-863] Remove equals sign from databasePrepareFunc declaration.
2020-04-20 12:14:55 +03:00
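
A sketch of the testForAllDatabaseTypes mechanism described above: each interface-level test is written once against a generic accessor and executed per registered driver (currently only ffldb). Names and signatures are illustrative, not kaspad's exact API:

```go
package database_test

import "testing"

// DataAccessor is an illustrative stand-in for the generic database interface.
type DataAccessor interface {
	Put(key, value []byte) error
	Get(key []byte) ([]byte, error)
}

// databasePrepareFunc opens a fresh database of some type for one test.
type databasePrepareFunc func(t *testing.T, testName string) (db DataAccessor, teardown func())

var databasePrepareFuncs = map[string]databasePrepareFunc{
	// "ffldb": prepareFFLDBForTest, // one entry per registered driver
}

// testForAllDatabaseTypes runs testFunc once per database driver.
func testForAllDatabaseTypes(t *testing.T, testName string,
	testFunc func(t *testing.T, db DataAccessor, testName string)) {

	for dbType, prepare := range databasePrepareFuncs {
		t.Run(testName+"/"+dbType, func(t *testing.T) {
			db, teardown := prepare(t, testName)
			defer teardown()
			testFunc(t, db, testName)
		})
	}
}
```
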
Svarog
42c53ec3e2 [NOD-869] Add a print after os.Exit(1) to see if it is ever called (#701) 2020-04-16 16:08:32 +03:00
Ori Newman
291df8bfef [NOD-858] Don't switch sync peer if the syncing process hasn't yet started with the current sync peer (#700)
* [NOD-858] Don't switch sync peer if the syncing process hasn't yet started with the current sync peer

* [NOD-858] SetShouldSendBlockLocator(false) on OnBlockLocator

* [NOD-858] Rename shouldSendBlockLocator->wasBlockLocatorRequested

* [NOD-858] Move panic to shouldReplaceSyncPeer
2020-04-13 15:50:55 +03:00
Ori Newman
d015286f65 [NOD-909] Add tests for double spends (#694)
* [NOD-909] Add tests for double spends

* [NOD-909] Add prepareAndProcessBlock that gets parent hashes and transactions as argument

* [NOD-909] Use PrepareAndProcessBlockForTest where possible

* [NOD-909] Use more meaningful names

* [NOD-909] Change a comment

* [NOD-909] Fix comment

* [NOD-909] Fix comment
2020-04-13 12:28:59 +03:00
Ori Newman
fe91b4c878 [NOD-914] Make LevelDB.Cursor receive bucket instead of prefix (#696) 2020-04-12 09:25:40 +03:00
Ori Newman
7609c50641 [NOD-885] Use database.Key and database.Bucket instead of byte slices (#692)
* [NOD-885] Create database.Key type

* [NOD-885] Rename FullKey()->FullKeyBytes() and Key()->KeyBytes()

* [NOD-885] Make Key.String return a hex string

* [NOD-885] Rename key parts

* [NOD-885] Rename separator->bucketSeparator

* [NOD-885] Rename SuffixBytes->Suffix and PrefixBytes->Prefix

* [NOD-885] Change comments

* [NOD-885] Change key prefix to bucket

* [NOD-885] Don't use database.NewKey inside dbaccess

* [NOD-885] Fix nil bug in Bucket.Path()

* [NOD-885] Rename helpers.go -> keys.go

* [NOD-885] Unexport database.NewKey

* [NOD-885] Remove redundant code in Bucket.Path()
2020-04-08 12:12:21 +03:00
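
A sketch of the Key and Bucket types NOD-885 introduces in place of raw byte slices: a bucket is a path of components joined by a separator, and a key is a bucket plus a suffix. The separator and method names are assumptions for illustration:

```go
package database

var bucketSeparator = byte('/') // assumed separator

// Bucket is a typed path of key components.
type Bucket struct {
	path [][]byte
}

func MakeBucket(path ...[]byte) *Bucket {
	return &Bucket{path: path}
}

// Bucket returns a sub-bucket of this bucket.
func (b *Bucket) Bucket(bucketBytes []byte) *Bucket {
	newPath := make([][]byte, len(b.path)+1)
	copy(newPath, b.path)
	newPath[len(b.path)] = bucketBytes
	return &Bucket{path: newPath}
}

// Path returns the full bucket path, e.g. "blocks/" for
// MakeBucket([]byte("blocks")).
func (b *Bucket) Path() []byte {
	var path []byte
	for _, component := range b.path {
		path = append(path, component...)
		path = append(path, bucketSeparator)
	}
	return path
}

// Key is a bucket plus a suffix.
type Key struct {
	bucket *Bucket
	suffix []byte
}

func (b *Bucket) Key(suffix []byte) *Key {
	return &Key{bucket: b, suffix: suffix}
}

// Bytes returns the fully-qualified key: bucket path followed by suffix.
func (k *Key) Bytes() []byte {
	return append(k.bucket.Path(), k.suffix...)
}
```
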
Ori Newman
df934990d7 [NOD-822] Don't return rule errors from utxoset code (#693)
* [NOD-822] Remove rule errors from the UTXO diff code

* [NOD-822] Rename applyTransactions -> applyAndVerifyBlockTransactionsToPastUTXO

* [NOD-822] Fix comment
2020-04-07 12:45:12 +03:00
stasatdaglabs
3c4a80f16d [NOD-899] Inside the database, in case we're out of disk space, panic without printing the stack trace (#691)
* [NOD-899] Inside the database, in case we're out of disk space, panic without printing the stack trace.

* [NOD-899] Fix bad variable name.

* [NOD-899] Reduce code duplication.
2020-04-06 16:00:48 +03:00
stasatdaglabs
a31139d4a5 [NOD-895] Break down initDAGState to sub-routines (#690) 2020-04-06 11:08:57 +03:00
Mike Zak
6da3606721 Update to version 0.4.0 2020-04-05 16:23:01 +03:00
Ori Newman
bfbc72724d [NOD-873] Reuse allocated space when updating the UTXO set in database (#688) 2020-04-05 11:46:16 +03:00
stasatdaglabs
956b6f7d95 [NOD-900] Fix bad key in Seek (#687)
* [NOD-900] Fix Seek not working as expected.

* [NOD-900] Wrap error messages.

* [NOD-900] Use ldbIterator.Key instead of LevelDBCursor.Key.

* [NOD-900] Add a comment.
2020-04-02 17:47:51 +03:00
stasatdaglabs
c1a039de3f [NOD-900] Fix Seek not working as expected (#686)
* [NOD-900] Fix Seek not working as expected.

* [NOD-900] Wrap error messages.
2020-04-02 17:05:58 +03:00
stasatdaglabs
f8b18e09d6 [NOD-805] Redesign the database (#685)
* [NOD-828] Reimplement FFLDB (#663)

* [NOD-828] Create the database2 package that will some day replace the database package.

* [NOD-828] Implement a "bucket" key mechanism.

* [NOD-828] Move bucket.go into the ffldb2 package.

* [NOD-828] Delete the un-interfaced ffldb package from database2, since we aren't going to be using it anyway.

* [NOD-828] Copy over + fixup flat file structs from the old ffldb.

* [NOD-828] Implement flatFilePath.

* [NOD-828] Implement flatFileStore.write().

* [NOD-828] Implement flatFileStore.read().

* [NOD-828] Implement flatFileStore.rollback().

* [NOD-828] Sync the file to disk at the end of write().

* [NOD-828] Extract crc32ByteOrder to a separate variable.

* [NOD-828] Add a sanity test.

* [NOD-828] Remove context-unrelated methods from the Database interface.

* [NOD-828] Create an ffldb object. Simply work against a context.

* [NOD-828] Open the new database on start.

* [NOD-828] Create the leveldb package.

* [NOD-828] Implement opening/closing leveldb.

* [NOD-828] Implement get/put out of/into leveldb.

* [NOD-828] Implement transactions and make them implement a generic database interface.

* [NOD-828] Write sanity tests for leveldb with and without transactions.

* [NOD-828] Add another case to the transaction sanity test.

* [NOD-828] Implement AppendBlock/RetrieveBlock.

* [NOD-828] Refactor so that concepts such as "block" and "metadata" don't leak into the database package.

* [NOD-828] Add RollbackFlatData to the database interface.

* [NOD-828] Remove anything from dbaccess that I'm not planning to implement as part of this ticket.

* [NOD-828] Implement StoreBlock.

* [NOD-828] Implement FetchBlock.

* [NOD-828] Implement HasBlock.

* [NOD-828] Write a sanity test for block insertion.

* [NOD-828] Implement CurrentFlatDataLocation.

* [NOD-828] Implement storing the current block location.

* [NOD-828] Implement initializing/syncing the flat file block store and the "metadata".

* [NOD-828] Add InitBlockStore to TestBlockStoreSanity.

* [NOD-828] Fix rename errors.

* [NOD-828] Fix lint errors in the root database package.

* [NOD-828] Fix lint errors in the ffldb.go.

* [NOD-828] Fix lint errors in the flatfile/db.go.

* [NOD-828] Rename packages in such a way to make the linter happy.

* [NOD-828] Finish satisfying the linter.

* [NOD-828] Fix doc.go.

* [NOD-828] Fix comments in block.go.

* [NOD-828] Move dbaccess out of the database package.

* [NOD-828] Move opening/closing the database to dbaccess.

* [NOD-828] Move the Database interface to the root database package, since it's meant to be generic.

* [NOD-828] Make ffldb generic to later support additional databases.

* [NOD-828] Make ffldb.Open return DatabaseHandle, since ffldb is no longer exported.

* [NOD-828] Fix comments.

* [NOD-828] Rename AppendFlatData to AppendToStore and RetrieveFlatData to RetrieveFromStore.

* [NOD-828] Make buckets nicer to use.

* [NOD-828] Implement cursors that iterate over some bucket.

* [NOD-828] Generalize flat-file repairing and move block database repairing into ffldb.

* [NOD-828] Write a test making sure that flat file repair works.

* [NOD-828] Properly close the database in TestRepairFlatFiles.

* [NOD-828] Add a comment warning against putting and getting the same data within the same transaction.

* [NOD-828] Fix the flatFilesBucket description.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Fix the ffldb description.

* [NOD-828] Rename Database to DataAccessor and rename Handle to Database.

* [NOD-828] Make Get return nil if the value doesn't exist.

* [NOD-828] Attempt to close leveldb even if closing ffdb failed.

* [NOD-828] Fix a bug where the wrong location would be written to the current store location bucket.

* [NOD-828] Fix not updating the store location in ffldb transactions.

* [NOD-828] Make scanFlatFiles return an error if os.Stat fails for any reason other than file-not-found.

* [NOD-828] Update the README and doc.go.

* [NOD-828] Simplify Bucket.Path().

* [NOD-828] Since LevelDBCursor satisfied the database2.Cursor interface, use it directly.

* [NOD-828] Combine two lines into one.

* [NOD-828] Combine another two lines into one.

* [NOD-828] Move a misplaced comment.

* [NOD-828] Use Wrapf instead of Errorf where appropriate.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Fix Wrapf calls.

* [NOD-828] Fix comments.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Add a comment explaining the use of batches and snapshots.

* [NOD-828] Implement RollbackUnlessClosed().

* [NOD-828] Return both errors in StoreBlock rollback.

* [NOD-828] Move rollback-on-error logic into ffldb. Remove CurrentStoreLocation and RollbackStore from DataAccessor.

* [NOD-828] Make bucket a type alias instead of a struct.

* [NOD-828] Fix a typo.

* [NOD-828] Use copy instead of append in Bucket.

* [NOD-828] Extract flatFileLocationSerializedSize to a const.

* [NOD-828] Debugf -> Warnf in rollback.go.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Remove data length from flat file data format.

* [NOD-828] Rearrange TestLevelDBTransactionSanity a bit.

* [NOD-828] Add stack traces to all errors that come out of library functions.

* [NOD-828] Return errors from rollback().

* [NOD-828] Remove an irrelevant comment.

* [NOD-828] Remove redundant whitespace.

* [NOD-828] Handle nil in FetchBlock.

* [NOD-828] Move the explanation about batches and snapshots to the LevelDBTransaction struct.

* Revert "[NOD-828] Make bucket a type alias instead of a struct."

This reverts commit 1fd39652

* [NOD-828] Fix revert errors.

* Revert "[NOD-828] Remove data length from flat file data format."

This reverts commit ef408e32

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-828] Add Delete to DataAccessor.

* [NOD-828] Fix a couple of places that erroneously referenced blocks.

* [NOD-828] Add a comment on top of flatFileLocationSerializedSize.

* [NOD-828] Add Seek to Cursor.

* [NOD-828] Add First to Cursor.

* [NOD-828] Rename db to accessor in Context.

* [NOD-828] Make Get/Fetch calls return a boolean to indicate whether the requested item was found.

* [NOD-828] Name the output parameters of all Get functions.

* [NOD-828] Make RetrieveFromStore return whether the data was found.

* [NOD-887] Add a couple of QoL features to Cursor (#674)

* [NOD-887] Changed First to not return an error.

* [NOD-887] Fix merge error.

* [NOD-887] Make Cursor.Key not return the entire key path.

* [NOD-888] Add RollbackUnlessClosed to Context (#676)

* [NOD-888] Add RollbackUnlessClosed to Context.

* [NOD-888] Fix copy+paste error.

* [NOD-889] Instead of returning a boolean for not-found, return an error (#677)

* [NOD-889] Instead of returning a boolean for not-found, return an error.

* [NOD-889] Wrapped ErrNotFound for Get calls with nicer error messages.

* [NOD-889] Fix format.

* [NOD-889] Fix double space in a comment.

* [NOD-889] Add IsNotFoundError to dbaccess.

* [NOD-862] Replace calls to Tx.StoreBlock, Tx.HasBlock, Tx.FetchBlock with appropriate calls in dbaccess (#672)

* [NOD-828] Create the database2 package that will some day replace the database package.

* [NOD-828] Implement a "bucket" key mechanism.

* [NOD-828] Move bucket.go into the ffldb2 package.

* [NOD-828] Delete the un-interfaced ffldb package from database2, since we aren't going to be using it anyway.

* [NOD-828] Copy over + fixup flat file structs from the old ffldb.

* [NOD-828] Implement flatFilePath.

* [NOD-828] Implement flatFileStore.write().

* [NOD-828] Implement flatFileStore.read().

* [NOD-828] Implement flatFileStore.rollback().

* [NOD-828] Sync the file to disk at the end of write().

* [NOD-828] Extract crc32ByteOrder to a separate variable.

* [NOD-828] Add a sanity test.

* [NOD-828] Remove context-unrelated methods from the Database interface.

* [NOD-828] Create an ffldb object. Simply work against a context.

* [NOD-828] Open the new database on start.

* [NOD-828] Create the leveldb package.

* [NOD-828] Implement opening/closing leveldb.

* [NOD-828] Implement get/put out of/into leveldb.

* [NOD-828] Implement transactions and make them implement a generic database interface.

* [NOD-828] Write sanity tests for leveldb with and without transactions.

* [NOD-828] Add another case to the transaction sanity test.

* [NOD-828] Implement AppendBlock/RetrieveBlock.

* [NOD-828] Refactor so that concepts such as "block" and "metadata" don't leak into the database package.

* [NOD-828] Add RollbackFlatData to the database interface.

* [NOD-828] Remove anything from dbaccess that I'm not planning to implement as part of this ticket.

* [NOD-828] Implement StoreBlock.

* [NOD-828] Implement FetchBlock.

* [NOD-828] Implement HasBlock.

* [NOD-828] Write a sanity test for block insertion.

* [NOD-828] Implement CurrentFlatDataLocation.

* [NOD-828] Implement storing the current block location.

* [NOD-828] Implement initializing/syncing the flat file block store and the "metadata".

* [NOD-828] Add InitBlockStore to TestBlockStoreSanity.

* [NOD-828] Fix rename errors.

* [NOD-828] Fix lint errors in the root database package.

* [NOD-828] Fix lint errors in the ffldb.go.

* [NOD-828] Fix lint errors in the flatfile/db.go.

* [NOD-828] Rename packages in such a way to make the linter happy.

* [NOD-828] Finish satisfying the linter.

* [NOD-828] Fix doc.go.

* [NOD-828] Fix comments in block.go.

* [NOD-828] Move dbaccess out of the database package.

* [NOD-828] Move opening/closing the database to dbaccess.

* [NOD-828] Move the Database interface to the root database package, since it's meant to be generic.

* [NOD-828] Make ffldb generic to later support additional databases.

* [NOD-828] Make ffldb.Open return DatabaseHandle, since ffldb is no longer exported.

* [NOD-828] Fix comments.

* [NOD-828] Rename AppendFlatData to AppendToStore and RetrieveFlatData to RetrieveFromStore.

* [NOD-828] Make buckets nicer to use.

* [NOD-828] Implement cursors that iterate over some bucket.

* [NOD-828] Generalize flat-file repairing and move block database repairing into ffldb.

* [NOD-828] Write a test making sure that flat file repair works.

* [NOD-828] Properly close the database in TestRepairFlatFiles.

* [NOD-828] Add a comment warning against putting and getting the same data within the same transaction.

* [NOD-862] Use dbaccess.HasBlock instead of Tx.HasBlock in initDAGState.

* [NOD-862] Use dbaccess.StoreBlock instead of dbStoreBlock.

* [NOD-862] Use dbaccess.FetchBlock instead of various block fetching mechanisms.

* [NOD-828] Fix the flatFilesBucket description.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Fix the ffldb description.

* [NOD-828] Rename Database to DataAccessor and rename Handle to Database.

* [NOD-828] Make Get return nil if the value doesn't exist.

* [NOD-828] Attempt to close leveldb even if closing ffdb failed.

* [NOD-828] Fix a bug where the wrong location would be written to the current store location bucket.

* [NOD-828] Fix not updating the store location in ffldb transactions.

* [NOD-828] Make scanFlatFiles return an error if os.Stat fails for any reason other than file-not-found.

* [NOD-828] Update the README and doc.go.

* [NOD-828] Simplify Bucket.Path().

* [NOD-828] Since LevelDBCursor satisfied the database2.Cursor interface, use it directly.

* [NOD-828] Combine two lines into one.

* [NOD-828] Combine another two lines into one.

* [NOD-828] Move a misplaced comment.

* [NOD-828] Use Wrapf instead of Errorf where appropriate.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Fix Wrapf calls.

* [NOD-828] Fix comments.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Add a comment explaining the use of batches and snapshots.

* [NOD-828] Implement RollbackUnlessClosed().

* [NOD-828] Return both errors in StoreBlock rollback.

* [NOD-828] Move rollback-on-error logic into ffldb. Remove CurrentStoreLocation and RollbackStore from DataAccessor.

* [NOD-828] Make bucket a type alias instead of a struct.

* [NOD-828] Fix a typo.

* [NOD-828] Use copy instead of append in Bucket.

* [NOD-828] Extract flatFileLocationSerializedSize to a const.

* [NOD-828] Debugf -> Warnf in rollback.go.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Remove data length from flat file data format.

* [NOD-828] Rearrange TestLevelDBTransactionSanity a bit.

* [NOD-828] Add stack traces to all errors that come out of library functions.

* [NOD-828] Return errors from rollback().

* [NOD-828] Remove an irrelevant comment.

* [NOD-828] Remove redundant whitespace.

* [NOD-828] Handle nil in FetchBlock.

* [NOD-828] Implement a dbaccess.BlockNode struct.

* [NOD-828] Move the explanation about batches and snapshots to the LevelDBTransaction struct.

* [NOD-828] Implement toDBBlockNode and fromDBBlockNode.

* Revert "[NOD-828] Make bucket a type alias instead of a struct."

This reverts commit 1fd39652

* [NOD-828] Fix revert errors.

* Revert "[NOD-828] Remove data length from flat file data format."

This reverts commit ef408e32

* [NOD-862] Implement storing index blocks.

* [NOD-862] Use database transactions where appropriate.

* [NOD-862] Fix tests failing on DAGSetup.

* [NOD-862] Fix bad make call.

* [NOD-862] Fix remaining database opening problems in tests.

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-862] Iterate over the new block index in dagio.

* [NOD-862] Fix block index key.

* [NOD-828] Add Delete to DataAccessor.

* [NOD-862] Pass byte slices to dbaccess instead of objects.

* [NOD-862] Fix errors.

* [NOD-862] Fix maybeAcceptBlock not checking block existence.

* [NOD-862] Fix TestAcceptanceIndexRecover.

* [NOD-862] Add comments to StoreBlockIndex and BlockIndexCursor.

* [NOD-828] Fix a couple of places that erroneously referenced blocks.

* [NOD-828] Add a comment on top of flatFileLocationSerializedSize.

* [NOD-828] Add Seek to Cursor.

* [NOD-828] Add First to Cursor.

* [NOD-828] Rename db to accessor in Context.

* [NOD-828] Make Get/Fetch calls return a boolean to indicate whether the requested item was found.

* [NOD-828] Name the output parameters of all Get functions.

* [NOD-828] Make RetrieveFromStore return whether the data was found.

* [NOD-862] Fix merge errors.

* [NOD-862] Fix DAGSetup using bad temp directories.

* [NOD-862] Fix TestProcessDelayedBlocks not closing the database properly.

* [NOD-862] Fix merge errors.

* [NOD-862] Merge flushToDBWithContext and flushToDB.

* [NOD-862] Remove TODO.

* [NOD-862] Add prefix to the temp dir in DAGSetup.

* [NOD-862] Bring back dbFetchBlockByHash.

* [NOD-862] Use BlockDAG.BlockByHash in p2p and rpc.

* [NOD-862] Use daghash.Hash in dbaccess.

* [NOD-862] Add defer to RollbackUnlessClosed after NewTx().

* [NOD-862] Extract dbStoreBlock to a separate function.

* [NOD-862] Fix grammar in comment.

* [NOD-862] Fix merge errors.

* [NOD-867] Migrate database logic in blockdag/dagio.go to dbaccess (#675)

* [NOD-828] Create the database2 package that will some day replace the database package.

* [NOD-828] Implement a "bucket" key mechanism.

* [NOD-828] Move bucket.go into the ffldb2 package.

* [NOD-828] Delete the un-interfaced ffldb package from database2, since we aren't going to be using it anyway.

* [NOD-828] Copy over + fixup flat file structs from the old ffldb.

* [NOD-828] Implement flatFilePath.

* [NOD-828] Implement flatFileStore.write().

* [NOD-828] Implement flatFileStore.read().

* [NOD-828] Implement flatFileStore.rollback().

* [NOD-828] Sync the file to disk at the end of write().

* [NOD-828] Extract crc32ByteOrder to a separate variable.

* [NOD-828] Add a sanity test.

* [NOD-828] Remove context-unrelated methods from the Database interface.

* [NOD-828] Create an ffldb object. Simply work against a context.

* [NOD-828] Open the new database on start.

* [NOD-828] Create the leveldb package.

* [NOD-828] Implement opening/closing leveldb.

* [NOD-828] Implement get/put out of/into leveldb.

* [NOD-828] Implement transactions and make them implement a generic database interface.

* [NOD-828] Write sanity tests for leveldb with and without transactions.

* [NOD-828] Add another case to the transaction sanity test.

* [NOD-828] Implement AppendBlock/RetrieveBlock.

* [NOD-828] Refactor so that concepts such as "block" and "metadata" don't leak into the database package.

* [NOD-828] Add RollbackFlatData to the database interface.

* [NOD-828] Remove anything from dbaccess that I'm not planning to implement as part of this ticket.

* [NOD-828] Implement StoreBlock.

* [NOD-828] Implement FetchBlock.

* [NOD-828] Implement HasBlock.

* [NOD-828] Write a sanity test for block insertion.

* [NOD-828] Implement CurrentFlatDataLocation.

* [NOD-828] Implement storing the current block location.

* [NOD-828] Implement initializing/syncing the flat file block store and the "metadata".

* [NOD-828] Add InitBlockStore to TestBlockStoreSanity.

* [NOD-828] Fix rename errors.

* [NOD-828] Fix lint errors in the root database package.

* [NOD-828] Fix lint errors in the ffldb.go.

* [NOD-828] Fix lint errors in the flatfile/db.go.

* [NOD-828] Rename packages in such a way to make the linter happy.

* [NOD-828] Finish satisfying the linter.

* [NOD-828] Fix doc.go.

* [NOD-828] Fix comments in block.go.

* [NOD-828] Move dbaccess out of the database package.

* [NOD-828] Move opening/closing the database to dbaccess.

* [NOD-828] Move the Database interface to the root database package, since it's meant to be generic.

* [NOD-828] Make ffldb generic to later support additional databases.

* [NOD-828] Make ffldb.Open return DatabaseHandle, since ffldb is no longer exported.

* [NOD-828] Fix comments.

* [NOD-828] Rename AppendFlatData to AppendToStore and RetrieveFlatData to RetrieveFromStore.

* [NOD-828] Make buckets nicer to use.

* [NOD-828] Implement cursors that iterate over some bucket.

* [NOD-828] Generalize flat-file repairing and move block database repairing into ffldb.

* [NOD-828] Write a test making sure that flat file repair works.

* [NOD-828] Properly close the database in TestRepairFlatFiles.

* [NOD-828] Add a comment warning against putting and getting the same data within the same transaction.

* [NOD-862] Use dbaccess.HasBlock instead of Tx.HasBlock in initDAGState.

* [NOD-862] Use dbaccess.StoreBlock instead of dbStoreBlock.

* [NOD-862] Use dbaccess.FetchBlock instead of various block fetching mechanisms.

* [NOD-828] Fix the flatFilesBucket description.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Fix the ffldb description.

* [NOD-828] Rename Database to DataAccessor and rename Handle to Database.

* [NOD-828] Make Get return nil if the value doesn't exist.

* [NOD-828] Attempt to close leveldb even if closing ffdb failed.

* [NOD-828] Fix a bug where the wrong location would be written to the current store location bucket.

* [NOD-828] Fix not updating the store location in ffldb transactions.

* [NOD-828] Make scanFlatFiles return an error if os.Stat fails for any reason other than file-not-found.

* [NOD-828] Update the README and doc.go.

* [NOD-828] Simplify Bucket.Path().

* [NOD-828] Since LevelDBCursor satisfies the database2.Cursor interface, use it directly.

* [NOD-828] Combine two lines into one.

* [NOD-828] Combine another two lines into one.

* [NOD-828] Move a misplaced comment.

* [NOD-828] Use Wrapf instead of Errorf where appropriate.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Fix Wrapf calls.

* [NOD-828] Fix comments.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Add a comment explaining the use of batches and snapshots.

* [NOD-828] Implement RollbackUnlessClosed().

* [NOD-828] Return both errors in StoreBlock rollback.

* [NOD-828] Move rollback-on-error logic into ffldb. Remove CurrentStoreLocation and RollbackStore from DataAccessor.

* [NOD-828] Make bucket a type alias instead of a struct.

* [NOD-828] Fix a typo.

* [NOD-828] Use copy instead of append in Bucket.

* [NOD-828] Extract flatFileLocationSerializedSize to a const.

* [NOD-828] Debugf -> Warnf in rollback.go.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Remove data length from flat file data format.

* [NOD-828] Rearrange TestLevelDBTransactionSanity a bit.

* [NOD-828] Add stack traces to all errors that come out of library functions.

* [NOD-828] Return errors from rollback().

* [NOD-828] Remove an irrelevant comment.

* [NOD-828] Remove redundant whitespace.

* [NOD-828] Handle nil in FetchBlock.

* [NOD-828] Implement a dbaccess.BlockNode struct.

* [NOD-828] Move the explanation about batches and snapshots to the LevelDBTransaction struct.

* [NOD-828] Implement toDBBlockNode and fromDBBlockNode.

* Revert "[NOD-828] Make bucket a type alias instead of a struct."

This reverts commit 1fd39652

* [NOD-828] Fix revert errors.

* Revert "[NOD-828] Remove data length from flat file data format."

This reverts commit ef408e32

* [NOD-862] Implement storing index blocks.

* [NOD-862] Use database transactions where appropriate.

* [NOD-862] Fix tests failing on DAGSetup.

* [NOD-862] Fix bad make call.

* [NOD-862] Fix remaining database opening problems in tests.

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-862] Iterate over the new block index in dagio.

* [NOD-862] Fix block index key.

* [NOD-828] Add Delete to DataAccessor.

* [NOD-862] Pass byte slices to dbaccess instead of objects.

* [NOD-862] Fix errors.

* [NOD-862] Fix maybeAcceptBlock not checking block existence.

* [NOD-862] Fix TestAcceptanceIndexRecover.

* [NOD-862] Add comments to StoreBlockIndex and BlockIndexCursor.

* [NOD-828] Fix a couple of places that erroneously referenced blocks.

* [NOD-828] Add a comment on top of flatFileLocationSerializedSize.

* [NOD-828] Add Seek to Cursor.

* [NOD-828] Add First to Cursor.

* [NOD-828] Rename db to accessor in Context.

* [NOD-828] Make Get/Fetch calls return a boolean to indicate whether the requested item was found.

* [NOD-828] Name the output parameters of all Get functions.

* [NOD-828] Make RetrieveFromStore return whether the data was found.

* [NOD-862] Fix merge errors.

* [NOD-862] Fix DAGSetup using bad temp directories.

* [NOD-862] Fix TestProcessDelayedBlocks not closing the database properly.

* [NOD-867] Remove blockIndexBucket from dagio.

* [NOD-867] Fix wrong key in StoreIndexBucket.

* [NOD-867] Migrate DAG state to dbaccess.

* [NOD-867] Remove utxoSetVersionKeyName.

* [NOD-862] Fix merge errors.

* [NOD-867] Move localSubnetworkID into dagState.

* [NOD-867] Fix a comment.

* [NOD-867] Remove an unused function.

* [NOD-867] Migrate the database's UTXO set to dbaccess.

* [NOD-867] Add missing error check.

* [NOD-867] Changed First to not return an error.

* [NOD-867] Make Cursor.Key not return the entire key path.

* [NOD-887] Fix the comment above BlockIndexCursorFrom.

* [NOD-862] Merge flushToDBWithContext and flushToDB.

* [NOD-862] Remove TODO.

* [NOD-862] Add prefix to the temp dir in DAGSetup.

* [NOD-862] Bring back dbFetchBlockByHash.

* [NOD-862] Use BlockDAG.BlockByHash in p2p and rpc.

* [NOD-862] Use daghash.Hash in dbaccess.

* [NOD-862] Add defer to RollbackUnlessClosed after NewTx().

* [NOD-862] Extract dbStoreBlock to a separate function.

* [NOD-867] Remove TODOs.

* [NOD-867] Fix merge errors.

* [NOD-867] Fix comments and errors.

* [NOD-867] Unexport blockIndexKey.

* [NOD-867] Fix merge errors.

* [NOD-867] Move a misplaced comment.

* [NOD-867] Fix an error message.

* [NOD-867] Remove preallocation in initDAGState.

* [NOD-866] Migrate database logic in blockdag/indexers package to dbaccess (#682)

* [NOD-865] Delete blockidhash.go.

* [NOD-865] Remove a lot of no-longer relevant logic from indexers.

* [NOD-865] Pass TxContext to ConnectBlock.

* [NOD-865] Migrate the acceptance index to dbaccess.

* [NOD-865] Fix a block not being sent to ConnectBlock.

* [NOD-865] Pass the block's hash instead of the whole block.

* [NOD-865] Add forgotten Commit call.

* [NOD-865] Add comments.

* [NOD-866] Fix a comment.

* [NOD-866] Fix a comment.

* [NOD-866] Remove pointless indirection in acceptanceindex.

* [NOD-866] Fix comment over ForEachHash.

* [NOD-866] Rename ClearAcceptanceIndex to DropAcceptanceIndex.

* [NOD-866] Explain collecting keys before deleting them.

* [NOD-865] Move misc db logic to db access (#681)

* [NOD-828] Create the database2 package that will some day replace the database package.

* [NOD-828] Implement a "bucket" key mechanism.

* [NOD-828] Move bucket.go into the ffldb2 package.

* [NOD-828] Delete the un-interfaced ffldb package from database2, since we aren't going to be using it anyway.

* [NOD-828] Copy over + fixup flat file structs from the old ffldb.

* [NOD-828] Implement flatFilePath.

* [NOD-828] Implement flatFileStore.write().

* [NOD-828] Implement flatFileStore.read().

* [NOD-828] Implement flatFileStore.rollback().

* [NOD-828] Sync the file to disk at the end of write().

* [NOD-828] Extract crc32ByteOrder to a separate variable.

* [NOD-828] Add a sanity test.

* [NOD-828] Remove context-unrelated methods from the Database interface.

* [NOD-828] Create an ffldb object. Simply work against a context.

* [NOD-828] Open the new database on start.

* [NOD-828] Create the leveldb package.

* [NOD-828] Implement opening/closing leveldb.

* [NOD-828] Implement get/put out of/into leveldb.

* [NOD-828] Implement transactions and make them implement a generic database interface.

* [NOD-828] Write sanity tests for leveldb with and without transactions.

* [NOD-828] Add another case to the transaction sanity test.

* [NOD-828] Implement AppendBlock/RetrieveBlock.

* [NOD-828] Refactor so that concepts such as "block" and "metadata" don't leak into the database package.

* [NOD-828] Add RollbackFlatData to the database interface.

* [NOD-828] Remove anything from dbaccess that I'm not planning to implement as part of this ticket.

* [NOD-828] Implement StoreBlock.

* [NOD-828] Implement FetchBlock.

* [NOD-828] Implement HasBlock.

* [NOD-828] Write a sanity test for block insertion.

* [NOD-828] Implement CurrentFlatDataLocation.

* [NOD-828] Implement storing the current block location.

* [NOD-828] Implement initializing/syncing the flat file block store and the "metadata".

* [NOD-828] Add InitBlockStore to TestBlockStoreSanity.

* [NOD-828] Fix rename errors.

* [NOD-828] Fix lint errors in the root database package.

* [NOD-828] Fix lint errors in the ffldb.go.

* [NOD-828] Fix lint errors in the flatfile/db.go.

* [NOD-828] Rename packages in such a way to make the linter happy.

* [NOD-828] Finish satisfying the linter.

* [NOD-828] Fix doc.go.

* [NOD-828] Fix comments in block.go.

* [NOD-828] Move dbaccess out of the database package.

* [NOD-828] Move opening/closing the database to dbaccess.

* [NOD-828] Move the Database interface to the root database package, since it's meant to be generic.

* [NOD-828] Make ffldb generic to later support additional databases.

* [NOD-828] Make ffldb.Open return DatabaseHandle, since ffldb is no longer exported.

* [NOD-828] Fix comments.

* [NOD-828] Rename AppendFlatData to AppendToStore and RetrieveFlatData to RetrieveFromStore.

* [NOD-828] Make buckets nicer to use.

* [NOD-828] Implement cursors that iterate over some bucket.

* [NOD-828] Generalize flat-file repairing and move block database repairing into ffldb.

* [NOD-828] Write a test making sure that flat file repair works.

* [NOD-828] Properly close the database in TestRepairFlatFiles.

* [NOD-828] Add a comment warning against putting and getting the same data within the same transaction.

* [NOD-828] Fix the flatFilesBucket description.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Fix the ffldb description.

* [NOD-828] Rename Database to DataAccessor and rename Handle to Database.

* [NOD-828] Make Get return nil if the value doesn't exist.

* [NOD-828] Attempt to close leveldb even if closing ffdb failed.

* [NOD-828] Fix a bug where the wrong location would be written to the current store location bucket.

* [NOD-828] Fix not updating the store location in ffldb transactions.

* [NOD-828] Make scanFlatFiles return an error if os.Stat fails for any reason other than file-not-found.

* [NOD-828] Update the README and doc.go.

* [NOD-828] Simplify Bucket.Path().

* [NOD-828] Since LevelDBCursor satisfies the database2.Cursor interface, use it directly.

* [NOD-828] Combine two lines into one.

* [NOD-828] Combine another two lines into one.

* [NOD-828] Move a misplaced comment.

* [NOD-828] Use Wrapf instead of Errorf where appropriate.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Fix Wrapf calls.

* [NOD-828] Fix comments.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Add a comment explaining the use of batches and snapshots.

* [NOD-828] Implement RollbackUnlessClosed().

* [NOD-828] Return both errors in StoreBlock rollback.

* [NOD-828] Move rollback-on-error logic into ffldb. Remove CurrentStoreLocation and RollbackStore from DataAccessor.

* [NOD-828] Make bucket a type alias instead of a struct.

* [NOD-828] Fix a typo.

* [NOD-828] Use copy instead of append in Bucket.

* [NOD-828] Extract flatFileLocationSerializedSize to a const.

* [NOD-828] Debugf -> Warnf in rollback.go.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Remove data length from flat file data format.

* [NOD-828] Rearrange TestLevelDBTransactionSanity a bit.

* [NOD-828] Add stack traces to all errors that come out of library functions.

* [NOD-828] Return errors from rollback().

* [NOD-828] Remove an irrelevant comment.

* [NOD-828] Remove redundant whitespace.

* [NOD-828] Handle nil in FetchBlock.

* [NOD-828] Move the explanation about batches and snapshots to the LevelDBTransaction struct.

* Revert "[NOD-828] Make bucket a type alias instead of a struct."

This reverts commit 1fd39652

* [NOD-828] Fix revert errors.

* Revert "[NOD-828] Remove data length from flat file data format."

This reverts commit ef408e32

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-828] Add Delete to DataAccessor.

* [NOD-865] Move fee data db operations to dbaccess

* [NOD-865] Move reachability data db operations to dbaccess

* [NOD-865] Move UTXO diff data db operations to dbaccess

* [NOD-865] Move subnetwork data db operations to dbaccess

* [NOD-865] Fix createDAGState

* [NOD-865] Remove old Get signature with "exists"

* [NOD-865] Move multiset db operations to dbaccess

* [NOD-865] Use dbaccess transactions where possible

* [NOD-865] Remove old Get signature with "exists"

* [NOD-881] Recover TestGHOSTDAGErrors

* [NOD-865] Create function for db keys

* [NOD-865] Change Exists to Has, and use accessor.Has where possible

* [NOD-865] Make ClearReachabilityData transactional

* [NOD-865] Don't iterate cursors while changing db data

* [NOD-865] Rename RegisterSubnetwork -> StoreSubnetwork

* [NOD-865] Change bucket from utxodiffs to utxo-diffs

* [NOD-865] Rename SubnetworkExists->HasSubnetwork

* [NOD-865] Change a comment

* [NOD-865] Fix tests

* [NOD-865] Fix comment

* [NOD-865] Remove the prefix "db" from some functions

* [NOD-865] Remove redundant comments

* [NOD-865] Make clearBucket function

* [NOD-865] Make clear functions get a dbTx as an arg

* [NOD-865] Remove erroneous tx commit

Co-authored-by: stasatdaglabs <stas@daglabs.com>

* [NOD-868] Delete the old database package (#683)

* [NOD-828] Create the database2 package that will some day replace the database package.

* [NOD-828] Implement a "bucket" key mechanism.

* [NOD-828] Move bucket.go into the ffldb2 package.

* [NOD-828] Delete the un-interfaced ffldb package from database2, since we aren't going to be using it anyway.

* [NOD-828] Copy over + fixup flat file structs from the old ffldb.

* [NOD-828] Implement flatFilePath.

* [NOD-828] Implement flatFileStore.write().

* [NOD-828] Implement flatFileStore.read().

* [NOD-828] Implement flatFileStore.rollback().

* [NOD-828] Sync the file to disk at the end of write().

* [NOD-828] Extract crc32ByteOrder to a separate variable.

* [NOD-828] Add a sanity test.

* [NOD-828] Remove context-unrelated methods from the Database interface.

* [NOD-828] Create an ffldb object. Simply work against a context.

* [NOD-828] Open the new database on start.

* [NOD-828] Create the leveldb package.

* [NOD-828] Implement opening/closing leveldb.

* [NOD-828] Implement get/put out of/into leveldb.

* [NOD-828] Implement transactions and make them implement a generic database interface.

* [NOD-828] Write sanity tests for leveldb with and without transactions.

* [NOD-828] Add another case to the transaction sanity test.

* [NOD-828] Implement AppendBlock/RetrieveBlock.

* [NOD-828] Refactor so that concepts such as "block" and "metadata" don't leak into the database package.

* [NOD-828] Add RollbackFlatData to the database interface.

* [NOD-828] Remove anything from dbaccess that I'm not planning to implement as part of this ticket.

* [NOD-828] Implement StoreBlock.

* [NOD-828] Implement FetchBlock.

* [NOD-828] Implement HasBlock.

* [NOD-828] Write a sanity test for block insertion.

* [NOD-828] Implement CurrentFlatDataLocation.

* [NOD-828] Implement storing the current block location.

* [NOD-828] Implement initializing/syncing the flat file block store and the "metadata".

* [NOD-828] Add InitBlockStore to TestBlockStoreSanity.

* [NOD-828] Fix rename errors.

* [NOD-828] Fix lint errors in the root database package.

* [NOD-828] Fix lint errors in the ffldb.go.

* [NOD-828] Fix lint errors in the flatfile/db.go.

* [NOD-828] Rename packages in such a way to make the linter happy.

* [NOD-828] Finish satisfying the linter.

* [NOD-828] Fix doc.go.

* [NOD-828] Fix comments in block.go.

* [NOD-828] Move dbaccess out of the database package.

* [NOD-828] Move opening/closing the database to dbaccess.

* [NOD-828] Move the Database interface to the root database package, since it's meant to be generic.

* [NOD-828] Make ffldb generic to later support additional databases.

* [NOD-828] Make ffldb.Open return DatabaseHandle, since ffldb is no longer exported.

* [NOD-828] Fix comments.

* [NOD-828] Rename AppendFlatData to AppendToStore and RetrieveFlatData to RetrieveFromStore.

* [NOD-828] Make buckets nicer to use.

* [NOD-828] Implement cursors that iterate over some bucket.

* [NOD-828] Generalize flat-file repairing and move block database repairing into ffldb.

* [NOD-828] Write a test making sure that flat file repair works.

* [NOD-828] Properly close the database in TestRepairFlatFiles.

* [NOD-828] Add a comment warning against putting and getting the same data within the same transaction.

* [NOD-828] Fix the flatFilesBucket description.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Fix the ffldb description.

* [NOD-828] Rename Database to DataAccessor and rename Handle to Database.

* [NOD-828] Make Get return nil if the value doesn't exist.

* [NOD-828] Attempt to close leveldb even if closing ffdb failed.

* [NOD-828] Fix a bug where the wrong location would be written to the current store location bucket.

* [NOD-828] Fix not updating the store location in ffldb transactions.

* [NOD-828] Make scanFlatFiles return an error if os.Stat fails for any reason other than file-not-found.

* [NOD-828] Update the README and doc.go.

* [NOD-828] Simplify Bucket.Path().

* [NOD-828] Since LevelDBCursor satisfies the database2.Cursor interface, use it directly.

* [NOD-828] Combine two lines into one.

* [NOD-828] Combine another two lines into one.

* [NOD-828] Move a misplaced comment.

* [NOD-828] Use Wrapf instead of Errorf where appropriate.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Fix Wrapf calls.

* [NOD-828] Fix comments.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Add a comment explaining the use of batches and snapshots.

* [NOD-828] Implement RollbackUnlessClosed().

* [NOD-828] Return both errors in StoreBlock rollback.

* [NOD-828] Move rollback-on-error logic into ffldb. Remove CurrentStoreLocation and RollbackStore from DataAccessor.

* [NOD-828] Make bucket a type alias instead of a struct.

* [NOD-828] Fix a typo.

* [NOD-828] Use copy instead of append in Bucket.

* [NOD-828] Extract flatFileLocationSerializedSize to a const.

* [NOD-828] Debugf -> Warnf in rollback.go.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Remove data length from flat file data format.

* [NOD-828] Rearrange TestLevelDBTransactionSanity a bit.

* [NOD-828] Add stack traces to all errors that come out of library functions.

* [NOD-828] Return errors from rollback().

* [NOD-828] Remove an irrelevant comment.

* [NOD-828] Remove redundant whitespace.

* [NOD-828] Handle nil in FetchBlock.

* [NOD-828] Move the explanation about batches and snapshots to the LevelDBTransaction struct.

* Revert "[NOD-828] Make bucket a type alias instead of a struct."

This reverts commit 1fd39652

* [NOD-828] Fix revert errors.

* Revert "[NOD-828] Remove data length from flat file data format."

This reverts commit ef408e32

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-828] Add Delete to DataAccessor.

* [NOD-865] Move fee data db operations to dbaccess

* [NOD-865] Move reachability data db operations to dbaccess

* [NOD-865] Move UTXO diff data db operations to dbaccess

* [NOD-865] Move subnetwork data db operations to dbaccess

* [NOD-865] Fix createDAGState

* [NOD-865] Remove old Get signature with "exists"

* [NOD-865] Move multiset db operations to dbaccess

* [NOD-865] Use dbaccess transactions where possible

* [NOD-865] Remove old Get signature with "exists"

* [NOD-881] Recover TestGHOSTDAGErrors

* [NOD-865] Create function for db keys

* [NOD-865] Change Exists to Has, and use accessor.Has where possible

* [NOD-865] Make ClearReachabilityData transactional

* [NOD-865] Don't iterate cursors while changing db data

* [NOD-865] Rename RegisterSubnetwork -> StoreSubnetwork

* [NOD-865] Change bucket from utxodiffs to utxo-diffs

* [NOD-865] Rename SubnetworkExists->HasSubnetwork

* [NOD-865] Change a comment

* [NOD-868] Remove all tests from old database.

* [NOD-868] Remove all unused methods from the old database's interfaces.

* [NOD-865] Fix tests

* [NOD-868] Remove references to DB.

* [NOD-865] Fix comment

* [NOD-868] Remove the old ffldb besides the interface and errors.go.

* [NOD-868] Remove errors.go.

* [NOD-868] Remove the old database package.

* [NOD-868] Add openDB to DAGSetup to emulate the old dbpath in dag.config.

* [NOD-868] Rename database2 to database.

* [NOD-868] Use NewTx instead of NoTx where required.

* [NOD-868] Fix merge errors.

* [NOD-868] Rename dbXXX functions to just xxx.

* [NOD-868] Rename putDAGState to saveDAGState.

* [NOD-868] Replace comments in initDAGState with logs.

* [NOD-868] Explain the openDB parameter in DAGSetup.

* [NOD-868] Fixup doc.go and README.md.

* [NOD-868] Remove pointless transactions.

Co-authored-by: Ori Newman <orinewman1@gmail.com>

* [NOD-805] Fix merge errors.

* [NOD-805] Fix a comment.

* [NOD-805] Don't return virtualTxsAcceptanceData from applyDAGChanges.

* [NOD-805] Add missing error handling in TestAcceptanceDataIndexRecover.

* [NOD-805] Rename blockDAG to dag in indexers/manager.go.

* [NOD-805] Defer cursor.Close() everywhere.

* [NOD-805] Rename scanFlatFiles to findCurrentLocation.

* [NOD-805] Extract crc32ChecksumLength and dataLengthLength to constants.

* [NOD-805] Handle open files properly in rollback.go.

* [NOD-805] Remove unnecessary func wrapper.

* [NOD-805] Remove unnecessary trimming in initialize.

* [NOD-805] Made StoreBlock accept only TxContext.

* [NOD-805] Changed the log level of an error message to Error.

* [NOD-805] Add a note about holding mutexes over deleteFile.

* [NOD-805] Remove a false comment.

* [NOD-805] Fix a comment.

* [NOD-805] Rename blk to block.

* [NOD-805] Extract utxoKey to a separate function.

* [NOD-805] Move dbaccess.xxxKey functions to the tops of their respective files.

* [NOD-805] Fix grammar in dbaccess/db.go.

* [NOD-805] Wrap a failed database corruption recovery error.

* [NOD-805] Split lines with WithStack in them.

* [NOD-805] Fix the comment over initialize.

* [NOD-805] Rename ffdb to flatFileDB and ldb to levelDB.

* [NOD-805] Fix a comment.

* [NOD-805] Fix a comment.

* [NOD-805] Use s.writeCursor instead of cursor.

* [NOD-805] Embed file in lockableFile.

* [NOD-805] the the -> the

* [NOD-805] openDB -> db

* [NOD-805] Use TxContext in all flushToDB functions.

* [NOD-805] Rename context -> dbContext.

* [NOD-805] Reword the comment at the beginning on initDAGState.

* [NOD-805] Explain cursor key trimming.

* [NOD-805] Remove Error from Cursor.

* [NOD-805] Return ErrNotFound from done Cursor Key and Value.

* [NOD-805] Add missing error handling.

* [NOD-805] Fix a comment.

* [NOD-805] Fix a variable name.

* [NOD-805] Remove pointless underscore.

* [NOD-805] Fix a comment.

* [NOD-805] Fix a variable name.

Co-authored-by: Mike Zak <feanorr@gmail.com>
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2020-04-02 13:56:32 +03:00
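
The database commits above converge on a single transaction idiom: open with NewTx, immediately defer RollbackUnlessClosed, and Commit on success. A minimal sketch of that idiom follows; NewTx, RollbackUnlessClosed, StoreBlock and Commit are named in the commits themselves, but the wrapping function and exact signatures are assumptions for illustration.

```go
func storeWithTx(blockBytes []byte) error {
	dbTx, err := dbaccess.NewTx()
	if err != nil {
		return err
	}
	// No-op once Commit has closed the transaction; rolls back on any
	// early return, which is what "Add defer to RollbackUnlessClosed
	// after NewTx()" refers to.
	defer dbTx.RollbackUnlessClosed()

	if err := dbaccess.StoreBlock(dbTx, blockBytes); err != nil {
		return err
	}
	return dbTx.Commit()
}
```
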
Ori Newman
b20a7a679b [NOD-874] If the node is not current, call sm.restartSyncIfNeeded() on handleInvMsg (#684)
* [NOD-874] If the node is not current, call sm.restartSyncIfNeeded() on handleInvMsg

* [NOD-874] Check haveUnknownInvBlock before restartSyncIfNeeded

* [NOD-874] Fix comment

* [NOD-874] Fix comment

* [NOD-874] Fix comment
2020-04-01 12:56:10 +03:00
Ori Newman
36d866375e [NOD-881] Don't recalculate subtreesize for children (#678)
* [NOD-881] Don't recalculate subtreesize for children

* [NOD-881] Make BenchmarkReindexInterval clearer

* [NOD-881] Use b.ResetTimer

* [NOD-881] Fix BenchmarkReindexInterval to use b.N
2020-03-31 12:43:02 +03:00
Svarog
024edc30a3 [NOD-857] Add generalized profiler package and use it everywhere (#679)
* [NOD-857] Add generalized profiler package and use it everywhere

* [NOD-857] Dependency-inject log into profiling.Start()
2020-03-31 12:41:21 +03:00
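
A plausible shape for the generalized profiler package, assuming it simply serves net/http/pprof on a configurable port and takes the dependency-injected logger from the second commit; the real signature may differ.

```go
package profiling

import (
	"net/http"
	_ "net/http/pprof" // registers the pprof handlers on the default mux
)

// Logger is a minimal stand-in for the dependency-injected log.
type Logger interface {
	Infof(format string, args ...interface{})
}

// Start serves pprof profiles on the given port in a background goroutine.
func Start(port string, log Logger) {
	go func() {
		addr := "localhost:" + port
		log.Infof("Profile server listening on %s", addr)
		log.Infof("Profile server quit: %s", http.ListenAndServe(addr, nil))
	}()
}
```
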
Ori Newman
6aa5e0b5a8 [NOD-882] Remove ecc and hdkeychain (#680)
* [NOD-882] Remove ecc and hdkeychain

* [NOD-882] Remove HDCoinType from dagParams
2020-03-31 10:58:11 +03:00
Mike Zak
1a38550fdd Update to version 0.3.0 2020-03-29 14:15:17 +03:00
stasatdaglabs
3e7ebb5a84 [NOD-861] Get rid of dbtool/fetchblockregion.go. (#667) 2020-03-29 12:47:13 +03:00
Svarog
4bca7342d3 [NOD-883] Fix dockerfile in kaspaminer + set real version for go-libsecp256k1 (#673) 2020-03-26 17:50:09 +02:00
Elichai Turkel
f80908fb4e [NOD-876] Replace ecc with go-secp256k1 for public keys (#670)
* Replace ecc with go-secp256k1 in txscript

* Replace ecc with go-secp256k1 in util and cmd

* Replace ecc.Multiset with secp256k1.MultiSet
2020-03-26 17:03:39 +02:00
stasatdaglabs
e000e10738 [NOD-880] Remove CGO_ENABLED=0 from Dockerfile. (#671) 2020-03-26 14:02:57 +02:00
Ori Newman
d83862f36c [NOD-855] Save ECMH for block utxo and not diff utxo (#669)
* [NOD-855] Save ECMH for each block UTXO

* [NOD-855] Remove UpdateExtraNonce method

* [NOD-855] Remove multiset data from UTXO diffs

* [NOD-855] Fix to fetch multiset of selected parent

* [NOD-855] Don't remove coinbase inputs from multiset

* [NOD-855] Create multisetBucketName on startup

* [NOD-855] Remove multiset from UTXO diff tests

* [NOD-855] clear new entries from multisetstore on saveChangesFromBlock

* [NOD-855] Fix tests

* [NOD-855] Use UnacceptedBlueScore when adding current block transactions to multiset

* [NOD-855] Hash utxo before adding it to multiset

* [NOD-855] Pass isCoinbase to NewUTXOEntry

* [NOD-855] Do not use hash when adding entries to multiset

* [NOD-855] When calculating multiset, replace the unaccepted blue score of selected parent transaction with the block blue score

* [NOD-855] Manually add a chained transaction to a block in TestChainedTransactions

* [NOD-855] Change name and comments

* [NOD-855] Use FindAcceptanceData to find a specific block acceptance data

* [NOD-855] Remove redundant copy of txIn.PreviousOutpoint

* [NOD-855] Use fmt.Sprintf when creating internalRPCError
2020-03-26 13:06:12 +02:00
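
The heart of this PR: the ECMH commitment is kept per block rather than per UTXO diff, and is built from the selected parent's multiset. A hedged sketch of that bookkeeping; the interface below is an illustrative stand-in, not the real secp256k1 multiset API.

```go
// multiset is a hypothetical stand-in for the ECMH multiset type.
type multiset interface {
	Add(serializedUTXO []byte)
	Remove(serializedUTXO []byte)
	Hash() [32]byte
}

// calcBlockMultiset inherits the selected parent's multiset, removes the
// entries this block spends, and adds the entries it creates. Coinbase
// inputs spend nothing, so they are simply absent from spent (per the
// "Don't remove coinbase inputs from multiset" commit).
func calcBlockMultiset(selectedParentMultiset multiset, spent, created [][]byte) [32]byte {
	for _, entry := range spent {
		selectedParentMultiset.Remove(entry)
	}
	for _, entry := range created {
		selectedParentMultiset.Add(entry)
	}
	return selectedParentMultiset.Hash()
}
```
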
Svarog
1020402b34 [NOD-869] Close panicHandlerDone instead of sending an empty struct + use time.After instead of time.Tick (#668) 2020-03-25 16:14:08 +02:00
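
Both halves of that one-liner are standard Go hygiene: closing a done-channel releases every waiter at once (a send wakes only one), and time.After, unlike time.Tick, does not leak a ticker that can never be stopped. A sketch with hypothetical names:

```go
package panics

import "time"

var panicHandlerDone = make(chan struct{}) // closed, not sent to, when done

func waitForPanicHandler(timeout time.Duration) {
	select {
	case <-panicHandlerDone:
		// close(panicHandlerDone) unblocks every goroutine selecting here.
	case <-time.After(timeout):
		// One-shot timer, collected after firing; time.Tick here would
		// leak a ticker that can never be stopped.
	}
}
```
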
Mike Zak
bc6ce6ed53 Update version to v0.2.0 2020-03-25 11:51:14 +02:00
Ori Newman
d3b1953deb [NOD-848] optimize utxo diffs serialize allocations (#666)
* [NOD-848] Optimize allocations when serializing UTXO diffs

* [NOD-848] Use same UTXO serialization everywhere, and use compression as well

* [NOD-848] Fix usage of wrong buffer

* [NOD-848] Fix tests

* [NOD-848] Fix wire tests

* [NOD-848] Fix tests

* [NOD-848] Remove VLQ

* [NOD-848] Fix comments

* [NOD-848] Add varint for big endian encoding

* [NOD-848] In TestVarIntWire, assume the expected decoded value is the same as the serialization input

* [NOD-848] Serialize outpoint index with big endian varint

* [NOD-848] Remove p2pk from compression support

* [NOD-848] Fix comments

* [NOD-848] Remove p2pk from decompression support

* [NOD-848] Make entry compression optional

* [NOD-848] Fix tests

* [NOD-848] Fix comments and var names

* [NOD-848] Remove UTXO compression

* [NOD-848] Fix tests

* [NOD-848] Remove big endian varint

* [NOD-848] Fix comments

* [NOD-848] Rename ReadVarIntLittleEndian->ReadVarInt and fix WriteVarInt comment

* [NOD-848] Add outpointIndexByteOrder variable

* [NOD-848] Remove redundant comment

* [NOD-848] Fix outpointMaxSerializeSize to the correct value

* [NOD-848] Move subBuffer to utils
2020-03-24 16:44:41 +02:00
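
The ReadVarInt/WriteVarInt that survives this PR is presumably the little-endian, Bitcoin-style CompactSize encoding (the big-endian variant was added and then removed above). A self-contained sketch of the writer under that assumption:

```go
package wire

import (
	"encoding/binary"
	"io"
	"math"
)

// writeVarInt encodes val with the CompactSize scheme: one discriminator
// byte, then 0, 2, 4 or 8 little-endian payload bytes.
func writeVarInt(w io.Writer, val uint64) error {
	var buf []byte
	switch {
	case val < 0xfd:
		buf = []byte{byte(val)}
	case val <= math.MaxUint16:
		buf = make([]byte, 3)
		buf[0] = 0xfd
		binary.LittleEndian.PutUint16(buf[1:], uint16(val))
	case val <= math.MaxUint32:
		buf = make([]byte, 5)
		buf[0] = 0xfe
		binary.LittleEndian.PutUint32(buf[1:], uint32(val))
	default:
		buf = make([]byte, 9)
		buf[0] = 0xff
		binary.LittleEndian.PutUint64(buf[1:], val)
	}
	_, err := w.Write(buf)
	return err
}
```
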
Svarog
3c67215e76 [NOD-796] Upgrade to go 1.14 (#665) 2020-03-22 14:50:13 +02:00
Svarog
586624c836 [NOD-853] Add profiler server to kaspaminer (#664) 2020-03-19 17:19:31 +02:00
Svarog
49855e6333 [NOD-823] Use WithDiffInPlace for the implementation of WithDiff (#657)
* [NOD-823] Use WithDiffInPlace for the implementation of WithDiff

* [NOD-823] Unexport withDiffInPlace
2020-03-17 11:19:02 +02:00
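
A hedged sketch of the refactor, with the diff reduced to bare outpoint sets so it stays self-contained: WithDiff becomes clone-then-withDiffInPlace, so both operations share one set of merge rules. The real code also tracks blue scores and rejects illegal combinations, per the NOD-778 commits further down.

```go
package blockdag

type outpoint struct {
	txID  string
	index uint32
}

// utxoDiff is a simplified stand-in; kaspad's version maps outpoints to
// full UTXO entries.
type utxoDiff struct {
	toAdd    map[outpoint]struct{}
	toRemove map[outpoint]struct{}
}

func (d *utxoDiff) clone() *utxoDiff {
	c := &utxoDiff{
		toAdd:    make(map[outpoint]struct{}, len(d.toAdd)),
		toRemove: make(map[outpoint]struct{}, len(d.toRemove)),
	}
	for op := range d.toAdd {
		c.toAdd[op] = struct{}{}
	}
	for op := range d.toRemove {
		c.toRemove[op] = struct{}{}
	}
	return c
}

// withDiffInPlace applies other on top of d, mutating d: an addition
// cancels a pending removal and vice versa.
func (d *utxoDiff) withDiffInPlace(other *utxoDiff) error {
	for op := range other.toRemove {
		if _, ok := d.toAdd[op]; ok {
			delete(d.toAdd, op)
		} else {
			d.toRemove[op] = struct{}{}
		}
	}
	for op := range other.toAdd {
		if _, ok := d.toRemove[op]; ok {
			delete(d.toRemove, op)
		} else {
			d.toAdd[op] = struct{}{}
		}
	}
	return nil
}

// withDiff is now just clone + withDiffInPlace.
func (d *utxoDiff) withDiff(other *utxoDiff) (*utxoDiff, error) {
	result := d.clone()
	if err := result.withDiffInPlace(other); err != nil {
		return nil, err
	}
	return result, nil
}
```
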
Ori Newman
624249c0f3 [NOD-842] Use flushToDB with the same transaction as everything else in saveChangesFromBlock and never ignore flushToDB errors (#662) 2020-03-16 11:05:17 +02:00
Ori Newman
1cf443a63b [NOD-841] Fix tests to not be dependent on block rate (#661)
* [NOD-841] Fix TestDifficulty

* [NOD-841] Fix TestProcessDelayedBlocks

* [NOD-841] Fix TestCheckBlockSanity

* [NOD-841] Fix TestProcessDelayedBlocks

* [NOD-841] Shorten long lines
2020-03-15 18:08:03 +02:00
Ori Newman
8909679f44 [NOD-818] Remove time adjustment (#658)
* [NOD-818] Remove time adjustment

* [NOD-818] Remove interface ensuring and copyright message

* [NOD-818] Update comment
2020-03-15 17:37:01 +02:00
Ori Newman
e58efbf0ea [NOD-839] Panic from non-rule error from ProcessBlock (#660) 2020-03-15 17:26:53 +02:00
Ori Newman
34fb066590 [NOD-518] Implement getmempoolentry (#656) 2020-03-12 16:00:18 +02:00
stasatdaglabs
299826f392 [NOD-827] Get rid of dbtools insecureimport.go and loadheaders.go (#655)
* [NOD-827] Get rid of dbtools insecureimport.go and loadheaders.go

* [NOD-827] Remove commands from realMain().
2020-03-10 16:31:13 +02:00
stasatdaglabs
3d8dd8724d [NOD-816] Remove TxIndex and AddrIndex (#653)
* [NOD-816] Remove TxIndex.

* [NOD-816] Remove AddrIndex.

* [NOD-816] Remove mentions of TxIndex and AddrIndex.

* [NOD-816] Remove mentions of getrawtransaction.

* [NOD-816] Remove mentions of searchrawtransaction.

* [NOD-816] Remove cmd/addsubnetwork.

* [NOD-816] Fix a comment.

* [NOD-816] Fix a comment.

* [NOD-816] Implement BlockDAG.TxConfirmations.

* [NOD-816] Return confirmations in getTxOut.

* [NOD-816] Rename TxConfirmations to UTXOConfirmations.

* [NOD-816] Rename txConfirmations to utxoConfirmations.

* [NOD-816] Fix capitalization in variable names.

* [NOD-816] Add acceptance index to addblock.

* [NOD-816] Get rid of txrawresult-confirmations.

* [NOD-816] Fix config flag.
2020-03-10 16:09:31 +02:00
Svarog
b8a00f7519 [NOD-778] Optimize RestoreUTXO (#652)
* [NOD-778] Add WithDiffInPlace

* [NOD-778] Fix bug in WithDiffInPlace

* [NOD-778] Add comment to WithDiffInPlace

* [NOD-778] Add double dag.restoreUTXO to benchmark, to remove time for hard-disk loading

* [NOD-778] Also test WithDiffInPlace in TestUTXODiffRules

* [NOD-778] Add tests for all cases possible in TestUTXODiffRules

* [NOD-778] Fix test-case 'first in toAdd in this, second in toRemove in this and toAdd in other'

* [NOD-778] Fixed in WithDiffInPlace

* [NOD-778] Update error messages when diffFrom(withDiffResult) fails in TestUTXODiffRules

* [NOD-778] diffFrom: disallow UTXOs that are in both d.toAdd and other.toAdd but in only one of d.toRemove and other.toRemove

* [NOD-778] Fix expected value in 'first in toRemove in this, second in toRemove in other'

* [NOD-778] diffFrom: Disallow situations where a UTXO is in both d.toRemove and other.toRemove with different blue scores and has no corresponding UTXO in d.toAdd

* [NOD-778] WithDiff: Fix faulty logic that allows updates to blue scores

* [NOD-778] Fix WithDiffInPlace to pass all tests

* [NOD-778] Deleted temporary prints

* [NOD-778] Sorted TestUTXODiffRules tests according to spreadsheet

* [NOD-778] Delete deeputxo_test.go

* [NOD-778] Updated comments

* [NOD-778] Re-order

* [NOD-778] Re-order test-cases to be according to spreadsheet

* [NOD-778] Simplified case when both d.toRemove and other.toRemove have the same outpoint in diffFrom

* [NOD-778] Change a few error messages that say 'transaction' instead of 'outpoint'

* [NOD-778] Rename: utxoToAdd/Remove -> entryToAdd/Remove

* [NOD-788] Remove redundant else

* [NOD-778] Rename: existingUTXO -> existingEntry + remove redundant else

* [NOD-778] Correct test name
2020-03-10 15:32:19 +02:00
stasatdaglabs
4dfc8cf5b0 [NOD-816] Remove addsubnetwork. (#654) 2020-03-10 11:09:33 +02:00
Ori Newman
5a99e4d2f3 [NOD-806] Exit early after panic (#650)
* [NOD-806] After panic, gracefully stop logs, and then exit immediately

* [NOD-806] Convert non-kaspad applications to use the new spawn

* [NOD-806] Fix disabled log at rpcclient

* [NOD-806] Refactor HandlePanic

* [NOD-806] Cancel Logger interface

* [NOD-806] Remove redundant spawn checks from waitgroup_test.go

* [NOD-806] Use caller subsystem when logging panics

* [NOD-806] Fix go vet errors
2020-03-08 11:24:37 +02:00
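
A minimal sketch of the pattern these commits describe, with illustrative names: every goroutine is started through spawn, and a panic becomes a logged stack trace followed by an immediate exit.

```go
package main

import (
	"fmt"
	"os"
	"runtime/debug"
	"time"
)

// spawn wraps every goroutine so a panic is handled uniformly.
func spawn(f func()) {
	go func() {
		defer handlePanic()
		f()
	}()
}

func handlePanic() {
	err := recover()
	if err == nil {
		return
	}
	// kaspad gracefully stops the logs before exiting; a plain stderr
	// write stands in for that here.
	fmt.Fprintf(os.Stderr, "panic: %v\n%s", err, debug.Stack())
	os.Exit(1)
}

func main() {
	spawn(func() { panic("boom") })
	time.Sleep(time.Second)
}
```
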
Svarog
606cd668ff [NOD-810] Fix error text in lookupParentNodes (#651) 2020-03-05 15:49:36 +02:00
Ori Newman
dd537f5143 [NOD-808] Use syndtr/goleveldb instead of btcsuite/goleveldb. (#649) 2020-03-05 12:26:48 +02:00
stasatdaglabs
a1c631be62 [NOD-798] Disconnect from a peer if a block received from it gets rejected (#648)
* [NOD-798] Disconnect from a peer if its block gets rejected.

* [NOD-798] Make a comment less ambiguous.
2020-03-03 09:47:22 +02:00
Ori Newman
707a728656 [NOD-552] Add NormalizeRPCServerAddress and use it where needed (#643)
* [NOD-552] Add NormalizeRPCServerAddress and use it where needed

* [NOD-552] Make NormalizeAddress return an error for an invalid address

* [NOD-552] Use longer lines for a comment
2020-03-01 16:37:26 +02:00
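
A sketch of the semantics the commit names imply, with an assumed signature: reject empty input and append the network's default RPC port when the address lacks one.

```go
package rpcclient

import (
	"net"

	"github.com/pkg/errors"
)

// normalizeRPCServerAddress is hypothetical in shape; the behavior follows
// the commits above: an empty address is an error, a portless address gets
// the default port, and anything else passes through.
func normalizeRPCServerAddress(addr, defaultPort string) (string, error) {
	if addr == "" {
		return "", errors.New("RPC server address is empty")
	}
	if _, _, err := net.SplitHostPort(addr); err != nil {
		// No port (or otherwise unsplittable input): use the default.
		return net.JoinHostPort(addr, defaultPort), nil
	}
	return addr, nil
}
```
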
stasatdaglabs
80b5631a48 [NOD-726] Only print "no sync peer" message when not current (#646)
* [NOD-726] Only print "no sync peer" message when not current.

* [NOD-726] Shorten duration in which "no sync peer" messages would not print.
2020-02-27 17:38:39 +02:00
Ori Newman
2373965551 [NOD-576] Rename NextHashes to ChildHashes in GetBlock/GetBlockHeaders rpc call (#645)
* [NOD-576] Rename NextHashes to ChildHashes in GetBlock/GetBlockHeaders rpc call

* [NOD-576] Fix typo
2020-02-27 17:34:38 +02:00
Ori Newman
65cbb6655b [NOD-661] Change BCDB subsystem tag (for logs) to KSDB (#644) 2020-02-27 17:30:08 +02:00
Ori Newman
cdd96d0670 [NOD-664] Remove version from everything inside kaspad/cmd - use kaspad version instead (#642)
* [NOD-664] Remove version from everything inside kaspad/cmd - use kaspad version instead

* [NOD-664] Fix broken import
2020-02-27 13:26:22 +02:00
Dan Aharoni
ad04bbde83 [NOD-782] Make sure errors.As gets parameter that implements error interface (#641)
* [NOD-782] Make sure errors.As gets parameter that implements error interface.

* [NOD-782] Pass pointer to errors.As
2020-02-27 12:27:38 +02:00
Ori Newman
5374d95416 [NOD-656] Log hashrate in kaspaminer (#632)
* [NOD-656] Log hashrate in kaspaminer

* [NOD-656] Measure hash rate in kilohashes

* [NOD-656] Show hash rate once in 10 seconds

* [NOD-656] Put hash rate logic in a separate function

* [NOD-656] Create logHashRateInterval constant
2020-02-24 11:59:02 +02:00
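
A sketch of the miner's hash-rate logging under the assumptions the commits state: a worker bumps an atomic counter per attempted nonce, and a reporting loop prints kilohashes per second once every ten seconds. Names are assumptions.

```go
package kaspaminer

import (
	"log"
	"sync/atomic"
	"time"
)

const logHashRateInterval = 10 * time.Second // per the last commit above

// logHashRate reports the rate in kilohashes, as the commits describe.
func logHashRate(hashesTried *uint64) {
	ticker := time.NewTicker(logHashRateInterval)
	defer ticker.Stop()
	var last uint64
	for range ticker.C {
		current := atomic.LoadUint64(hashesTried)
		khs := float64(current-last) / 1000 / logHashRateInterval.Seconds()
		log.Printf("Current hash rate is %.2f Kilohash/s", khs)
		last = current
	}
}
```
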
Ori Newman
de9aa39cc5 [NOD-721] Add defers (#638)
* [NOD-721] Defer unlocks

* [NOD-721] Add functions with locks to rpcmodel

* [NOD-721] Defer unlocks

* [NOD-721] Add filterDataWithLock function

* [NOD-721] Defer unlocks

* [NOD-721] Defer .Close()

* [NOD-721] Fix access to wsc.filterData without a lock

* [NOD-721] De-anonymize some anonymous functions

* [NOD-721] Remove redundant assignments

* [NOD-721] Remove redundant assignments

* [NOD-721] Remove redundant assignments

* [NOD-721] Get rid of submitOld, and break handleGetBlockTemplateLongPoll to smaller functions

* [NOD-721] Rename existsUnspentOutpoint->existsUnspentOutpointNoLock, existsUnspentOutpointWithLock->existsUnspentOutpoint

* [NOD-721] Rename filterDataWithLock->FilterData

* [NOD-721] Fixed comments
2020-02-24 09:19:44 +02:00
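
The shape all of these commits converge on: take the lock, immediately defer the unlock, and let every return path release the mutex. A sketch with simplified types, using the FilterData rename from the commit list:

```go
package rpcserver

import "sync"

type filterData struct{ addresses map[string]struct{} }

type wsClient struct {
	mtx    sync.Mutex
	filter *filterData
}

// FilterData (renamed from filterDataWithLock above) locks once and lets
// defer release the mutex on every return path.
func (c *wsClient) FilterData() *filterData {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	return c.filter
}
```
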
Ori Newman
98987f4a8f [NOD-603] Update validateParents to use reachability (#640)
* [NOD-603] Update validateParents to use reachability

* [NOD-603] Break a long line

* [NOD-721] Remove redundant check if block parent is a tip
2020-02-24 08:59:12 +02:00
Ori Newman
9745f31b69 [NOD-693] Update link to license (#639) 2020-02-20 17:12:53 +02:00
Ori Newman
ee08531a52 [NOD-610] Rename newSet->newBlockSet and setFromSlice->blockSetFromSlice (#635) 2020-02-20 16:19:28 +02:00
stasatdaglabs
61baf7b260 [NOD-769] Add a log for when a reachability reindex occurs (#637)
*  [NOD-719] Added defers to unlocks  (#618)

* [NOD-719] Added defers to unlocks

* [NOD-719] Added another defer to another Unlock

* [NOD-719] Added yet another defer to yet another Unlock

* [NOD-747] Change FinalityInterval to be 24 hours, isCurrent to be true if the DAG's time is no more than 12 hours behind the present, and change MaxInvPerMsg to be 1 << 17 (#625)

* [NOD-769] Add a log for when a reachability reindex occurs.

Co-authored-by: Svarog <feanorr@gmail.com>
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2020-02-19 13:39:45 +02:00
Ori Newman
650e4f735e [NOD-757] Readd addrmanager tests (#628) 2020-02-18 18:12:19 +02:00
Dan Aharoni
550b12b041 [NOD-772] Fix a bug where we ignore the return value of forAllOutboundPeers. (#636) 2020-02-18 18:02:15 +02:00
Ori Newman
a4bb070722 [NOD-754] Fix staticcheck errors (#627)
* [NOD-754] Fix staticcheck errors

* [NOD-754] Remove some unused exported functions

* [NOD-754] Fix staticcheck errors

* [NOD-754] Don't panic if out/in close fails

* [NOD-754] Wrap outside errors with custom message
2020-02-18 16:56:38 +02:00
Ori Newman
30fe0c279b [NOD-738] Move rpcmodel helper functions to pointers package (#629)
* [NOD-738] Move rpcmodel helper functions to copytopointer package

* [NOD-738] Rename copytopointer->pointers
2020-02-18 14:06:34 +02:00
stasatdaglabs
e405dd5981 [NOD-694] Fix requesting blocks that will surely be orphaned during netsync. (#630) 2020-02-18 12:12:34 +02:00
stasatdaglabs
243b4b8021 [NOD-765] Fix database corruption after restart in reachabilitystore and utxodiffstore. (#634) 2020-02-18 12:04:50 +02:00
Ori Newman
dd4c93e1ef [NOD-759] Merge v0.1.1-dev into v0.1.2-dev (#633)
*  [NOD-719] Added defers to unlocks  (#618)

* [NOD-719] Added defers to unlocks

* [NOD-719] Added another defer to another Unlock

* [NOD-719] Added yet another defer to yet another Unlock

* [NOD-747] Change FinalityInterval to be 24 hours, isCurrent to be true if the DAG's time is no more than 12 hours behind the present, and change MaxInvPerMsg to be 1 << 17 (#625)
2020-02-18 11:02:25 +02:00
Ori Newman
a07335d74d [NOD-737] Remove btc prefix from util file names (#631) 2020-02-17 13:11:24 +02:00
Dan Aharoni
7567cd4cb9 [NOD-744] Wrap go routines with spawn (#626)
* [NOD-744] Wrap go routines with spawn

* [NOD-747] Wrap some more go routines with spawn

* [NOD-744] Some more missing go routines

* [NOD-744] Break lines to make code more readable

* [NOD-744] Declare a local scope variable so the func would use it.

* [NOD-744] Fix type and update comment.

* [NOD-744] Declare local var so go routine would use it

* [NOD-744] Rename variable, use normal assignment;

* [NOD-744] Rename variable.
2020-02-13 13:10:07 +02:00
stasatdaglabs
51ff9e2562 [NOD-571] Cover ghostdag in tests where possible (#613)
* [NOD-571] Cover reachabilityInterval split methods.

* [NOD-571] Cover reindexInterval.

* [NOD-571] Cover reachability String() methods.

* [NOD-571] Cover blueAnticoneSize.

* [NOD-571] Remove unnecessary error from setTreeNode.

* [NOD-571] Add TestGHOSTDAGErrors.

* [NOD-571] Use PrepareBlockForTest in TestBlueAnticoneSizeErrors.

* [NOD-571] Use PrepareBlockForTest in TestGHOSTDAGErrors.

* [NOD-571] Add substring checks to TestSplitFractionErrors.

* [NOD-571] Add substring checks to TestSplitExactErrors and TestSplitWithExponentialBiasErrors.

* [NOD-571] Add comments to TestReindexIntervalErrors.

* [NOD-571] Add additional info in some error messages.

* [NOD-571] Fix error messages.
2020-02-09 11:27:10 +02:00
stasatdaglabs
5b8ab63890 [NOD-717] Fix nodes getting stuck in an infinite loop in addrManager.getAddress (#624)
* [NOD-717] Fix nodes getting stuck in an infinite loop in addrManager.getAddress.

* [NOD-717] Rename ResetFailedAttempts -> NotifyConnectionRequestComplete.
2020-02-06 18:17:10 +02:00
Dan Aharoni
3dd7dc4496 [NOD-727] Do not allow delayed blocks from RPC. (#623)
* [NOD-727] Do not allow delayed blocks from RPC.

* [NOD-727] Refactor sentFromRPC -> DisallowDelay

* [NOD-727] Clarify comment; Clarify error message.

* [NOD-727] Change error message.
2020-02-05 11:14:26 +02:00
Ori Newman
d90a08ecfa [NOD-722] Fix processBlockMsg case in blockHandler to send only one response to msg.reply, and rename blockHandler->messageHandler (#622) 2020-02-04 18:10:15 +02:00
Ori Newman
45dc1a3e7b [NOD-545] Remove headers first related logic (#621)
* [NOD-545] Remove headers first related logic

* [NOD-545] Fix tests

* [NOD-545] Change getTopHeadersMaxHeaders to be equal to getHeadersMaxHeaders
2020-02-04 14:54:42 +02:00
Ori Newman
4ffb5daa37 [NOD-622] Fix populateTemplateFromCandidates to sort txsForBlockTemplate.txMasses and txsForBlockTemplate.txFees (#617)
* [NOD-622] Fix populateTemplateFromCandidates to sort txsForBlockTemplate.txMasses and txsForBlockTemplate.txFees

* [NOD-622] Sort transactions in PrepareBlockForTest

* [NOD-622] Remove duplicate append of selected transactions
2020-02-03 13:42:40 +02:00
Ori Newman
b9138b720d [NOD-597] Make BlockIndex clear its dirty entries only after it successfully written them to disk (#620) 2020-02-03 13:39:25 +02:00
Ori Newman
d8954f1339 [NOD-615] Make bluesAnticoneSizes a map with *blockNode as a key (#619) 2020-02-03 12:40:39 +02:00
Ori Newman
eb953286ec [NOD-641] Upgrade to github.com/pkg/errors v0.9.1 and use errors.As where needed (#614)
* [NOD-641] Upgrade to github.com/pkg/errors v0.9.1 and use errors.As where needed

* [NOD-641] Fix find and replace error

* [NOD-641] Use errors.As for error type checking

* [NOD-641] Fix errors.As for pointer types

* [NOD-641] Use errors.As where needed

* [NOD-641] Rename rErr->ruleErr

* [NOD-641] Rename derr->dbErr

* [NOD-641] e->flagsErr where necessary

* [NOD-641] change jerr to more appropriate name

* [NOD-641] Rename cerr->bdRuleErr

* [NOD-641] Rename serr->scriptErr

* [NOD-641] Use errors.Is instead of testutil.AreErrorsEqual in TestNewHashFromStr

* [NOD-641] Rename bdRuleErr->dagRuleErr

* [NOD-641] Rename mErr->msgErr

* [NOD-641] Rename dErr->deserializeErr
2020-02-03 12:38:33 +02:00
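
A small sketch of the error handling this PR standardizes on, using pkg/errors wrapping with the Go 1.13-style As/Is helpers; RuleError is an illustrative stand-in for kaspad's rule-violation type.

```go
package main

import (
	"fmt"
	"io"

	"github.com/pkg/errors"
)

// RuleError is an illustrative stand-in.
type RuleError struct{ Description string }

func (e RuleError) Error() string { return e.Description }

func classify(err error) {
	// errors.As needs a pointer to the concrete target ("Fix errors.As
	// for pointer types" above); it unwraps the chain until it matches.
	var ruleErr RuleError
	if errors.As(err, &ruleErr) {
		fmt.Println("rule violation:", ruleErr.Description)
		return
	}
	// errors.Is compares against sentinel values through the same chain.
	if errors.Is(err, io.EOF) {
		fmt.Println("stream ended")
	}
}

func main() {
	classify(errors.Wrap(RuleError{"bad block"}, "processing failed"))
}
```
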
Ori Newman
41c8178ad3 [NOD-648] Add TestProcessDelayedBlocks (#612)
* [NOD-648] Add TestProcessDelayedBlocks

* [NOD-648] Add one second to secondsUntilDelayedBlockIsValid to make sure the delayedBlock timestamp will be valid, and add comments

* [NOD-648] Remove redundant import

* [NOD-648] Use fakeTimeSource instead of time.Sleep

* [NOD-648] Rename dag.HaveBlock->dag.IsKnownBlock, dag.BlockExists->dag.IsInDAG

* [NOD-648] Add comment

* [NOD-641] Rename HaveBlock->IsKnownBlock, BlockExists->IsInDAG
2020-02-03 11:30:03 +02:00
Ori Newman
aa74b51e6f [NOD-687] Remove -gcflags='-l' from all tests (#616) 2020-02-02 15:26:26 +02:00
Mike Zak
f7800eb5c4 Merge remote-tracking branch 'origin/v0.1.1-dev' into v0.1.2-dev 2020-02-02 15:17:25 +02:00
stasatdaglabs
193add502f [NOD-716] Fix a crash in GetTopHeaders. (#615) 2020-02-02 13:51:53 +02:00
Ori Newman
44c55900f8 [NOD-715] Replace testDbRoot with os.TempDir() (#611) 2020-01-30 12:54:15 +02:00
Ori Newman
4c0ea78026 [NOD-586] Remove subTreeSize from reachabilityTreeNode (#610)
* [NOD-586] Remove subTreeSize from reachabilityTreeNode

* [NOD-586] Convert else { if { ... } } to else if { ... }
2020-01-30 10:39:53 +02:00
stasatdaglabs
03a93fe51e [NOD-647] Create a default config file even if the sample default config file is missing (#609)
* [NOD-647] Create a default config file even if the sample default config file is missing.

* [NOD-647] Unfancify WriteString().
2020-01-29 17:40:59 +02:00
Mike Zak
eca0514465 Merge remote-tracking branch 'origin/v0.1.1-dev' into v0.1.2-dev 2020-01-28 14:39:22 +02:00
stasatdaglabs
aadbebb720 [NOD-691] Remove addTrying from AddrManager. (#608) 2020-01-28 13:57:02 +02:00
Mike Zak
5daab45947 Merge remote-tracking branch 'origin/v0.1.1-dev' into v0.1.2-dev 2020-01-28 12:25:52 +02:00
stasatdaglabs
607b838ded [NOD-702] Fix netsync slowing down significantly due to excessive allocs in serializeUTXO (#605)
* [NOD-702] Fix netsync slowing down significantly due to excessive allocs in serializeUTXO.

* [NOD-702] Fix bad make statement.

* [NOD-702] Move writeBuffer to flushToDB.
2020-01-28 12:24:09 +02:00
Mike Zak
25bdaeed31 Merge remote-tracking branch 'origin/v0.1.1-dev' into v0.1.2-dev 2020-01-28 11:35:21 +02:00
Dan Aharoni
8b2d3f07ce [NOD-636] Prevent db corruption on crash. (#607)
* [NOD-636] Scope err so defer anonymous defer function will get it.

* [NOD-636] Add comment to explain why this line is needed.

* [NOD-636] Edit comment.
2020-01-28 11:04:41 +02:00
Mike Zak
a3dc2f7da7 Update version to v0.1.2 2020-01-28 10:53:10 +02:00
Svarog
bf36f9ceb6 [NOD-704] Call dag.IsInSelectedParentChain in every iteration of dag.SelectedParentChain (#606) 2020-01-27 17:09:41 +02:00
stasatdaglabs
11de12304e [NOD-700] Convert blockSet to map[*blockNode]struct{} (#604)
* [NOD-700] Convert blockSet to map[*blockNode]struct{}.

* [NOD-700] Rename bluestNode to bluestBlock in bluest().

* [NOD-700] Make IsInSelectedParentChain not use the now-slower containsHash.

* [NOD-700] Rename block to node in blockset.go.

* [NOD-700] Remove containsHash and hashesEqual.

* [NOD-700] Add a comment to IsInSelectedParentChain about how it'll fail if the given blockHash is not within the block index.
2020-01-27 11:48:58 +02:00
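
The adopted representation, with blockNode stubbed: an empty-struct map keyed by node pointer gives O(1) membership at zero bytes per value, and makes the now-removed containsHash/hashesEqual unnecessary.

```go
package blockdag

type blockNode struct{ /* fields elided */ }

// blockSet is the representation adopted above.
type blockSet map[*blockNode]struct{}

func (bs blockSet) add(node *blockNode) { bs[node] = struct{}{} }

func (bs blockSet) contains(node *blockNode) bool {
	_, ok := bs[node]
	return ok
}
```
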
Ori Newman
a10320ad7b [NOD-696] Handle panics on time.AfterFunc (#600)
* [NOD-696] Handle panics on time.AfterFunc

* [NOD-696] Fix comment

* [NOD-696] Rename afterFunc->spawnAfter
2020-01-27 11:12:23 +02:00
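
spawnAfter is presumably time.AfterFunc plus the same panic handling every spawned goroutine gets; a self-contained sketch with the handler inlined:

```go
package main

import (
	"fmt"
	"os"
	"runtime/debug"
	"time"
)

// spawnAfter runs f after d under a panic handler, per the rename above.
func spawnAfter(d time.Duration, f func()) *time.Timer {
	return time.AfterFunc(d, func() {
		defer func() {
			if err := recover(); err != nil {
				fmt.Fprintf(os.Stderr, "panic in timer callback: %v\n%s", err, debug.Stack())
				os.Exit(1)
			}
		}()
		f()
	})
}

func main() {
	spawnAfter(10*time.Millisecond, func() { fmt.Println("fired") })
	time.Sleep(50 * time.Millisecond)
}
```
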
Ori Newman
fd2bbf3557 [NOD-698] Change confirmations to be selectedTip.blueScore-acceptingBlock.blueScore+1 (#602) 2020-01-27 11:10:27 +02:00
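
The commit title as a helper function (names assumed): a UTXO whose accepting block is the selected tip itself has exactly 1 confirmation.

```go
func utxoConfirmations(selectedTipBlueScore, acceptingBlockBlueScore uint64) uint64 {
	return selectedTipBlueScore - acceptingBlockBlueScore + 1
}
```
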
stasatdaglabs
7f9cf17274 [NOD-697] In blockLocator, return an error if lowHash blueScore >= highHash blueScore. (#601) 2020-01-26 15:44:58 +02:00
Ori Newman
ba0e239557 [NOD-692] Fix thresholdState to use blueBlockWindow instead of selected chain height (#599) 2020-01-23 17:17:56 +02:00
Dan Aharoni
ed606bfda3 [NOD-575] Change devent address prefix to kaspadev. (#598) 2020-01-23 15:36:40 +02:00
Dan Aharoni
c0463a8a68 [NOD-675] Replace start-hash/stop-hash with meaningful names (#597)
* [NOD-675] Rename startHash/stopHash to lowHash/highHash

* [NOD-675] Fix typo

* [NOD-675] Undo go.mod go.sum conflicts

* [NOD-675] revert back to startHash for getChainFromBlock.

* [NOD-675] Revert back to startHash in getChainFromBlock leftovers.

* [NOD-675] Fix test name.
2020-01-22 18:14:42 +02:00
Dan Aharoni
52e0a0967d [NOD-629] change GHOSTDAG k to uint8 (#594)
* [NOD-629] Change GHOSTDAG K to a single byte using a dedicated type

* [NOD-629] Rename variable

* [NOD-629] Rename K to KSize

* [NOD-629] Remove redundant casting

* [NOD-629] Add test for KSize

* [NOD-629] Separate block serialization and db store

* [NOD-629] Make sure K is serialized as uint8

* [NOD-629] Rename KSize to KType

* [NOD-629] Comment for test

* [NOD-629] Change fail message

* [NOD-629] Remove newlines

* [NOD-629] Fix test

* [NOD-629] Do not use maxuint8, but !0 instead

* [NOD-629] Fix test

* [NOD-629] Merge conflict

* [NOD-629] Fix test; Update comment
2020-01-22 16:58:53 +02:00
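
A sketch of the typed K: a dedicated uint8 type pins GHOSTDAG's K to a single byte in serialization, and ^KType(0) is the "!0" maximum the commits mention. Names follow the commit messages; details are assumed.

```go
package blockdag

import "io"

// KType keeps GHOSTDAG's K parameter a single byte on the wire and in the
// database (renamed KSize -> KType above).
type KType uint8

const maxKType = ^KType(0) // 255, without importing math.MaxUint8

// serializeK writes K as exactly one byte, keeping serialization separate
// from the database store as the commits describe.
func serializeK(w io.Writer, k KType) error {
	_, err := w.Write([]byte{byte(k)})
	return err
}
```
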
Ori Newman
29bcc271b5 [NOD-652] Add selected tip and get selected tip messages (#595)
* [NOD-652] Add selectedTip and getSelectedTip messages

* [NOD-652] Remove peerSyncState.isSelectedTipKnown

* [NOD-652] Do nothing on OnSelectedTip if the peer selected tip hasn't changed

* [NOD-652] Handle selected tip message with block handler

* [NOD-652] Add comments

* [NOD-652] go mod tidy

* [NOD-652] Fix TestVersion

* [NOD-652] Use dag.AdjustedTime instead of dag.timeSource.AdjustedTime

* [NOD-652] Create shouldQueryPeerSelectedTips and queueMsgGetSelectedTip functions

* [NOD-652] Change selectedTip to selectedTipHash where needed

* [NOD-652] add minDAGTimeDelay constant

* [NOD-652] add comments

* [NOD-652] Fix names and comments

* [NOD-652] Put msg.reply push in the right place

* [NOD-652] Fix comments and names
2020-01-22 16:34:21 +02:00
stasatdaglabs
94ec159147 [NOD-676] Remove blockLocator defaults. (#596) 2020-01-22 14:05:01 +02:00
stasatdaglabs
9d434de4a5 [NOD-640] Revamp blueBlocksBetween to return up to maxEntries from lowNode's antiPast to highNode's antiFuture (#593)
* [NOD-640] Revamp blueBlocksBetween to return up to maxEntries from highNode's antiFuture.

* [NOD-640] Fix bad traversal.

* [NOD-640] Use more accurate len.

* [NOD-640] Use more appropriate len in another place.

* [NOD-640] Remove the whole business with highNode's anticone.

* [NOD-640] Rename highNodeAntiFuture to candidateNodes.

* [NOD-640] Explain the highNode.blueScore-lowNode.blueScore+1 approximation.

* [NOD-640] UpHeap -> upHeap.

* [NOD-640] Fix off-by-one error.

* [NOD-640] Rename blueBlocksBetween to antiPastBetween.

* [NOD-640] upHeap -> up-heap.

* [NOD-640] Use a classic for to populate nodes.

* [NOD-640] Reworded a comment.

* [NOD-640] Clarify a comment.

* [NOD-640] Fix nodes declaration.
2020-01-22 12:13:55 +02:00
stasatdaglabs
49418f4222 [NOD-669] Rename start/endHash -> low/highHash (#591)
* [NOD-669] Remove the "get" from getBlueBlocksBetween.

* [NOD-669] Remove the "Get" from GetBlueBlocksHeadersBetween.

* [NOD-669] In blueBlocksBetween, rename startHash to lowHash and stopHash to highHash.

* [NOD-669] Rename startHash to lowHash and stopHash to highHash in blockLocator logic.

* [NOD-669] Remove zeroHash logic in blockLocator.

* [NOD-669] Finish renaming startHash and stopHash in blockdag.

* [NOD-669] Rename startHash and stopHash in blockdag where I previously missed it.

* [NOD-669] Rename startHash and stopHash in blockdag where I previously missed it some more.

* [NOD-669] Rename startHash and stopHash in blockdag where I previously missed it some more some more.

* [NOD-669] Fix bad grammar in method names.

* [NOD-669] Rename lowHash to blockHash in SelectedParentChain.

* [NOD-669] Fix a comment.
2020-01-20 12:47:16 +02:00
stasatdaglabs
38b4749f20 [NOD-669] Fix startSync sending a blockLocatorMsg with a zeroHash instead of the peer's selectedTip (#592)
* [NOD-669] Fix startSync sending a blockLocatorMsg with a zeroHash instead of the peer's selectedTip.

* [NOD-669] Rename bestPeer to syncPeer.

* [NOD-669] Fix comments.
2020-01-20 12:29:17 +02:00
stasatdaglabs
045984e6b9 [NOD-665] Initialize blockNode blueScore to be MaxUint64 by default (#590)
* [NOD-665] Initialize blockNode blueScore to be MaxUint64 by default.

* [NOD-665] Remove redundant err declaration.
2020-01-19 15:21:43 +02:00
Ori Newman
38883d1a98 [NOD-650] Remove CPU miner from the node and add kaspaminer in ./cmd (#587)
* [NOD-650] Add kaspaminer

* [NOD-650] Remove CPU miner

* [NOD-650] Fix comments and error messages

* [NOD-650] Remove redundant check for closing foundBlock

* [NOD-650] Submit block synchronically

* [NOD-650] Use ParseUint instead of ParseInt

* [NOD-650] Rearrange functions order in mineloop.go

* [NOD-650] Add block delay CLI argument to kaspaminer

* [NOD-650] Remove redundant spawn

* [NOD-650] Add Dockerfile for kaspaminer

* [NOD-650] Remove redundant comments

* [NOD-650] Remove tests from kaspaminer Dockerfile

* [NOD-650] Remove redundant argument on OnFilteredBlockAdded
2020-01-19 15:18:26 +02:00
stasatdaglabs
b5f365d282 [NOD-668] Rename height to blueScore in OnFilteredBlockAdded. (#589) 2020-01-16 14:10:26 +02:00
stasatdaglabs
a7d3a40465 [NOD-646] Fix initDAGState treating invalid blocks as genesis blocks. (#588) 2020-01-16 13:21:20 +02:00
stasatdaglabs
359b16fca9 [NOD-616] Remove blockNode.chainHeight (#586)
* [NOD-616] Remove unused methods from BlockDAG.

* [NOD-616] Remove Height from GetRawMempoolVerboseResult and TxDesc.

* [NOD-616] Replaced BlockDAG.ChainHeight with SelectedTipBlueScore.

* [NOD-616] Remove the unused BlockChainHeightByHash.

* [NOD-616] Remove the unused blockChainHeight from checkBlockHeaderContext.

* [NOD-616] Remove chainHeight from util.Block.

* [NOD-616] Remove TestChainHeight.

* [NOD-616] Update unknown rule activation warning to use blueScore.

* [NOD-616] Update thresholdState to use blueScore instead of chainHeight.

* [NOD-616] Update blockLocator to use blueScore instead of chainHeight.

* [NOD-616] Remove blockNode.chainHeight.

* [NOD-616] Fix comments and variable names.

* [NOD-616] Replace a weird for loop with a while loop.

* [NOD-616] Fix a comment.

* [NOD-616] Remove pre-allocation in blockLocator.

* [NOD-616] Coalesce checks that startHash and stopHash are not the same into the same condition.

* [NOD-616] Fix a comment.

* [NOD-616] Remove weird blueScore logic around childHashStrings.

* [NOD-616] Fix hash pointer comparison.

* [NOD-616] Fix a comment.

* [NOD-616] Add ban score to peers misusing GetBlockLocator.

* [NOD-616] Replace adding ban score with disconnecting.

* [NOD-616] Add blueScore to FilteredBlockAddedNtfn.
2020-01-16 13:09:16 +02:00
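For context on the NOD-616 locator changes above: a block locator is a list of hashes that starts dense near the tip and spaces out exponentially, so a peer can find the latest common block in O(log n) hashes. A hedged sketch of that construction over a selected-parent chain; the node type and step schedule are illustrative, not kaspad's exact code:

```go
package locator

// node is a hypothetical selected-parent-chain element; kaspad's real
// blockNode carries much more state (including the blueScore these
// commits switch to).
type node struct {
	hash           [32]byte
	selectedParent *node
}

// blockLocator walks the selected-parent chain from highNode down, doubling
// the step after the first handful of hashes so the locator stays
// logarithmic in chain length.
func blockLocator(highNode *node) [][32]byte {
	var locator [][32]byte
	step := 1
	for n := highNode; n != nil; {
		locator = append(locator, n.hash)
		// Advance step parents down the selected-parent chain.
		for i := 0; i < step && n != nil; i++ {
			n = n.selectedParent
		}
		if len(locator) > 10 {
			step *= 2
		}
	}
	return locator
}
```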
Mike Zak
8b8e73feb5 Merge branch 'v0.1.1-dev' of github.com:kaspanet/kaspad into v0.1.1-dev 2020-01-13 16:59:40 +02:00
Svarog
6044b6ac1a [NOD-643] Remove monkey patch (#585)
* [NOD-643] Removed any mentions of monkey.Patch

* [NOD-643] Removed monkey from go.mod
2020-01-13 16:59:16 +02:00
stasatdaglabs
a177ea4f15 [NOD-555] Remove build scripts. (#581) 2020-01-13 13:37:21 +02:00
Mike Zak
3a15aa4bae Update version to v0.1.1 2020-01-13 12:24:36 +02:00
Ori Newman
427185b6a8 [NOD-635] Change testnet maximum difficulty (#582) 2020-01-09 18:10:23 +02:00
Svarog
b282734a3f [NOD-626] Delete CHANGES file (#580) 2020-01-08 18:43:50 +02:00
Ori Newman
6d765f58ba [NOD-570] Separate genesis variables for different networks (#578)
* [NOD-570] Separate genesis variables for different networks

* [NOD-570] Make Testnet genesis

* [NOD-570] Make simnet and regtest genesis

* [NOD-570] Remake devnet genesis

* [NOD-570] Rename regNet -> regTest and testnet -> testNet

* [NOD-570] Change network names to one word instead of camel case

* [NOD-570] Change network names to one word instead of camel case

* [NOD-570] Fix test names

* [NOD-570] Fix TestGHOSTDAG

Co-authored-by: Dan Aharoni <dereeno@protonmail.com>
2020-01-08 18:42:47 +02:00
Svarog
20819ca4cd [NOD-505] Try reading the response in case of upnp error (#576) 2020-01-08 17:51:31 +02:00
Ori Newman
2174a0a7f2 [NOD-497] Implement GHOSTDAG (#575)
* [NOD-540] Implement reachability (#545)

* [NOD-540] Begin implementing reachability.

* [NOD-540] Finish implementing reachability.

* [NOD-540] Implement TestIsFutureBlock.

* [NOD-540] Implement TestInsertFutureBlock.

* [NOD-540] Add comments.

* [NOD-540] Add comment for interval in blockNode.

* [NOD-540] Updated comments over insertFutureBlock and isFutureBlock.

* [NOD-540] Implement interval splitting methods.

* [NOD-540] Begin implementing tree manipulation in blockNode.

* [NOD-540] Implement countSubtreesUp.

* [NOD-540] Add a comment explaining an impossible condition.

* [NOD-540] Implement applyIntervalDown.

* [NOD-540] Moved the reachability tree stuff into reachability.go.

* [NOD-540] Add some comments.

* [NOD-540] Add more comments, implement isInPast.

* [NOD-540] Fix comments.

* [NOD-540] Implement TestSplitFraction.

* [NOD-540] Implement TestSplitExact.

* [NOD-540] Implement TestSplit.

* [NOD-540] Add comments to structs.

* [NOD-540] Implement TestAddTreeChild.

* [NOD-540] Fix a comment.

* [NOD-540] Rename isInPast to isAncestorOf.

* [NOD-540] Rename futureBlocks to futureCoveringSet.

* [NOD-540] Rename isFutureBlock to isInFuture.

* [NOD-540] move reachabilityInterval to the top of reachability.go.

* [NOD-540] Change "s.t." to "such that" in a comment.

* [NOD-540] Fix indentation.

* [NOD-540] Fix a potential bug involving float inaccuracy.

* [NOD-540] Wrote a more descriptive error message.

* [NOD-540] Fix error message.

* [NOD-540] Fix the recursive countSubtreesUp.

* [NOD-540] Rename countSubtreesUp to countSubtrees and applyIntervalDown to propagateInterval.

* [NOD-540] Implement updating reachability for a valid new block.

* [NOD-540] Implement a disk storage for reachability data.

* [NOD-540] Fix not all tree nodes being written to the database.

* [NOD-540] Implement serialization for reachabilityData.

* [NOD-540] Implement some deserialization for reachabilityData.

* [NOD-540] Implement restoring the reachabilityStore on node restart.

* [NOD-540] Made interval and remainingInterval pointers.

* [NOD-540] Rename setTreeInterval to setInterval.

* [NOD-540] Rename reindexTreeIntervals to reindexIntervals and fixed the comment above it.

* [NOD-540] Expand the comment above reindexIntervals.

* [NOD-540] Fix comment above countSubtrees.

* [NOD-540] Fix comment above countSubtrees some more.

* [NOD-540] Fix comment above split.

* [NOD-540] Fix comment above isAncestorOf.

* [NOD-540] Fix comment above reachabilityTreeNode.

* [NOD-540] Fix weird condition in addTreeChild.

* [NOD-540] Rename addTreeChild to addChild.

* [NOD-540] Fix weird condition in splitFraction.

* [NOD-540] Reverse the lines in reachabilityTreeNode.String().

* [NOD-540] Renamed f to fraction and x to size.

* [NOD-540] Fix comment above bisect.

* [NOD-540] Implement rtn.isAncestorOf().

* [NOD-540] Use treeNode isAncestorOf instead of treeInterval isAncestorOf.

* [NOD-540] Use newReachabilityInterval instead of struct initialization.

* [NOD-540] Make reachabilityTreeNode.String() use strings.Join.

* [NOD-540] Use sync.RWMutex instead of locks.PriorityMutex.

* [NOD-540] Rename thisTreeNode to newTreeNode.

* [NOD-540] Rename setTreeNode to addTreeNode.

* [NOD-540] Extracted selectedParentAnticone to a separate function.

* [NOD-540] Rename node to this.

* [NOD-540] Move updateReachability and isAncestorOf from dag.go to reachability.go.

* [NOD-540] Add whitespace after multiline function signatures in reachability.go.

* [NOD-540] Make splitFraction return an error on empty interval.

* [NOD-540] Add a comment about rounding to splitFraction.

* [NOD-540] Replace sneaky tabs with spaces.

* [NOD-540] Rename split to splitExponential.

* [NOD-540] Extract exponentialFractions to a separate function.

* [NOD-540] Rename bisect to findIndex.

* [NOD-540] Add call to reachabilityStore.clearDirtyEntries at the end of saveChangesFromBlock.

* [NOD-540] Explain the dirty hack in reachabilityStore.init().

* [NOD-540] Split the function signature for deserializeReachabilityData to two lines.

* [NOD-540] Add a comment about float precision loss to exponentialFractions.

* [NOD-540] Corrected a comment about float precision loss to exponentialFractions.

* [NOD-540] Fixed a comment about float precision loss to exponentialFractions some more.

* [NOD-540] Added further comments above futureCoveringBlockSet.

* [NOD-540] Rename addTreeNode to setTreeNode.

* [NOD-540] Rename splitExponential to splitWithExponentialBias.

* [NOD-540] Fix object references in reachabilityData deserialization (#563)

* [NOD-540] Fix broken references in deserialization.

* [NOD-540] Fix broken references in futureCoveringSet deserialization. Also add comments.

* [NOD-540] Don't deserialize on the first pass in reachabilityStore.init().

* [NOD-540] Remove redundant assignment to loaded[hash].

* [NOD-540] Use NewHash instead of SetBytes. Rename data to destination.

* [NOD-540] Preallocate futureCoveringSet.

* [NOD-541] Implement GHOSTDAG (#560)

* [NOD-541] Implement GHOSTDAG

* [NOD-541] Replace the old PHANTOM variant with GHOSTDAG

* [NOD-541] Move dag.updateReachability to the top of dag.applyDAGChanges to update reachability before the virtual block is updated

* [NOD-541] Fix blueAnticoneSize

* [NOD-541] Initialize node.bluesAnticoneSizes

* [NOD-541] Fix pastUTXO and applyBlueBlocks blues order

* [NOD-541] Add serialization logic to node.bluesAnticoneSizes

* [NOD-541] Fix GHOSTDAG to not count the new block and the blue candidates anticone, add selected parent to blues, and save to node.bluesAnticoneSizes properly

* [NOD-541] Fix test names in inner strings

* [NOD-541] Writing TestGHOSTDAG

* [NOD-541] In blueAnticoneSize change node->current

* [NOD-541] name ghostdag return values

* [NOD-541] fix ghostdag to return slice

* [NOD-541] Split k-cluster violation rules

* [NOD-541] Add missing space

* [NOD-541] Add comment to ghostdag

* [NOD-541] In selectedParentAnticone rename past->selectedParentPast

* [NOD-541] Fix misreferences to TestChainUpdates

* [NOD-541] Fix ghostdag comment

* [NOD-541] Make PrepareBlockForTest in blockdag package

* [NOD-541] Make PrepareBlockForTest in blockdag package

* [NOD-541] Assign to selectedParentAnticone[i] instead of appending

* [NOD-541] Remove redundant forceTransactions arguments from PrepareBlockForTest

* [NOD-541] Add non-selected parents to anticoneHeap

* [NOD-541] add test for ghostdag

* [NOD-541] Add comments

* [NOD-541] Use adjusted time for initializing blockNode

* [NOD-541] Rename isAncestorOf -> isAncestorOfBlueCandidate

* [NOD-541] Remove params from PrepareBlockForTest

* [NOD-541] Fix TestChainHeight

* [NOD-541] Remove recursive lock

* [NOD-541] Fix TestTxIndexConnectBlock

* [NOD-541] Fix TestBlueBlockWindow

* [NOD-541] Put prepareAndProcessBlock in common_test.go

* [NOD-541] Fix TestConfirmations

* [NOD-541] Fix TestAcceptingBlock

* [NOD-541] Fix TestDifficulty

* [NOD-541] Fix TestVirtualBlock

* [NOD-541] Fix TestSelectedPath

* [NOD-541] Fix TestChainUpdates

* [NOD-541] Shorten TestDifficulty test time

* [NOD-541] Make PrepareBlockForTest use minimal valid block time

* [NOD-541] Remove TODO comment

* [NOD-541] Move blockdag related mining functions to mining.go

* [NOD-541] Use NextBlockCoinbaseTransaction instead of NextBlockCoinbaseTransactionNoLock in NextCoinbaseFromAddress

* [NOD-541] Remove useMinimalTime from BlockForMining

* [NOD-541] Make MedianAdjustedTime a *BlockDAG method

* [NOD-541] Fix ghostdag to use anticone slice instead of heap

* [NOD-541] Fix NewBlockTemplate locks

* [NOD-541] Fix ghostdag comments

* [NOD-541] Convert MedianAdjustedTime to NextBlockTime

* [NOD-541] Fix ghostdag comment

* [NOD-541] Fix TestGHOSTDAG comment

* [NOD-541] Add comment before sanity check

* [NOD-541] Explicitly initialize .blues in ghostdag

* [NOD-541] Rename *blockNode.lessThan to *blockNode.less

* [NOD-541] Remove redundant check if block != chainBlock

* [NOD-541] Fix comment

* [NOD-541] Fix comment

* [NOD-497] Add comment; General refactoring

* [NOD-497] General refactoring.

* [NOD-497] Use isAncestor of the tree rather than the node

* [NOD-497] Remove reachability mutex lock as it is redundant (dag lock is held so no need); General refactoring.

* [NOD-497] Update comment

* [NOD-497] Undo test blocktimestamp

* [NOD-497] Update comments; Use BlockNode.less for blockset;

* [NOD-497] Change processBlock to return boolean and not the delay duration (merge conflict)

* [NOD-497] Undo change for bluest to use less; Change blocknode less to use daghash.Less

Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
Co-authored-by: Dan Aharoni <dereeno@protonmail.com>
2020-01-08 17:41:22 +02:00
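The reachability work in the NOD-540 commits above rests on one idea: allocate each block in the reachability tree an integer interval nested inside its parent's interval, so "is B an ancestor of C" becomes a constant-time containment check instead of a graph traversal (the splitting and reindexing commits exist to keep those intervals allocatable as the tree grows). A minimal sketch of the containment check, with hypothetical types rather than kaspad's reachabilityTreeNode:

```go
package main

import "fmt"

// interval is a hypothetical stand-in for kaspad's reachabilityInterval:
// each tree node is allocated a range such that a descendant's range is
// contained in its ancestor's.
type interval struct {
	start, end uint64
}

// contains reports whether other lies fully inside i.
func (i *interval) contains(other *interval) bool {
	return i.start <= other.start && other.end <= i.end
}

// isAncestorOf is the O(1) ancestry query the NOD-540 commits build toward:
// no graph traversal, just an interval containment check.
func isAncestorOf(ancestor, descendant *interval) bool {
	return ancestor.contains(descendant)
}

func main() {
	root := &interval{start: 0, end: 1000}
	child := &interval{start: 0, end: 499}
	grandchild := &interval{start: 100, end: 199}
	fmt.Println(isAncestorOf(root, grandchild))  // true
	fmt.Println(isAncestorOf(child, grandchild)) // true
	fmt.Println(isAncestorOf(grandchild, child)) // false
}
```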
Dan Aharoni
ea6f7a28c2 [NOD-420] Process delayed blocks (#529)
* [NOD-420] Delay blocks with valid timestamp (non-delayed) that point to a delayed block.

* [NOD-420] Mark block as requested when setting as delayed.

* [NOD-420] Merge master; Use dag.timeSource.AdjustedTime() instead of time.Now;

* [NOD-420] Return nil when not expecting an error

* [NOD-420] Initialise delayed blocks mapping

* [NOD-420] Trigger delayed blocks processing every time we process a block.

* [NOD-420] Hold the read lock in processDelayedBlocks

* [NOD-420] Add delayed blocks heap sorted by their process time so we could process them in order.

* [NOD-420] Update debug log

* [NOD-420] Fix process blocks loop

* [NOD-420] Add comment

* [NOD-420] Log error message

* [NOD-420] Implement peek method for delayed block heap. Extract delayed block processing to another function.

* [NOD-420] Trigger process delayed blocks only in process block

* [NOD-420] Move delayed block addition to process block

* [NOD-420] Use process block to make sure we fully process the delayed block and deal with orphans.

* [NOD-420] Unexport functions when not needed; Return isDelayed boolean from ProcessBlock instead of the delay duration

* [NOD-420] Remove redundant delayedBlocksLock

* [NOD-420] Resolve merge conflict; Return delay 0 instead of boolean

* [NOD-420] Do not treat delayed block as orphan

* [NOD-420] Make sure block is not processed if we have already set it as delayed.

* [NOD-420] Process delayed block if parent is delayed to make sure it would not be treated as orphan.

* [NOD-420] Rename variable

* [NOD-420] Rename function. Move maxDelayOfParents to process.go

* [NOD-420] Fix typo

* [NOD-420] Handle errors from processDelayedBlocks properly

* [NOD-420] Return default values if err != nil from dag.addDelayedBlock

* [NOD-420] Return default values if err != nil from dag.addDelayedBlock in another place

Co-authored-by: Svarog <feanorr@gmail.com>
2020-01-08 15:28:52 +02:00
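The NOD-420 commits above queue blocks whose timestamps are too far in the future on "a delayed blocks heap sorted by their process time" with a peek method. A sketch of such a heap using Go's container/heap; the delayedBlock fields and names are illustrative, not kaspad's:

```go
package main

import (
	"container/heap"
	"fmt"
	"time"
)

// delayedBlock pairs a block (here just a name) with the time at which it
// becomes eligible for processing.
type delayedBlock struct {
	name        string
	processTime time.Time
}

// delayedBlocksHeap is a min-heap ordered by processTime.
type delayedBlocksHeap []*delayedBlock

func (h delayedBlocksHeap) Len() int            { return len(h) }
func (h delayedBlocksHeap) Less(i, j int) bool  { return h[i].processTime.Before(h[j].processTime) }
func (h delayedBlocksHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *delayedBlocksHeap) Push(x interface{}) { *h = append(*h, x.(*delayedBlock)) }
func (h *delayedBlocksHeap) Pop() interface{} {
	old := *h
	n := len(old)
	item := old[n-1]
	*h = old[:n-1]
	return item
}

// peek returns the earliest-due block without removing it.
func (h delayedBlocksHeap) peek() *delayedBlock { return h[0] }

func main() {
	h := &delayedBlocksHeap{}
	heap.Init(h)
	heap.Push(h, &delayedBlock{"late", time.Now().Add(10 * time.Second)})
	heap.Push(h, &delayedBlock{"due", time.Now().Add(-time.Second)})
	// Process every block whose delay has already elapsed, earliest first.
	for h.Len() > 0 && !h.peek().processTime.After(time.Now()) {
		fmt.Println("processing", heap.Pop(h).(*delayedBlock).name)
	}
	fmt.Println("next due:", h.peek().name)
}
```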
Svarog
ac9aa74a75 [NOD-549] Use version.Version anywhere relevant + update related tests (#577)
* [NOD-549] Update version to 0.1.0 and allow injection of appBuild

* [NOD-549] Fixed peer tests

* [NOD-549] Fixed wire tests

* [NOD-549] Remove any mention of semVer.

* [NOD-549] Don't include appBuild at all if it includes invalid characters

* [NOD-549] Panic if appBuild contains invalid characters

* [NOD-549] Move checkAppBuild into

* [NOD-549] Update comment
2020-01-08 12:14:59 +02:00
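The NOD-549 commits above let the build inject an appBuild suffix and panic when it contains invalid characters. A hedged sketch of how such a version string might be assembled; the variable names, allowed charset, and ldflags hook are assumptions, not kaspad's actual version package:

```go
package main

import (
	"fmt"
	"strings"
)

// appBuild would be injected at build time, e.g. via
// -ldflags "-X main.appBuild=beta" (this variable name is illustrative).
var appBuild string

// validCharacters is an assumed charset for build tags.
const validCharacters = "0123456789abcdefghijklmnopqrstuvwxyz-"

// version assembles "major.minor.patch[-build]" and panics on an invalid
// build tag, matching the "panic if appBuild contains invalid characters"
// behavior the NOD-549 commits describe.
func version() string {
	v := "0.1.0"
	if appBuild != "" {
		checkAppBuild(appBuild)
		v = fmt.Sprintf("%s-%s", v, appBuild)
	}
	return v
}

func checkAppBuild(build string) {
	for _, r := range build {
		if !strings.ContainsRune(validCharacters, r) {
			panic(fmt.Sprintf("appBuild contains invalid character %q", r))
		}
	}
}

func main() {
	fmt.Println(version())
}
```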
Ori Newman
d46857677f [NOD-503] Remove Tor functionality (#573) 2020-01-08 10:45:27 +02:00
stasatdaglabs
cd719b1d5b [NOD-546] Report build failures to Discord instead of Telegram (#572)
* [NOD-546] Report build failures to Discord instead of Telegram.

* [NOD-546] Make a temporary compilation error.

* [NOD-546] Make a couple of temporary print outs.

* [NOD-546] Remove temporary debug stuff.

* [NOD-546] Make notify_discord() return early if Discord variables are not set.
2020-01-06 16:36:21 +02:00
Svarog
7cf15ac93b [NOD-549] Update version to 0.1.0 and allow injection of appBuild (#568)
* [NOD-549] Update version to 0.1.0 and allow injection of appBuild

* [NOD-549] Fixed peer tests

* [NOD-549] Fixed wire tests

* [NOD-549] Remove any mention of semVer.

* [NOD-549] Don't include appBuild at all if it includes invalid characters

* [NOD-549] Panic if appBuild contains invalid characters

* [NOD-549] Move checkAppBuild into
2020-01-06 15:30:00 +02:00
Svarog
d8e3191469 [NOD-619] Disable tor related cli flags (#571)
* [NOD-619] Disable tor related cli flags

* [NOD-619] Disabled --proxy and related flags

* [NOD-619] Added missing space
2020-01-06 14:50:32 +02:00
Svarog
784d3de4ca [NOD-618] Removed outdated docs folder (#570) 2020-01-06 10:48:41 +02:00
Ori Newman
733d06af5a [NOD-617] Remove test coverage from test.sh (#569)
* [NOD-617] Remove test coverage from test.sh

* [NOD-617] Log coverage
2020-01-06 10:31:09 +02:00
stasatdaglabs
df91643976 [NOD-608] Make the user agent read the version from the version package. (#566) 2020-01-05 16:47:58 +02:00
stasatdaglabs
ebf635e6ff [NOD-559] Remove cmd/genaddr. (#567) 2020-01-05 11:41:14 +02:00
stasatdaglabs
e41d9866c3 [NOD-613] Fix concurrent access to ecmh cache (#565)
* [NOD-613] Fix concurrent access to ecmh cache.

* [NOD-613] Localized dag.Lock().
2020-01-02 18:03:25 +02:00
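The NOD-613 fix above is the standard cure for concurrent map access in Go: give the shared cache its own mutex, or consistently hold an outer lock around it, as the "Localized dag.Lock()" bullet suggests. A generic sketch of the pattern, with placeholder types rather than the real ECMH multiset cache:

```go
package main

import (
	"fmt"
	"sync"
)

// pointCache sketches the fix pattern: every access to a cache shared
// between goroutines is serialized by a lock. Types are placeholders.
type pointCache struct {
	mtx   sync.Mutex
	cache map[[32]byte]string
}

func (c *pointCache) get(key [32]byte) (string, bool) {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	v, ok := c.cache[key]
	return v, ok
}

func (c *pointCache) put(key [32]byte, value string) {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	c.cache[key] = value
}

func main() {
	c := &pointCache{cache: make(map[[32]byte]string)}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i byte) {
			defer wg.Done()
			c.put([32]byte{i}, "point") // safe: map access is serialized
		}(byte(i))
	}
	wg.Wait()
	v, _ := c.get([32]byte{0})
	fmt.Println(v)
}
```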
stasatdaglabs
d984151549 [NOD-517] Update doc.go files (#559)
* [NOD-517] Remove copyright notices from all doc.go.

* [NOD-517] Updated the root doc.go.

* [NOD-517] Remove all cov_report.sh and test_coverage.txt.

* [NOD-517] Make all doc.go use the same style of comment.

* [NOD-517] Update dagconfig doc.go.

* [NOD-517] Update blockdag doc.go.

* [NOD-517] Update doc.go in connmgr.

* [NOD-517] Update doc.go in fullblocktests.

* [NOD-517] Update doc.go in database.

* [NOD-517] Update doc.go in ecc.

* [NOD-517] Update doc.go in rpctest.

* [NOD-517] Removed superfluous license in logs.

* [NOD-517] Update doc.go in mempool.

* [NOD-517] Updated doc.go in peer.

* [NOD-517] Update doc.go in rpcclient.

* [NOD-517] Update doc.go in txscript.

* [NOD-517] Update doc.go in util.

* [NOD-517] Update doc.go in base58.

* [NOD-517] Update doc.go in bech32.

* [NOD-517] Update doc.go in txsort.

* [NOD-517] Update doc.go in wire.

* [NOD-517] Fix indentation.

* [NOD-517] Add a copyright notice to the main doc.go.

* [NOD-517] Add Conformal to the license notices.

* [NOD-517] Remove superfluous language from a doc.

* [NOD-517] Fix bad example.
2020-01-02 16:57:43 +02:00
stasatdaglabs
6099ce56bd [NOD-5] Remove TestFullBlocks and package fullblocktests (#564) 2019-12-31 16:16:10 +02:00
Svarog
e0b5c145f7 [NOD-543] Added dnsseeds to testnet and mainnet (#562) 2019-12-31 10:57:43 +02:00
Ori Newman
cf37f733ef [NOD-601] Omit nil selected parent in GetBlockVerboseResult (#561) 2019-12-30 18:44:17 +02:00
Ori Newman
66a92a243c [NOD-591] Add selected parent to GetBlockVerboseResult (#558)
* [NOD-591] Add selected parent to GetBlockVerboseResult

* [NOD-591] Add selected parent to GetBlockHeaderResult
2019-12-29 12:46:35 +02:00
Ori Newman
4a88eea57e [NOD-590] Export newLogClosure (#557) 2019-12-26 18:26:22 +02:00
aspect
fbaf360a42 Fix missing pkg/errors reference in windows-only file (#547)
* Fixing missing pkg/errors reference

* go fmt pass
2019-12-25 15:50:43 +02:00
Svarog
1346810af8 [NOD-583] Move InterruptListener() to beginning of main (#555) 2019-12-25 13:14:28 +02:00
Svarog
9cbab94264 [NOD-579] Remove erroneous dependency on github.com/prometheus/common/log (#554) 2019-12-25 11:54:36 +02:00
Svarog
48f29cc11f [NOD-401] Allow to pass PrefixUnknown to ParseAddress + add .Prefix() to addresses (#553)
* [NOD-401] Created CLI-Wallet base structure and new command

* [NOD-401] Switched to go-flags sub-command parsing

* [NOD-401] Added config for all sub-commands

* [NOD-401] Work in progress for send command in cli-wallet

* [NOD-401] Allow to pass PrefixUnknown to ParseAddress + add .Prefix() to addresses

* [NOD-401] Finished implementing all wallet commands

* [NOD-401] some refactorings to sendTx

* [NOD-401] Moved wallet to kasparov repo + updated tests with new prefixes
2019-12-24 15:38:47 +02:00
Ori Newman
e2b57e6231 [NOD-566] Do not accept coinbase transaction from the selected parent anticone (#552) 2019-12-19 18:05:55 +02:00
Ori Newman
f72afc8bbb [NOD-499] Change ports and network magics (#550)
* [NOD-499] Change network magics

* [NOD-499] Change default rpc ports

* [NOD-499] Change default p2p ports

* [NOD-499] Change port 18333 to 10433 everywhere

* [NOD-499] Change port 8333 to 10333 everywhere

* [NOD-499] Fix TestElementWire

* [NOD-499] Fix tests

* [NOD-499] Change port 10333->16111 and 10332->16110

* [NOD-499] Change port 10433->16211 and 10432->16210

* [NOD-499] Change port 10633->16511 and 10632->16510

* [NOD-499] Change port 10533->16611 and 10532->16610
2019-12-18 16:18:37 +02:00
stasatdaglabs
0d1f447cb7 [NOD-510] Fix comments so that they don't mention bitcoin. (#551) 2019-12-18 13:46:32 +02:00
Svarog
818f8c93eb [NOD-551] Move httpserverutils to kasparov (#549)
* [NOD-511] Move httpserverutils to kasparov repository
2019-12-18 12:51:05 +02:00
Svarog
264ffaae93 [NOD-495] Move out non-kaspad apps (#548)
* [NOD-495] Remove txgen to separate repository

* [NOD-495] Remove DNSSeeder to separate repository

* [NOD-495] Remove kasparov to separate repository

* [NOD-495] Remove miningsimulator to separate repository

* [NOD-495] httpserverutils should use kaspad logger package

* [NOD-495] Remove faucet to separate repository

* [NOD-495] httpserverutils should use kasparov logger
2019-12-18 12:14:07 +02:00
stasatdaglabs
03b7af9a13 [NOD-532] Replace "chain" with "DAG" where appropriate (#537)
* [NOD-532] Change chain to DAG in the root package.

* [NOD-532] Change chain to DAG in checkpoints.go.

* [NOD-532] Change chain to DAG in blockdag.

* [NOD-532] Change chain to DAG in cmd.

* [NOD-532] Change chain to DAG in dagconfig.

* [NOD-532] Change chain to DAG in database.

* [NOD-532] Change chain to DAG in mempool.

* [NOD-532] Change chain to DAG in mempool.

* [NOD-532] Change chain to DAG in netsync.

* [NOD-532] Change chain to DAG in rpcclient.

* [NOD-532] Change chain to DAG in server.

* [NOD-532] Change chain to DAG in txscript.

* [NOD-532] Change chain to DAG in util.

* [NOD-532] Change chain to DAG in wire.

* [NOD-532] Remove block heights in dagio.go examples.

* [NOD-532] Rename fakeChain to fakeDAG.

* [NOD-532] Fix comments, remove unused EnableBCInfoHacks flag.

* [NOD-532] Fix comments and variable names.

* [NOD-532] Fix comments.

* [NOD-532] Fix merge errors.

* [NOD-532] Formatted project.
2019-12-17 13:40:03 +02:00
Ori Newman
e3d7e83d44 [NOD-548] Remove default dns seed from devnet (#546)
* [NOD-390] Add faucet Dockerfile

* [NOD-390] Allow running migration without -api-server-url and --private-key arguments

* [NOD-390] Change kasparov-server to kasparovd in its Dockerfile

* [NOD-548] Remove default DNS seed from devnet
2019-12-17 12:26:27 +02:00
Ori Newman
07651e51c8 [NOD-390] Add faucet dockerfile (#544)
* [NOD-390] Add faucet Dockerfile

* [NOD-390] Allow running migration without -api-server-url and --private-key arguments

* [NOD-390] Change kasparov-server to kasparovd in its Dockerfile

* [NOD-390] Change API server and Kasparov server to kasparovd
2019-12-17 11:10:59 +02:00
Svarog
1cd2eb9308 [NOD-494] Update readmes (#543)
* [NOD-494] Updated main README.md

* [NOD-494] Updated blockdag/README.md

* [NOD-494] Aligned text length in main README.md

* [NOD-494] Updated most remaining packages READMEs + deleted util/coinset

* [NOD-494] Update integration README

* [NOD-494] Did a final pass over all readmes

* [NOD-494] Updated README for DNSSeeder with more info on how to create a functioning setup

* [NOD-494] Remove all double spaces from readmes

* [NOD-494] Minor fixes in READMEs + update license to kaspanet developers

* [NOD-494] Add backtick around ecc and util in hdkeychain README
2019-12-16 17:37:17 +02:00
stasatdaglabs
a140327dd2 [NOD-500] Remove checkpoints (#541)
* [NOD-502] Remove checkpoints.

* [NOD-502] Remove remaining references to checkpoints.

* [NOD-500] Split RejectFinality to RejectDifficulty.

* [NOD-500] Remove support for headers-first in p2p.

* [NOD-500] Panic in newHashFromStr in case of an error.
2019-12-16 17:22:10 +02:00
stasatdaglabs
c1f7ae72e0 [NOD-514] Change dagcoin to kaspa, dagtest to kaspatest, dagreg to kaspareg, and dagsim to kaspasim (#538)
* [NOD-514] Change dagcoin to kaspa, dagtest to kaspatest, etc.

* [NOD-514] Remove no-longer-relevant link in doc.
2019-12-15 18:24:15 +02:00
stasatdaglabs
3a12fe9b1d [NOD-516] Remove cmd/genesis. (#542) 2019-12-15 14:50:04 +02:00
stasatdaglabs
c25c9b25bd [NOD-502] Remove RPCQuirks. (#540) 2019-12-15 14:49:22 +02:00
stasatdaglabs
f46dec449d [NOD-510] Change all references to Bitcoin to Kaspa (#531)
* [NOD-510] Change coinbase flags to kaspad.

* [NOD-510] Removed superfluous spaces after periods in comments.

* [NOD-510] Rename btcd -> kaspad in the root folder.

* [NOD-510] Rename BtcEncode -> KaspaEncode and BtcDecode -> KaspaDecode.

* [NOD-510] Rename BtcEncode -> KaspaEncode and BtcDecode -> KaspaDecode.

* [NOD-510] Continue renaming btcd -> kaspad.

* [NOD-510] Rename btcjson -> kaspajson.

* [NOD-510] Rename file names inside kaspajson.

* [NOD-510] Rename kaspajson -> jsonrpc.

* [NOD-510] Finish renaming in addrmgr.

* [NOD-510] Rename package btcec to ecc.

* [NOD-510] Finish renaming stuff in blockdag.

* [NOD-510] Rename stuff in cmd.

* [NOD-510] Rename stuff in config.

* [NOD-510] Rename stuff in connmgr.

* [NOD-510] Rename stuff in dagconfig.

* [NOD-510] Rename stuff in database.

* [NOD-510] Rename stuff in docker.

* [NOD-510] Rename stuff in integration.

* [NOD-510] Rename jsonrpc to rpcmodel.

* [NOD-510] Rename stuff in limits.

* [NOD-510] Rename stuff in logger.

* [NOD-510] Rename stuff in mempool.

* [NOD-510] Rename stuff in mining.

* [NOD-510] Rename stuff in netsync.

* [NOD-510] Rename stuff in peer.

* [NOD-510] Rename stuff in release.

* [NOD-510] Rename stuff in rpcclient.

* [NOD-510] Rename stuff in server.

* [NOD-510] Rename stuff in signal.

* [NOD-510] Rename stuff in txscript.

* [NOD-510] Rename stuff in util.

* [NOD-510] Rename stuff in wire.

* [NOD-510] Fix failing tests.

* [NOD-510] Fix merge errors.

* [NOD-510] Fix go vet errors.

* [NOD-510] Remove merged file that's no longer relevant.

* [NOD-510] Add a comment above Op0.

* [NOD-510] Fix some comments referencing Bitcoin Core.

* [NOD-510] Fix some more comments referencing Bitcoin Core.

* [NOD-510] Fix bitcoin -> kaspa.

* [NOD-510] Fix more bitcoin -> kaspa.

* [NOD-510] Fix comments, remove DisconnectBlock in addrindex.

* [NOD-510] Rename KSPD to KASD.

* [NOD-510] Fix comments and user agent.
2019-12-12 15:21:41 +02:00
Ori Newman
60ab6330ff [NOD-521] Add blocks to Kasparov DB immediately after getblocks request (#532) 2019-12-12 10:14:37 +02:00
Dan Aharoni
89dee3e005 [NOD-533] Kaspad renaming in docker (#536)
* [NOD-533] Rename kasparov folder leftovers

* [NOD-533] Rename btcd to kaspad

* [NOD-533] Fix folder name

* [NOD-533] Add file name
2019-12-12 10:09:57 +02:00
Dan Aharoni
70d7009985 [NOD-533] Rename kasparov folder leftovers (#535) 2019-12-11 16:43:39 +02:00
998 changed files with 40680 additions and 94810 deletions

.gitignore (2 changed lines)

@@ -2,7 +2,7 @@
*~
# Databases
-btcd.db
+kaspad.db
*-shm
*-wal

CHANGES (955 lines deleted)

@@ -1,955 +0,0 @@
============================================================================
User visible changes for btcd
A full-node bitcoin implementation written in Go
============================================================================
Changes in 0.12.0 (Fri Nov 20 2015)
- Protocol and network related changes:
- Add a new checkpoint at block height 382320 (#555)
- Implement BIP0065 which includes support for version 4 blocks, a new
consensus opcode (OP_CHECKLOCKTIMEVERIFY) that enforces transaction
lock times, and a double-threshold switchover mechanism (#535, #459,
#455)
- Implement BIP0111 which provides a new bloom filter service flag and
hence provides support for protocol version 70011 (#499)
- Add a new parameter --nopeerbloomfilters to allow disabling bloom
filter support (#499)
- Reject non-canonically encoded variable length integers (#507)
- Add mainnet peer discovery DNS seed (seed.bitcoin.jonasschnelli.ch)
(#496)
- Correct reconnect handling for persistent peers (#463, #464)
- Ignore requests for block headers if not fully synced (#444)
- Add CLI support for specifying the zone id on IPv6 addresses (#538)
- Fix a couple of issues where the initial block sync could stall (#518,
#229, #486)
- Fix an issue which prevented the --onion option from working as
intended (#446)
- Transaction relay (memory pool) changes:
- Require transactions to only include signatures encoded with the
canonical 'low-s' encoding (#512)
- Add a new parameter --minrelaytxfee to allow the minimum transaction
fee in BTC/kB to be overridden (#520)
- Retain memory pool transactions when they redeem another one that is
removed when a block is accepted (#539)
- Do not send reject messages for a transaction if it is valid but
causes an orphan transaction which depends on it to be determined
as invalid (#546)
- Refrain from attempting to add orphans to the memory pool multiple
times when the transaction they redeem is added (#551)
- Modify minimum transaction fee calculations to scale based on bytes
instead of full kilobyte boundaries (#521, #537)
- Implement signature cache:
- Provides a limited memory cache of validated signatures which is a
huge optimization when verifying blocks for transactions that are
already in the memory pool (#506)
- Add a new parameter '--sigcachemaxsize' which allows the size of the
new cache to be manually changed if desired (#506)
- Mining support changes:
- Notify getblocktemplate long polling clients when a block is pushed
via submitblock (#488)
- Speed up getblocktemplate by making use of the new signature cache
(#506)
- RPC changes:
- Implement getmempoolinfo command (#453)
- Implement getblockheader command (#461)
- Modify createrawtransaction command to accept a new optional parameter
'locktime' (#529)
- Modify listunspent result to include the 'spendable' field (#440)
- Modify getinfo command to include 'errors' field (#511)
- Add timestamps to blockconnected and blockdisconnected notifications
(#450)
- Several modifications to searchrawtransactions command:
- Accept a new optional parameter 'vinextra' which causes the results
to include information about the outputs referenced by a transaction's
inputs (#485, #487)
- Skip entries in the mempool too (#495)
- Accept a new optional parameter 'reverse' to return the results in
reverse order (most recent to oldest) (#497)
- Accept a new optional parameter 'filteraddrs' which causes the
results to only include inputs and outputs which involve the
provided addresses (#516)
- Change the notification order to notify clients about mined
transactions (recvtx, redeemingtx) before the blockconnected
notification (#449)
- Update verifymessage RPC to use the standard algorithm so it is
compatible with other implementations (#515)
- Improve ping statistics by pinging on an interval (#517)
- Websocket changes:
- Implement session command which returns a per-session unique id (#500,
#503)
- btcctl utility changes:
- Add getmempoolinfo command (#453)
- Add getblockheader command (#461)
- Add getwalletinfo command (#471)
- Notable developer-related package changes:
- Introduce a new peer package which acts as a common base for creating and
concurrently managing bitcoin network peers (#445)
- Various cleanup of the new peer package (#528, #531, #524, #534,
#549)
- Block heights now consistently use int32 everywhere (#481)
- The BlockHeader type in the wire package now provides the BtcDecode
and BtcEncode methods (#467)
- Update wire package to recognize BIP0064 (getutxo) service bit (#489)
- Export LockTimeThreshold constant from txscript package (#454)
- Export MaxDataCarrierSize constant from txscript package (#466)
- Provide new IsUnspendable function from the txscript package (#478)
- Export variable length string functions from the wire package (#514)
- Export DNS Seeds for each network from the chaincfg package (#544)
- Preliminary work towards separating the memory pool into a separate
package (#525, #548)
- Misc changes:
- Various documentation updates (#442, #462, #465, #460, #470, #473,
#505, #530, #545)
- Add installation instructions for gentoo (#542)
- Ensure an error is shown if OS limits can't be set at startup (#498)
- Tighten the standardness checks for multisig scripts (#526)
- Test coverage improvement (#468, #494, #527, #543, #550)
- Several optimizations (#457, #474, #475, #476, #508, #509)
- Minor code cleanup and refactoring (#472, #479, #482, #519, #540)
- Contributors (alphabetical order):
- Ben Echols
- Bruno Clermont
- danda
- Daniel Krawisz
- Dario Nieuwenhuis
- Dave Collins
- David Hill
- Javed Khan
- Jonathan Gillham
- Joseph Becher
- Josh Rickmar
- Justus Ranvier
- Mawuli Adzoe
- Olaoluwa Osuntokun
- Rune T. Aune
Changes in 0.11.1 (Wed May 27 2015)
- Protocol and network related changes:
- Use correct sub-command in reject message for rejected transactions
(#436, #437)
- Add a new parameter --torisolation which forces new circuits for each
connection when using tor (#430)
- Transaction relay (memory pool) changes:
- Reduce the default maximum number of allowed orphan transactions
to 1000 (#419)
- Add a new parameter --maxorphantx which allows the maximum number of
orphan transactions stored in the mempool to be specified (#419)
- RPC changes:
- Modify listtransactions result to include the 'involveswatchonly' and
'vout' fields (#427)
- Update getrawtransaction result to omit the 'confirmations' field
when it is 0 (#420, #422)
- Update signrawtransaction result to include errors (#423)
- btcctl utility changes:
- Add gettxoutproof command (#428)
- Add verifytxoutproof command (#428)
- Notable developer-related package changes:
- The btcec package now provides the ability to perform ECDH
encryption and decryption (#375)
- The block and header validation in the blockchain package has been
split to help pave the way toward concurrent downloads (#386)
- Misc changes:
- Minor peer optimization (#433)
- Contributors (alphabetical order):
- Dave Collins
- David Hill
- Federico Bond
- Ishbir Singh
- Josh Rickmar
Changes in 0.11.0 (Wed May 06 2015)
- Protocol and network related changes:
- **IMPORTANT: Update is required due to the following point**
- Correct a few corner cases in script handling which could result in
forking from the network on non-standard transactions (#425)
- Add a new checkpoint at block height 352940 (#418)
- Optimized script execution (#395, #400, #404, #409)
- Fix a case that could lead to stalled syncs (#138, #296)
- Network address manager changes:
- Implement eclipse attack countermeasures as proposed in
http://cs-people.bu.edu/heilman/eclipse (#370, #373)
- Optional address indexing changes:
- Fix an issue where a reorg could cause an orderly shutdown when the
address index is active (#340, #357)
- Transaction relay (memory pool) changes:
- Increase maximum allowed space for nulldata transactions to 80 bytes
(#331)
- Implement support for the following rules specified by BIP0062:
- The S value in ECDSA signature must be at most half the curve order
(rule 5) (#349)
- Script execution must result in a single non-zero value on the stack
(rule 6) (#347)
- NOTE: All 7 rules of BIP0062 are now implemented
- Use network adjusted time in finalized transaction checks to improve
consistency across nodes (#332)
- Process orphan transactions on acceptance of new transactions (#345)
- RPC changes:
- Add support for a limited RPC user which is not allowed admin level
operations on the server (#363)
- Implement node command for more unified control over connected peers
(#79, #341)
- Implement generate command for regtest/simnet to support
deterministically mining a specified number of blocks (#362, #407)
- Update searchrawtransactions to return the matching transactions in
order (#354)
- Correct an issue with searchrawtransactions where it could return
duplicates (#346, #354)
- Increase precision of 'difficulty' field in getblock result to 8
(#414, #415)
- Omit 'nextblockhash' field from getblock result when it is empty
(#416, #417)
- Add 'id' and 'timeoffset' fields to getpeerinfo result (#335)
- Websocket changes:
- Implement new commands stopnotifyspent, stopnotifyreceived,
stopnotifyblocks, and stopnotifynewtransactions to allow clients to
cancel notification registrations (#122, #342)
- btcctl utility changes:
- A single dash can now be used as an argument to cause that argument to
be read from stdin (#348)
- Add generate command
- Notable developer-related package changes:
- The new version 2 btcjson package has now replaced the deprecated
version 1 package (#368)
- The btcec package now performs all signing using RFC6979 deterministic
signatures (#358, #360)
- The txscript package has been significantly cleaned up and had a few
API changes (#387, #388, #389, #390, #391, #392, #393, #395, #396,
#400, #403, #404, #405, #406, #408, #409, #410, #412)
- A new PkScriptLocs function has been added to the wire package MsgTx
type which provides callers that deal with scripts optimization
opportunities (#343)
- Misc changes:
- Minor wire hashing optimizations (#366, #367)
- Other minor internal optimizations
- Contributors (alphabetical order):
- Alex Akselrod
- Arne Brutschy
- Chris Jepson
- Daniel Krawisz
- Dave Collins
- David Hill
- Jimmy Song
- Jonas Nick
- Josh Rickmar
- Olaoluwa Osuntokun
- Oleg Andreev
Changes in 0.10.0 (Sun Mar 01 2015)
- Protocol and network related changes:
- Add a new checkpoint at block height 343185
- Implement BIP066 which includes support for version 3 blocks, a new
consensus rule which prevents non-DER encoded signatures, and a
double-threshold switchover mechanism
- Rather than announcing all known addresses on getaddr requests which
can possibly result in multiple messages, randomize the results and
limit them to the max allowed by a single message (1000 addresses)
- Add more reserved IP spaces to the address manager
- Transaction relay (memory pool) changes:
- Make transactions which contain reserved opcodes nonstandard
- No longer accept or relay free and low-fee transactions that have
insufficient priority to be mined in the next block
- Implement support for the following rules specified by BIP0062:
- ECDSA signature must use strict DER encoding (rule 1)
- The signature script must only contain push operations (rule 2)
- All push operations must use the smallest possible encoding (rule 3)
- All stack values interpreted as a number must be encoding using the
shortest possible form (rule 4)
- NOTE: Rule 1 was already enforced, however the entire script now
evaluates to false rather than only the signature verification as
required by BIP0062
- Allow transactions with nulldata transaction outputs to be treated as
standard
- Mining support changes:
- Modify the getblocktemplate RPC to generate and return block templates
for version 3 blocks which are compatible with BIP0066
- Allow getblocktemplate to serve blocks when the current time is
less than the minimum allowed time for a generated block template
(https://github.com/btcsuite/btcd/issues/209)
- Crypto changes:
- Optimize scalar multiplication by the base point by using a
pre-computed table which results in approximately a 35% speedup
(https://github.com/btcsuite/btcec/issues/2)
- Optimize general scalar multiplication by using the secp256k1
endomorphism which results in approximately a 17-20% speedup
(https://github.com/btcsuite/btcec/issues/1)
- Optimize general scalar multiplication by using non-adjacent form
which results in approximately an additional 8% speedup
(https://github.com/btcsuite/btcec/issues/3)
- Implement optional address indexing:
- Add a new parameter --addrindex which will enable the creation of an
address index which can be queried to determine all transactions which
involve a given address
(https://github.com/btcsuite/btcd/issues/190)
- Add a new logging subsystem for address index related operations
- Support new searchrawtransactions RPC
(https://github.com/btcsuite/btcd/issues/185)
- RPC changes:
- Require TLS version 1.2 as the minimum version for all TLS connections
- Provide support for disabling TLS when only listening on localhost
(https://github.com/btcsuite/btcd/pull/192)
- Modify help output for all commands to provide much more consistent
and detailed information
- Correct case in getrawtransaction which would refuse to serve certain
transactions with invalid scripts
(https://github.com/btcsuite/btcd/issues/210)
- Correct error handling in the getrawtransaction RPC which could lead
to a crash in rare cases
(https://github.com/btcsuite/btcd/issues/196)
- Update getinfo RPC to include the appropriate 'timeoffset' calculated
from the median network time
- Modify listreceivedbyaddress result type to include txids field so it
is compatible
- Add 'iswatchonly' field to validateaddress result
- Add 'startingpriority' and 'currentpriority' fields to getrawmempool
(https://github.com/btcsuite/btcd/issues/178)
- Don't omit the 'confirmations' field from getrawtransaction when it is
zero
- Websocket changes:
- Modify the behavior of the rescan command to automatically register
for notifications about transactions paying to rescanned addresses
or spending outputs from the final rescan utxo set when the rescan
is through the best block in the chain
- btcctl utility changes:
- Make the list of commands available via the -l option rather than
dumping the entire list on usage errors
- Alphabetize and categorize the list of commands by chain and wallet
- Make the help option only show the help options instead of also
dumping all of the commands
- Make the usage syntax much more consistent and correct a few cases of
misnamed fields
(https://github.com/btcsuite/btcd/issues/305)
- Improve usage errors to show the specific parameter number, reason,
and error code
- Only show the usage for the specific command when a valid command
is provided with invalid parameters
- Add support for a SOCK5 proxy
- Modify output for integer fields (such as timestamps) to display
normally instead in scientific notation
- Add invalidateblock command
- Add reconsiderblock command
- Add createnewaccount command
- Add renameaccount command
- Add searchrawtransactions command
- Add importaddress command
- Add importpubkey command
- showblock utility changes:
- Remove utility in favor of the RPC getblock method
- Notable developer-related package changes:
- Many of the core packages have been relocated into the btcd repository
(https://github.com/btcsuite/btcd/issues/214)
- A new version of the btcjson package that has been completely
redesigned from the ground up based upon how the project has
evolved and lessons learned while using it since it was first written
is now available in the btcjson/v2/btcjson directory
- This will ultimately replace the current version so anyone making
use of this package will need to update their code accordingly
- The btcec package now provides better facilities for working directly
with its public and private keys without having to mix elements from
the ecdsa package
- Update the script builder to ensure all rules specified by BIP0062 are
adhered to when creating scripts
- The blockchain package now provides a MedianTimeSource interface and
concrete implementation for providing time samples from remote peers
and using that data to calculate an offset against the local time
- Misc changes:
- Fix a slow memory leak due to tickers not being stopped
(https://github.com/btcsuite/btcd/issues/189)
- Fix an issue where a mix of orphans and SPV clients could trigger a
condition where peers would no longer be served
(https://github.com/btcsuite/btcd/issues/231)
- The RPC username and password can now contain symbols which previously
conflicted with special symbols used in URLs
- Improve handling of obtaining random nonces to prevent cases where it
could error when not enough entropy was available
- Improve handling of home directory creation errors such as in the case
of unmounted symlinks (https://github.com/btcsuite/btcd/issues/193)
- Improve the error reporting for rejected transactions to include the
inputs which are missing and/or being double spent
- Update sample config file with new options and correct a comment
regarding the fact the RPC server only listens on localhost by default
(https://github.com/btcsuite/btcd/issues/218)
- Update the continuous integration builds to run several tools which
help keep code quality high
- Significant amount of internal code cleanup and improvements
- Other minor internal optimizations
- Code Contributors (alphabetical order):
- Beldur
- Ben Holden-Crowther
- Dave Collins
- David Evans
- David Hill
- Guilherme Salgado
- Javed Khan
- Jimmy Song
- John C. Vernaleo
- Jonathan Gillham
- Josh Rickmar
- Michael Ford
- Michail Kargakis
- kac
- Olaoluwa Osuntokun
Changes in 0.9.0 (Sat Sep 20 2014)
- Protocol and network related changes:
- Add a new checkpoint at block height 319400
- Add support for BIP0037 bloom filters
(https://github.com/conformal/btcd/issues/132)
- Implement BIP0061 reject handling and hence support for protocol
version 70002 (https://github.com/conformal/btcd/issues/133)
- Add testnet DNS seeds for peer discovery (testnet-seed.alexykot.me
and testnet-seed.bitcoin.schildbach.de)
- Add mainnet DNS seed for peer discovery (seeds.bitcoin.open-nodes.org)
- Make multisig transactions with non-null dummy data nonstandard
(https://github.com/conformal/btcd/issues/131)
- Make transactions with an excessive number of signature operations
nonstandard
- Perform initial DNS lookups concurrently which allows connections to be
established more quickly
- Improve the address manager to significantly reduce memory usage and
add tests
- Remove orphan transactions when they appear in a mined block
(https://github.com/conformal/btcd/issues/166)
- Apply incremental back off on connection retries for persistent peers
that give invalid replies to mirror the logic used for failed
connections (https://github.com/conformal/btcd/issues/103)
- Correct rate-limiting of free and low-fee transactions
- Mining support changes:
- Implement getblocktemplate RPC with the following support:
(https://github.com/conformal/btcd/issues/124)
- BIP0022 Non-Optional Sections
- BIP0022 Long Polling
- BIP0023 Basic Pool Extensions
- BIP0023 Mutation coinbase/append
- BIP0023 Mutations time, time/increment, and time/decrement
- BIP0023 Mutation transactions/add
- BIP0023 Mutations prevblock, coinbase, and generation
- BIP0023 Block Proposals
- Implement built-in concurrent CPU miner
(https://github.com/conformal/btcd/issues/137)
NOTE: CPU mining on mainnet is pointless. This has been provided
for testing purposes such as for the new simulation test network
- Add --generate flag to enable CPU mining
- Deprecate the --getworkkey flag in favor of --miningaddr which
specifies which addresses generated blocks will choose from to pay
the subsidy to
- RPC changes:
- Implement gettxout command
(https://github.com/conformal/btcd/issues/141)
- Implement validateaddress command
- Implement verifymessage command
- Mark getunconfirmedbalance RPC as wallet-only
- Mark getwalletinfo RPC as wallet-only
- Update getgenerate, setgenerate, gethashespersec, and getmininginfo
to return the appropriate information about new CPU mining status
- Modify getpeerinfo pingtime and pingwait field types to float64 so
they are compatible
- Improve disconnect handling for normal HTTP clients
- Make error code returns for invalid hex more consistent
- Websocket changes:
- Switch to a new more efficient websocket package
(https://github.com/conformal/btcd/issues/134)
- Add rescanfinished notification
- Modify the rescanprogress notification to include block hash as well
as height (https://github.com/conformal/btcd/issues/151)
- btcctl utility changes:
- Accept --simnet flag which automatically selects the appropriate port
and TLS certificates needed to communicate with btcd and btcwallet on
the simulation test network
- Fix createrawtransaction command to send amounts denominated in BTC
- Add estimatefee command
- Add estimatepriority command
- Add getmininginfo command
- Add getnetworkinfo command
- Add gettxout command
- Add lockunspent command
- Add signrawtransaction command
- addblock utility changes:
- Accept --simnet flag which automatically selects the appropriate port
and TLS certificates needed to communicate with btcd and btcwallet on
the simulation test network
- Notable developer-related package changes:
- Provide a new bloom package in btcutil which allows creating and
working with BIP0037 bloom filters
- Provide a new hdkeychain package in btcutil which allows working with
BIP0032 hierarchical deterministic key chains
- Introduce a new btcnet package which houses network parameters
- Provide new simnet network (--simnet) which is useful for private
simulation testing
- Enforce low S values in serialized signatures as detailed in BIP0062
- Return errors from all methods on the btcdb.Db interface
(https://github.com/conformal/btcdb/issues/5)
- Allow behavior flags to alter btcchain.ProcessBlock
(https://github.com/conformal/btcchain/issues/5)
- Provide a new SerializeSize API for blocks
(https://github.com/conformal/btcwire/issues/19)
- Several of the core packages now work with Google App Engine
- Misc changes:
- Correct an issue where the database could corrupt under certain
circumstances which would require a new chain download
- Slightly optimize deserialization
- Use the correct IP block for he.net
- Fix an issue where it was possible the block manager could hang on
shutdown
- Update sample config file so the comments are on a separate line
rather than the end of a line so they are not interpreted as settings
(https://github.com/conformal/btcd/issues/135)
- Correct an issue where getdata requests were not being properly
throttled which could lead to larger than necessary memory usage
- Always show help when given the help flag even when the config file
contains invalid entries
- General code cleanup and minor optimizations
Changes in 0.8.0-beta (Sun May 25 2014)
- Btcd is now Beta (https://github.com/conformal/btcd/issues/130)
- Add a new checkpoint at block height 300255
- Protocol and network related changes:
- Lower the minimum transaction relay fee to 1000 satoshi to match
recent reference client changes
(https://github.com/conformal/btcd/issues/100)
- Raise the maximum signature script size to support standard 15-of-15
multi-signature pay-to-script-hash transactions with compressed pubkeys
to remain compatible with the reference client
(https://github.com/conformal/btcd/issues/128)
- Reduce max bytes allowed for a standard nulldata transaction to 40 for
compatibility with the reference client
- Introduce a new btcnet package which houses all of the network params
for each network (mainnet, testnet, regtest) to ultimately enable
easier addition and tweaking of networks without needing to change
several packages
- Fix several script discrepancies found by reference client test data
- Add new DNS seed for peer discovery (seed.bitnodes.io)
- Reduce the max known inventory cache from 20000 items to 1000 items
- Fix an issue where unknown inventory types could lead to a hung peer
- Implement inventory rebroadcast handler for sendrawtransaction
(https://github.com/conformal/btcd/issues/99)
- Update user agent to fully support BIP0014
(https://github.com/conformal/btcwire/issues/10)
- Implement initial mining support:
- Add a new logging subsystem for mining related operations
- Implement infrastructure for creating block templates
- Provide options to control block template creation settings
- Support the getwork RPC
- Allow address identifiers to apply to more than one network since both
testnet and the regression test network unfortunately use the same
identifier
- RPC changes:
- Set the content type for HTTP POST RPC connections to application/json
(https://github.com/conformal/btcd/issues/121)
- Modified the RPC server startup so that it requires at least one valid
listen interface
- Correct an error path where it was possible certain errors would not
be returned
- Implement getwork command
(https://github.com/conformal/btcd/issues/125)
- Update sendrawtransaction command to reject orphans
- Update sendrawtransaction command to include the reason a transaction
was rejected
- Update getinfo command to populate connection count field
- Update getinfo command to include relay fee field
(https://github.com/conformal/btcd/issues/107)
- Allow transactions submitted with sendrawtransaction to bypass the
rate limiter
- Allow the getcurrentnet and getbestblock extensions to be accessed via
HTTP POST in addition to Websockets
(https://github.com/conformal/btcd/issues/127)
- Websocket changes:
- Rework notifications to ensure they are delivered in the order they
occur
- Rename notifynewtxs command to notifyreceived (funds received)
- Rename notifyallnewtxs command to notifynewtransactions
- Rename alltx notification to txaccepted
- Rename allverbosetx notification to txacceptedverbose
(https://github.com/conformal/btcd/issues/98)
- Add rescan progress notification
- Add recvtx notification
- Add redeemingtx notification
- Modify notifyspent command to accept an array of outpoints
(https://github.com/conformal/btcd/issues/123)
- Significantly optimize the rescan command to yield up to a 60x speed
increase
- btcctl utility changes:
- Add createencryptedwallet command
- Add getblockchaininfo command
- Add importwallet command
- Add addmultisigaddress command
- Add setgenerate command
- Accept --testnet and --wallet flags which automatically select
the appropriate port and TLS certificates needed to communicate
with btcd and btcwallet (https://github.com/conformal/btcd/issues/112)
- Allow path expansion from config file entries
(https://github.com/conformal/btcd/issues/113)
- Minor refactor to simplify handling of options
- addblock utility changes:
- Improve logging by making it consistent with the logging provided by
btcd (https://github.com/conformal/btcd/issues/90)
- Improve several package APIs for developers:
- Add new amount type for consistently handling monetary values
- Add new coin selector API
- Add new WIF (Wallet Import Format) API
- Add new crypto types for private keys and signatures
- Add new API to sign transactions including script merging and hash
types
- Expose function to extract all pushed data from a script
(https://github.com/conformal/btcscript/issues/8)
- Misc changes:
- Optimize address manager shuffling to do 67% less work on average
- Resolve a couple of benign data races found by the race detector
(https://github.com/conformal/btcd/issues/101)
- Add IP address to all peer related errors to clarify which peer is the
cause (https://github.com/conformal/btcd/issues/102)
- Fix a UPNP case issue that prevented the --upnp option from working
with some UPNP servers
- Update documentation in the sample config file regarding debug levels
- Adjust some logging levels to improve debug messages
- Improve the throughput of query messages to the block manager
- Several minor optimizations to reduce GC churn and enhance speed
- Other minor refactoring
- General code cleanup
Changes in 0.7.0 (Thu Feb 20 2014)
- Fix an issue when parsing scripts which contain a multi-signature script
which require zero signatures such as testnet block
000000001881dccfeda317393c261f76d09e399e15e27d280e5368420f442632
(https://github.com/conformal/btcscript/issues/7)
- Add check to ensure all transactions accepted to mempool only contain
canonical data pushes (https://github.com/conformal/btcscript/issues/6)
- Fix an issue causing excessive memory consumption
- Significantly rework and improve the websocket notification system:
- Each client is now independent so slow clients no longer limit the
speed of other connected clients
- Potentially long-running operations such as rescans are now run in
their own handler and rate-limited to one operation at a time without
preventing simultaneous requests from the same client for the faster
requests or notifications
- A couple of scenarios which could cause shutdown to hang have been
resolved
- Update notifynewtx notifications to support all address types instead
of only pay-to-pubkey-hash
- Provide a --rpcmaxwebsockets option to allow limiting the number of
concurrent websocket clients
- Add a new websocket command notifyallnewtxs to request notifications
(https://github.com/conformal/btcd/issues/86) (thanks @flammit)
- Improve btcctl utility in the following ways:
- Add getnetworkhashps command
- Add gettransaction command (wallet-specific)
- Add signmessage command (wallet-specific)
- Update getwork command to accept
- Continue cleanup and work on implementing the RPC API:
- Implement getnettotals command
(https://github.com/conformal/btcd/issues/84)
- Implement networkhashps command
(https://github.com/conformal/btcd/issues/87)
- Update getpeerinfo to always include syncnode field even when false
- Remove help addenda for getpeerinfo now that it supports all fields
- Close standard RPC connections on auth failure
- Provide a --rpcmaxclients option to allow limiting the number of
concurrent RPC clients (https://github.com/conformal/btcd/issues/68)
- Include IP address in RPC auth failure log messages
- Resolve a rather harmless data race found by the race detector
(https://github.com/conformal/btcd/issues/94)
- Increase block priority size and max standard transaction size to 50k
and 100k, respectively (https://github.com/conformal/btcd/issues/71)
- Add rate limiting of free transactions to the memory pool to prevent
penny flooding (https://github.com/conformal/btcd/issues/40)
- Provide a --logdir option (https://github.com/conformal/btcd/issues/95)
- Change the default log file path to include the network
- Add a new ScriptBuilder interface to btcscript to support creation of
custom scripts (https://github.com/conformal/btcscript/issues/5)
- General code cleanup
Changes in 0.6.0 (Tue Feb 04 2014)
- Fix an issue when parsing scripts which contain invalid signatures that
caused a chain fork on block
0000000000000001e4241fd0b3469a713f41c5682605451c05d3033288fb2244
- Correct an issue which could lead to an error in removeBlockNode
(https://github.com/conformal/btcchain/issues/4)
- Improve addblock utility as follows:
- Check imported blocks against all chain rules and checkpoints
- Skip blocks which are already known so you can stop and restart the
import or start the import after you have already downloaded a portion
of the chain
- Correct an issue where the utility did not shutdown cleanly after
processing all blocks
- Add error on attempt to import orphan blocks
- Improve error handling and reporting
- Display statistics after input file has been fully processed
- Rework, optimize, and improve headers-first mode:
- Resuming the chain sync from any point before the final checkpoint
will now use headers-first mode
(https://github.com/conformal/btcd/issues/69)
- Verify all checkpoints as opposed to only the final one
- Reduce and bound memory usage
- Roll back to the last known good point when a header does not match a
checkpoint
- Log information about what is happening with headers
- Improve btcctl utility in the following ways:
- Add getaddednodeinfo command
- Add getnettotals command
- Add getblocktemplate command (wallet-specific)
- Add getwork command (wallet-specific)
- Add getnewaddress command (wallet-specific)
- Add walletpassphrasechange command (wallet-specific)
- Add walletlock command (wallet-specific)
- Add sendfrom command (wallet-specific)
- Add sendmany command (wallet-specific)
- Add settxfee command (wallet-specific)
- Add listsinceblock command (wallet-specific)
- Add listaccounts command (wallet-specific)
- Add keypoolrefill command (wallet-specific)
- Add getreceivedbyaccount command (wallet-specific)
- Add getrawchangeaddress command (wallet-specific)
- Add gettxoutsetinfo command (wallet-specific)
- Add listaddressgroupings command (wallet-specific)
- Add listlockunspent command (wallet-specific)
- Add listlock command (wallet-specific)
- Add listreceivedbyaccount command (wallet-specific)
- Add validateaddress command (wallet-specific)
- Add verifymessage command (wallet-specific)
- Add sendtoaddress command (wallet-specific)
- Continue cleanup and work on implementing the RPC API:
- Implement submitblock command
(https://github.com/conformal/btcd/issues/61)
- Implement help command
- Implement ping command
- Implement getaddednodeinfo command
(https://github.com/conformal/btcd/issues/78)
- Implement getinfo command
- Update getpeerinfo to support bytesrecv and bytessent
(https://github.com/conformal/btcd/issues/83)
- Improve and correct several RPC server and websocket areas:
- Change the connection endpoint for websockets from /wallet to /ws
(https://github.com/conformal/btcd/issues/80)
- Implement an alternative authentication for websockets so that clients
which can't set HTTP headers, such as JavaScript running in a browser,
can authenticate (https://github.com/conformal/btcd/issues/77)
- Add an authentication deadline for RPC connections
(https://github.com/conformal/btcd/issues/68)
- Use standard authentication failure responses for RPC connections
- Make automatically generated certificate more standard so it works
from clients such as node.js and Firefox
- Correct some minor issues which could prevent the RPC server from
shutting down in an orderly fashion
- Make all websocket notifications require registration
- Change the data sent over websockets to text since it is JSON-RPC
- Allow connections that do not have an Origin header set
- Expose and track the number of bytes read and written per peer
(https://github.com/conformal/btcwire/issues/6)
- Correct an issue with sendrawtransaction when invoked via websockets
which prevented a minedtx notification from being added
- Rescan operations issued from remote wallets are now stopped when
the wallet disconnects mid-operation
(https://github.com/conformal/btcd/issues/66)
- Several optimizations related to fetching block information from the
database
- General code cleanup
Changes in 0.5.0 (Mon Jan 13 2014)
- Optimize initial block download by introducing a new mode which
downloads the block headers first (up to the final checkpoint)
- Improve peer handling to remove the potential for slow peers to cause
sluggishness amongst all peers
(https://github.com/conformal/btcd/issues/63)
- Fix an issue where the initial block sync could stall when the sync peer
disconnects (https://github.com/conformal/btcd/issues/62)
- Correct an issue where --externalip was doing a DNS lookup on the full
host:port instead of just the host portion
(https://github.com/conformal/btcd/issues/38)
- Fix an issue which could lead to a panic on chain switches
(https://github.com/conformal/btcd/issues/70)
- Improve btcctl utility in the following ways:
- Show getdifficulty output as floating point to 6 digits of precision
- Show all JSON object replies formatted as standard JSON
- Allow btcctl getblock to accept optional params
- Add getaccount command (wallet-specific)
- Add getaccountaddress command (wallet-specific)
- Add sendrawtransaction command
- Continue cleanup and work on implementing RPC API calls
- Update getrawmempool to support new optional verbose flag
- Update getrawtransaction to match the reference client
- Update getblock to support new optional verbose flag
- Update raw transactions to fully match the reference client including
support for all transaction types and address types
- Correct getrawmempool fee field to return BTC instead of Satoshi
- Correct getpeerinfo service flag to return an 8-digit string so it
matches the reference client
- Correct verifychain to return a boolean
- Implement decoderawtransaction command
- Implement createrawtransaction command
- Implement decodescript command
- Implement gethashespersec command
- Allow RPC handler overrides when invoked via a websocket versus
legacy connection
- Add new DNS seed for peer discovery
- Display user agent on new valid peer log message
(https://github.com/conformal/btcd/issues/64)
- Notify wallet when new transactions that pay to registered addresses
show up in the mempool before being mined into a block
- Support a tor-specific proxy in addition to a normal proxy
(https://github.com/conformal/btcd/issues/47)
- Remove deprecated sqlite3 imports from utilities
- Remove leftover profile write from addblock utility
- Quite a bit of code cleanup and refactoring to improve maintainability
Changes in 0.4.0 (Thu Dec 12 2013)
- Allow listen interfaces to be specified via --listen instead of only the
port (https://github.com/conformal/btcd/issues/33)
- Allow listen interfaces for the RPC server to be specified via
--rpclisten instead of only the port
(https://github.com/conformal/btcd/issues/34)
- Only disable listening when --connect or --proxy are used and no
--listen interfaces are specified
(https://github.com/conformal/btcd/issues/10)
- Add several new standard transaction checks to transaction memory pool:
- Support nulldata scripts as standard
- Only allow a max of one nulldata output per transaction
- Enforce a maximum of 3 public keys in multi-signature transactions
- The number of signatures in multi-signature transactions must not
exceed the number of public keys
- The number of inputs to a signature script must match the expected
number of inputs for the script type
- The number of inputs pushed onto the stack by a redeeming signature
script must match the number of inputs consumed by the referenced
public key script
- When a block is connected, remove any transactions from the memory pool
which are now double spends as a result of the newly connected
transactions
- Don't relay transactions resurrected during a chain switch since
other peers will also be switching chains and therefore already know
about them
- Clean up a few cases where rejected transactions showed as an error
rather than as a rejected transaction
- Ignore the default configuration file when --regtest (regression test
mode) is specified
- Implement TLS support for RPC including automatic certificate generation
(see the sketch after this list)
- Support HTTP authentication headers for web sockets
- Update address manager to recognize and properly work with Tor
addresses (https://github.com/conformal/btcd/issues/36) and
(https://github.com/conformal/btcd/issues/37)
- Improve btcctl utility in the following ways:
- Add the ability to specify a configuration file
- Add a default entry for the RPC cert to point to the location
it will likely be in the btcd home directory
- Implement --version flag
- Provide a --notls option to support non-TLS configurations
- Fix a couple of minor races found by the Go race detector
- Improve logging
- Allow logging level to be specified on a per subsystem basis
(https://github.com/conformal/btcd/issues/48)
- Allow logging levels to be dynamically changed via RPC
(https://github.com/conformal/btcd/issues/15)
- Implement a rolling log file with a max of 10MB per file and a
rotation size of 3 which results in a max logging size of 30 MB
- Correct a minor issue with the rescanning websocket call
(https://github.com/conformal/btcd/issues/54)
- Fix a race with pushing address messages that could lead to a panic
(https://github.com/conformal/btcd/issues/58)
- Improve which external IP address is reported to peers based on which
interface they are connected through
(https://github.com/conformal/btcd/issues/35)
- Add --externalip option to allow an external IP address to be specified
for cases such as tor hidden services or advanced network configurations
(https://github.com/conformal/btcd/issues/38)
- Add --upnp option to support automatic port mapping via UPnP
(https://github.com/conformal/btcd/issues/51)
- Update Ctrl+C interrupt handler to properly sync address manager and
remove the UPnP port mapping (if needed)
- Continue cleanup and work on implementing RPC API calls
- Add importprivkey (import private key) command to btcctl
- Update getrawtransaction to provide addresses properly, support
new verbose param, and match the reference implementation with the
exception of MULTISIG (thanks @flammit)
- Update getblock with new verbose flag (thanks @flammit)
- Add listtransactions command to btcctl
- Add getbalance command to btcctl
- Add basic support for btcd to run as a native Windows service
(https://github.com/conformal/btcd/issues/42)
- Package addblock utility with Windows MSIs
- Add support for TravisCI (continuous build integration)
- Cleanup some documentation and usage
- Several other minor bug fixes and general code cleanup
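The automatic certificate generation mentioned in the TLS entry above comes
down to creating a self-signed certificate and key pair on first run. A
minimal sketch of the technique using only the Go standard library — an
illustration, not btcd's actual implementation; the file names and certificate
fields are assumptions:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"os"
	"time"
)

func main() {
	// Generate a fresh key pair for the RPC server.
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	// Self-signed certificate template valid for roughly ten years.
	template := x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{Organization: []string{"autogenerated rpc cert"}},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(10 * 365 * 24 * time.Hour),
		KeyUsage:              x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		IsCA:                  true,
		BasicConstraintsValid: true,
		DNSNames:              []string{"localhost"},
	}
	der, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
	if err != nil {
		panic(err)
	}

	// Write the certificate and key in PEM form for the RPC server to load.
	certOut, err := os.Create("rpc.cert")
	if err != nil {
		panic(err)
	}
	pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: der})
	certOut.Close()

	keyDER, err := x509.MarshalECPrivateKey(priv)
	if err != nil {
		panic(err)
	}
	keyOut, err := os.Create("rpc.key")
	if err != nil {
		panic(err)
	}
	pem.Encode(keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyDER})
	keyOut.Close()
}
```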
Changes in 0.3.3 (Wed Nov 13 2013)
- Significantly improve initial block chain download speed
(https://github.com/conformal/btcd/issues/20)
- Add a new checkpoint at block height 267300
- Optimize most recently used inventory handling
(https://github.com/conformal/btcd/issues/21)
- Optimize duplicate transaction input check
(https://github.com/conformal/btcchain/issues/2)
- Optimize transaction hashing
(https://github.com/conformal/btcd/issues/25)
- Rework and optimize wallet listener notifications
(https://github.com/conformal/btcd/issues/22)
- Optimize serialization and deserialization
(https://github.com/conformal/btcd/issues/27)
- Add support for minimum transaction fee to memory pool acceptance
(https://github.com/conformal/btcd/issues/29)
- Improve leveldb database performance by removing explicit GC call
- Fix an issue where Ctrl+C was not always finishing orderly database
shutdown
- Fix an issue in the script handling for OP_CHECKSIG
- Impose max limits on all variable length protocol entries to prevent
abuse from malicious peers
- Enforce DER signatures for transactions allowed into the memory pool
- Separate the debug profile http server from the RPC server
- Rework of the RPC code to improve performance and make the code cleaner
- The getrawtransaction RPC call now properly checks the memory pool
before consulting the db (https://github.com/conformal/btcd/issues/26)
- Add support for the following RPC calls: getpeerinfo, getconnectedcount,
addnode, verifychain
(https://github.com/conformal/btcd/issues/13)
(https://github.com/conformal/btcd/issues/17)
- Implement rescan websocket extension to allow wallet rescans
- Use correct paths for application data storage for all supported
operating systems (https://github.com/conformal/btcd/issues/30)
- Add a default redirect to the http profiling page when accessing the
http profile server
- Add a new --cpuprofile option which can be used to generate CPU
profiling data on platforms that support it
- Several other minor performance optimizations
- Other minor bug fixes and general code cleanup
Changes in 0.3.2 (Tue Oct 22 2013)
- Fix an issue that could cause the download of the block chain to stall
(https://github.com/conformal/btcd/issues/12)
- Remove deprecated sqlite as an available database backend
- Close sqlite compile issue as sqlite has now been removed
(https://github.com/conformal/btcd/issues/11)
- Change default RPC ports to 8334 (mainnet) and 18334 (testnet)
- Continue cleanup and work on implementing RPC API calls
- Add support for the following RPC calls: getrawmempool,
getbestblockhash, decoderawtransaction, getdifficulty,
getconnectioncount, getpeerinfo, and addnode
- Improve the btcctl utility that is used to issue JSON-RPC commands
- Fix an issue preventing btcd from cleanly shutting down with the RPC
stop command
- Add a number of database interface tests to ensure backends implement
the expected interface
- Expose some additional information from btcscript to be used for
identifying "standard" transactions
- Add support for plan9 - thanks @mischief
(https://github.com/conformal/btcd/pull/19)
- Other minor bug fixes and general code cleanup
Changes in 0.3.1-alpha (Tue Oct 15 2013)
- Change default database to leveldb
NOTE: This does mean you will have to redownload the block chain. Since we
are still in alpha, we didn't feel writing a converter was worth the time as
it would take away from more important issues at this stage
- Add a warning if there are multiple block chain databases of different types
- Fix issue with unexpected EOF in leveldb -- https://github.com/conformal/btcd/issues/18
- Fix issue preventing block 21066 on testnet -- https://github.com/conformal/btcchain/issues/1
- Fix issue preventing block 96464 on testnet -- https://github.com/conformal/btcscript/issues/1
- Optimize transaction lookups
- Correct a few cases of list removal that could result in improper cleanup
of no longer needed orphans
- Add functionality to increase ulimits on non-Windows platforms
- Add support for mempool command which allows remote peers to query the
transaction memory pool via the bitcoin protocol
- Clean up logging a bit
- Add a flag to disable checkpoints for developers
- Add a lot of useful debug logging such as message summaries
- Other minor bug fixes and general code cleanup
Initial Release 0.3.0-alpha (Sat Oct 05 2013):
- Initial release

Jenkinsfile

@@ -1,10 +0,0 @@
node {
stage 'Checkout'
checkout scm
stage 'Version'
sh './deploy.sh version'
stage 'Build'
sh "./deploy.sh build"
}


@@ -1,8 +1,9 @@
ISC License
Copyright (c) 2018-2019 DAGLabs
Copyright (c) 2018-2019 The kaspanet developers
Copyright (c) 2013-2018 The btcsuite developers
Copyright (c) 2015-2016 The Decred developers
Copyright (c) 2013-2014 Conformal Systems LLC.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above

README.md

@@ -1,49 +1,24 @@
btcd
Kaspad
====
Warning: This is pre-alpha software. There's no guarantee anything works.
====
[![Build Status](https://travis-ci.org/btcsuite/btcd.png?branch=master)](https://travis-ci.org/btcsuite/btcd)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad)
Kaspad is the reference full node Kaspa implementation written in Go (golang).
This project is currently under active development and is in a Beta state. It
is extremely stable and has been in production use since October 2013.
It properly downloads, validates, and serves the block chain using the exact
rules (including consensus bugs) for block acceptance as Bitcoin Core. We have
taken great care to avoid btcd causing a fork to the block chain. It includes a
full block validation testing framework which contains all of the 'official'
block acceptance tests (and some additional ones) that is run on every pull
request to help ensure it properly follows consensus. Also, it passes all of
the JSON test data in the Bitcoin Core code.
It also properly relays newly mined blocks, maintains a transaction pool, and
relays individual transactions that have not yet made it into a block. It
ensures all individual transactions admitted to the pool follow the rules
required by the block chain and also includes more strict checks which filter
transactions based on miner requirements ("standard" transactions).
One key difference between btcd and Bitcoin Core is that btcd does *NOT* include
wallet functionality and this was a very intentional design decision. See the
blog entry [here](https://blog.conformal.com/btcd-not-your-moms-bitcoin-daemon)
for more details. This means you can't actually make or receive payments
directly with btcd. That functionality is provided by the
[btcwallet](https://github.com/btcsuite/btcwallet) and
[Paymetheus](https://github.com/btcsuite/Paymetheus) (Windows-only) projects
which are both under active development.
This project is currently under active development and is in a pre-Alpha state.
Some things still don't work and APIs are far from finalized. The code is provided for reference only.
## Requirements
[Go](http://golang.org) 1.8 or newer.
Latest version of [Go](http://golang.org) (currently 1.13).
## Installation
#### Windows - MSI Available
https://github.com/kaspanet/kaspad/releases
#### Linux/BSD/MacOSX/POSIX - Build from Source
#### Build from Source
- Install Go according to the installation instructions here:
http://golang.org/doc/install
@@ -55,62 +30,39 @@ $ go version
$ go env GOROOT GOPATH
```
NOTE: The `GOROOT` and `GOPATH` above must not be the same path. It is
recommended that `GOPATH` is set to a directory in your home directory such as
`~/goprojects` to avoid write permission issues. It is also recommended to add
`~/dev/go` to avoid write permission issues. It is also recommended to add
`$GOPATH/bin` to your `PATH` at this point.
- Run the following commands to obtain btcd, all dependencies, and install it:
- Run the following commands to obtain and install kaspad including all dependencies:
```bash
$ # Install dep: https://golang.github.io/dep/docs/installation.html
$ git clone https://github.com/kaspanet/kaspad $GOPATH/src/github.com/kaspanet/kaspad
$ cd $GOPATH/src/github.com/kaspanet/kaspad
$ dep ensure
$ ./test.sh
$ go install . ./cmd/...
```
`./test.sh` tests can be skipped, but some things might not run correctly on your system if tests fail.
- btcd (and utilities) will now be installed in ```$GOPATH/bin```. If you did
- Kaspad (and utilities) should now be installed in `$GOPATH/bin`. If you did
not already add the bin directory to your system path during Go installation,
we recommend you do so now.
you are encouraged to do so now.
## Updating
#### Windows
Install a newer MSI
#### Linux/BSD/MacOSX/POSIX - Build from Source
- Run the following commands to update btcd, all dependencies, and install it:
```bash
$ cd $GOPATH/src/github.com/kaspanet/kaspad
$ git pull && dep ensure
$ go install . ./cmd/...
```
## Getting Started
btcd has several configuration options available to tweak how it runs, but all
of the basic operations described in the intro section work with zero
configuration.
#### Windows (Installed from MSI)
Launch btcd from your Start menu.
Kaspad has several configuration options available to tweak how it runs, but all
of the basic operations work with zero configuration.
#### Linux/BSD/POSIX/Source
```bash
$ ./btcd
$ ./kaspad
```
## IRC
- irc.freenode.net
- channel #btcd
- [webchat](https://webchat.freenode.net/?channels=btcd)
## Discord
Join our discord server using the following link: https://discord.gg/WmGhhzk
## Issue Tracker
@@ -119,28 +71,9 @@ is used for this project.
## Documentation
The documentation is a work-in-progress. It is located in the [docs](https://github.com/kaspanet/kaspad/tree/master/docs) folder.
## GPG Verification Key
All official release tags are signed by Conformal so users can ensure the code
has not been tampered with and is coming from the btcsuite developers. To
verify the signature perform the following:
- Download the Conformal public key:
https://raw.githubusercontent.com/btcsuite/btcd/master/release/GIT-GPG-KEY-conformal.txt
- Import the public key into your GPG keyring:
```bash
gpg --import GIT-GPG-KEY-conformal.txt
```
- Verify the release tag with the following command where `TAG_NAME` is a
placeholder for the specific tag:
```bash
git tag -v TAG_NAME
```
## License
btcd is licensed under the [copyfree](http://copyfree.org) ISC License.
Kaspad is licensed under the copyfree [ISC License](https://choosealicense.com/licenses/isc/).

File diff suppressed because it is too large


@@ -1,17 +0,0 @@
#!/bin/sh
# This script uses gocov to generate a test coverage report.
# The gocov tool may be obtained with the following command:
# go get github.com/axw/gocov/gocov
#
# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH.
# Check for gocov.
type gocov >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo >&2 "This script requires the gocov tool."
echo >&2 "You may obtain it with the following command:"
echo >&2 "go get github.com/axw/gocov/gocov"
exit 1
fi
gocov test | gocov report


@@ -1,38 +0,0 @@
// Copyright (c) 2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
/*
Package addrmgr implements concurrency safe Bitcoin address manager.
Address Manager Overview
In order to maintain the peer-to-peer Bitcoin network, there needs to be a source
of addresses to connect to as nodes come and go. The Bitcoin protocol provides
the getaddr and addr messages to allow peers to communicate known addresses with
each other. However, there needs to be a mechanism to store those results and
select peers from them. It is also important to note that remote peers can't
be trusted to send valid addresses, and may maliciously attempt to provide you
with only peers they control.
With that in mind, this package provides a concurrency safe address manager for
caching and selecting peers in a non-deterministic manner. The general idea is
the caller adds addresses to the address manager and notifies it when addresses
are connected, known good, and attempted. The caller also requests addresses as
it needs them.
The address manager internally segregates the addresses into groups and
non-deterministically selects groups in a cryptographically random manner. This
reduces the chances that multiple addresses from the same networks are selected, which
generally helps provide greater peer diversity, and perhaps more importantly,
drastically reduces the chances an attacker is able to coerce your peer into
only connecting to nodes they control.
The address manager also understands routability and Tor addresses and tries
hard to only return routable addresses. In addition, it uses the information
provided by the caller about connected, known good, and attempted addresses to
periodically purge peers which no longer appear to be good peers as well as
bias the selection toward known good peers. The general idea is to make a best
effort at only providing usable addresses.
*/
package addrmgr
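The add / attempt / connected / good / request lifecycle described above can be
sketched from the caller's side. The AddressBook interface below is our own
stand-in for the manager's surface (the concrete method names and signatures
differ between btcd and kaspad versions), meant only to show the shape of a
typical consumer:

```go
package main

import "net"

// AddressBook is a sketch of the caller-facing surface described above;
// it is not the package's actual API.
type AddressBook interface {
	AddAddress(addr, src *net.TCPAddr) // learned from an addr message
	Attempt(addr *net.TCPAddr)         // we are about to dial it
	Connected(addr *net.TCPAddr)       // the dial succeeded
	Good(addr *net.TCPAddr)            // the protocol handshake completed
	GetAddress() *net.TCPAddr          // pick a candidate to dial next
}

// connectLoop shows the shape of a typical caller: request candidates,
// report attempts, and feed back successes so selection improves.
func connectLoop(book AddressBook, dial func(*net.TCPAddr) bool) {
	for i := 0; i < 8; i++ {
		candidate := book.GetAddress()
		if candidate == nil {
			return // no routable candidates available
		}
		book.Attempt(candidate)
		if dial(candidate) {
			book.Connected(candidate)
			book.Good(candidate)
		}
	}
}

func main() {}
```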


@@ -1,25 +0,0 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package addrmgr
import (
"time"
"github.com/kaspanet/kaspad/wire"
)
func TstKnownAddressIsBad(ka *KnownAddress) bool {
return ka.isBad()
}
func TstKnownAddressChance(ka *KnownAddress) float64 {
return ka.chance()
}
func TstNewKnownAddress(na *wire.NetAddress, attempts int,
lastattempt, lastsuccess time.Time, tried bool, refs int) *KnownAddress {
return &KnownAddress{na: na, attempts: attempts, lastattempt: lastattempt,
lastsuccess: lastsuccess, tried: tried, refs: refs}
}


@@ -1,114 +0,0 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package addrmgr_test
import (
"math"
"testing"
"time"
"github.com/kaspanet/kaspad/addrmgr"
"github.com/kaspanet/kaspad/wire"
)
func TestChance(t *testing.T) {
now := time.Unix(time.Now().Unix(), 0)
var tests = []struct {
addr *addrmgr.KnownAddress
expected float64
}{
{
//Test normal case
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
0, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
1.0,
}, {
//Test case in which lastseen < 0
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(20 * time.Second)},
0, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
1.0,
}, {
//Test case in which lastattempt < 0
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
0, time.Now().Add(30*time.Minute), time.Now(), false, 0),
1.0 * .01,
}, {
//Test case in which lastattempt < ten minutes
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
0, time.Now().Add(-5*time.Minute), time.Now(), false, 0),
1.0 * .01,
}, {
//Test case with several failed attempts.
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
2, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
1 / 1.5 / 1.5,
},
}
err := .0001
for i, test := range tests {
chance := addrmgr.TstKnownAddressChance(test.addr)
if math.Abs(test.expected-chance) >= err {
t.Errorf("case %d: got %f, expected %f", i, chance, test.expected)
}
}
}
func TestIsBad(t *testing.T) {
now := time.Unix(time.Now().Unix(), 0)
future := now.Add(35 * time.Minute)
monthOld := now.Add(-43 * time.Hour * 24)
secondsOld := now.Add(-2 * time.Second)
minutesOld := now.Add(-27 * time.Minute)
hoursOld := now.Add(-5 * time.Hour)
zeroTime := time.Time{}
futureNa := &wire.NetAddress{Timestamp: future}
minutesOldNa := &wire.NetAddress{Timestamp: minutesOld}
monthOldNa := &wire.NetAddress{Timestamp: monthOld}
currentNa := &wire.NetAddress{Timestamp: secondsOld}
//Test addresses that have been tried in the last minute.
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(futureNa, 3, secondsOld, zeroTime, false, 0)) {
t.Errorf("test case 1: addresses that have been tried in the last minute are not bad.")
}
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(monthOldNa, 3, secondsOld, zeroTime, false, 0)) {
t.Errorf("test case 2: addresses that have been tried in the last minute are not bad.")
}
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 3, secondsOld, zeroTime, false, 0)) {
t.Errorf("test case 3: addresses that have been tried in the last minute are not bad.")
}
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 3, secondsOld, monthOld, true, 0)) {
t.Errorf("test case 4: addresses that have been tried in the last minute are not bad.")
}
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 2, secondsOld, secondsOld, true, 0)) {
t.Errorf("test case 5: addresses that have been tried in the last minute are not bad.")
}
//Test address that claims to be from the future.
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(futureNa, 0, minutesOld, hoursOld, true, 0)) {
t.Errorf("test case 6: addresses that claim to be from the future are bad.")
}
//Test address that has not been seen in over a month.
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(monthOldNa, 0, minutesOld, hoursOld, true, 0)) {
t.Errorf("test case 7: addresses more than a month old are bad.")
}
//It has failed at least three times and never succeeded.
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 3, minutesOld, zeroTime, true, 0)) {
t.Errorf("test case 8: addresses that have never succeeded are bad.")
}
//It has failed ten times in the last week
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 10, minutesOld, monthOld, true, 0)) {
t.Errorf("test case 9: addresses that have not succeeded in too long are bad.")
}
//Test an address that should work.
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 2, minutesOld, hoursOld, true, 0)) {
t.Errorf("test case 10: This should be a valid address.")
}
}
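The expected values in TestChance above encode a simple selection heuristic: a
base chance of 1.0, cut to 1% when the address was attempted within the last
ten minutes, and divided by 1.5 per failed attempt. A self-contained sketch
reconstructed from those expectations (an approximation inferred from the test,
not the package's actual chance implementation):

```go
package main

import (
	"fmt"
	"time"
)

// chance approximates the selection probability the test expects:
// 1.0 baseline, *0.01 if attempted within the last ten minutes,
// /1.5 for every recorded failed attempt.
func chance(attempts int, lastAttempt time.Time) float64 {
	c := 1.0
	if time.Since(lastAttempt) < 10*time.Minute {
		c *= 0.01
	}
	for i := 0; i < attempts; i++ {
		c /= 1.5
	}
	return c
}

func main() {
	fmt.Println(chance(0, time.Now().Add(-30*time.Minute))) // 1.0
	fmt.Println(chance(0, time.Now().Add(-5*time.Minute)))  // 0.01
	fmt.Println(chance(2, time.Now().Add(-30*time.Minute))) // ≈ 0.444 (1/1.5/1.5)
}
```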


@@ -1,62 +0,0 @@
github.com/conformal/btcd/addrmgr/network.go GroupKey 100.00% (23/23)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.reset 100.00% (6/6)
github.com/conformal/btcd/addrmgr/network.go IsRFC5737 100.00% (4/4)
github.com/conformal/btcd/addrmgr/network.go IsRFC1918 100.00% (4/4)
github.com/conformal/btcd/addrmgr/addrmanager.go New 100.00% (3/3)
github.com/conformal/btcd/addrmgr/addrmanager.go NetAddressKey 100.00% (2/2)
github.com/conformal/btcd/addrmgr/network.go IsRFC4862 100.00% (1/1)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.numAddresses 100.00% (1/1)
github.com/conformal/btcd/addrmgr/log.go init 100.00% (1/1)
github.com/conformal/btcd/addrmgr/log.go DisableLog 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go ipNet 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsIPv4 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsLocal 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsOnionCatTor 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC2544 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC3849 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC3927 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC3964 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC4193 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC4380 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC4843 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC6052 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC6145 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC6598 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsValid 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRoutable 100.00% (1/1)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.GetBestLocalAddress 94.74% (18/19)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddLocalAddress 90.91% (10/11)
github.com/conformal/btcd/addrmgr/addrmanager.go getReachabilityFrom 51.52% (17/33)
github.com/conformal/btcd/addrmgr/addrmanager.go ipString 50.00% (2/4)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.GetAddress 9.30% (4/43)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.deserializePeers 0.00% (0/50)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Good 0.00% (0/44)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.savePeers 0.00% (0/39)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.updateAddress 0.00% (0/30)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.expireNew 0.00% (0/22)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddressCache 0.00% (0/16)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.HostToNetAddress 0.00% (0/15)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.getNewBucket 0.00% (0/15)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddAddressByIP 0.00% (0/14)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.getTriedBucket 0.00% (0/14)
github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.chance 0.00% (0/13)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.loadPeers 0.00% (0/11)
github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.isBad 0.00% (0/11)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Connected 0.00% (0/10)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.addressHandler 0.00% (0/9)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.pickTried 0.00% (0/8)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.DeserializeNetAddress 0.00% (0/7)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Stop 0.00% (0/7)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Attempt 0.00% (0/7)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Start 0.00% (0/6)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddAddresses 0.00% (0/4)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.NeedMoreAddresses 0.00% (0/3)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.NumAddresses 0.00% (0/3)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddAddress 0.00% (0/3)
github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.LastAttempt 0.00% (0/1)
github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.NetAddress 0.00% (0/1)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.find 0.00% (0/1)
github.com/conformal/btcd/addrmgr/log.go UseLogger 0.00% (0/1)
github.com/conformal/btcd/addrmgr --------------------------------- 21.04% (113/537)

app/app.go

@@ -0,0 +1,247 @@
package app
import (
"fmt"
"sync/atomic"
"github.com/kaspanet/kaspad/network/addressmanager"
"github.com/kaspanet/kaspad/network/netadapter/id"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/domain/blockdag/indexers"
"github.com/kaspanet/kaspad/domain/mempool"
"github.com/kaspanet/kaspad/domain/mining"
"github.com/kaspanet/kaspad/domain/txscript"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/dbaccess"
"github.com/kaspanet/kaspad/infrastructure/signal"
"github.com/kaspanet/kaspad/network/connmanager"
"github.com/kaspanet/kaspad/network/dnsseed"
"github.com/kaspanet/kaspad/network/domainmessage"
"github.com/kaspanet/kaspad/network/netadapter"
"github.com/kaspanet/kaspad/network/protocol"
"github.com/kaspanet/kaspad/network/rpc"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/panics"
)
// App is a wrapper for all the kaspad services
type App struct {
cfg *config.Config
rpcServer *rpc.Server
addressManager *addressmanager.AddressManager
protocolManager *protocol.Manager
connectionManager *connmanager.ConnectionManager
netAdapter *netadapter.NetAdapter
started, shutdown int32
}
// Start launches all the kaspad services.
func (a *App) Start() {
// Already started?
if atomic.AddInt32(&a.started, 1) != 1 {
return
}
log.Trace("Starting kaspad")
err := a.protocolManager.Start()
if err != nil {
panics.Exit(log, fmt.Sprintf("Error starting the p2p protocol: %+v", err))
}
a.maybeSeedFromDNS()
a.connectionManager.Start()
if !a.cfg.DisableRPC {
a.rpcServer.Start()
}
}
// Stop gracefully shuts down all the kaspad services.
func (a *App) Stop() {
// Make sure this only happens once.
if atomic.AddInt32(&a.shutdown, 1) != 1 {
log.Infof("Kaspad is already in the process of shutting down")
return
}
log.Warnf("Kaspad shutting down")
a.connectionManager.Stop()
err := a.protocolManager.Stop()
if err != nil {
log.Errorf("Error stopping the p2p protocol: %+v", err)
}
// Shutdown the RPC server if it's not disabled.
if !a.cfg.DisableRPC {
err := a.rpcServer.Stop()
if err != nil {
log.Errorf("Error stopping rpcServer: %+v", err)
}
}
err = a.addressManager.Stop()
if err != nil {
log.Errorf("Error stopping address manager: %s", err)
}
}
// New returns a new App instance configured according to the given cfg
// for the kaspa network type specified by its network parameters. Use
// Start to begin accepting connections from peers.
func New(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, interrupt <-chan struct{}) (*App, error) {
indexManager, acceptanceIndex := setupIndexes(cfg)
sigCache := txscript.NewSigCache(cfg.SigCacheMaxSize)
// Create a new block DAG instance with the appropriate configuration.
dag, err := setupDAG(cfg, databaseContext, interrupt, sigCache, indexManager)
if err != nil {
return nil, err
}
txMempool := setupMempool(cfg, dag, sigCache)
netAdapter, err := netadapter.NewNetAdapter(cfg)
if err != nil {
return nil, err
}
addressManager := addressmanager.New(cfg, databaseContext)
connectionManager, err := connmanager.New(cfg, netAdapter, addressManager)
if err != nil {
return nil, err
}
protocolManager, err := protocol.NewManager(cfg, dag, netAdapter, addressManager, txMempool, connectionManager)
if err != nil {
return nil, err
}
rpcServer, err := setupRPC(
cfg, dag, txMempool, sigCache, acceptanceIndex, connectionManager, addressManager, protocolManager)
if err != nil {
return nil, err
}
return &App{
cfg: cfg,
rpcServer: rpcServer,
protocolManager: protocolManager,
connectionManager: connectionManager,
netAdapter: netAdapter,
addressManager: addressManager,
}, nil
}
func (a *App) maybeSeedFromDNS() {
if !a.cfg.DisableDNSSeed {
dnsseed.SeedFromDNS(a.cfg.NetParams(), a.cfg.DNSSeed, domainmessage.SFNodeNetwork, false, nil,
a.cfg.Lookup, func(addresses []*domainmessage.NetAddress) {
// Kaspad uses a lookup of the DNS seeder here. Since the seeder returns
// IPs of nodes and not its own IP, we cannot know the real IP of the
// source, so we take the first returned address as the source.
a.addressManager.AddAddresses(addresses, addresses[0], nil)
})
}
}
func setupDAG(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, interrupt <-chan struct{},
sigCache *txscript.SigCache, indexManager blockdag.IndexManager) (*blockdag.BlockDAG, error) {
dag, err := blockdag.New(&blockdag.Config{
Interrupt: interrupt,
DatabaseContext: databaseContext,
DAGParams: cfg.NetParams(),
TimeSource: blockdag.NewTimeSource(),
SigCache: sigCache,
IndexManager: indexManager,
SubnetworkID: cfg.SubnetworkID,
})
return dag, err
}
func setupIndexes(cfg *config.Config) (blockdag.IndexManager, *indexers.AcceptanceIndex) {
// Create indexes if needed.
var indexes []indexers.Indexer
var acceptanceIndex *indexers.AcceptanceIndex
if cfg.AcceptanceIndex {
log.Info("acceptance index is enabled")
acceptanceIndex = indexers.NewAcceptanceIndex()
indexes = append(indexes, acceptanceIndex)
}
// Create an index manager if any of the optional indexes are enabled.
if len(indexes) == 0 {
return nil, nil
}
indexManager := indexers.NewManager(indexes)
return indexManager, acceptanceIndex
}
func setupMempool(cfg *config.Config, dag *blockdag.BlockDAG, sigCache *txscript.SigCache) *mempool.TxPool {
mempoolConfig := mempool.Config{
Policy: mempool.Policy{
AcceptNonStd: cfg.RelayNonStd,
MaxOrphanTxs: cfg.MaxOrphanTxs,
MaxOrphanTxSize: config.DefaultMaxOrphanTxSize,
MinRelayTxFee: cfg.MinRelayTxFee,
MaxTxVersion: 1,
},
CalcSequenceLockNoLock: func(tx *util.Tx, utxoSet blockdag.UTXOSet) (*blockdag.SequenceLock, error) {
return dag.CalcSequenceLockNoLock(tx, utxoSet)
},
SigCache: sigCache,
DAG: dag,
}
return mempool.New(&mempoolConfig)
}
func setupRPC(cfg *config.Config,
dag *blockdag.BlockDAG,
txMempool *mempool.TxPool,
sigCache *txscript.SigCache,
acceptanceIndex *indexers.AcceptanceIndex,
connectionManager *connmanager.ConnectionManager,
addressManager *addressmanager.AddressManager,
protocolManager *protocol.Manager) (*rpc.Server, error) {
if !cfg.DisableRPC {
policy := mining.Policy{
BlockMaxMass: cfg.BlockMaxMass,
}
blockTemplateGenerator := mining.NewBlkTmplGenerator(&policy, txMempool, dag, sigCache)
rpcServer, err := rpc.NewRPCServer(cfg, dag, txMempool, acceptanceIndex, blockTemplateGenerator,
connectionManager, addressManager, protocolManager)
if err != nil {
return nil, err
}
// Signal process shutdown when the RPC server requests it.
spawn("setupRPC-handleShutdownRequest", func() {
<-rpcServer.RequestedProcessShutdown()
signal.ShutdownRequestChannel <- struct{}{}
})
return rpcServer, nil
}
return nil, nil
}
// P2PNodeID returns the network ID associated with this App
func (a *App) P2PNodeID() *id.ID {
return a.netAdapter.ID()
}
// AddressManager returns the AddressManager associated with this App
func (a *App) AddressManager() *addressmanager.AddressManager {
return a.addressManager
}
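Taken together, New, Start, and Stop give an embedding binary a compact
lifecycle. A minimal sketch of a caller, assuming the surrounding binary
supplies the config, database context, and interrupt channel (only app.New,
Start, and Stop below come from the code above):

```go
package main

import (
	"github.com/kaspanet/kaspad/app"
	"github.com/kaspanet/kaspad/infrastructure/config"
	"github.com/kaspanet/kaspad/infrastructure/dbaccess"
)

// runApp wires the App lifecycle: construct, start, block until an
// interrupt is requested, then shut down.
func runApp(cfg *config.Config, dbContext *dbaccess.DatabaseContext, interrupt chan struct{}) error {
	kaspadApp, err := app.New(cfg, dbContext, interrupt)
	if err != nil {
		return err
	}
	kaspadApp.Start()
	defer kaspadApp.Stop()

	<-interrupt // e.g. closed by a SIGINT handler
	return nil
}

func main() {}
```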

app/log.go

@@ -0,0 +1,14 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Copyright (c) 2017 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package app
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log, _ = logger.Get(logger.SubsystemTags.KASD)
var spawn = panics.GoroutineWrapperFunc(log)


@@ -1,103 +0,0 @@
blockchain
==========
[![Build Status](http://img.shields.io/travis/btcsuite/btcd.svg)](https://travis-ci.org/btcsuite/btcd)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad/blockchain)
Package blockchain implements bitcoin block handling and chain selection rules.
The test coverage is currently only around 60%, but will be increasing over
time. See `test_coverage.txt` for the gocov coverage report. Alternatively, if
you are running a POSIX OS, you can run the `cov_report.sh` script for a
real-time report. Package blockchain is licensed under the liberal ISC license.
There is an associated blog post about the release of this package
[here](https://blog.conformal.com/btcchain-the-bitcoin-chain-package-from-bctd/).
This package has intentionally been designed so it can be used as a standalone
package for any projects needing to handle processing of blocks into the bitcoin
block chain.
## Installation and Updating
```bash
$ go get -u github.com/kaspanet/kaspad/blockchain
```
## Bitcoin Chain Processing Overview
Before a block is allowed into the block chain, it must go through an intensive
series of validation rules. The following list serves as a general outline of
those rules to provide some intuition into what is going on under the hood, but
is by no means exhaustive:
- Reject duplicate blocks
- Perform a series of sanity checks on the block and its transactions such as
verifying proof of work, timestamps, number and character of transactions,
transaction amounts, script complexity, and merkle root calculations
- Compare the block against predetermined checkpoints for expected timestamps
and difficulty based on elapsed time since the checkpoint
- Save the most recent orphan blocks for a limited time in case their parent
blocks become available
- Stop processing if the block is an orphan as the rest of the processing
depends on the block's position within the block chain
- Perform a series of more thorough checks that depend on the block's position
within the block chain such as verifying block difficulties adhere to
difficulty retarget rules, timestamps are after the median of the last
several blocks, all transactions are finalized, checkpoint blocks match, and
block versions are in line with the previous blocks
- Determine how the block fits into the chain and perform different actions
accordingly in order to ensure any side chains which have higher difficulty
than the main chain become the new main chain
- When a block is being connected to the main chain (either through
reorganization of a side chain to the main chain or just extending the
main chain), perform further checks on the block's transactions such as
verifying transaction duplicates, script complexity for the combination of
connected scripts, coinbase maturity, double spends, and connected
transaction values
- Run the transaction scripts to verify the spender is allowed to spend the
coins
- Insert the block into the block database
## Examples
* [ProcessBlock Example](http://godoc.org/github.com/kaspanet/kaspad/blockchain#example-BlockChain-ProcessBlock)
Demonstrates how to create a new chain instance and use ProcessBlock to
attempt to add a block to the chain. This example intentionally
attempts to insert a duplicate genesis block to illustrate how an invalid
block is handled.
* [CompactToBig Example](http://godoc.org/github.com/kaspanet/kaspad/blockchain#example-CompactToBig)
Demonstrates how to convert the compact "bits" in a block header which
represent the target difficulty to a big integer and display it using the
typical hex notation.
* [BigToCompact Example](http://godoc.org/github.com/kaspanet/kaspad/blockchain#example-BigToCompact)
Demonstrates how to convert a target difficulty into the
compact "bits" in a block header which represent that target difficulty.
## GPG Verification Key
All official release tags are signed by Conformal so users can ensure the code
has not been tampered with and is coming from the btcsuite developers. To
verify the signature perform the following:
- Download the public key from the Conformal website at
https://opensource.conformal.com/GIT-GPG-KEY-conformal.txt
- Import the public key into your GPG keyring:
```bash
gpg --import GIT-GPG-KEY-conformal.txt
```
- Verify the release tag with the following command where `TAG_NAME` is a
placeholder for the specific tag:
```bash
git tag -v TAG_NAME
```
## License
Package blockchain is licensed under the [copyfree](http://copyfree.org) ISC
License.


@@ -1,130 +0,0 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"fmt"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
)
func (dag *BlockDAG) addNodeToIndexWithInvalidAncestor(block *util.Block) error {
blockHeader := &block.MsgBlock().Header
newNode := newBlockNode(blockHeader, newSet(), dag.dagParams.K)
newNode.status = statusInvalidAncestor
dag.index.AddNode(newNode)
return dag.index.flushToDB()
}
// maybeAcceptBlock potentially accepts a block into the block DAG. It
// performs several validation checks which depend on its position within
// the block DAG before adding it. The block is expected to have already
// gone through ProcessBlock before calling this function with it.
//
// The flags are also passed to checkBlockContext and connectToDAG. See
// their documentation for how the flags modify their behavior.
//
// This function MUST be called with the dagLock held (for writes).
func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) error {
parents, err := lookupParentNodes(block, dag)
if err != nil {
if rErr, ok := err.(RuleError); ok && rErr.ErrorCode == ErrInvalidAncestorBlock {
err := dag.addNodeToIndexWithInvalidAncestor(block)
if err != nil {
return err
}
}
return err
}
// The block must pass all of the validation rules which depend on the
// position of the block within the block DAG.
err = dag.checkBlockContext(block, parents, flags)
if err != nil {
return err
}
// Create a new block node for the block and add it to the node index.
newNode := newBlockNode(&block.MsgBlock().Header, parents, dag.dagParams.K)
newNode.status = statusDataStored
dag.index.AddNode(newNode)
// Insert the block into the database if it's not already there. Even
// though it is possible the block will ultimately fail to connect, it
// has already passed all proof-of-work and validity tests which means
// it would be prohibitively expensive for an attacker to fill up the
// disk with a bunch of blocks that fail to connect. This is necessary
// since it allows block download to be decoupled from the much more
// expensive connection logic. It also has some other nice properties
// such as making blocks that never become part of the DAG or
// blocks that fail to connect available for further analysis.
err = dag.db.Update(func(dbTx database.Tx) error {
err := dbStoreBlock(dbTx, block)
if err != nil {
return err
}
return dag.index.flushToDBWithTx(dbTx)
})
if err != nil {
return err
}
// Make sure that all the block's transactions are finalized
fastAdd := flags&BFFastAdd == BFFastAdd
bluestParent := parents.bluest()
if !fastAdd {
if err := dag.validateAllTxsFinalized(block, newNode, bluestParent); err != nil {
return err
}
}
block.SetChainHeight(newNode.chainHeight)
// Connect the passed block to the DAG. This also handles validation of the
// transaction scripts.
chainUpdates, err := dag.addBlock(newNode, parents, block, flags)
if err != nil {
return err
}
// Notify the caller that the new block was accepted into the block
// DAG. The caller would typically want to react by relaying the
// inventory to other peers.
dag.dagLock.Unlock()
dag.sendNotification(NTBlockAdded, &BlockAddedNotificationData{
Block: block,
WasUnorphaned: flags&BFWasUnorphaned != 0,
})
if len(chainUpdates.addedChainBlockHashes) > 0 {
dag.sendNotification(NTChainChanged, &ChainChangedNotificationData{
RemovedChainBlockHashes: chainUpdates.removedChainBlockHashes,
AddedChainBlockHashes: chainUpdates.addedChainBlockHashes,
})
}
dag.dagLock.Lock()
return nil
}
func lookupParentNodes(block *util.Block, blockDAG *BlockDAG) (blockSet, error) {
header := block.MsgBlock().Header
parentHashes := header.ParentHashes
nodes := newSet()
for _, parentHash := range parentHashes {
node := blockDAG.index.LookupNode(parentHash)
if node == nil {
str := fmt.Sprintf("parent block %s is unknown", parentHashes)
return nil, ruleError(ErrParentBlockUnknown, str)
} else if blockDAG.index.NodeStatus(node).KnownInvalid() {
str := fmt.Sprintf("parent block %s is known to be invalid", parentHashes)
return nil, ruleError(ErrInvalidAncestorBlock, str)
}
nodes.add(node)
}
return nodes, nil
}


@@ -1,143 +0,0 @@
package blockdag
import (
"github.com/pkg/errors"
"path/filepath"
"strings"
"testing"
"bou.ke/monkey"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
)
func TestMaybeAcceptBlockErrors(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestMaybeAcceptBlockErrors", Config{
DAGParams: &dagconfig.SimNetParams,
})
if err != nil {
t.Fatalf("TestMaybeAcceptBlockErrors: Failed to setup DAG instance: %v", err)
}
defer teardownFunc()
dag.TestSetCoinbaseMaturity(0)
// Test rejecting the block if its parents are missing
orphanBlockFile := "blk_3B.dat"
loadedBlocks, err := LoadBlocks(filepath.Join("testdata/", orphanBlockFile))
if err != nil {
t.Fatalf("TestMaybeAcceptBlockErrors: "+
"Error loading file '%s': %s\n", orphanBlockFile, err)
}
block := loadedBlocks[0]
err = dag.maybeAcceptBlock(block, BFNone)
if err == nil {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
"Expected: %s, got: <nil>", ErrParentBlockUnknown)
}
ruleErr, ok := err.(RuleError)
if !ok {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
"Expected RuleError but got %s", err)
} else if ruleErr.ErrorCode != ErrParentBlockUnknown {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are missing: "+
"Unexpected error code. Want: %s, got: %s", ErrParentBlockUnknown, ruleErr.ErrorCode)
}
// Test rejecting the block if its parents are invalid
blocksFile := "blk_0_to_4.dat"
blocks, err := LoadBlocks(filepath.Join("testdata/", blocksFile))
if err != nil {
t.Fatalf("TestMaybeAcceptBlockErrors: "+
"Error loading file '%s': %s\n", blocksFile, err)
}
// Add a valid block and mark it as invalid
block1 := blocks[1]
isOrphan, delay, err := dag.ProcessBlock(block1, BFNone)
if err != nil {
t.Fatalf("TestMaybeAcceptBlockErrors: Valid block unexpectedly returned an error: %s", err)
}
if delay != 0 {
t.Fatalf("TestMaybeAcceptBlockErrors: block 1 is too far in the future")
}
if isOrphan {
t.Fatalf("TestMaybeAcceptBlockErrors: incorrectly returned block 1 is an orphan")
}
blockNode1 := dag.index.LookupNode(block1.Hash())
dag.index.SetStatusFlags(blockNode1, statusValidateFailed)
block2 := blocks[2]
err = dag.maybeAcceptBlock(block2, BFNone)
if err == nil {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are invalid: "+
"Expected: %s, got: <nil>", ErrInvalidAncestorBlock)
}
ruleErr, ok = err.(RuleError)
if !ok {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are invalid: "+
"Expected RuleError but got %s", err)
} else if ruleErr.ErrorCode != ErrInvalidAncestorBlock {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block if its parents are invalid: "+
"Unexpected error. Want: %s, got: %s", ErrInvalidAncestorBlock, ruleErr.ErrorCode)
}
// Set block1's status back to valid for next tests
dag.index.UnsetStatusFlags(blockNode1, statusValidateFailed)
// Test rejecting the block due to bad context
originalBits := block2.MsgBlock().Header.Bits
block2.MsgBlock().Header.Bits = 0
err = dag.maybeAcceptBlock(block2, BFNone)
if err == nil {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block due to bad context: "+
"Expected: %s, got: <nil>", ErrUnexpectedDifficulty)
}
ruleErr, ok = err.(RuleError)
if !ok {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block due to bad context: "+
"Expected RuleError but got %s", err)
} else if ruleErr.ErrorCode != ErrUnexpectedDifficulty {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the block due to bad context: "+
"Unexpected error. Want: %s, got: %s", ErrUnexpectedDifficulty, ruleErr.ErrorCode)
}
// Set block2's bits back to valid for next tests
block2.MsgBlock().Header.Bits = originalBits
// Test rejecting the node due to database error
databaseErrorMessage := "database error"
guard := monkey.Patch(dbStoreBlock, func(dbTx database.Tx, block *util.Block) error {
return errors.New(databaseErrorMessage)
})
defer guard.Unpatch()
err = dag.maybeAcceptBlock(block2, BFNone)
if err == nil {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the node due to database error: "+
"Expected: %s, got: <nil>", databaseErrorMessage)
}
if !strings.Contains(err.Error(), databaseErrorMessage) {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the node due to database error: "+
"Unexpected error. Want: %s, got: %s", databaseErrorMessage, err)
}
guard.Unpatch()
// Test rejecting the node due to index error
indexErrorMessage := "index error"
guard = monkey.Patch((*blockIndex).flushToDB, func(_ *blockIndex) error {
return errors.New(indexErrorMessage)
})
defer guard.Unpatch()
err = dag.maybeAcceptBlock(block2, BFNone)
if err == nil {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the node due to index error: "+
"Expected %s, got: <nil>", indexErrorMessage)
}
if !strings.Contains(err.Error(), indexErrorMessage) {
t.Errorf("TestMaybeAcceptBlockErrors: rejecting the node due to index error: "+
"Unexpected error. Want: %s, got: %s", indexErrorMessage, err)
}
}


@@ -1,136 +0,0 @@
package blockdag
import (
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/pkg/errors"
)
var (
// idByHashIndexBucketName is the name of the db bucket used to house
// the block hash -> block id index.
idByHashIndexBucketName = []byte("idbyhashidx")
// hashByIDIndexBucketName is the name of the db bucket used to house
// the block id -> block hash index.
hashByIDIndexBucketName = []byte("hashbyididx")
currentBlockIDKey = []byte("currentblockid")
)
// -----------------------------------------------------------------------------
// This is a mapping between block hashes and unique IDs. The ID
// is simply a sequentially incremented uint64 that is used instead of the block
// hash for the indexers. This is useful because it is only 8 bytes versus 32-byte
// hashes and thus saves a ton of space when a block is referenced in an index.
// It consists of two buckets and a metadata key: the first bucket maps the hash
// of each block to its unique ID and the second maps that ID back to the block
// hash. The metadata key holds the last received block ID, and is used
// when starting the node to check that the enabled indexes are up to date
// with the latest received block and, if not, to initiate a recovery process.
//
// The serialized format for keys and values in the block hash to ID bucket is:
// <hash> = <ID>
//
// Field Type Size
// hash daghash.Hash 32 bytes
// ID uint64 8 bytes
// -----
// Total: 40 bytes
//
// The serialized format for keys and values in the ID to block hash bucket is:
// <ID> = <hash>
//
// Field Type Size
// ID uint64 8 bytes
// hash daghash.Hash 32 bytes
// -----
// Total: 40 bytes
//
// -----------------------------------------------------------------------------
const blockIDSize = 8 // 8 bytes for block ID
// DBFetchBlockIDByHash uses an existing database transaction to retrieve the
// block id for the provided hash from the index.
func DBFetchBlockIDByHash(dbTx database.Tx, hash *daghash.Hash) (uint64, error) {
hashIndex := dbTx.Metadata().Bucket(idByHashIndexBucketName)
serializedID := hashIndex.Get(hash[:])
if serializedID == nil {
return 0, errors.Errorf("no entry in the block ID index for block with hash %s", hash)
}
return DeserializeBlockID(serializedID), nil
}
// DBFetchBlockHashBySerializedID uses an existing database transaction to
// retrieve the hash for the provided serialized block id from the index.
func DBFetchBlockHashBySerializedID(dbTx database.Tx, serializedID []byte) (*daghash.Hash, error) {
idIndex := dbTx.Metadata().Bucket(hashByIDIndexBucketName)
hashBytes := idIndex.Get(serializedID)
if hashBytes == nil {
return nil, errors.Errorf("no entry in the block ID index for block with id %d", byteOrder.Uint64(serializedID))
}
var hash daghash.Hash
copy(hash[:], hashBytes)
return &hash, nil
}
// dbPutBlockIDIndexEntry uses an existing database transaction to update or add
// the index entries for the hash to id and id to hash mappings for the provided
// values.
func dbPutBlockIDIndexEntry(dbTx database.Tx, hash *daghash.Hash, serializedID []byte) error {
// Add the block hash to ID mapping to the index.
meta := dbTx.Metadata()
hashIndex := meta.Bucket(idByHashIndexBucketName)
if err := hashIndex.Put(hash[:], serializedID[:]); err != nil {
return err
}
// Add the block ID to hash mapping to the index.
idIndex := meta.Bucket(hashByIDIndexBucketName)
return idIndex.Put(serializedID[:], hash[:])
}
// DBFetchCurrentBlockID returns the last known block ID.
func DBFetchCurrentBlockID(dbTx database.Tx) uint64 {
serializedID := dbTx.Metadata().Get(currentBlockIDKey)
if serializedID == nil {
return 0
}
return DeserializeBlockID(serializedID)
}
// DeserializeBlockID returns a deserialized block id
func DeserializeBlockID(serializedID []byte) uint64 {
return byteOrder.Uint64(serializedID)
}
// SerializeBlockID returns a serialized block id
func SerializeBlockID(blockID uint64) []byte {
serializedBlockID := make([]byte, blockIDSize)
byteOrder.PutUint64(serializedBlockID, blockID)
return serializedBlockID
}
// DBFetchBlockHashByID uses an existing database transaction to retrieve the
// hash for the provided block id from the index.
func DBFetchBlockHashByID(dbTx database.Tx, id uint64) (*daghash.Hash, error) {
return DBFetchBlockHashBySerializedID(dbTx, SerializeBlockID(id))
}
func createBlockID(dbTx database.Tx, blockHash *daghash.Hash) (uint64, error) {
currentBlockID := DBFetchCurrentBlockID(dbTx)
newBlockID := currentBlockID + 1
serializedNewBlockID := SerializeBlockID(newBlockID)
err := dbTx.Metadata().Put(currentBlockIDKey, serializedNewBlockID)
if err != nil {
return 0, err
}
err = dbPutBlockIDIndexEntry(dbTx, blockHash, serializedNewBlockID)
if err != nil {
return 0, err
}
return newBlockID, nil
}
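For reference, a minimal standalone sketch of the fixed-width ID serialization this file uses (byteOrder in this package is binary.LittleEndian; only the standard library is assumed):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const blockIDSize = 8 // matches the constant above
	serialized := make([]byte, blockIDSize)
	// SerializeBlockID writes the ID as 8 little-endian bytes...
	binary.LittleEndian.PutUint64(serialized, 42)
	// ...and DeserializeBlockID reads them back.
	fmt.Printf("%x -> %d\n", serialized, binary.LittleEndian.Uint64(serialized))
	// Output: 2a00000000000000 -> 42
}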


@@ -1,58 +0,0 @@
package blockdag
import (
"github.com/pkg/errors"
"strings"
"testing"
"time"
"bou.ke/monkey"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
)
func TestAncestorErrors(t *testing.T) {
node := newTestNode(newSet(), int32(0x10000000), 0, time.Unix(0, 0), dagconfig.MainNetParams.K)
node.chainHeight = 2
ancestor := node.SelectedAncestor(3)
if ancestor != nil {
t.Errorf("TestAncestorErrors: Ancestor() unexpectedly returned a node. Expected: <nil>")
}
}
func TestFlushToDBErrors(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestFlushToDBErrors", Config{
DAGParams: &dagconfig.SimNetParams,
})
if err != nil {
t.Fatalf("TestFlushToDBErrors: Failed to setup DAG instance: %s", err)
}
defer teardownFunc()
// Call flushToDB without anything to flush. This should succeed
err = dag.index.flushToDB()
if err != nil {
t.Errorf("TestFlushToDBErrors: flushToDB without anything to flush: "+
"Unexpected flushToDB error: %s", err)
}
// Mark the genesis block as dirty
dag.index.SetStatusFlags(dag.genesis, statusValid)
// Test flushToDB failure due to database error
databaseErrorMessage := "database error"
guard := monkey.Patch(dbStoreBlockNode, func(_ database.Tx, _ *blockNode) error {
return errors.New(databaseErrorMessage)
})
defer guard.Unpatch()
err = dag.index.flushToDB()
if err == nil {
t.Fatalf("TestFlushToDBErrors: flushToDB failure due to database error: "+
"Expected: %s, got: <nil>", databaseErrorMessage)
}
if !strings.Contains(err.Error(), databaseErrorMessage) {
t.Errorf("TestFlushToDBErrors: flushToDB failure due to database error: "+
"Unexpected flushToDB error. Expected: %s, got: %s", databaseErrorMessage, err)
}
}


@@ -1,143 +0,0 @@
package blockdag
import (
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
)
// BlockLocator is used to help locate a specific block. The algorithm for
// building the block locator is to add block hashes in reverse order on the
// block's selected parent chain until the desired stop block is reached.
// In order to keep the list of locator hashes to a reasonable number of entries,
// the step between each entry is doubled each loop iteration to exponentially
// decrease the number of hashes as a function of the distance from the block
// being located.
//
// For example, assume a selected parent chain with IDs as depicted below, and the
// stop block is genesis:
// genesis -> 1 -> 2 -> ... -> 15 -> 16 -> 17 -> 18
//
// The block locator for block 18 (the locator starts from the block's
// selected parent, block 17) would be the hashes of blocks:
// [17 16 14 10 2 genesis]
type BlockLocator []*daghash.Hash
// BlockLocatorFromHashes returns a block locator from start and stop hash.
// See BlockLocator for details on the algorithm used to create a block locator.
//
// In addition to the general algorithm referenced above, this function will
// return the block locator for the selected tip if the passed hash is not currently
// known.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) BlockLocatorFromHashes(startHash, stopHash *daghash.Hash) BlockLocator {
dag.dagLock.RLock()
defer dag.dagLock.RUnlock()
startNode := dag.index.LookupNode(startHash)
var stopNode *blockNode
if !stopHash.IsEqual(&daghash.ZeroHash) {
stopNode = dag.index.LookupNode(stopHash)
}
return dag.blockLocator(startNode, stopNode)
}
// LatestBlockLocator returns a block locator for the current tips of the DAG.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) LatestBlockLocator() BlockLocator {
dag.dagLock.RLock()
defer dag.dagLock.RUnlock()
return dag.blockLocator(nil, nil)
}
// blockLocator returns a block locator for the passed start and stop nodes.
// The default value for the start node is the selected tip, and the default
// values of the stop node is the genesis block.
//
// See the BlockLocator type comments for more details.
//
// This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) blockLocator(startNode, stopNode *blockNode) BlockLocator {
// Use the selected tip if requested.
if startNode == nil {
startNode = dag.virtual.selectedParent
}
if stopNode == nil {
stopNode = dag.genesis
}
// We use the selected parent of the start node, so the
// block locator won't contain the start node.
startNode = startNode.selectedParent
// If the start node or the stop node are not in the
// virtual's selected parent chain, we replace them with their
// closest selected parent that is part of the virtual's
// selected parent chain.
for !dag.IsInSelectedParentChain(stopNode.hash) {
stopNode = stopNode.selectedParent
}
for !dag.IsInSelectedParentChain(startNode.hash) {
startNode = startNode.selectedParent
}
// Calculate the max number of entries that will ultimately be in the
// block locator. See the description of the algorithm for how these
// numbers are derived: 2 entries for startNode.hash and stopNode.hash,
// plus floor(log2(startNode.chainHeight-stopNode.chainHeight)) entries
// for the skip portion.
maxEntries := 2 + util.FastLog2Floor(startNode.chainHeight-stopNode.chainHeight)
locator := make(BlockLocator, 0, maxEntries)
step := uint64(1)
for node := startNode; node != nil; {
locator = append(locator, node.hash)
// Nothing more to add once the stop node has been added.
if node.chainHeight == stopNode.chainHeight {
break
}
// Calculate the chainHeight of the previous node to include, clamping
// at stopNode.chainHeight and guarding against uint64 underflow, so
// that the final node is stopNode.
nextChainHeight := stopNode.chainHeight
if node.chainHeight > stopNode.chainHeight+step {
nextChainHeight = node.chainHeight - step
}
// Walk backwards through the nodes to the correct ancestor.
node = node.SelectedAncestor(nextChainHeight)
// Double the distance between included hashes.
step *= 2
}
return locator
}
// FindNextLocatorBoundaries returns the lowest unknown block locator, hash
// and the highest known block locator hash. This is used to create the
// next block locator to find the highest shared known chain block with the
// sync peer.
//
// This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) FindNextLocatorBoundaries(locator BlockLocator) (startHash, stopHash *daghash.Hash) {
// Find the most recent locator block hash in the DAG. In the case none of
// the hashes in the locator are in the DAG, fall back to the genesis block.
stopNode := dag.genesis
nextBlockLocatorIndex := int64(len(locator) - 1)
for i, hash := range locator {
node := dag.index.LookupNode(hash)
if node != nil {
stopNode = node
nextBlockLocatorIndex = int64(i) - 1
break
}
}
if nextBlockLocatorIndex < 0 {
return nil, stopNode.hash
}
return locator[nextBlockLocatorIndex], stopNode.hash
}
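A standalone sketch of the height-selection loop in blockLocator above, assuming a plain chain where a block exists at every height between the stop and start blocks:

package main

import "fmt"

func main() {
	start, stop := uint64(17), uint64(0) // from block 18's selected parent down to genesis
	step := uint64(1)
	for height := start; ; {
		fmt.Println(height)
		// Nothing more to add once the stop height has been reached.
		if height == stop {
			break
		}
		// Clamp at the stop height, guarding against underflow.
		next := stop
		if height > stop+step {
			next = height - step
		}
		height = next
		// Double the distance between included heights.
		step *= 2
	}
	// Prints: 17 16 14 10 2 0
}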


@@ -1,86 +0,0 @@
package blockdag
import (
"testing"
)
func TestChainHeight(t *testing.T) {
phantomK := uint32(2)
buildNode := buildNodeGenerator(phantomK, true)
node0 := buildNode(setFromSlice())
node1 := buildNode(setFromSlice(node0))
node2 := buildNode(setFromSlice(node0))
node3 := buildNode(setFromSlice(node0))
node4 := buildNode(setFromSlice(node1, node2, node3))
node5 := buildNode(setFromSlice(node1, node2, node3))
node6 := buildNode(setFromSlice(node1, node2, node3))
node7 := buildNode(setFromSlice(node0))
node8 := buildNode(setFromSlice(node7))
node9 := buildNode(setFromSlice(node8))
node10 := buildNode(setFromSlice(node9, node6))
// Because nodes 7 & 8 were mined secretly, node10's selected
// parent will be node6, although node9 is higher. So in this
// case, node10.height and node10.chainHeight will be different
tests := []struct {
node *blockNode
expectedChainHeight uint64
}{
{
node: node0,
expectedChainHeight: 0,
},
{
node: node1,
expectedChainHeight: 1,
},
{
node: node2,
expectedChainHeight: 1,
},
{
node: node3,
expectedChainHeight: 1,
},
{
node: node4,
expectedChainHeight: 2,
},
{
node: node5,
expectedChainHeight: 2,
},
{
node: node6,
expectedChainHeight: 2,
},
{
node: node7,
expectedChainHeight: 1,
},
{
node: node8,
expectedChainHeight: 2,
},
{
node: node9,
expectedChainHeight: 3,
},
{
node: node10,
expectedChainHeight: 3,
},
}
for _, test := range tests {
if test.node.chainHeight != test.expectedChainHeight {
t.Errorf("block %v expected chain height %v but got %v", test.node, test.expectedChainHeight, test.node.chainHeight)
}
if calculateChainHeight(test.node) != test.expectedChainHeight {
t.Errorf("block %v expected calculated chain height %v but got %v", test.node, test.expectedChainHeight, test.node.chainHeight)
}
}
}
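For intuition, a standalone sketch of the invariant this test exercises, assuming chainHeight is defined as the selected parent's chainHeight plus one (zero for genesis); blockNode and newNode here are simplified stand-ins:

package main

import "fmt"

type blockNode struct {
	selectedParent *blockNode
	chainHeight    uint64
}

// newNode derives chainHeight from the selected parent only; other
// parents do not contribute, which is why node10 gets 3 and not 4.
func newNode(selectedParent *blockNode) *blockNode {
	n := &blockNode{selectedParent: selectedParent}
	if selectedParent != nil {
		n.chainHeight = selectedParent.chainHeight + 1
	}
	return n
}

func main() {
	genesis := newNode(nil)
	node1 := newNode(genesis)
	node6 := newNode(node1)  // chainHeight 2
	node10 := newNode(node6) // selected parent is node6, not the higher node9
	fmt.Println(node10.chainHeight) // 3
}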


@@ -1,151 +0,0 @@
package blockdag
import (
"strings"
"github.com/kaspanet/kaspad/util/daghash"
)
// blockSet implements a basic unsorted set of blocks
type blockSet map[daghash.Hash]*blockNode
// newSet creates a new, empty BlockSet
func newSet() blockSet {
return map[daghash.Hash]*blockNode{}
}
// setFromSlice converts a slice of blocks into an unordered set represented as a map
func setFromSlice(blocks ...*blockNode) blockSet {
set := newSet()
for _, block := range blocks {
set.add(block)
}
return set
}
// add adds a block to this BlockSet
func (bs blockSet) add(block *blockNode) {
bs[*block.hash] = block
}
// remove removes a block from this BlockSet, if it exists.
// Does nothing if this set does not contain the block.
func (bs blockSet) remove(block *blockNode) {
delete(bs, *block.hash)
}
// clone clones this block set
func (bs blockSet) clone() blockSet {
clone := newSet()
for _, block := range bs {
clone.add(block)
}
return clone
}
// subtract returns the difference between the BlockSet and another BlockSet
func (bs blockSet) subtract(other blockSet) blockSet {
diff := newSet()
for _, block := range bs {
if !other.contains(block) {
diff.add(block)
}
}
return diff
}
// addSet adds all blocks in other set to this set
func (bs blockSet) addSet(other blockSet) {
for _, block := range other {
bs.add(block)
}
}
// addSlice adds provided slice to this set
func (bs blockSet) addSlice(slice []*blockNode) {
for _, block := range slice {
bs.add(block)
}
}
// union returns a BlockSet that contains all blocks included in this set,
// the other set, or both
func (bs blockSet) union(other blockSet) blockSet {
union := bs.clone()
union.addSet(other)
return union
}
// contains returns true iff this set contains block
func (bs blockSet) contains(block *blockNode) bool {
_, ok := bs[*block.hash]
return ok
}
// containsHash returns true iff this set contains a block hash
func (bs blockSet) containsHash(hash *daghash.Hash) bool {
_, ok := bs[*hash]
return ok
}
// hashesEqual returns true if the given hashes are equal to the hashes
// of the blocks in this set.
// NOTE: The given hash slice must not contain duplicates.
func (bs blockSet) hashesEqual(hashes []*daghash.Hash) bool {
if len(hashes) != len(bs) {
return false
}
for _, hash := range hashes {
if _, wasFound := bs[*hash]; !wasFound {
return false
}
}
return true
}
// hashes returns the hashes of the blocks in this set.
func (bs blockSet) hashes() []*daghash.Hash {
hashes := make([]*daghash.Hash, 0, len(bs))
for _, node := range bs {
hashes = append(hashes, node.hash)
}
daghash.Sort(hashes)
return hashes
}
func (bs blockSet) String() string {
nodeStrs := make([]string, 0, len(bs))
for _, node := range bs {
nodeStrs = append(nodeStrs, node.String())
}
return strings.Join(nodeStrs, ",")
}
// anyChildInSet returns true iff any child of block is contained within this set
func (bs blockSet) anyChildInSet(block *blockNode) bool {
for _, child := range block.children {
if bs.contains(child) {
return true
}
}
return false
}
func (bs blockSet) bluest() *blockNode {
var bluestNode *blockNode
var maxScore uint64
for _, node := range bs {
if bluestNode == nil ||
node.blueScore > maxScore ||
(node.blueScore == maxScore && daghash.Less(node.hash, bluestNode.hash)) {
bluestNode = node
maxScore = node.blueScore
}
}
return bluestNode
}
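A standalone sketch of the tie-breaking rule in bluest above: prefer the higher blue score, and break ties by the smaller hash (daghash.Less is stood in for here by a plain byte comparison, which is an assumption):

package main

import (
	"bytes"
	"fmt"
)

type node struct {
	hash      []byte
	blueScore uint64
}

// bluest mirrors the selection loop above: a higher blue score wins, and
// a tie goes to the lexicographically smaller hash.
func bluest(nodes []node) node {
	best := nodes[0]
	for _, n := range nodes[1:] {
		if n.blueScore > best.blueScore ||
			(n.blueScore == best.blueScore && bytes.Compare(n.hash, best.hash) < 0) {
			best = n
		}
	}
	return best
}

func main() {
	winner := bluest([]node{
		{[]byte{0x02}, 5},
		{[]byte{0x01}, 5}, // same score, smaller hash wins
		{[]byte{0x03}, 4},
	})
	fmt.Printf("%x\n", winner.hash) // 01
}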


@@ -1,258 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"fmt"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/pkg/errors"
)
// CheckpointConfirmations is the number of blocks before the end of the current
// best block chain that a good checkpoint candidate must be.
const CheckpointConfirmations = 2016
// newHashFromStr converts the passed big-endian hex string into a
// daghash.Hash. It only differs from the one available in daghash in that
// it ignores the error since it will only (and must only) be called with
// hard-coded, and therefore known good, hashes.
func newHashFromStr(hexStr string) *daghash.Hash {
hash, _ := daghash.NewHashFromStr(hexStr)
return hash
}
// newTxIDFromStr converts the passed big-endian hex string into a
// daghash.TxID. It only differs from the one available in daghash in that
// it ignores the error since it will only (and must only) be called with
// hard-coded, and therefore known good, IDs.
func newTxIDFromStr(hexStr string) *daghash.TxID {
txID, _ := daghash.NewTxIDFromStr(hexStr)
return txID
}
// Checkpoints returns a slice of checkpoints (regardless of whether they are
// already known). When there are no checkpoints for the chain, it will return
// nil.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) Checkpoints() []dagconfig.Checkpoint {
return dag.checkpoints
}
// HasCheckpoints returns whether this BlockDAG has checkpoints defined.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) HasCheckpoints() bool {
return len(dag.checkpoints) > 0
}
// LatestCheckpoint returns the most recent checkpoint (regardless of whether it
// is already known). When there are no defined checkpoints for the active chain
// instance, it will return nil.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) LatestCheckpoint() *dagconfig.Checkpoint {
if !dag.HasCheckpoints() {
return nil
}
return &dag.checkpoints[len(dag.checkpoints)-1]
}
// verifyCheckpoint returns whether the passed block chain height and hash combination
// match the checkpoint data. It also returns true if there is no checkpoint
// data for the passed block chain height.
func (dag *BlockDAG) verifyCheckpoint(chainHeight uint64, hash *daghash.Hash) bool {
if !dag.HasCheckpoints() {
return true
}
// Nothing to check if there is no checkpoint data for the block chainHeight.
checkpoint, exists := dag.checkpointsByChainHeight[chainHeight]
if !exists {
return true
}
if !checkpoint.Hash.IsEqual(hash) {
return false
}
log.Infof("Verified checkpoint at chainHeight %d/block %s", checkpoint.ChainHeight,
checkpoint.Hash)
return true
}
// findPreviousCheckpoint finds the most recent checkpoint that is already
// available in the downloaded portion of the block chain and returns the
// associated block node. It returns nil if a checkpoint can't be found (this
// should really only happen for blocks before the first checkpoint).
//
// This function MUST be called with the DAG lock held (for reads).
func (dag *BlockDAG) findPreviousCheckpoint() (*blockNode, error) {
if !dag.HasCheckpoints() {
return nil, nil
}
// Perform the initial search to find and cache the latest known
// checkpoint if the best chain is not known yet or we haven't already
// previously searched.
checkpoints := dag.checkpoints
numCheckpoints := len(checkpoints)
if dag.checkpointNode == nil && dag.nextCheckpoint == nil {
// Loop backwards through the available checkpoints to find one
// that is already available.
for i := numCheckpoints - 1; i >= 0; i-- {
node := dag.index.LookupNode(checkpoints[i].Hash)
if node == nil {
continue
}
// Checkpoint found. Cache it for future lookups and
// set the next expected checkpoint accordingly.
dag.checkpointNode = node
if i < numCheckpoints-1 {
dag.nextCheckpoint = &checkpoints[i+1]
}
return dag.checkpointNode, nil
}
// No known latest checkpoint. This will only happen on blocks
// before the first known checkpoint. So, set the next expected
// checkpoint to the first checkpoint and return the fact there
// is no latest known checkpoint block.
dag.nextCheckpoint = &checkpoints[0]
return nil, nil
}
// At this point we've already searched for the latest known checkpoint,
// so when there is no next checkpoint, the current checkpoint lockin
// will always be the latest known checkpoint.
if dag.nextCheckpoint == nil {
return dag.checkpointNode, nil
}
// When there is a next checkpoint and the chain height of the current
// selected tip of the DAG does not exceed it, the current checkpoint
// lockin is still the latest known checkpoint.
if dag.selectedTip().chainHeight < dag.nextCheckpoint.ChainHeight {
return dag.checkpointNode, nil
}
// We've reached or exceeded the next checkpoint height. Note that
// once a checkpoint lockin has been reached, forks are prevented from
// any blocks before the checkpoint, so we don't have to worry about the
// checkpoint going away out from under us due to a chain reorganize.
// Cache the latest known checkpoint for future lookups. Note that if
// this lookup fails something is very wrong since the chain has already
// passed the checkpoint which was verified as accurate before inserting
// it.
checkpointNode := dag.index.LookupNode(dag.nextCheckpoint.Hash)
if checkpointNode == nil {
return nil, AssertError(fmt.Sprintf("findPreviousCheckpoint "+
"failed lookup of known good block node %s",
dag.nextCheckpoint.Hash))
}
dag.checkpointNode = checkpointNode
// Set the next expected checkpoint.
checkpointIndex := -1
for i := numCheckpoints - 1; i >= 0; i-- {
if checkpoints[i].Hash.IsEqual(dag.nextCheckpoint.Hash) {
checkpointIndex = i
break
}
}
dag.nextCheckpoint = nil
if checkpointIndex != -1 && checkpointIndex < numCheckpoints-1 {
dag.nextCheckpoint = &checkpoints[checkpointIndex+1]
}
return dag.checkpointNode, nil
}
// isNonstandardTransaction determines whether a transaction contains any
// scripts which are not one of the standard types.
func isNonstandardTransaction(tx *util.Tx) bool {
// Check all of the output public key scripts for non-standard scripts.
for _, txOut := range tx.MsgTx().TxOut {
scriptClass := txscript.GetScriptClass(txOut.ScriptPubKey)
if scriptClass == txscript.NonStandardTy {
return true
}
}
return false
}
// IsCheckpointCandidate returns whether or not the passed block is a good
// checkpoint candidate.
//
// The factors used to determine a good checkpoint are:
// - The block must be in the main chain
// - The block must be at least 'CheckpointConfirmations' blocks prior to the
// current end of the main chain
// - The timestamps for the blocks before and after the checkpoint must have
// timestamps which are also before and after the checkpoint, respectively
// (due to the median time allowance this is not always the case)
// - The block must not contain any strange transaction such as those with
// nonstandard scripts
//
// The intent is that candidates are reviewed by a developer to make the final
// decision and then manually added to the list of checkpoints for a network.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) IsCheckpointCandidate(block *util.Block) (bool, error) {
dag.dagLock.RLock()
defer dag.dagLock.RUnlock()
// A checkpoint must be in the DAG.
node := dag.index.LookupNode(block.Hash())
if node == nil {
return false, nil
}
// Ensure the chain height of the passed block and the entry for the block
// in the DAG match. This should always be the case unless the
// caller provided an invalid block.
if node.chainHeight != block.ChainHeight() {
return false, errors.Errorf("passed block chain height of %d does not "+
"match its chain height in the DAG: %d", block.ChainHeight(),
node.chainHeight)
}
// A checkpoint must be at least CheckpointConfirmations blocks
// before the end of the main chain.
dagChainHeight := dag.selectedTip().chainHeight
if node.chainHeight+CheckpointConfirmations > dagChainHeight {
return false, nil
}
// A checkpoint must have at least one block after it.
//
// This should always succeed since the check above already made sure it
// is CheckpointConfirmations back, but be safe in case the constant
// changes.
if len(node.children) == 0 {
return false, nil
}
// A checkpoint must have at least one block before it.
if node.selectedParent == nil {
return false, nil
}
// A checkpoint must have transactions that only contain standard
// scripts.
for _, tx := range block.Transactions() {
if isNonstandardTransaction(tx) {
return false, nil
}
}
// All of the checks passed, so the block is a candidate.
return true, nil
}


@@ -1,586 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"github.com/kaspanet/kaspad/btcec"
"github.com/kaspanet/kaspad/txscript"
)
// -----------------------------------------------------------------------------
// A variable length quantity (VLQ) is an encoding that uses an arbitrary number
// of binary octets to represent an arbitrarily large integer. The scheme
// employs a most significant byte (MSB) base-128 encoding where the high bit in
// each byte indicates whether or not the byte is the final one. In addition,
// to ensure there are no redundant encodings, an offset is subtracted every
// time a group of 7 bits is shifted out. Therefore each integer can be
// represented in exactly one way, and each representation stands for exactly
// one integer.
//
// Another nice property of this encoding is that it provides a compact
// representation of values that are typically used to indicate sizes. For
// example, the values 0 - 127 are represented with a single byte, 128 - 16511
// with two bytes, and 16512 - 2113663 with three bytes.
//
// While the encoding allows arbitrarily large integers, it is artificially
// limited in this code to an unsigned 64-bit integer for efficiency purposes.
//
// Example encodings:
// 0 -> [0x00]
// 127 -> [0x7f] * Max 1-byte value
// 128 -> [0x80 0x00]
// 129 -> [0x80 0x01]
// 255 -> [0x80 0x7f]
// 256 -> [0x81 0x00]
// 16511 -> [0xff 0x7f] * Max 2-byte value
// 16512 -> [0x80 0x80 0x00]
// 32895 -> [0x80 0xff 0x7f]
// 2113663 -> [0xff 0xff 0x7f] * Max 3-byte value
// 270549119 -> [0xff 0xff 0xff 0x7f] * Max 4-byte value
// 2^64-1 -> [0x80 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0x7f]
//
// References:
// https://en.wikipedia.org/wiki/Variable-length_quantity
// http://www.codecodex.com/wiki/Variable-Length_Integers
// -----------------------------------------------------------------------------
// serializeSizeVLQ returns the number of bytes it would take to serialize the
// passed number as a variable-length quantity according to the format described
// above.
func serializeSizeVLQ(n uint64) int {
size := 1
for ; n > 0x7f; n = (n >> 7) - 1 {
size++
}
return size
}
// putVLQ serializes the provided number to a variable-length quantity according
// to the format described above and returns the number of bytes of the encoded
// value. The result is placed directly into the passed byte slice which must
// be at least large enough to handle the number of bytes returned by the
// serializeSizeVLQ function or it will panic.
func putVLQ(target []byte, n uint64) int {
offset := 0
for ; ; offset++ {
// The high bit is set when another byte follows.
highBitMask := byte(0x80)
if offset == 0 {
highBitMask = 0x00
}
target[offset] = byte(n&0x7f) | highBitMask
if n <= 0x7f {
break
}
n = (n >> 7) - 1
}
// Reverse the bytes so it is MSB-encoded.
for i, j := 0, offset; i < j; i, j = i+1, j-1 {
target[i], target[j] = target[j], target[i]
}
return offset + 1
}
// deserializeVLQ deserializes the provided variable-length quantity according
// to the format described above. It also returns the number of bytes
// deserialized.
func deserializeVLQ(serialized []byte) (uint64, int) {
var n uint64
var size int
for _, val := range serialized {
size++
n = (n << 7) | uint64(val&0x7f)
if val&0x80 != 0x80 {
break
}
n++
}
return n, size
}
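A compact standalone restatement of the encode/decode pair above, useful for checking the example encodings by hand (the bodies mirror putVLQ and deserializeVLQ):

package main

import "fmt"

// encodeVLQ builds the bytes most-significant first; every byte except
// the last has its high bit set, and (n>>7)-1 applies the offset that
// makes each encoding unique.
func encodeVLQ(n uint64) []byte {
	var out []byte
	for {
		b := byte(n & 0x7f)
		if len(out) > 0 {
			b |= 0x80
		}
		out = append([]byte{b}, out...)
		if n <= 0x7f {
			break
		}
		n = (n >> 7) - 1
	}
	return out
}

// decodeVLQ reverses the process, re-adding the offset after each
// continuation byte.
func decodeVLQ(serialized []byte) uint64 {
	var n uint64
	for _, val := range serialized {
		n = (n << 7) | uint64(val&0x7f)
		if val&0x80 != 0x80 {
			break
		}
		n++
	}
	return n
}

func main() {
	for _, v := range []uint64{0, 127, 128, 16511, 16512} {
		enc := encodeVLQ(v)
		fmt.Printf("%d -> %x -> %d\n", v, enc, decodeVLQ(enc))
	}
	// 0 -> 00, 127 -> 7f, 128 -> 8000, 16511 -> ff7f, 16512 -> 808000
}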
// -----------------------------------------------------------------------------
// In order to reduce the size of stored scripts, a domain specific compression
// algorithm is used which recognizes standard scripts and stores them using
// less bytes than the original script. The compression algorithm used here was
// obtained from Bitcoin Core, so all credits for the algorithm go to it.
//
// The general serialized format is:
//
// <script size or type><script data>
//
// Field Type Size
// script size or type VLQ variable
// script data []byte variable
//
// The specific serialized format for each recognized standard script is:
//
// - Pay-to-pubkey-hash: (21 bytes) - <0><20-byte pubkey hash>
// - Pay-to-script-hash: (21 bytes) - <1><20-byte script hash>
// - Pay-to-pubkey**: (33 bytes) - <2, 3, 4, or 5><32-byte pubkey X value>
// 2, 3 = compressed pubkey with bit 0 specifying the y coordinate to use
// 4, 5 = uncompressed pubkey with bit 0 specifying the y coordinate to use
// ** Only valid public keys starting with 0x02, 0x03, and 0x04 are supported.
//
// Any scripts which are not recognized as one of the aforementioned standard
// scripts are encoded using the general serialized format and encode the script
// size as the sum of the actual size of the script and the number of special
// cases.
// -----------------------------------------------------------------------------
// The following constants specify the special values used to identify each
// supported script type in the domain-specific compressed script encoding.
//
// NOTE: This section specifically does not use iota since these values are
// serialized and must be stable for long-term storage.
const (
// cstPayToPubKeyHash identifies a compressed pay-to-pubkey-hash script.
cstPayToPubKeyHash = 0
// cstPayToScriptHash identifies a compressed pay-to-script-hash script.
cstPayToScriptHash = 1
// cstPayToPubKeyComp2 identifies a compressed pay-to-pubkey script to
// a compressed pubkey. Bit 0 specifies which y-coordinate to use
// to reconstruct the full uncompressed pubkey.
cstPayToPubKeyComp2 = 2
// cstPayToPubKeyComp3 identifies a compressed pay-to-pubkey script to
// a compressed pubkey. Bit 0 specifies which y-coordinate to use
// to reconstruct the full uncompressed pubkey.
cstPayToPubKeyComp3 = 3
// cstPayToPubKeyUncomp4 identifies a compressed pay-to-pubkey script to
// an uncompressed pubkey. Bit 0 specifies which y-coordinate to use
// to reconstruct the full uncompressed pubkey.
cstPayToPubKeyUncomp4 = 4
// cstPayToPubKeyUncomp5 identifies a compressed pay-to-pubkey script to
// an uncompressed pubkey. Bit 0 specifies which y-coordinate to use
// to reconstruct the full uncompressed pubkey.
cstPayToPubKeyUncomp5 = 5
// numSpecialScripts is the number of special scripts recognized by the
// domain-specific script compression algorithm.
numSpecialScripts = 6
)
// isPubKeyHash returns whether or not the passed public key script is a
// standard pay-to-pubkey-hash script along with the pubkey hash it is paying to
// if it is.
func isPubKeyHash(script []byte) (bool, []byte) {
if len(script) == 25 && script[0] == txscript.OpDup &&
script[1] == txscript.OpHash160 &&
script[2] == txscript.OpData20 &&
script[23] == txscript.OpEqualVerify &&
script[24] == txscript.OpCheckSig {
return true, script[3:23]
}
return false, nil
}
// isScriptHash returns whether or not the passed public key script is a
// standard pay-to-script-hash script along with the script hash it is paying to
// if it is.
func isScriptHash(script []byte) (bool, []byte) {
if len(script) == 23 && script[0] == txscript.OpHash160 &&
script[1] == txscript.OpData20 &&
script[22] == txscript.OpEqual {
return true, script[2:22]
}
return false, nil
}
// isPubKey returns whether or not the passed public key script is a standard
// pay-to-pubkey script that pays to a valid compressed or uncompressed public
// key along with the serialized pubkey it is paying to if it is.
//
// NOTE: This function ensures the public key is actually valid since the
// compression algorithm requires valid pubkeys. It does not support hybrid
// pubkeys. This means that even if the script has the correct form for a
// pay-to-pubkey script, this function will only return true when it is paying
// to a valid compressed or uncompressed pubkey.
func isPubKey(script []byte) (bool, []byte) {
// Pay-to-compressed-pubkey script.
if len(script) == 35 && script[0] == txscript.OpData33 &&
script[34] == txscript.OpCheckSig && (script[1] == 0x02 ||
script[1] == 0x03) {
// Ensure the public key is valid.
serializedPubKey := script[1:34]
_, err := btcec.ParsePubKey(serializedPubKey, btcec.S256())
if err == nil {
return true, serializedPubKey
}
}
// Pay-to-uncompressed-pubkey script.
if len(script) == 67 && script[0] == txscript.OpData65 &&
script[66] == txscript.OpCheckSig && script[1] == 0x04 {
// Ensure the public key is valid.
serializedPubKey := script[1:66]
_, err := btcec.ParsePubKey(serializedPubKey, btcec.S256())
if err == nil {
return true, serializedPubKey
}
}
return false, nil
}
// compressedScriptSize returns the number of bytes the passed script would take
// when encoded with the domain specific compression algorithm described above.
func compressedScriptSize(scriptPubKey []byte) int {
// Pay-to-pubkey-hash script.
if valid, _ := isPubKeyHash(scriptPubKey); valid {
return 21
}
// Pay-to-script-hash script.
if valid, _ := isScriptHash(scriptPubKey); valid {
return 21
}
// Pay-to-pubkey (compressed or uncompressed) script.
if valid, _ := isPubKey(scriptPubKey); valid {
return 33
}
// When none of the above special cases apply, encode the script as is
// preceded by the sum of its size and the number of special cases
// encoded as a variable length quantity.
return serializeSizeVLQ(uint64(len(scriptPubKey)+numSpecialScripts)) +
len(scriptPubKey)
}
// decodeCompressedScriptSize treats the passed serialized bytes as a compressed
// script, possibly followed by other data, and returns the number of bytes it
// occupies taking into account the special encoding of the script size by the
// domain specific compression algorithm described above.
func decodeCompressedScriptSize(serialized []byte) int {
scriptSize, bytesRead := deserializeVLQ(serialized)
if bytesRead == 0 {
return 0
}
switch scriptSize {
case cstPayToPubKeyHash:
return 21
case cstPayToScriptHash:
return 21
case cstPayToPubKeyComp2, cstPayToPubKeyComp3, cstPayToPubKeyUncomp4,
cstPayToPubKeyUncomp5:
return 33
}
scriptSize -= numSpecialScripts
scriptSize += uint64(bytesRead)
return int(scriptSize)
}
// putCompressedScript compresses the passed script according to the domain
// specific compression algorithm described above directly into the passed
// target byte slice. The target byte slice must be at least large enough to
// handle the number of bytes returned by the compressedScriptSize function or
// it will panic.
func putCompressedScript(target, scriptPubKey []byte) int {
// Pay-to-pubkey-hash script.
if valid, hash := isPubKeyHash(scriptPubKey); valid {
target[0] = cstPayToPubKeyHash
copy(target[1:21], hash)
return 21
}
// Pay-to-script-hash script.
if valid, hash := isScriptHash(scriptPubKey); valid {
target[0] = cstPayToScriptHash
copy(target[1:21], hash)
return 21
}
// Pay-to-pubkey (compressed or uncompressed) script.
if valid, serializedPubKey := isPubKey(scriptPubKey); valid {
pubKeyFormat := serializedPubKey[0]
switch pubKeyFormat {
case 0x02, 0x03:
target[0] = pubKeyFormat
copy(target[1:33], serializedPubKey[1:33])
return 33
case 0x04:
// Encode the oddness of the serialized pubkey into the
// compressed script type.
target[0] = pubKeyFormat | (serializedPubKey[64] & 0x01)
copy(target[1:33], serializedPubKey[1:33])
return 33
}
}
// When none of the above special cases apply, encode the unmodified
// script preceded by the sum of its size and the number of special
// cases encoded as a variable length quantity.
encodedSize := uint64(len(scriptPubKey) + numSpecialScripts)
vlqSizeLen := putVLQ(target, encodedSize)
copy(target[vlqSizeLen:], scriptPubKey)
return vlqSizeLen + len(scriptPubKey)
}
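A standalone sketch of the parity trick used above for uncompressed pubkeys: the stored type byte is 0x04 | (last Y byte & 1), and decompressScript below recovers the standard compressed-pubkey prefix by subtracting 2 (0x04 -> 0x02, 0x05 -> 0x03):

package main

import "fmt"

func main() {
	for _, lastYByte := range []byte{0x3e, 0xa3} { // even Y, odd Y
		typeByte := byte(0x04) | (lastYByte & 0x01)
		fmt.Printf("type %d -> compressed-pubkey prefix 0x%02x\n", typeByte, typeByte-2)
	}
	// type 4 -> compressed-pubkey prefix 0x02
	// type 5 -> compressed-pubkey prefix 0x03
}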
// decompressScript returns the original script obtained by decompressing the
// passed compressed script according to the domain specific compression
// algorithm described above.
//
// NOTE: The script parameter must already have been proven to be long enough
// to contain the number of bytes returned by decodeCompressedScriptSize or it
// will panic. This is acceptable since it is only an internal function.
func decompressScript(compressedScriptPubKey []byte) []byte {
// In practice this function will not be called with a zero-length or
// nil script since the nil script encoding includes the length, however
// the code below assumes the length exists, so just return nil now if
// the function ever ends up being called with a nil script in the
// future.
if len(compressedScriptPubKey) == 0 {
return nil
}
// Decode the script size and examine it for the special cases.
encodedScriptSize, bytesRead := deserializeVLQ(compressedScriptPubKey)
switch encodedScriptSize {
// Pay-to-pubkey-hash script. The resulting script is:
// <OP_DUP><OP_HASH160><20 byte hash><OP_EQUALVERIFY><OP_CHECKSIG>
case cstPayToPubKeyHash:
scriptPubKey := make([]byte, 25)
scriptPubKey[0] = txscript.OpDup
scriptPubKey[1] = txscript.OpHash160
scriptPubKey[2] = txscript.OpData20
copy(scriptPubKey[3:], compressedScriptPubKey[bytesRead:bytesRead+20])
scriptPubKey[23] = txscript.OpEqualVerify
scriptPubKey[24] = txscript.OpCheckSig
return scriptPubKey
// Pay-to-script-hash script. The resulting script is:
// <OP_HASH160><20 byte script hash><OP_EQUAL>
case cstPayToScriptHash:
scriptPubKey := make([]byte, 23)
scriptPubKey[0] = txscript.OpHash160
scriptPubKey[1] = txscript.OpData20
copy(scriptPubKey[2:], compressedScriptPubKey[bytesRead:bytesRead+20])
scriptPubKey[22] = txscript.OpEqual
return scriptPubKey
// Pay-to-compressed-pubkey script. The resulting script is:
// <OP_DATA_33><33 byte compressed pubkey><OP_CHECKSIG>
case cstPayToPubKeyComp2, cstPayToPubKeyComp3:
scriptPubKey := make([]byte, 35)
scriptPubKey[0] = txscript.OpData33
scriptPubKey[1] = byte(encodedScriptSize)
copy(scriptPubKey[2:], compressedScriptPubKey[bytesRead:bytesRead+32])
scriptPubKey[34] = txscript.OpCheckSig
return scriptPubKey
// Pay-to-uncompressed-pubkey script. The resulting script is:
// <OP_DATA_65><65 byte uncompressed pubkey><OP_CHECKSIG>
case cstPayToPubKeyUncomp4, cstPayToPubKeyUncomp5:
// Change the leading byte to the appropriate compressed pubkey
// identifier (0x02 or 0x03) so it can be decoded as a
// compressed pubkey. This really should never fail since the
// encoding ensures it is valid before compressing to this type.
compressedKey := make([]byte, 33)
compressedKey[0] = byte(encodedScriptSize - 2)
copy(compressedKey[1:], compressedScriptPubKey[1:])
key, err := btcec.ParsePubKey(compressedKey, btcec.S256())
if err != nil {
return nil
}
scriptPubKey := make([]byte, 67)
scriptPubKey[0] = txscript.OpData65
copy(scriptPubKey[1:], key.SerializeUncompressed())
scriptPubKey[66] = txscript.OpCheckSig
return scriptPubKey
}
// When none of the special cases apply, the script was encoded using
// the general format, so reduce the script size by the number of
// special cases and return the unmodified script.
scriptSize := int(encodedScriptSize - numSpecialScripts)
scriptPubKey := make([]byte, scriptSize)
copy(scriptPubKey, compressedScriptPubKey[bytesRead:bytesRead+scriptSize])
return scriptPubKey
}
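For a concrete byte-level example, a standalone sketch of the pay-to-pubkey-hash case (the script constant is taken from the compression tests below; opcode values 0x76/0xa9/0x14/0x88/0xac are OP_DUP, OP_HASH160, OP_DATA_20, OP_EQUALVERIFY, and OP_CHECKSIG):

package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	// A canonical 25-byte P2PKH script: OP_DUP OP_HASH160 <20-byte hash>
	// OP_EQUALVERIFY OP_CHECKSIG.
	script, _ := hex.DecodeString("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac")
	// Compressed form: <cstPayToPubKeyHash = 0><20-byte pubkey hash>.
	compressed := append([]byte{0x00}, script[3:23]...)
	fmt.Printf("%d bytes -> %d bytes: %x\n", len(script), len(compressed), compressed)
	// 25 bytes -> 21 bytes: 001018853670f9f3b0582c5b9ee8ce93764ac32b93
}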
// -----------------------------------------------------------------------------
// In order to reduce the size of stored amounts, a domain specific compression
// algorithm is used which relies on there typically being a lot of zeroes at
// the end of the amounts. The compression algorithm used here was obtained from
// Bitcoin Core, so all credits for the algorithm go to it.
//
// While this is simply exchanging one uint64 for another, the resulting value
// for typical amounts has a much smaller magnitude which results in fewer bytes
// when encoded as variable length quantity. For example, consider the amount
// of 0.1 BTC which is 10000000 satoshi. Encoding 10000000 as a VLQ would take
// 4 bytes while encoding the compressed value of 8 as a VLQ only takes 1 byte.
//
// Essentially the compression is achieved by splitting the value into an
// exponent in the range [0-9] and a digit in the range [1-9], when possible,
// and encoding them in a way that can be decoded. More specifically, the
// encoding is as follows:
// - 0 is 0
// - Find the exponent, e, as the largest power of 10 that evenly divides the
// value up to a maximum of 9
// - When e < 9, the final digit can't be 0 so store it as d and remove it by
// dividing the value by 10 (call the result n). The encoded value is thus:
// 1 + 10*(9*n + d-1) + e
// - When e==9, the only thing known is the amount is not 0. The encoded value
// is thus:
// 1 + 10*(n-1) + e == 10 + 10*(n-1)
//
// Example encodings:
// (The numbers in parenthesis are the number of bytes when serialized as a VLQ)
// 0 (1) -> 0 (1) * 0.00000000 BTC
// 1000 (2) -> 4 (1) * 0.00001000 BTC
// 10000 (2) -> 5 (1) * 0.00010000 BTC
// 12345678 (4) -> 111111101 (4) * 0.12345678 BTC
// 50000000 (4) -> 47 (1) * 0.50000000 BTC
// 100000000 (4) -> 9 (1) * 1.00000000 BTC
// 500000000 (5) -> 49 (1) * 5.00000000 BTC
// 1000000000 (5) -> 10 (1) * 10.00000000 BTC
// -----------------------------------------------------------------------------
// compressTxOutAmount compresses the passed amount according to the domain
// specific compression algorithm described above.
func compressTxOutAmount(amount uint64) uint64 {
// No need to do any work if it's zero.
if amount == 0 {
return 0
}
// Find the largest power of 10 (max of 9) that evenly divides the
// value.
exponent := uint64(0)
for amount%10 == 0 && exponent < 9 {
amount /= 10
exponent++
}
// The compressed result for exponents less than 9 is:
// 1 + 10*(9*n + d-1) + e
if exponent < 9 {
lastDigit := amount % 10
amount /= 10
return 1 + 10*(9*amount+lastDigit-1) + exponent
}
// The compressed result for an exponent of 9 is:
// 1 + 10*(n-1) + e == 10 + 10*(n-1)
return 10 + 10*(amount-1)
}
// decompressTxOutAmount returns the original amount the passed compressed
// amount represents according to the domain specific compression algorithm
// described above.
func decompressTxOutAmount(amount uint64) uint64 {
// No need to do any work if it's zero.
if amount == 0 {
return 0
}
// The decompressed amount is either of the following two equations:
// x = 1 + 10*(9*n + d - 1) + e
// x = 1 + 10*(n - 1) + 9
amount--
// The decompressed amount is now one of the following two equations:
// x = 10*(9*n + d - 1) + e
// x = 10*(n - 1) + 9
exponent := amount % 10
amount /= 10
// The decompressed amount is now one of the following two equations:
// x = 9*n + d - 1 | where e < 9
// x = n - 1 | where e = 9
n := uint64(0)
if exponent < 9 {
lastDigit := amount%9 + 1
amount /= 9
n = amount*10 + lastDigit
} else {
n = amount + 1
}
// Apply the exponent.
for ; exponent > 0; exponent-- {
n *= 10
}
return n
}
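A quick standalone check of the arithmetic above (the body mirrors compressTxOutAmount; the expected outputs match the example table):

package main

import "fmt"

func compress(amount uint64) uint64 {
	if amount == 0 {
		return 0
	}
	// Strip trailing zeros into the exponent, capped at 9.
	exponent := uint64(0)
	for amount%10 == 0 && exponent < 9 {
		amount /= 10
		exponent++
	}
	if exponent < 9 {
		// e < 9: encode the (non-zero) last digit separately.
		lastDigit := amount % 10
		amount /= 10
		return 1 + 10*(9*amount+lastDigit-1) + exponent
	}
	// e == 9.
	return 10 + 10*(amount-1)
}

func main() {
	fmt.Println(compress(1000))      // 4: n=0, d=1, e=3 -> 1 + 10*(9*0+0) + 3
	fmt.Println(compress(100000000)) // 9: n=0, d=1, e=8 -> 1 + 10*0 + 8
	fmt.Println(compress(12345678))  // 111111101: n=1234567, d=8, e=0
}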
// -----------------------------------------------------------------------------
// Compressed transaction outputs consist of an amount and a public key script
// both compressed using the domain specific compression algorithms previously
// described.
//
// The serialized format is:
//
// <compressed amount><compressed script>
//
// Field Type Size
// compressed amount VLQ variable
// compressed script []byte variable
// -----------------------------------------------------------------------------
// compressedTxOutSize returns the number of bytes the passed transaction output
// fields would take when encoded with the format described above.
func compressedTxOutSize(amount uint64, scriptPubKey []byte) int {
return serializeSizeVLQ(compressTxOutAmount(amount)) +
compressedScriptSize(scriptPubKey)
}
// putCompressedTxOut compresses the passed amount and script according to their
// domain specific compression algorithms and encodes them directly into the
// passed target byte slice with the format described above. The target byte
// slice must be at least large enough to handle the number of bytes returned by
// the compressedTxOutSize function or it will panic.
func putCompressedTxOut(target []byte, amount uint64, scriptPubKey []byte) int {
offset := putVLQ(target, compressTxOutAmount(amount))
offset += putCompressedScript(target[offset:], scriptPubKey)
return offset
}
// decodeCompressedTxOut decodes the passed compressed txout, possibly followed
// by other data, into its uncompressed amount and script and returns them along
// with the number of bytes they occupied prior to decompression.
func decodeCompressedTxOut(serialized []byte) (uint64, []byte, int, error) {
// Deserialize the compressed amount and ensure there are bytes
// remaining for the compressed script.
compressedAmount, bytesRead := deserializeVLQ(serialized)
if bytesRead >= len(serialized) {
return 0, nil, bytesRead, errDeserialize("unexpected end of " +
"data after compressed amount")
}
// Decode the compressed script size and ensure there are enough bytes
// left in the slice for it.
scriptSize := decodeCompressedScriptSize(serialized[bytesRead:])
if len(serialized[bytesRead:]) < scriptSize {
return 0, nil, bytesRead, errDeserialize("unexpected end of " +
"data after script size")
}
// Decompress and return the amount and script.
amount := decompressTxOutAmount(compressedAmount)
script := decompressScript(serialized[bytesRead : bytesRead+scriptSize])
return amount, script, bytesRead + scriptSize, nil
}


@@ -1,436 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"bytes"
"encoding/hex"
"testing"
)
// hexToBytes converts the passed hex string into bytes and will panic if there
// is an error. This is only provided for the hard-coded constants so errors in
// the source code can be detected. It will only (and must only) be called with
// hard-coded values.
func hexToBytes(s string) []byte {
b, err := hex.DecodeString(s)
if err != nil {
panic("invalid hex in source file: " + s)
}
return b
}
// TestVLQ ensures the variable length quantity serialization, deserialization,
// and size calculation works as expected.
func TestVLQ(t *testing.T) {
t.Parallel()
tests := []struct {
val uint64
serialized []byte
}{
{0, hexToBytes("00")},
{1, hexToBytes("01")},
{127, hexToBytes("7f")},
{128, hexToBytes("8000")},
{129, hexToBytes("8001")},
{255, hexToBytes("807f")},
{256, hexToBytes("8100")},
{16383, hexToBytes("fe7f")},
{16384, hexToBytes("ff00")},
{16511, hexToBytes("ff7f")}, // Max 2-byte value
{16512, hexToBytes("808000")},
{16513, hexToBytes("808001")},
{16639, hexToBytes("80807f")},
{32895, hexToBytes("80ff7f")},
{2113663, hexToBytes("ffff7f")}, // Max 3-byte value
{2113664, hexToBytes("80808000")},
{270549119, hexToBytes("ffffff7f")}, // Max 4-byte value
{270549120, hexToBytes("8080808000")},
{2147483647, hexToBytes("86fefefe7f")},
{2147483648, hexToBytes("86fefeff00")},
{4294967295, hexToBytes("8efefefe7f")}, // Max uint32, 5 bytes
// Max uint64, 10 bytes
{18446744073709551615, hexToBytes("80fefefefefefefefe7f")},
}
for _, test := range tests {
// Ensure the function to calculate the serialized size without
// actually serializing the value is calculated properly.
gotSize := serializeSizeVLQ(test.val)
if gotSize != len(test.serialized) {
t.Errorf("serializeSizeVLQ: did not get expected size "+
"for %d - got %d, want %d", test.val, gotSize,
len(test.serialized))
continue
}
// Ensure the value serializes to the expected bytes.
gotBytes := make([]byte, gotSize)
gotBytesWritten := putVLQ(gotBytes, test.val)
if !bytes.Equal(gotBytes, test.serialized) {
t.Errorf("putVLQUnchecked: did not get expected bytes "+
"for %d - got %x, want %x", test.val, gotBytes,
test.serialized)
continue
}
if gotBytesWritten != len(test.serialized) {
t.Errorf("putVLQUnchecked: did not get expected number "+
"of bytes written for %d - got %d, want %d",
test.val, gotBytesWritten, len(test.serialized))
continue
}
// Ensure the serialized bytes deserialize to the expected
// value.
gotVal, gotBytesRead := deserializeVLQ(test.serialized)
if gotVal != test.val {
t.Errorf("deserializeVLQ: did not get expected value "+
"for %x - got %d, want %d", test.serialized,
gotVal, test.val)
continue
}
if gotBytesRead != len(test.serialized) {
t.Errorf("deserializeVLQ: did not get expected number "+
"of bytes read for %d - got %d, want %d",
test.serialized, gotBytesRead,
len(test.serialized))
continue
}
}
}
// TestScriptCompression ensures the domain-specific script compression and
// decompression works as expected.
func TestScriptCompression(t *testing.T) {
t.Parallel()
tests := []struct {
name string
uncompressed []byte
compressed []byte
}{
{
name: "nil",
uncompressed: nil,
compressed: hexToBytes("06"),
},
{
name: "pay-to-pubkey-hash 1",
uncompressed: hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac"),
compressed: hexToBytes("001018853670f9f3b0582c5b9ee8ce93764ac32b93"),
},
{
name: "pay-to-pubkey-hash 2",
uncompressed: hexToBytes("76a914e34cce70c86373273efcc54ce7d2a491bb4a0e8488ac"),
compressed: hexToBytes("00e34cce70c86373273efcc54ce7d2a491bb4a0e84"),
},
{
name: "pay-to-script-hash 1",
uncompressed: hexToBytes("a914da1745e9b549bd0bfa1a569971c77eba30cd5a4b87"),
compressed: hexToBytes("01da1745e9b549bd0bfa1a569971c77eba30cd5a4b"),
},
{
name: "pay-to-script-hash 2",
uncompressed: hexToBytes("a914f815b036d9bbbce5e9f2a00abd1bf3dc91e9551087"),
compressed: hexToBytes("01f815b036d9bbbce5e9f2a00abd1bf3dc91e95510"),
},
{
name: "pay-to-pubkey compressed 0x02",
uncompressed: hexToBytes("2102192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4ac"),
compressed: hexToBytes("02192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
},
{
name: "pay-to-pubkey compressed 0x03",
uncompressed: hexToBytes("2103b0bd634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e65ac"),
compressed: hexToBytes("03b0bd634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e65"),
},
{
name: "pay-to-pubkey uncompressed 0x04 even",
uncompressed: hexToBytes("4104192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b40d45264838c0bd96852662ce6a847b197376830160c6d2eb5e6a4c44d33f453eac"),
compressed: hexToBytes("04192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
},
{
name: "pay-to-pubkey uncompressed 0x04 odd",
uncompressed: hexToBytes("410411db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5cb2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3ac"),
compressed: hexToBytes("0511db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c"),
},
{
name: "pay-to-pubkey invalid pubkey",
uncompressed: hexToBytes("3302aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
compressed: hexToBytes("293302aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
},
{
name: "requires 2 size bytes - data push 200 bytes",
uncompressed: append(hexToBytes("4cc8"), bytes.Repeat([]byte{0x00}, 200)...),
// [0x80, 0x50] = 208 as a variable length quantity
// [0x4c, 0xc8] = OP_PUSHDATA1 200
compressed: append(hexToBytes("80504cc8"), bytes.Repeat([]byte{0x00}, 200)...),
},
}
for _, test := range tests {
// Ensure the function to calculate the serialized size without
// actually serializing the value is calculated properly.
gotSize := compressedScriptSize(test.uncompressed)
if gotSize != len(test.compressed) {
t.Errorf("compressedScriptSize (%s): did not get "+
"expected size - got %d, want %d", test.name,
gotSize, len(test.compressed))
continue
}
// Ensure the script compresses to the expected bytes.
gotCompressed := make([]byte, gotSize)
gotBytesWritten := putCompressedScript(gotCompressed,
test.uncompressed)
if !bytes.Equal(gotCompressed, test.compressed) {
t.Errorf("putCompressedScript (%s): did not get "+
"expected bytes - got %x, want %x", test.name,
gotCompressed, test.compressed)
continue
}
if gotBytesWritten != len(test.compressed) {
t.Errorf("putCompressedScript (%s): did not get "+
"expected number of bytes written - got %d, "+
"want %d", test.name, gotBytesWritten,
len(test.compressed))
continue
}
// Ensure the compressed script size is properly decoded from
// the compressed script.
gotDecodedSize := decodeCompressedScriptSize(test.compressed)
if gotDecodedSize != len(test.compressed) {
t.Errorf("decodeCompressedScriptSize (%s): did not get "+
"expected size - got %d, want %d", test.name,
gotDecodedSize, len(test.compressed))
continue
}
// Ensure the script decompresses to the expected bytes.
gotDecompressed := decompressScript(test.compressed)
if !bytes.Equal(gotDecompressed, test.uncompressed) {
t.Errorf("decompressScript (%s): did not get expected "+
"bytes - got %x, want %x", test.name,
gotDecompressed, test.uncompressed)
continue
}
}
}
// TestScriptCompressionErrors ensures calling various functions related to
// script compression with incorrect data returns the expected results.
func TestScriptCompressionErrors(t *testing.T) {
t.Parallel()
// A nil script must result in a decoded size of 0.
if gotSize := decodeCompressedScriptSize(nil); gotSize != 0 {
t.Fatalf("decodeCompressedScriptSize with nil script did not "+
"return 0 - got %d", gotSize)
}
// A nil script must result in a nil decompressed script.
if gotScript := decompressScript(nil); gotScript != nil {
t.Fatalf("decompressScript with nil script did not return nil "+
"decompressed script - got %x", gotScript)
}
// A compressed script for a pay-to-pubkey (uncompressed) that results
// in an invalid pubkey must result in a nil decompressed script.
compressedScript := hexToBytes("04012d74d0cb94344c9569c2e77901573d8d" +
"7903c3ebec3a957724895dca52c6b4")
if gotScript := decompressScript(compressedScript); gotScript != nil {
t.Fatalf("decompressScript with compressed pay-to-"+
"uncompressed-pubkey that is invalid did not return "+
"nil decompressed script - got %x", gotScript)
}
}
// TestAmountCompression ensures the domain-specific transaction output amount
// compression and decompression works as expected.
func TestAmountCompression(t *testing.T) {
t.Parallel()
tests := []struct {
name string
uncompressed uint64
compressed uint64
}{
{
name: "0 BTC",
uncompressed: 0,
compressed: 0,
},
{
name: "546 Satoshi (current network dust value)",
uncompressed: 546,
compressed: 4911,
},
{
name: "0.00001 BTC (typical transaction fee)",
uncompressed: 1000,
compressed: 4,
},
{
name: "0.0001 BTC (typical transaction fee)",
uncompressed: 10000,
compressed: 5,
},
{
name: "0.12345678 BTC",
uncompressed: 12345678,
compressed: 111111101,
},
{
name: "0.5 BTC",
uncompressed: 50000000,
compressed: 48,
},
{
name: "1 BTC",
uncompressed: 100000000,
compressed: 9,
},
{
name: "5 BTC",
uncompressed: 500000000,
compressed: 49,
},
{
name: "21000000 BTC (max minted coins)",
uncompressed: 2100000000000000,
compressed: 21000000,
},
}
for _, test := range tests {
// Ensure the amount compresses to the expected value.
gotCompressed := compressTxOutAmount(test.uncompressed)
if gotCompressed != test.compressed {
t.Errorf("compressTxOutAmount (%s): did not get "+
"expected value - got %d, want %d", test.name,
gotCompressed, test.compressed)
continue
}
// Ensure the value decompresses to the expected value.
gotDecompressed := decompressTxOutAmount(test.compressed)
if gotDecompressed != test.uncompressed {
t.Errorf("decompressTxOutAmount (%s): did not get "+
"expected value - got %d, want %d", test.name,
gotDecompressed, test.uncompressed)
continue
}
}
}
// TestCompressedTxOut ensures the transaction output serialization and
// deserialization works as expected.
func TestCompressedTxOut(t *testing.T) {
t.Parallel()
tests := []struct {
name string
amount uint64
scriptPubKey []byte
compressed []byte
}{
{
name: "pay-to-pubkey-hash dust",
amount: 546,
scriptPubKey: hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac"),
compressed: hexToBytes("a52f001018853670f9f3b0582c5b9ee8ce93764ac32b93"),
},
{
name: "pay-to-pubkey uncompressed 1 BTC",
amount: 100000000,
scriptPubKey: hexToBytes("4104192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b40d45264838c0bd96852662ce6a847b197376830160c6d2eb5e6a4c44d33f453eac"),
compressed: hexToBytes("0904192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
},
}
for _, test := range tests {
// Ensure the function to calculate the serialized size without
// actually serializing the txout is calculated properly.
gotSize := compressedTxOutSize(test.amount, test.scriptPubKey)
if gotSize != len(test.compressed) {
t.Errorf("compressedTxOutSize (%s): did not get "+
"expected size - got %d, want %d", test.name,
gotSize, len(test.compressed))
continue
}
// Ensure the txout compresses to the expected value.
gotCompressed := make([]byte, gotSize)
gotBytesWritten := putCompressedTxOut(gotCompressed,
test.amount, test.scriptPubKey)
if !bytes.Equal(gotCompressed, test.compressed) {
t.Errorf("compressTxOut (%s): did not get expected "+
"bytes - got %x, want %x", test.name,
gotCompressed, test.compressed)
continue
}
if gotBytesWritten != len(test.compressed) {
t.Errorf("compressTxOut (%s): did not get expected "+
"number of bytes written - got %d, want %d",
test.name, gotBytesWritten,
len(test.compressed))
continue
}
// Ensure the serialized bytes are decoded back to the expected
// uncompressed values.
gotAmount, gotScript, gotBytesRead, err := decodeCompressedTxOut(
test.compressed)
if err != nil {
t.Errorf("decodeCompressedTxOut (%s): unexpected "+
"error: %v", test.name, err)
continue
}
if gotAmount != test.amount {
t.Errorf("decodeCompressedTxOut (%s): did not get "+
"expected amount - got %d, want %d",
test.name, gotAmount, test.amount)
continue
}
if !bytes.Equal(gotScript, test.scriptPubKey) {
t.Errorf("decodeCompressedTxOut (%s): did not get "+
"expected script - got %x, want %x",
test.name, gotScript, test.scriptPubKey)
continue
}
if gotBytesRead != len(test.compressed) {
t.Errorf("decodeCompressedTxOut (%s): did not get "+
"expected number of bytes read - got %d, want %d",
test.name, gotBytesRead, len(test.compressed))
continue
}
}
}
// TestTxOutCompressionErrors ensures calling various functions related to
// txout compression with incorrect data returns the expected results.
func TestTxOutCompressionErrors(t *testing.T) {
t.Parallel()
// A compressed txout with missing compressed script must error.
compressedTxOut := hexToBytes("00")
_, _, _, err := decodeCompressedTxOut(compressedTxOut)
if !isDeserializeErr(err) {
t.Fatalf("decodeCompressedTxOut with missing compressed script "+
"did not return expected error type - got %T, want "+
"errDeserialize", err)
}
// A compressed txout with short compressed script must error.
compressedTxOut = hexToBytes("0010")
_, _, _, err = decodeCompressedTxOut(compressedTxOut)
if !isDeserializeErr(err) {
t.Fatalf("decodeCompressedTxOut with short compressed script "+
"did not return expected error type - got %T, want "+
"errDeserialize", err)
}
}

File diff suppressed because it is too large


@@ -1,854 +0,0 @@
// Copyright (c) 2015-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"bytes"
"encoding/binary"
"encoding/json"
"fmt"
"github.com/pkg/errors"
"io"
"sync"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/binaryserializer"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/wire"
)
const (
// blockHdrSize is the size of a block header. This is simply the
// constant from wire and is only provided here for convenience since
// wire.MaxBlockHeaderPayload is quite long.
blockHdrSize = wire.MaxBlockHeaderPayload
// latestUTXOSetBucketVersion is the current version of the UTXO set
// bucket that is used to track all unspent outputs.
latestUTXOSetBucketVersion = 1
)
var (
// blockIndexBucketName is the name of the db bucket used to house the
// block headers and contextual information.
blockIndexBucketName = []byte("blockheaderidx")
// dagStateKeyName is the name of the db key used to store the DAG
// tip hashes.
dagStateKeyName = []byte("dagstate")
// utxoSetVersionKeyName is the name of the db key used to store the
// version of the utxo set currently in the database.
utxoSetVersionKeyName = []byte("utxosetversion")
// utxoSetBucketName is the name of the db bucket used to house the
// unspent transaction output set.
utxoSetBucketName = []byte("utxoset")
// utxoDiffsBucketName is the name of the db bucket used to house the
// diffs and diff children of blocks.
utxoDiffsBucketName = []byte("utxodiffs")
// subnetworksBucketName is the name of the db bucket used to store the
// subnetwork registry.
subnetworksBucketName = []byte("subnetworks")
// localSubnetworkKeyName is the name of the db key used to store the
// node's local subnetwork ID.
localSubnetworkKeyName = []byte("localsubnetworkidkey")
// byteOrder is the preferred byte order used for serializing numeric
// fields for storage in the database.
byteOrder = binary.LittleEndian
)
// errNotInDAG signifies that a block hash or height that is not in the
// DAG was requested.
type errNotInDAG string
// Error implements the error interface.
func (e errNotInDAG) Error() string {
return string(e)
}
// isNotInDAGErr returns whether or not the passed error is an
// errNotInDAG error.
func isNotInDAGErr(err error) bool {
_, ok := err.(errNotInDAG)
return ok
}
// errDeserialize signifies that a problem was encountered when deserializing
// data.
type errDeserialize string
// Error implements the error interface.
func (e errDeserialize) Error() string {
return string(e)
}
// isDeserializeErr returns whether or not the passed error is an errDeserialize
// error.
func isDeserializeErr(err error) bool {
_, ok := err.(errDeserialize)
return ok
}
// dbPutVersion uses an existing database transaction to update the provided
// key in the metadata bucket to the given version. It is primarily used to
// track versions on entities such as buckets.
func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error {
var serialized [4]byte
byteOrder.PutUint32(serialized[:], version)
return dbTx.Metadata().Put(key, serialized[:])
}
// -----------------------------------------------------------------------------
// The unspent transaction output (UTXO) set consists of an entry for each
// unspent output using a format that is optimized to reduce space using domain
// specific compression algorithms. This format is a slightly modified version
// of the format used in Bitcoin Core.
//
// Each entry is keyed by an outpoint as specified below. It is important to
// note that the key encoding uses a VLQ, which employs an MSB encoding so
// iteration of UTXOs when doing byte-wise comparisons will produce them in
// order.
//
// The serialized key format is:
// <hash><output index>
//
// Field Type Size
// hash daghash.Hash daghash.HashSize
// output index VLQ variable
//
// The serialized value format is:
//
// <header code><compressed txout>
//
// Field Type Size
// header code VLQ variable
// compressed txout
// compressed amount VLQ variable
// compressed script []byte variable
//
// The serialized header code format is:
// bit 0 - containing transaction is a coinbase
// bits 1-x - height of the block that contains the unspent txout
//
// Example 1:
// From tx in main blockchain:
// Blk 1, b7c3332bc138e2c9429818f5fed500bcc1746544218772389054dc8047d7cd3f:0
//
// 03320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52
// <><------------------------------------------------------------------>
// | |
// header code compressed txout
//
// - header code: 0x03 (coinbase, height 1)
// - compressed txout:
// - 0x32: VLQ-encoded compressed amount for 5000000000 (50 BTC)
// - 0x04: special script type pay-to-pubkey
// - 0x96...52: x-coordinate of the pubkey
//
// Example 2:
// From tx in main blockchain:
// Blk 113931, 4a16969aa4764dd7507fc1de7f0baa4850a246de90c45e59a3207f9a26b5036f:2
//
// 8cf316800900b8025be1b3efc63b0ad48e7f9f10e87544528d58
// <----><------------------------------------------>
// | |
// header code compressed txout
//
// - header code: 0x8cf316 (not coinbase, height 113931)
// - compressed txout:
// - 0x8009: VLQ-encoded compressed amount for 15000000 (0.15 BTC)
// - 0x00: special script type pay-to-pubkey-hash
// - 0xb8...58: pubkey hash
//
// Example 3:
// From tx in main blockchain:
// Blk 338156, 1b02d1c8cfef60a189017b9a420c682cf4a0028175f2f563209e4ff61c8c3620:22
//
// a8a2588ba5b9e763011dd46a006572d820e448e12d2bbb38640bc718e6
// <----><-------------------------------------------------->
// | |
// header code compressed txout
//
// - header code: 0xa8a258 (not coinbase, height 338156)
// - compressed txout:
// - 0x8ba5b9e763: VLQ-encoded compressed amount for 366875659 (3.66875659 BTC)
// - 0x01: special script type pay-to-script-hash
// - 0x1d...e6: script hash
// -----------------------------------------------------------------------------
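// As a rough illustration of the header code layout documented above, the
// hypothetical helper below (not part of this file) splits a decoded VLQ
// header code into its two components:
//
//	func splitHeaderCode(code uint64) (isCoinbase bool, height uint64) {
//		isCoinbase = code&0x01 != 0 // bit 0: containing tx is a coinbase
//		height = code >> 1          // bits 1-x: containing block height
//		return isCoinbase, height
//	}
//
// Applied to Example 1, the header code 0x03 yields isCoinbase = true and
// height = 1.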
// maxUint32VLQSerializeSize is the maximum number of bytes a max uint32 takes
// to serialize as a VLQ.
var maxUint32VLQSerializeSize = serializeSizeVLQ(1<<32 - 1)
// outpointKeyPool defines a concurrent safe free list of byte slices used to
// provide temporary buffers for outpoint database keys.
var outpointKeyPool = sync.Pool{
New: func() interface{} {
b := make([]byte, daghash.HashSize+maxUint32VLQSerializeSize)
return &b // Pointer to slice to avoid boxing alloc.
},
}
// outpointKey returns a key suitable for use as a database key in the UTXO set
// while making use of a free list. A new buffer is allocated if there are not
// already any available on the free list. The returned byte slice should be
// returned to the free list by using the recycleOutpointKey function when the
// caller is done with it _unless_ the slice will need to live for longer than
// the caller can calculate such as when used to write to the database.
func outpointKey(outpoint wire.Outpoint) *[]byte {
// A VLQ employs an MSB encoding, so it is useful not only for reducing
// the amount of storage space, but also because iterating UTXOs with
// byte-wise comparisons will produce them in order.
key := outpointKeyPool.Get().(*[]byte)
idx := uint64(outpoint.Index)
*key = (*key)[:daghash.HashSize+serializeSizeVLQ(idx)]
copy(*key, outpoint.TxID[:])
putVLQ((*key)[daghash.HashSize:], idx)
return key
}
// recycleOutpointKey puts the provided byte slice, which should have been
// obtained via the outpointKey function, back on the free list.
func recycleOutpointKey(key *[]byte) {
outpointKeyPool.Put(key)
}
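// A typical read-only lookup pairs each outpointKey call with a
// recycleOutpointKey call once the key is no longer referenced (illustrative
// sketch; keys passed to Put must not be recycled, as noted in dbPutUTXODiff
// below):
//
//	key := outpointKey(outpoint)
//	serialized := utxoBucket.Get(*key)
//	recycleOutpointKey(key)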
// dbPutUTXODiff uses an existing database transaction to apply the provided
// UTXO diff to the UTXO set in the database: outpoints in toRemove are
// deleted and entries in toAdd are serialized and stored.
func dbPutUTXODiff(dbTx database.Tx, diff *UTXODiff) error {
utxoBucket := dbTx.Metadata().Bucket(utxoSetBucketName)
for outpoint := range diff.toRemove {
key := outpointKey(outpoint)
err := utxoBucket.Delete(*key)
recycleOutpointKey(key)
if err != nil {
return err
}
}
for outpoint, entry := range diff.toAdd {
// Serialize and store the UTXO entry.
serialized := serializeUTXOEntry(entry)
key := outpointKey(outpoint)
err := utxoBucket.Put(*key, serialized)
// NOTE: The key is intentionally not recycled here since the
// database interface contract prohibits modifications. It will
// be garbage collected normally when the database is done with
// it.
if err != nil {
return err
}
}
return nil
}
type dagState struct {
TipHashes []*daghash.Hash
LastFinalityPoint *daghash.Hash
}
// serializeDAGState returns the serialization of the DAG state.
// This is data to be stored in the DAG state bucket.
func serializeDAGState(state *dagState) ([]byte, error) {
return json.Marshal(state)
}
// deserializeDAGState deserializes the passed serialized DAG state.
// This is data stored in the DAG state bucket and is updated after
// every block is connected to the DAG.
func deserializeDAGState(serializedData []byte) (*dagState, error) {
var state *dagState
err := json.Unmarshal(serializedData, &state)
if err != nil {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: "corrupt DAG state",
}
}
return state, nil
}
// dbPutDAGState uses an existing database transaction to store the latest
// DAG state, including the tip hashes and the last finality point.
func dbPutDAGState(dbTx database.Tx, state *dagState) error {
serializedData, err := serializeDAGState(state)
if err != nil {
return err
}
return dbTx.Metadata().Put(dagStateKeyName, serializedData)
}
// createDAGState initializes both the database and the DAG state to the
// genesis block. This includes creating the necessary buckets, so it
// must only be called on an uninitialized database.
func (dag *BlockDAG) createDAGState() error {
// Create the initial database DAG state, including creating the
// necessary index buckets and inserting the genesis block.
err := dag.db.Update(func(dbTx database.Tx) error {
meta := dbTx.Metadata()
// Create the bucket that houses the block index data.
_, err := meta.CreateBucket(blockIndexBucketName)
if err != nil {
return err
}
// Create the buckets that house the utxo set, the utxo diffs, and their
// version.
_, err = meta.CreateBucket(utxoSetBucketName)
if err != nil {
return err
}
_, err = meta.CreateBucket(utxoDiffsBucketName)
if err != nil {
return err
}
err = dbPutVersion(dbTx, utxoSetVersionKeyName,
latestUTXOSetBucketVersion)
if err != nil {
return err
}
// Create the bucket that houses the registered subnetworks.
_, err = meta.CreateBucket(subnetworksBucketName)
if err != nil {
return err
}
if err := dbPutLocalSubnetworkID(dbTx, dag.subnetworkID); err != nil {
return err
}
if _, err := meta.CreateBucketIfNotExists(idByHashIndexBucketName); err != nil {
return err
}
if _, err := meta.CreateBucketIfNotExists(hashByIDIndexBucketName); err != nil {
return err
}
return nil
})
if err != nil {
return err
}
return nil
}
// removeDAGState deletes all of the DAG state buckets and keys from the
// database, effectively undoing createDAGState.
func (dag *BlockDAG) removeDAGState() error {
err := dag.db.Update(func(dbTx database.Tx) error {
meta := dbTx.Metadata()
err := meta.DeleteBucket(blockIndexBucketName)
if err != nil {
return err
}
err = meta.DeleteBucket(utxoSetBucketName)
if err != nil {
return err
}
err = meta.DeleteBucket(utxoDiffsBucketName)
if err != nil {
return err
}
err = dbTx.Metadata().Delete(utxoSetVersionKeyName)
if err != nil {
return err
}
err = meta.DeleteBucket(subnetworksBucketName)
if err != nil {
return err
}
err = dbTx.Metadata().Delete(localSubnetworkKeyName)
if err != nil {
return err
}
return nil
})
if err != nil {
return err
}
return nil
}
// dbPutLocalSubnetworkID stores the node's local subnetwork ID in the
// database metadata. A nil subnetwork ID is stored as an empty value.
func dbPutLocalSubnetworkID(dbTx database.Tx, subnetworkID *subnetworkid.SubnetworkID) error {
if subnetworkID == nil {
return dbTx.Metadata().Put(localSubnetworkKeyName, []byte{})
}
return dbTx.Metadata().Put(localSubnetworkKeyName, subnetworkID[:])
}
// initDAGState attempts to load and initialize the DAG state from the
// database. When the db does not yet contain any DAG state, both it and the
// DAG state are initialized to the genesis block.
func (dag *BlockDAG) initDAGState() error {
// Determine the state of the DAG database. We may need to initialize
// everything from scratch or upgrade certain buckets.
var initialized bool
err := dag.db.View(func(dbTx database.Tx) error {
initialized = dbTx.Metadata().Get(dagStateKeyName) != nil
if initialized {
var localSubnetworkID *subnetworkid.SubnetworkID
localSubnetworkIDBytes := dbTx.Metadata().Get(localSubnetworkKeyName)
if len(localSubnetworkIDBytes) != 0 {
localSubnetworkID = &subnetworkid.SubnetworkID{}
localSubnetworkID.SetBytes(localSubnetworkIDBytes)
}
if !localSubnetworkID.IsEqual(dag.subnetworkID) {
return errors.Errorf("Cannot start btcd with subnetwork ID %s because"+
" its database is already built with subnetwork ID %s. If you"+
" want to switch to a new database, please reset the"+
" database by starting btcd with --reset-db flag", dag.subnetworkID, localSubnetworkID)
}
}
return nil
})
if err != nil {
return err
}
if !initialized {
// At this point the database has not already been initialized, so
// initialize both it and the chain state to the genesis block.
return dag.createDAGState()
}
// Attempt to load the DAG state from the database.
return dag.db.View(func(dbTx database.Tx) error {
// Fetch the stored DAG tipHashes from the database metadata.
// When it doesn't exist, it means the database hasn't been
// initialized for use with the DAG yet, so break out now to allow
// that to happen under a writable database transaction.
serializedData := dbTx.Metadata().Get(dagStateKeyName)
log.Tracef("Serialized DAG tip hashes: %x", serializedData)
state, err := deserializeDAGState(serializedData)
if err != nil {
return err
}
// Load all of the headers from the data for the known DAG
// and construct the block index accordingly. Since the
// number of nodes is already known, perform a single alloc
// for them versus a whole bunch of little ones to reduce
// pressure on the GC.
log.Infof("Loading block index...")
blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
var i int32
var lastNode *blockNode
var unprocessedBlockNodes []*blockNode
cursor := blockIndexBucket.Cursor()
for ok := cursor.First(); ok; ok = cursor.Next() {
node, err := dag.deserializeBlockNode(cursor.Value())
if err != nil {
return err
}
// Check to see if this node had been stored in the block DB
// but not yet accepted. If so, add it to a slice to be processed later.
if node.status == statusDataStored {
unprocessedBlockNodes = append(unprocessedBlockNodes, node)
continue
}
if lastNode == nil {
if !node.hash.IsEqual(dag.dagParams.GenesisHash) {
return AssertError(fmt.Sprintf("initDAGState: Expected "+
"first entry in block index to be genesis block, "+
"found %s", node.hash))
}
} else {
if len(node.parents) == 0 {
return AssertError(fmt.Sprintf("initDAGState: Could "+
"not find any parent for block %s", node.hash))
}
}
// Add the node to its parents children, connect it,
// and add it to the block index.
node.updateParentsChildren()
dag.index.addNode(node)
if node.status.KnownValid() {
dag.blockCount++
}
lastNode = node
i++
}
// Load all of the known UTXO entries and construct the full
// UTXO set accordingly. Since the number of entries is already
// known, perform a single alloc for them versus a whole bunch
// of little ones to reduce pressure on the GC.
log.Infof("Loading UTXO set...")
utxoEntryBucket := dbTx.Metadata().Bucket(utxoSetBucketName)
// Determine how many UTXO entries will be loaded into the index so we can
// allocate the right amount.
var utxoEntryCount int32
cursor = utxoEntryBucket.Cursor()
for ok := cursor.First(); ok; ok = cursor.Next() {
utxoEntryCount++
}
fullUTXOCollection := make(utxoCollection, utxoEntryCount)
for ok := cursor.First(); ok; ok = cursor.Next() {
// Deserialize the outpoint
outpoint, err := deserializeOutpoint(cursor.Key())
if err != nil {
// Ensure any deserialization errors are returned as database
// corruption errors.
if isDeserializeErr(err) {
return database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("corrupt outpoint: %s", err),
}
}
return err
}
// Deserialize the utxo entry
entry, err := deserializeUTXOEntry(cursor.Value())
if err != nil {
// Ensure any deserialization errors are returned as database
// corruption errors.
if isDeserializeErr(err) {
return database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("corrupt utxo entry: %s", err),
}
}
return err
}
fullUTXOCollection[*outpoint] = entry
}
// Apply the loaded utxoCollection to the virtual block.
dag.virtual.utxoSet, err = newFullUTXOSetFromUTXOCollection(fullUTXOCollection)
if err != nil {
return AssertError(fmt.Sprintf("Error loading UTXOSet: %s", err))
}
// Apply the stored tips to the virtual block.
tips := newSet()
for _, tipHash := range state.TipHashes {
tip := dag.index.LookupNode(tipHash)
if tip == nil {
return AssertError(fmt.Sprintf("initDAGState: cannot find "+
"DAG tip %s in block index", state.TipHashes))
}
tips.add(tip)
}
dag.virtual.SetTips(tips)
// Set the last finality point
dag.lastFinalityPoint = dag.index.LookupNode(state.LastFinalityPoint)
dag.finalizeNodesBelowFinalityPoint(false)
// Go over any unprocessed blockNodes and process them now.
for _, node := range unprocessedBlockNodes {
// Check to see if the block exists in the block DB. If it
// doesn't, the database has certainly been corrupted.
blockExists, err := dbTx.HasBlock(node.hash)
if err != nil {
return AssertError(fmt.Sprintf("initDAGState: HasBlock "+
"for block %s failed: %s", node.hash, err))
}
if !blockExists {
return AssertError(fmt.Sprintf("initDAGState: block %s "+
"exists in block index but not in block db", node.hash))
}
// Attempt to accept the block.
block, err := dbFetchBlockByNode(dbTx, node)
if err != nil {
return AssertError(fmt.Sprintf("initDAGState: failed to fetch "+
"block %s from the database: %s", node.hash, err))
}
isOrphan, delay, err := dag.ProcessBlock(block, BFWasStored)
if err != nil {
log.Warnf("Block %s, which was not previously processed, "+
"failed to be accepted to the DAG: %s", node.hash, err)
continue
}
// If the block is an orphan or is delayed then it couldn't have
// possibly been written to the block index in the first place.
if isOrphan {
return AssertError(fmt.Sprintf("Block %s, which was not "+
"previously processed, turned out to be an orphan, which is "+
"impossible.", node.hash))
}
if delay != 0 {
return AssertError(fmt.Sprintf("Block %s, which was not "+
"previously processed, turned out to be delayed, which is "+
"impossible.", node.hash))
}
}
return nil
})
}
// deserializeBlockNode parses a value in the block index bucket and returns a block node.
func (dag *BlockDAG) deserializeBlockNode(blockRow []byte) (*blockNode, error) {
buffer := bytes.NewReader(blockRow)
var header wire.BlockHeader
err := header.Deserialize(buffer)
if err != nil {
return nil, err
}
node := &blockNode{
hash: header.BlockHash(),
version: header.Version,
bits: header.Bits,
nonce: header.Nonce,
timestamp: header.Timestamp.Unix(),
hashMerkleRoot: header.HashMerkleRoot,
acceptedIDMerkleRoot: header.AcceptedIDMerkleRoot,
utxoCommitment: header.UTXOCommitment,
}
node.children = newSet()
node.parents = newSet()
for _, hash := range header.ParentHashes {
parent := dag.index.LookupNode(hash)
if parent == nil {
return nil, AssertError(fmt.Sprintf("deserializeBlockNode: Could "+
"not find parent %s for block %s", hash, header.BlockHash()))
}
node.parents.add(parent)
}
statusByte, err := buffer.ReadByte()
if err != nil {
return nil, err
}
node.status = blockStatus(statusByte)
selectedParentHash := &daghash.Hash{}
if _, err := io.ReadFull(buffer, selectedParentHash[:]); err != nil {
return nil, err
}
// Because genesis doesn't have a selected parent, it's serialized as the zero hash
if !selectedParentHash.IsEqual(&daghash.ZeroHash) {
node.selectedParent = dag.index.LookupNode(selectedParentHash)
}
node.blueScore, err = binaryserializer.Uint64(buffer, byteOrder)
if err != nil {
return nil, err
}
bluesCount, err := wire.ReadVarInt(buffer)
if err != nil {
return nil, err
}
node.blues = make([]*blockNode, bluesCount)
for i := uint64(0); i < bluesCount; i++ {
hash := &daghash.Hash{}
if _, err := io.ReadFull(buffer, hash[:]); err != nil {
return nil, err
}
node.blues[i] = dag.index.LookupNode(hash)
}
node.chainHeight = calculateChainHeight(node)
return node, nil
}
// dbFetchBlockByNode uses an existing database transaction to retrieve the
// raw block for the provided node, deserialize it, and return a util.Block
// of it.
func dbFetchBlockByNode(dbTx database.Tx, node *blockNode) (*util.Block, error) {
// Load the raw block bytes from the database.
blockBytes, err := dbTx.FetchBlock(node.hash)
if err != nil {
return nil, err
}
// Create the encapsulated block.
block, err := util.NewBlockFromBytes(blockBytes)
if err != nil {
return nil, err
}
return block, nil
}
// dbStoreBlockNode stores the block node data into the block
// index bucket. This overwrites the current entry if one exists.
func dbStoreBlockNode(dbTx database.Tx, node *blockNode) error {
// Serialize block data to be stored.
w := bytes.NewBuffer(make([]byte, 0, blockHdrSize+1))
header := node.Header()
err := header.Serialize(w)
if err != nil {
return err
}
err = w.WriteByte(byte(node.status))
if err != nil {
return err
}
// Because genesis doesn't have a selected parent, it's serialized as the zero hash
selectedParentHash := &daghash.ZeroHash
if node.selectedParent != nil {
selectedParentHash = node.selectedParent.hash
}
_, err = w.Write(selectedParentHash[:])
if err != nil {
return err
}
err = binaryserializer.PutUint64(w, byteOrder, node.blueScore)
if err != nil {
return err
}
err = wire.WriteVarInt(w, uint64(len(node.blues)))
if err != nil {
return err
}
for _, blue := range node.blues {
_, err = w.Write(blue.hash[:])
if err != nil {
return err
}
}
value := w.Bytes()
// Write block header data to block index bucket.
blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
key := BlockIndexKey(node.hash, node.blueScore)
return blockIndexBucket.Put(key, value)
}
// dbStoreBlock stores the provided block in the database if it is not already
// there. The full block data is written to ffldb.
func dbStoreBlock(dbTx database.Tx, block *util.Block) error {
hasBlock, err := dbTx.HasBlock(block.Hash())
if err != nil {
return err
}
if hasBlock {
return nil
}
return dbTx.StoreBlock(block)
}
// BlockIndexKey generates the binary key for an entry in the block index
// bucket. The key is composed of the block blue score encoded as a big-endian
// 64-bit unsigned int followed by the 32 byte block hash.
// The blue score component is important for iteration order.
func BlockIndexKey(blockHash *daghash.Hash, blueScore uint64) []byte {
indexKey := make([]byte, daghash.HashSize+8)
binary.BigEndian.PutUint64(indexKey[0:8], blueScore)
copy(indexKey[8:daghash.HashSize+8], blockHash[:])
return indexKey
}
// blockHashFromBlockIndexKey extracts the block hash from a block index key.
func blockHashFromBlockIndexKey(blockIndexKey []byte) (*daghash.Hash, error) {
return daghash.NewHash(blockIndexKey[8 : daghash.HashSize+8])
}
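// Because the blue score prefix is big-endian, byte-wise key ordering equals
// ascending blue-score ordering, which is what lets BlockHashesFrom below
// iterate the bucket with a plain cursor. Illustrative sketch:
//
//	keyA := BlockIndexKey(hashA, 5)  // 0x0000000000000005 || hashA
//	keyB := BlockIndexKey(hashB, 17) // 0x0000000000000011 || hashB
//	// bytes.Compare(keyA, keyB) < 0, so a cursor yields keyA before keyB.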
// BlockByHash returns the block from the DAG with the given hash.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) BlockByHash(hash *daghash.Hash) (*util.Block, error) {
// Lookup the block hash in block index and ensure it is in the DAG
node := dag.index.LookupNode(hash)
if node == nil {
str := fmt.Sprintf("block %s is not in the main chain", hash)
return nil, errNotInDAG(str)
}
// Load the block from the database and return it.
var block *util.Block
err := dag.db.View(func(dbTx database.Tx) error {
var err error
block, err = dbFetchBlockByNode(dbTx, node)
return err
})
return block, err
}
// BlockHashesFrom returns a slice of block hashes starting from startHash
// ordered by blueScore. If startHash is nil then the genesis block is used.
//
// This method MUST be called with the DAG lock held
func (dag *BlockDAG) BlockHashesFrom(startHash *daghash.Hash, limit int) ([]*daghash.Hash, error) {
blockHashes := make([]*daghash.Hash, 0, limit)
if startHash == nil {
startHash = dag.genesis.hash
// If we're starting from the beginning we should include the
// genesis hash in the result
blockHashes = append(blockHashes, dag.genesis.hash)
}
if !dag.BlockExists(startHash) {
return nil, errors.Errorf("block %s not found", startHash)
}
blueScore, err := dag.BlueScoreByBlockHash(startHash)
if err != nil {
return nil, err
}
err = dag.index.db.View(func(dbTx database.Tx) error {
blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
startKey := BlockIndexKey(startHash, blueScore)
cursor := blockIndexBucket.Cursor()
cursor.Seek(startKey)
for ok := cursor.Next(); ok; ok = cursor.Next() {
key := cursor.Key()
blockHash, err := blockHashFromBlockIndexKey(key)
if err != nil {
return err
}
blockHashes = append(blockHashes, blockHash)
if len(blockHashes) == limit {
break
}
}
return nil
})
if err != nil {
return nil, err
}
return blockHashes, nil
}


@@ -1,81 +0,0 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
/*
Package blockdag implements bitcoin block handling and chain selection rules.
The bitcoin block handling and chain selection rules are an integral, and quite
likely the most important, part of bitcoin. Unfortunately, at the time of
this writing, these rules are also largely undocumented and had to be
ascertained from the bitcoind source code. At its core, bitcoin is a
distributed consensus of which blocks are valid and which ones will comprise the
main block chain (public ledger) that ultimately determines accepted
transactions, so it is extremely important that fully validating nodes agree on
all rules.
At a high level, this package provides support for inserting new blocks into
the block chain according to the aforementioned rules. It includes
functionality such as rejecting duplicate blocks, ensuring blocks and
transactions follow all rules, orphan handling, and best chain selection along
with reorganization.
Since this package does not deal with other bitcoin specifics such as network
communication or wallets, it provides a notification system which gives the
caller a high level of flexibility in how they want to react to certain events
such as orphan blocks which need their parents requested and newly connected
main chain blocks which might result in wallet updates.
Bitcoin Chain Processing Overview
Before a block is allowed into the block chain, it must go through an intensive
series of validation rules. The following list serves as a general outline of
those rules to provide some intuition into what is going on under the hood, but
is by no means exhaustive:
- Reject duplicate blocks
- Perform a series of sanity checks on the block and its transactions such as
verifying proof of work, timestamps, number and character of transactions,
transaction amounts, script complexity, and merkle root calculations
- Compare the block against predetermined checkpoints for expected timestamps
and difficulty based on elapsed time since the checkpoint
- Save the most recent orphan blocks for a limited time in case their parent
blocks become available
- Stop processing if the block is an orphan as the rest of the processing
depends on the block's position within the block chain
- Perform a series of more thorough checks that depend on the block's position
within the block chain such as verifying block difficulties adhere to
difficulty retarget rules, timestamps are after the median of the last
several blocks, all transactions are finalized, checkpoint blocks match, and
block versions are in line with the previous blocks
- Determine how the block fits into the chain and perform different actions
accordingly in order to ensure any side chains which have higher difficulty
than the main chain become the new main chain
- When a block is being connected to the main chain (either through
reorganization of a side chain to the main chain or just extending the
main chain), perform further checks on the block's transactions such as
verifying transaction duplicates, script complexity for the combination of
connected scripts, coinbase maturity, double spends, and connected
transaction values
- Run the transaction scripts to verify the spender is allowed to spend the
coins
- Insert the block into the block database
Errors
Errors returned by this package are either the raw errors provided by underlying
calls or of type blockdag.RuleError. This allows the caller to differentiate
between unexpected errors, such as database errors, versus errors due to rule
violations through type assertions. In addition, callers can programmatically
determine the specific rule violation by examining the ErrorCode field of the
type asserted blockdag.RuleError.
Bitcoin Improvement Proposals
This package includes spec changes outlined by the following BIPs:
BIP0016 (https://en.bitcoin.it/wiki/BIP_0016)
BIP0030 (https://en.bitcoin.it/wiki/BIP_0030)
BIP0034 (https://en.bitcoin.it/wiki/BIP_0034)
*/
package blockdag


@@ -1,318 +0,0 @@
// Copyright (c) 2016 The Decred developers
// Copyright (c) 2016-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag_test
import (
"bytes"
"github.com/pkg/errors"
"os"
"path/filepath"
"testing"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/blockdag/fullblocktests"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
_ "github.com/kaspanet/kaspad/database/ffldb"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
const (
// testDbType is the database backend type to use for the tests.
testDbType = "ffldb"
// testDbRoot is the root directory used to create all test databases.
testDbRoot = "testdbs"
// blockDataNet is the expected network in the test block data.
blockDataNet = wire.MainNet
)
// fileExists returns whether or not the named file or directory exists.
func fileExists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}
// isSupportedDbType returns whether or not the passed database type is
// currently supported.
func isSupportedDbType(dbType string) bool {
supportedDrivers := database.SupportedDrivers()
for _, driver := range supportedDrivers {
if dbType == driver {
return true
}
}
return false
}
// DAGSetup is used to create a new db and DAG instance with the genesis
// block already inserted. In addition to the new DAG instance, it returns
// a teardown function the caller should invoke when done testing to clean up.
func DAGSetup(dbName string, params *dagconfig.Params) (*blockdag.BlockDAG, func(), error) {
if !isSupportedDbType(testDbType) {
return nil, nil, errors.Errorf("unsupported db type %v", testDbType)
}
// Handle memory database specially since it doesn't need the disk
// specific handling.
var db database.DB
var teardown func()
if testDbType == "memdb" {
ndb, err := database.Create(testDbType)
if err != nil {
return nil, nil, errors.Errorf("error creating db: %v", err)
}
db = ndb
// Setup a teardown function for cleaning up. This function is
// returned to the caller to be invoked when it is done testing.
teardown = func() {
db.Close()
}
} else {
// Create the root directory for test databases.
if !fileExists(testDbRoot) {
if err := os.MkdirAll(testDbRoot, 0700); err != nil {
err := errors.Errorf("unable to create test db "+
"root: %v", err)
return nil, nil, err
}
}
// Create a new database to store the accepted blocks into.
dbPath := filepath.Join(testDbRoot, dbName)
_ = os.RemoveAll(dbPath)
ndb, err := database.Create(testDbType, dbPath, blockDataNet)
if err != nil {
return nil, nil, errors.Errorf("error creating db: %v", err)
}
db = ndb
// Setup a teardown function for cleaning up. This function is
// returned to the caller to be invoked when it is done testing.
teardown = func() {
db.Close()
os.RemoveAll(dbPath)
os.RemoveAll(testDbRoot)
}
}
// Copy the chain params to ensure any modifications the tests do to
// the DAG parameters do not affect the global instance.
paramsCopy := *params
// Create the DAG instance.
chain, err := blockdag.New(&blockdag.Config{
DB: db,
DAGParams: &paramsCopy,
Checkpoints: nil,
TimeSource: blockdag.NewMedianTime(),
SigCache: txscript.NewSigCache(1000),
})
if err != nil {
teardown()
err := errors.Errorf("failed to create chain instance: %v", err)
return nil, nil, err
}
return chain, teardown, nil
}
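// Typical usage in a test (illustrative sketch):
//
//	dag, teardownFunc, err := DAGSetup("exampletest", &dagconfig.RegressionNetParams)
//	if err != nil {
//		t.Fatalf("failed to setup DAG instance: %v", err)
//	}
//	defer teardownFunc()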
// TestFullBlocks ensures all tests generated by the fullblocktests package
// have the expected result when processed via ProcessBlock.
func TestFullBlocks(t *testing.T) {
// TODO: (Stas) This test was disabled until we have implemented Phantom
// Ticket: https://daglabs.atlassian.net/browse/DEV-60
t.SkipNow()
tests, err := fullblocktests.Generate(false)
if err != nil {
t.Fatalf("failed to generate tests: %v", err)
}
// Create a new database and chain instance to run tests against.
dag, teardownFunc, err := DAGSetup("fullblocktest",
&dagconfig.RegressionNetParams)
if err != nil {
t.Errorf("Failed to setup chain instance: %v", err)
return
}
defer teardownFunc()
// testAcceptedBlock attempts to process the block in the provided test
// instance and ensures that it was accepted according to the flags
// specified in the test.
testAcceptedBlock := func(item fullblocktests.AcceptedBlock) {
blockHeight := item.Height
block := util.NewBlock(item.Block)
block.SetChainHeight(blockHeight)
t.Logf("Testing block %s (hash %s, height %d)",
item.Name, block.Hash(), blockHeight)
isOrphan, delay, err := dag.ProcessBlock(block,
blockdag.BFNone)
if err != nil {
t.Fatalf("block %q (hash %s, height %d) should "+
"have been accepted: %v", item.Name,
block.Hash(), blockHeight, err)
}
if delay != item.Delay {
t.Fatalf("block %q (hash %s, height %d) unexpected "+
"delay -- got %v, want %v", item.Name,
block.Hash(), blockHeight, delay,
item.Delay)
}
if isOrphan != item.IsOrphan {
t.Fatalf("block %q (hash %s, height %d) unexpected "+
"orphan flag -- got %v, want %v", item.Name,
block.Hash(), blockHeight, isOrphan,
item.IsOrphan)
}
}
// testRejectedBlock attempts to process the block in the provided test
// instance and ensures that it was rejected with the reject code
// specified in the test.
testRejectedBlock := func(item fullblocktests.RejectedBlock) {
blockHeight := item.Height
block := util.NewBlock(item.Block)
block.SetChainHeight(blockHeight)
t.Logf("Testing block %s (hash %s, height %d)",
item.Name, block.Hash(), blockHeight)
_, _, err := dag.ProcessBlock(block, blockdag.BFNone)
if err == nil {
t.Fatalf("block %q (hash %s, height %d) should not "+
"have been accepted", item.Name, block.Hash(),
blockHeight)
}
// Ensure the error code is of the expected type and the reject
// code matches the value specified in the test instance.
rerr, ok := err.(blockdag.RuleError)
if !ok {
t.Fatalf("block %q (hash %s, height %d) returned "+
"unexpected error type -- got %T, want "+
"blockchain.RuleError", item.Name, block.Hash(),
blockHeight, err)
}
if rerr.ErrorCode != item.RejectCode {
t.Fatalf("block %q (hash %s, height %d) does not have "+
"expected reject code -- got %v, want %v",
item.Name, block.Hash(), blockHeight,
rerr.ErrorCode, item.RejectCode)
}
}
// testRejectedNonCanonicalBlock attempts to decode the block in the
// provided test instance and ensures that it failed to decode with a
// message error.
testRejectedNonCanonicalBlock := func(item fullblocktests.RejectedNonCanonicalBlock) {
headerLen := len(item.RawBlock)
if headerLen > 80 {
headerLen = 80
}
blockHash := daghash.DoubleHashH(item.RawBlock[0:headerLen])
blockHeight := item.Height
t.Logf("Testing block %s (hash %s, height %d)", item.Name,
blockHash, blockHeight)
// Ensure there is an error due to deserializing the block.
var msgBlock wire.MsgBlock
err := msgBlock.BtcDecode(bytes.NewReader(item.RawBlock), 0)
if _, ok := err.(*wire.MessageError); !ok {
t.Fatalf("block %q (hash %s, height %d) should have "+
"failed to decode", item.Name, blockHash,
blockHeight)
}
}
// testOrphanOrRejectedBlock attempts to process the block in the
// provided test instance and ensures that it was either accepted as an
// orphan or rejected with a rule violation.
testOrphanOrRejectedBlock := func(item fullblocktests.OrphanOrRejectedBlock) {
blockHeight := item.Height
block := util.NewBlock(item.Block)
block.SetChainHeight(blockHeight)
t.Logf("Testing block %s (hash %s, height %d)",
item.Name, block.Hash(), blockHeight)
isOrphan, delay, err := dag.ProcessBlock(block, blockdag.BFNone)
if err != nil {
// Ensure the error code is of the expected type.
if _, ok := err.(blockdag.RuleError); !ok {
t.Fatalf("block %q (hash %s, height %d) "+
"returned unexpected error type -- "+
"got %T, want blockchain.RuleError",
item.Name, block.Hash(), blockHeight,
err)
}
}
if delay != 0 {
t.Fatalf("block %q (hash %s, height %d) "+
"is too far in the future",
item.Name, block.Hash(), blockHeight)
}
if !isOrphan {
t.Fatalf("block %q (hash %s, height %d) was accepted, "+
"but is not considered an orphan", item.Name,
block.Hash(), blockHeight)
}
}
// testExpectedTip ensures the current tip of the blockchain is the
// block specified in the provided test instance.
testExpectedTip := func(item fullblocktests.ExpectedTip) {
blockHeight := item.Height
block := util.NewBlock(item.Block)
block.SetChainHeight(blockHeight)
t.Logf("Testing tip for block %s (hash %s, height %d)",
item.Name, block.Hash(), blockHeight)
// Ensure hash and height match.
if dag.SelectedTipHash() != item.Block.BlockHash() ||
dag.ChainHeight() != blockHeight { //TODO: (Ori) the use of dag.ChainHeight() and virtualBlock.HighestTipHash() is wrong, and was done only for compilation
t.Fatalf("block %q (hash %s, height %d) should be "+
"the current tip -- got (hash %s, height %d)",
item.Name, block.Hash(), blockHeight, dag.SelectedTipHash(),
dag.ChainHeight()) //TODO: (Ori) the use of dag.ChainHeight() and virtualBlock.HighestTipHash() is wrong, and was done only for compilation
}
}
for testNum, test := range tests {
for itemNum, item := range test {
switch item := item.(type) {
case fullblocktests.AcceptedBlock:
testAcceptedBlock(item)
case fullblocktests.RejectedBlock:
testRejectedBlock(item)
case fullblocktests.RejectedNonCanonicalBlock:
testRejectedNonCanonicalBlock(item)
case fullblocktests.OrphanOrRejectedBlock:
testOrphanOrRejectedBlock(item)
case fullblocktests.ExpectedTip:
testExpectedTip(item)
default:
t.Fatalf("test #%d, item #%d is not one of "+
"the supported test instance types -- "+
"got type: %T", testNum, itemNum, item)
}
}
}
}


@@ -1,29 +0,0 @@
fullblocktests
==============
[![Build Status](http://img.shields.io/travis/btcsuite/btcd.svg)](https://travis-ci.org/btcsuite/btcd)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad/blockchain/fullblocktests)
Package fullblocktests provides a set of full block tests to be used for testing
the consensus validation rules. The tests are intended to be flexible enough to
allow both unit-style tests directly against the blockchain code as well as
integration style tests over the peer-to-peer network. To achieve that goal,
each test contains additional information about the expected result, however
that information can be ignored when doing comparison tests between two
independent versions over the peer-to-peer network.
This package has intentionally been designed so it can be used as a standalone
package for any projects needing to test their implementation against a full set
of blocks that exercise the consensus validation rules.
## Installation and Updating
```bash
$ go get -u github.com/kaspanet/kaspad/blockchain/fullblocktests
```
## License
Package fullblocktests is licensed under the [copyfree](http://copyfree.org) ISC
License.


@@ -1,20 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
/*
Package fullblocktests provides a set of block consensus validation tests.
All of the generated test instances involve full blocks that are to be used for
testing the consensus validation rules. The tests are intended to be flexible
enough to allow both unit-style tests directly against the blockchain code as
well as integration style tests over the peer-to-peer network. To achieve that
goal, each test contains additional information about the expected result,
however that information can be ignored when doing comparison tests between two
independent versions over the peer-to-peer network.
This package has intentionally been designed so it can be used as a standalone
package for any projects needing to test their implementation against a full set
of blocks that exercise the consensus validation rules.
*/
package fullblocktests

File diff suppressed because it is too large


@@ -1,143 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package fullblocktests
import (
"encoding/hex"
"math"
"math/big"
"time"
"github.com/kaspanet/kaspad/util/hdkeychain"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
// newHashFromStr converts the passed big-endian hex string into a
// daghash.Hash. It only differs from the one available in daghash in that
// it panics on an error since it will only (and must only) be called with
// hard-coded, and therefore known good, hashes.
func newHashFromStr(hexStr string) *daghash.Hash {
hash, err := daghash.NewHashFromStr(hexStr)
if err != nil {
panic(err)
}
return hash
}
// newTxIDFromStr converts the passed big-endian hex string into a
// daghash.TxID. It only differs from the one available in daghash in that
// it panics on an error since it will only (and must only) be called with
// hard-coded, and therefore known good, hashes.
func newTxIDFromStr(hexStr string) *daghash.TxID {
txID, err := daghash.NewTxIDFromStr(hexStr)
if err != nil {
panic(err)
}
return txID
}
// fromHex converts the passed hex string into a byte slice and will panic if
// there is an error. This is only provided for the hard-coded constants so
// errors in the source code can be detected. It will only (and must only) be
// called for initialization purposes.
func fromHex(s string) []byte {
r, err := hex.DecodeString(s)
if err != nil {
panic("invalid hex in source file: " + s)
}
return r
}
var (
// bigOne is 1 represented as a big.Int. It is defined here to avoid
// the overhead of creating it multiple times.
bigOne = big.NewInt(1)
// regressionPowLimit is the highest proof of work value a Bitcoin block
// can have for the regression test network. It is the value 2^255 - 1.
regressionPowLimit = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 255), bigOne)
// regTestGenesisBlock defines the genesis block of the block chain which serves
// as the public transaction ledger for the regression test network.
regTestGenesisBlock = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
ParentHashes: []*daghash.Hash{},
HashMerkleRoot: newHashFromStr("4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"),
Timestamp: time.Unix(0x5b28c636, 0), // 2018-06-19 09:00:38 +0000 UTC
Bits: 0x207fffff, // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000]
Nonce: 1,
},
Transactions: []*wire.MsgTx{{
Version: 1,
TxIn: []*wire.TxIn{{
PreviousOutpoint: wire.Outpoint{
TxID: daghash.TxID{},
Index: 0xffffffff,
},
SignatureScript: fromHex("04ffff001d010445" +
"5468652054696d65732030332f4a616e2f" +
"32303039204368616e63656c6c6f72206f" +
"6e206272696e6b206f66207365636f6e64" +
"206261696c6f757420666f72206261686b73"),
Sequence: math.MaxUint64,
}},
TxOut: []*wire.TxOut{{
Value: 0,
ScriptPubKey: fromHex("4104678afdb0fe5548271967f1" +
"a67130b7105cd6a828e03909a67962e0ea1f" +
"61deb649f6bc3f4cef38c4f35504e51ec138" +
"c4f35504e51ec112de5c384df7ba0b8d578a" +
"4c702b6bf11d5fac"),
}},
LockTime: 0,
SubnetworkID: *subnetworkid.SubnetworkIDNative,
}},
}
)
// regressionNetParams defines the network parameters for the regression test
// network.
//
// NOTE: The test generator intentionally does not use the existing definitions
// in the dagconfig package since the intent is to be able to generate known
// good tests which exercise that code. Using the dagconfig parameters would
// allow them to change out from under the tests potentially invalidating them.
var regressionNetParams = &dagconfig.Params{
Name: "regtest",
Net: wire.RegTest,
DefaultPort: "18444",
// DAG parameters
GenesisBlock: &regTestGenesisBlock,
GenesisHash: newHashFromStr("5bec7567af40504e0994db3b573c186fffcc4edefe096ff2e58d00523bd7e8a6"),
PowMax: regressionPowLimit,
BlockCoinbaseMaturity: 100,
SubsidyReductionInterval: 150,
TargetTimePerBlock: time.Second * 10, // 10 seconds
DifficultyAdjustmentWindowSize: 2640,
TimestampDeviationTolerance: 132,
GenerateSupported: true,
// Checkpoints ordered from oldest to newest.
Checkpoints: nil,
// Mempool parameters
RelayNonStdTxs: true,
// Address encoding magics
PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed)
// BIP32 hierarchical deterministic extended key magics
HDKeyIDPair: hdkeychain.HDKeyPairRegressionNet,
// BIP44 coin type used in the hierarchical deterministic path for
// address generation.
HDCoinType: 1,
}


@@ -1,924 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package indexers
import (
"fmt"
"github.com/pkg/errors"
"sync"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
const (
// addrIndexName is the human-readable name for the index.
addrIndexName = "address index"
// level0MaxEntries is the maximum number of transactions that are
// stored in level 0 of an address index entry. Subsequent levels store
// 2^n * level0MaxEntries entries; in other words, each level holds double
// the maximum of the previous one.
level0MaxEntries = 8
// addrKeySize is the number of bytes an address key consumes in the
// index. It consists of 1 byte address type + 20 bytes hash160.
addrKeySize = 1 + 20
// levelKeySize is the number of bytes a level key in the address index
// consumes. It consists of the address key + 1 byte for the level.
levelKeySize = addrKeySize + 1
// levelOffset is the offset in the level key which identifies the level.
levelOffset = levelKeySize - 1
// addrKeyTypePubKeyHash is the address type in an address key which
// represents both a pay-to-pubkey-hash and a pay-to-pubkey address.
// This is done because both are identical for the purposes of the
// address index.
addrKeyTypePubKeyHash = 0
// addrKeyTypeScriptHash is the address type in an address key which
// represents a pay-to-script-hash address. This is necessary because
// the hash of a pubkey address might be the same as that of a script
// hash.
addrKeyTypeScriptHash = 1
// Size of a transaction entry. It consists of 8 bytes block id + 4
// bytes offset + 4 bytes length.
txEntrySize = 8 + 4 + 4
)
var (
// addrIndexKey is the key of the address index and the db bucket used
// to house it.
addrIndexKey = []byte("txbyaddridx")
// errUnsupportedAddressType is an error that is used to signal an
// unsupported address type has been used.
errUnsupportedAddressType = errors.New("address type is not supported " +
"by the address index")
)
// -----------------------------------------------------------------------------
// The address index maps addresses referenced in the blockchain to a list of
// all the transactions involving that address. Transactions are stored
// according to their order of appearance in the blockchain. That is to say
// first by block height and then by offset inside the block. It is also
// important to note that this implementation requires the transaction index
// since it is needed to catch up old blocks, because their spent outputs
// will already have been pruned from the utxo set.
//
// The approach used to store the index is similar to a log-structured merge
// tree (LSM tree) and is thus similar to how leveldb works internally.
//
// Every address consists of one or more entries identified by a level starting
// from 0 where each level holds a maximum number of entries such that each
// subsequent level holds double the maximum of the previous one. In equation
// form, the number of entries level n holds is 2^n * level0MaxEntries.
//
// New transactions are appended to level 0 until it becomes full at which point
// the entire level 0 entry is appended to the level 1 entry and level 0 is
// cleared. This process continues until level 1 becomes full at which point it
// will be appended to level 2 and cleared and so on.
//
// The result of this is the lower levels contain newer transactions and the
// transactions within each level are ordered from oldest to newest.
//
// The intent of this approach is to provide a balance between space efficiency
// and indexing cost. Storing one entry per transaction would have the lowest
// indexing cost, but would waste a lot of space because the same address hash
// would be duplicated for every transaction key. On the other hand, storing a
// single entry with all transactions would be the most space efficient, but
// would cause indexing cost to grow quadratically with the number of
// transactions involving the same address. The approach used here provides
// logarithmic insertion and retrieval.
//
// The serialized key format is:
//
// <addr type><addr hash><level>
//
// Field Type Size
// addr type uint8 1 byte
// addr hash hash160 20 bytes
// level uint8 1 byte
// -----
// Total: 22 bytes
//
// The serialized value format is:
//
// [<block id><start offset><tx length>,...]
//
// Field Type Size
// block id uint64 8 bytes
// start offset uint32 4 bytes
// tx length uint32 4 bytes
// -----
// Total: 16 bytes per indexed tx
// -----------------------------------------------------------------------------
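// As a concrete illustration of the value format above, a single entry for
// block id 12345 with a transaction starting at byte offset 100 and spanning
// 250 bytes serializes (little-endian, per byteOrder) to 16 bytes using
// serializeAddrIndexEntry, defined just below:
//
//	entry := serializeAddrIndexEntry(12345, wire.TxLoc{TxStart: 100, TxLen: 250})
//	// entry == []byte{0x39, 0x30, 0, 0, 0, 0, 0, 0, 0x64, 0, 0, 0, 0xfa, 0, 0, 0}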
// fetchBlockHashFunc defines a callback function to use in order to convert a
// serialized block ID to an associated block hash.
type fetchBlockHashFunc func(serializedID []byte) (*daghash.Hash, error)
// serializeAddrIndexEntry serializes the provided block id and transaction
// location according to the format described in detail above.
func serializeAddrIndexEntry(blockID uint64, txLoc wire.TxLoc) []byte {
// Serialize the entry.
serialized := make([]byte, 16)
byteOrder.PutUint64(serialized, blockID)
byteOrder.PutUint32(serialized[8:], uint32(txLoc.TxStart))
byteOrder.PutUint32(serialized[12:], uint32(txLoc.TxLen))
return serialized
}
// deserializeAddrIndexEntry decodes the passed serialized byte slice into the
// provided region struct according to the format described in detail above and
// uses the passed block hash fetching function in order to convert the block ID
// to the associated block hash.
func deserializeAddrIndexEntry(serialized []byte, region *database.BlockRegion, fetchBlockHash fetchBlockHashFunc) error {
// Ensure there are enough bytes to decode.
if len(serialized) < txEntrySize {
return errDeserialize("unexpected end of data")
}
hash, err := fetchBlockHash(serialized[0:8])
if err != nil {
return err
}
region.Hash = hash
region.Offset = byteOrder.Uint32(serialized[8:12])
region.Len = byteOrder.Uint32(serialized[12:16])
return nil
}
// keyForLevel returns the key for a specific address and level in the address
// index entry.
func keyForLevel(addrKey [addrKeySize]byte, level uint8) [levelKeySize]byte {
var key [levelKeySize]byte
copy(key[:], addrKey[:])
key[levelOffset] = level
return key
}
// dbPutAddrIndexEntry updates the address index to include the provided entry
// according to the level-based scheme described in detail above.
func dbPutAddrIndexEntry(bucket internalBucket, addrKey [addrKeySize]byte, blockID uint64, txLoc wire.TxLoc) error {
// Start with level 0 and its initial max number of entries.
curLevel := uint8(0)
maxLevelBytes := level0MaxEntries * txEntrySize
// Simply append the new entry to level 0 and return now when it will
// fit. This is the most common path.
newData := serializeAddrIndexEntry(blockID, txLoc)
level0Key := keyForLevel(addrKey, 0)
level0Data := bucket.Get(level0Key[:])
if len(level0Data)+len(newData) <= maxLevelBytes {
mergedData := newData
if len(level0Data) > 0 {
mergedData = make([]byte, len(level0Data)+len(newData))
copy(mergedData, level0Data)
copy(mergedData[len(level0Data):], newData)
}
return bucket.Put(level0Key[:], mergedData)
}
// At this point, level 0 is full, so merge each level into higher
// levels as many times as needed to free up level 0.
prevLevelData := level0Data
for {
// Each new level holds twice as much as the previous one.
curLevel++
maxLevelBytes *= 2
// Move to the next level as long as the current level is full.
curLevelKey := keyForLevel(addrKey, curLevel)
curLevelData := bucket.Get(curLevelKey[:])
if len(curLevelData) == maxLevelBytes {
prevLevelData = curLevelData
continue
}
// The current level has room for the data in the previous one,
// so merge the data from previous level into it.
mergedData := prevLevelData
if len(curLevelData) > 0 {
mergedData = make([]byte, len(curLevelData)+
len(prevLevelData))
copy(mergedData, curLevelData)
copy(mergedData[len(curLevelData):], prevLevelData)
}
err := bucket.Put(curLevelKey[:], mergedData)
if err != nil {
return err
}
// Move all of the levels before the previous one up a level.
for mergeLevel := curLevel - 1; mergeLevel > 0; mergeLevel-- {
mergeLevelKey := keyForLevel(addrKey, mergeLevel)
prevLevelKey := keyForLevel(addrKey, mergeLevel-1)
prevData := bucket.Get(prevLevelKey[:])
err := bucket.Put(mergeLevelKey[:], prevData)
if err != nil {
return err
}
}
break
}
// Finally, insert the new entry into level 0 now that it is empty.
return bucket.Put(level0Key[:], newData)
}
// dbFetchAddrIndexEntries returns block regions for transactions referenced by
// the given address key, along with the number of entries actually skipped.
// The skipped count may be lower than requested when the index holds fewer
// total entries than the requested number to skip.
func dbFetchAddrIndexEntries(bucket internalBucket, addrKey [addrKeySize]byte, numToSkip, numRequested uint32, reverse bool, fetchBlockHash fetchBlockHashFunc) ([]database.BlockRegion, uint32, error) {
// When the reverse flag is not set, all levels need to be fetched
// because numToSkip and numRequested are counted from the oldest
// transactions (highest level) and thus the total count is needed.
// However, when the reverse flag is set, only enough records to satisfy
// the requested amount are needed.
var level uint8
var serialized []byte
for !reverse || len(serialized) < int(numToSkip+numRequested)*txEntrySize {
curLevelKey := keyForLevel(addrKey, level)
levelData := bucket.Get(curLevelKey[:])
if levelData == nil {
// Stop when there are no more levels.
break
}
// Higher levels contain older transactions, so prepend them.
prepended := make([]byte, len(serialized)+len(levelData))
copy(prepended, levelData)
copy(prepended[len(levelData):], serialized)
serialized = prepended
level++
}
// When the requested number of entries to skip is larger than the
// number available, skip them all and return now with the actual number
// skipped.
numEntries := uint32(len(serialized) / txEntrySize)
if numToSkip >= numEntries {
return nil, numEntries, nil
}
// Nothing more to do when there are no requested entries.
if numRequested == 0 {
return nil, numToSkip, nil
}
// Limit the number to load based on the number of available entries,
// the number to skip, and the number requested.
numToLoad := numEntries - numToSkip
if numToLoad > numRequested {
numToLoad = numRequested
}
// Start the offset after all skipped entries and load the calculated
// number.
results := make([]database.BlockRegion, numToLoad)
for i := uint32(0); i < numToLoad; i++ {
// Calculate the read offset according to the reverse flag.
var offset uint32
if reverse {
offset = (numEntries - numToSkip - i - 1) * txEntrySize
} else {
offset = (numToSkip + i) * txEntrySize
}
// Deserialize and populate the result.
err := deserializeAddrIndexEntry(serialized[offset:],
&results[i], fetchBlockHash)
if err != nil {
// Ensure any deserialization errors are returned as
// database corruption errors.
if isDeserializeErr(err) {
err = database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("failed to "+
"deserialized address index "+
"for key %x: %s", addrKey, err),
}
}
return nil, 0, err
}
}
return results, numToSkip, nil
}
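// A minimal sketch of the offset arithmetic above (illustrative, not part
// of the package): given the flattened oldest-first entry count, it yields
// the entry indexes that dbFetchAddrIndexEntries reads for a query.
func entryIndexes(numEntries, numToSkip, numRequested uint32, reverse bool) []uint32 {
if numToSkip >= numEntries || numRequested == 0 {
return nil
}
numToLoad := numEntries - numToSkip
if numToLoad > numRequested {
numToLoad = numRequested
}
indexes := make([]uint32, numToLoad)
for i := uint32(0); i < numToLoad; i++ {
if reverse {
// Skip counts from the newest (last) entries backwards.
indexes[i] = numEntries - numToSkip - i - 1
} else {
// Skip counts from the oldest (first) entries forwards.
indexes[i] = numToSkip + i
}
}
return indexes
}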
// minEntriesToReachLevel returns the minimum number of entries that are
// required to reach the given address index level.
func minEntriesToReachLevel(level uint8) int {
maxEntriesForLevel := level0MaxEntries
minRequired := 1
for l := uint8(1); l <= level; l++ {
minRequired += maxEntriesForLevel
maxEntriesForLevel *= 2
}
return minRequired
}
// maxEntriesForLevel returns the maximum number of entries allowed for the
// given address index level.
func maxEntriesForLevel(level uint8) int {
numEntries := level0MaxEntries
for l := level; l > 0; l-- {
numEntries *= 2
}
return numEntries
}
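// For intuition, assuming level0MaxEntries = 8 (the value this package
// uses), the helpers above yield:
//
//	level:                   0   1   2   3
//	maxEntriesForLevel:      8  16  32  64
//	minEntriesToReachLevel:  1   9  25  57
//
// That is, reaching level n requires one level-0 entry plus every lower
// level at full capacity.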
// dbRemoveAddrIndexEntries removes the specified number of entries from
// the address index for the provided key. An assertion error will be returned
// if the count exceeds the total number of entries in the index.
func dbRemoveAddrIndexEntries(bucket internalBucket, addrKey [addrKeySize]byte, count int) error {
// Nothing to do if no entries are being deleted.
if count <= 0 {
return nil
}
// Make use of a local map to track pending updates and define a closure
// to apply it to the database. This is done in order to reduce the
// number of database reads and because there is more than one exit
// path that needs to apply the updates.
pendingUpdates := make(map[uint8][]byte)
applyPending := func() error {
for level, data := range pendingUpdates {
curLevelKey := keyForLevel(addrKey, level)
if len(data) == 0 {
err := bucket.Delete(curLevelKey[:])
if err != nil {
return err
}
continue
}
err := bucket.Put(curLevelKey[:], data)
if err != nil {
return err
}
}
return nil
}
// Loop forwards through the levels while removing entries until the
// specified number has been removed. This will potentially result in
// entirely empty lower levels which will be backfilled below.
var highestLoadedLevel uint8
numRemaining := count
for level := uint8(0); numRemaining > 0; level++ {
// Load the data for the level from the database.
curLevelKey := keyForLevel(addrKey, level)
curLevelData := bucket.Get(curLevelKey[:])
if len(curLevelData) == 0 && numRemaining > 0 {
return AssertError(fmt.Sprintf("dbRemoveAddrIndexEntries "+
"not enough entries for address key %x to "+
"delete %d entries", addrKey, count))
}
pendingUpdates[level] = curLevelData
highestLoadedLevel = level
// Delete the entire level as needed.
numEntries := len(curLevelData) / txEntrySize
if numRemaining >= numEntries {
pendingUpdates[level] = nil
numRemaining -= numEntries
continue
}
// Remove remaining entries to delete from the level.
offsetEnd := len(curLevelData) - (numRemaining * txEntrySize)
pendingUpdates[level] = curLevelData[:offsetEnd]
break
}
// When level 0 still has entries left after the removals, there is
// nothing left to do other than updating the database.
if len(pendingUpdates[0]) != 0 {
return applyPending()
}
// At this point there are one or more empty levels before the current
// level which need to be backfilled and the current level might have
// had some entries deleted from it as well. Since all levels after
// level 0 are required to either be empty, half full, or completely
// full, the current level must be adjusted accordingly by backfilling
// each previous level in a way which satisfies the requirements. Any
// entries that are left are assigned to level 0 after the loop as they
// are guaranteed to fit by the logic in the loop. In other words, this
// effectively squashes all remaining entries in the current level into
// the lowest possible levels while following the level rules.
//
// Note that the level after the current level might also have entries
// and gaps are not allowed, so this also keeps track of the lowest
// empty level so the code below knows how far to backfill in case it is
// required.
lowestEmptyLevel := uint8(255)
curLevelData := pendingUpdates[highestLoadedLevel]
curLevelMaxEntries := maxEntriesForLevel(highestLoadedLevel)
for level := highestLoadedLevel; level > 0; level-- {
// When there are not enough entries left in the current level
// for the number that would be required to reach it, clear the
// current level, which effectively moves them all up to the
// previous level on the next iteration. Otherwise, there are
// sufficient entries, so update the current level to contain
// as many entries as possible while still leaving enough
// remaining entries required to reach the level.
numEntries := len(curLevelData) / txEntrySize
prevLevelMaxEntries := curLevelMaxEntries / 2
minPrevRequired := minEntriesToReachLevel(level - 1)
if numEntries < prevLevelMaxEntries+minPrevRequired {
lowestEmptyLevel = level
pendingUpdates[level] = nil
} else {
// This level can only be completely full or half full,
// so choose the appropriate offset to ensure enough
// entries remain to reach the level.
var offset int
if numEntries-curLevelMaxEntries >= minPrevRequired {
offset = curLevelMaxEntries * txEntrySize
} else {
offset = prevLevelMaxEntries * txEntrySize
}
pendingUpdates[level] = curLevelData[:offset]
curLevelData = curLevelData[offset:]
}
curLevelMaxEntries = prevLevelMaxEntries
}
pendingUpdates[0] = curLevelData
if len(curLevelData) == 0 {
lowestEmptyLevel = 0
}
// When the highest loaded level is empty, it's possible the level after
// it still has data and thus that data needs to be backfilled as well.
for len(pendingUpdates[highestLoadedLevel]) == 0 {
// When the next level is empty too, there is no data left to
// continue backfilling, so there is nothing left to do.
// Otherwise, populate the pending updates map with the newly
// loaded data and update the highest loaded level accordingly.
level := highestLoadedLevel + 1
curLevelKey := keyForLevel(addrKey, level)
levelData := bucket.Get(curLevelKey[:])
if len(levelData) == 0 {
break
}
pendingUpdates[level] = levelData
highestLoadedLevel = level
// At this point the highest level is not empty, but it might
// be half full. When that is the case, move it up a level to
// simplify the code below which backfills all lower levels that
// are still empty. This also means the current level will be
// empty, so the loop will perform another iteration to
// potentially backfill this level with data from the next one.
curLevelMaxEntries := maxEntriesForLevel(level)
if len(levelData)/txEntrySize != curLevelMaxEntries {
pendingUpdates[level] = nil
pendingUpdates[level-1] = levelData
level--
curLevelMaxEntries /= 2
}
// Backfill all lower levels that are still empty by iteratively
// halving the data until the lowest empty level is filled.
for level > lowestEmptyLevel {
offset := (curLevelMaxEntries / 2) * txEntrySize
pendingUpdates[level] = levelData[:offset]
levelData = levelData[offset:]
pendingUpdates[level-1] = levelData
level--
curLevelMaxEntries /= 2
}
// The lowest possible empty level is now the highest loaded
// level.
lowestEmptyLevel = highestLoadedLevel
}
// Apply the pending updates.
return applyPending()
}
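// For intuition, assuming level0MaxEntries = 8 and levels holding
// [1, 8, 16] entries (25 total): removing 3 entries empties level 0 and
// takes 2 from level 1, and the backfill pass then squashes what is left
// downwards. The 6 survivors of level 1 drop into level 0 and the 16
// entries of level 2 move down to fill level 1, leaving [6, 16, -]
// (22 total) with every remaining level still full or half full.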
// addrToKey converts known address types to an addrindex key. An error is
// returned for unsupported types.
func addrToKey(addr util.Address) ([addrKeySize]byte, error) {
switch addr := addr.(type) {
case *util.AddressPubKeyHash:
var result [addrKeySize]byte
result[0] = addrKeyTypePubKeyHash
copy(result[1:], addr.Hash160()[:])
return result, nil
case *util.AddressScriptHash:
var result [addrKeySize]byte
result[0] = addrKeyTypeScriptHash
copy(result[1:], addr.Hash160()[:])
return result, nil
}
return [addrKeySize]byte{}, errUnsupportedAddressType
}
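// The resulting key layout is a single type byte followed by the 20-byte
// Hash160 digest of the address, e.g. (illustrative sketch):
//
//	var key [addrKeySize]byte
//	key[0] = addrKeyTypePubKeyHash // or addrKeyTypeScriptHash
//	copy(key[1:], hash160[:])      // hash160 is the address's [20]byte digest
//
// so two address types sharing a digest still map to distinct keys.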
// AddrIndex implements a transaction by address index. That is to say, it
// supports querying all transactions that reference a given address because
// they are either crediting or debiting the address. The returned transactions
// are ordered according to their order of appearance in the blockchain. In
// other words, first by block height and then by offset inside the block.
//
// In addition, support is provided for a memory-only index of unconfirmed
// transactions such as those which are kept in the memory pool before inclusion
// in a block.
type AddrIndex struct {
// The following fields are set when the instance is created and can't
// be changed afterwards, so there is no need to protect them with a
// separate mutex.
db database.DB
dagParams *dagconfig.Params
// The following fields are used to quickly link transactions and
// addresses that have not been included into a block yet when an
// address index is being maintained. They are protected by the
// unconfirmedLock field.
//
// The txnsByAddr field is used to keep an index of all transactions
// which either create an output to a given address or spend from a
// previous output to it keyed by the address.
//
// The addrsByTx field is essentially the reverse and is used to
// keep an index of all addresses which a given transaction involves.
// This allows fairly efficient updates when transactions are removed
// once they are included into a block.
unconfirmedLock sync.RWMutex
txnsByAddr map[[addrKeySize]byte]map[daghash.TxID]*util.Tx
addrsByTx map[daghash.TxID]map[[addrKeySize]byte]struct{}
}
// Ensure the AddrIndex type implements the Indexer interface.
var _ Indexer = (*AddrIndex)(nil)
// Ensure the AddrIndex type implements the NeedsInputser interface.
var _ NeedsInputser = (*AddrIndex)(nil)
// NeedsInputs signals that the index requires the referenced inputs in order
// to properly create the index.
//
// This implements the NeedsInputser interface.
func (idx *AddrIndex) NeedsInputs() bool {
return true
}
// Init only stores the database handle, as there is nothing else to
// initialize for this index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Init(db database.DB, _ *blockdag.BlockDAG) error {
idx.db = db
return nil
}
// Key returns the database key to use for the index as a byte slice.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Key() []byte {
return addrIndexKey
}
// Name returns the human-readable name of the index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Name() string {
return addrIndexName
}
// Create is invoked when the indexer manager determines the index needs
// to be created for the first time. It creates the bucket for the address
// index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Create(dbTx database.Tx) error {
_, err := dbTx.Metadata().CreateBucket(addrIndexKey)
return err
}
// writeIndexData represents the address index data to be written for one block.
// It consists of the address mapped to an ordered list of the transactions
// that involve the address in the block. It is ordered so the transactions can be
// stored in the order they appear in the block.
type writeIndexData map[[addrKeySize]byte][]int
// indexScriptPubKey extracts all standard addresses from the passed public key
// script and maps each of them to the associated transaction using the passed
// map.
func (idx *AddrIndex) indexScriptPubKey(data writeIndexData, scriptPubKey []byte, txIdx int) {
// Nothing to index if the script is non-standard or otherwise doesn't
// contain any addresses.
_, addr, err := txscript.ExtractScriptPubKeyAddress(scriptPubKey,
idx.dagParams)
if err != nil || addr == nil {
return
}
addrKey, err := addrToKey(addr)
if err != nil {
// Ignore unsupported address types.
return
}
// Avoid inserting the transaction more than once. Since the
// transactions are indexed serially any duplicates will be
// indexed in a row, so checking the most recent entry for the
// address is enough to detect duplicates.
indexedTxns := data[addrKey]
numTxns := len(indexedTxns)
if numTxns > 0 && indexedTxns[numTxns-1] == txIdx {
return
}
indexedTxns = append(indexedTxns, txIdx)
data[addrKey] = indexedTxns
}
// indexBlock extracts all of the standard addresses from all of the transactions
// in the passed block and maps each of them to the associated transaction using
// the passed map.
func (idx *AddrIndex) indexBlock(data writeIndexData, block *util.Block, dag *blockdag.BlockDAG) {
for txIdx, tx := range block.Transactions() {
// Coinbases do not reference any inputs. Since the block is
// required to have already gone through full validation, it has
// already been proven that the first transaction in the block
// is a coinbase.
if txIdx > util.CoinbaseTransactionIndex {
for _, txIn := range tx.MsgTx().TxIn {
// The UTXO should always have the input since
// the index contract requires it, however, be
// safe and simply ignore any missing entries.
entry, ok := dag.GetUTXOEntry(txIn.PreviousOutpoint)
if !ok {
continue
}
idx.indexScriptPubKey(data, entry.ScriptPubKey(), txIdx)
}
}
for _, txOut := range tx.MsgTx().TxOut {
idx.indexScriptPubKey(data, txOut.ScriptPubKey, txIdx)
}
}
}
// ConnectBlock is invoked by the index manager when a new block has been
// connected to the main chain. This indexer adds a mapping for each address
// the transactions in the block involve.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
_ blockdag.MultiBlockTxsAcceptanceData, _ blockdag.MultiBlockTxsAcceptanceData) error {
// The offset and length of the transactions within the serialized
// block.
txLocs, err := block.TxLoc()
if err != nil {
return err
}
// Build all of the address to transaction mappings in a local map.
addrsToTxns := make(writeIndexData)
idx.indexBlock(addrsToTxns, block, dag)
// Add all of the index entries for each address.
addrIdxBucket := dbTx.Metadata().Bucket(addrIndexKey)
for addrKey, txIdxs := range addrsToTxns {
for _, txIdx := range txIdxs {
err := dbPutAddrIndexEntry(addrIdxBucket, addrKey,
blockID, txLocs[txIdx])
if err != nil {
return err
}
}
}
return nil
}
// DisconnectBlock is invoked by the index manager when a block has been
// disconnected from the main chain. This indexer removes the address mappings
// that each transaction in the block involves.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) DisconnectBlock(dbTx database.Tx, block *util.Block, dag *blockdag.BlockDAG) error {
// Build all of the address to transaction mappings in a local map.
addrsToTxns := make(writeIndexData)
idx.indexBlock(addrsToTxns, block, dag)
// Remove all of the index entries for each address.
bucket := dbTx.Metadata().Bucket(addrIndexKey)
for addrKey, txIdxs := range addrsToTxns {
err := dbRemoveAddrIndexEntries(bucket, addrKey, len(txIdxs))
if err != nil {
return err
}
}
return nil
}
// TxRegionsForAddress returns a slice of block regions which identify each
// transaction that involves the passed address according to the specified
// number to skip, number requested, and whether or not the results should be
// reversed. It also returns the number actually skipped since it could be less
// in the case where there are not enough entries.
//
// NOTE: These results only include transactions confirmed in blocks. See the
// UnconfirmedTxnsForAddress method for obtaining unconfirmed transactions
// that involve a given address.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) TxRegionsForAddress(dbTx database.Tx, addr util.Address, numToSkip, numRequested uint32, reverse bool) ([]database.BlockRegion, uint32, error) {
addrKey, err := addrToKey(addr)
if err != nil {
return nil, 0, err
}
var regions []database.BlockRegion
var skipped uint32
err = idx.db.View(func(dbTx database.Tx) error {
// Create closure to lookup the block hash given the ID using
// the database transaction.
fetchBlockHash := func(id []byte) (*daghash.Hash, error) {
// Deserialize and populate the result.
return blockdag.DBFetchBlockHashBySerializedID(dbTx, id)
}
var err error
addrIdxBucket := dbTx.Metadata().Bucket(addrIndexKey)
regions, skipped, err = dbFetchAddrIndexEntries(addrIdxBucket,
addrKey, numToSkip, numRequested, reverse,
fetchBlockHash)
return err
})
return regions, skipped, err
}
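// A minimal usage sketch (illustrative; assumes an initialized index and a
// decoded address, with error handling elided):
//
//	// Fetch up to 100 regions involving addr, newest first.
//	regions, skipped, err := addrIndex.TxRegionsForAddress(
//	dbTx, addr, 0, 100, true)
//
// Each returned database.BlockRegion can then be used to load the raw
// transaction bytes from the database.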
// indexUnconfirmedAddresses modifies the unconfirmed (memory-only) address
// index to include mappings for the addresses encoded by the passed public key
// script to the transaction.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) indexUnconfirmedAddresses(scriptPubKey []byte, tx *util.Tx) {
// The error is ignored here since the only reason it can fail is if the
// script fails to parse and it was already validated before being
// admitted to the mempool.
_, addr, _ := txscript.ExtractScriptPubKeyAddress(scriptPubKey,
idx.dagParams)
// Ignore unsupported address types.
addrKey, err := addrToKey(addr)
if err != nil {
return
}
// Add a mapping from the address to the transaction.
idx.unconfirmedLock.Lock()
addrIndexEntry := idx.txnsByAddr[addrKey]
if addrIndexEntry == nil {
addrIndexEntry = make(map[daghash.TxID]*util.Tx)
idx.txnsByAddr[addrKey] = addrIndexEntry
}
addrIndexEntry[*tx.ID()] = tx
// Add a mapping from the transaction to the address.
addrsByTxEntry := idx.addrsByTx[*tx.ID()]
if addrsByTxEntry == nil {
addrsByTxEntry = make(map[[addrKeySize]byte]struct{})
idx.addrsByTx[*tx.ID()] = addrsByTxEntry
}
addrsByTxEntry[addrKey] = struct{}{}
idx.unconfirmedLock.Unlock()
}
// AddUnconfirmedTx adds all addresses related to the transaction to the
// unconfirmed (memory-only) address index.
//
// NOTE: This transaction MUST have already been validated by the memory pool
// before calling this function with it and have all of the inputs available in
// the provided utxo view. Failure to do so could result in some or all
// addresses not being indexed.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) AddUnconfirmedTx(tx *util.Tx, utxoSet blockdag.UTXOSet) {
// Index addresses of all referenced previous transaction outputs.
//
// The existence checks are elided since this is only called after the
// transaction has already been validated and thus all inputs are
// already known to exist.
for _, txIn := range tx.MsgTx().TxIn {
entry, ok := utxoSet.Get(txIn.PreviousOutpoint)
if !ok {
// Ignore missing entries. This should never happen
// in practice since the function comments specifically
// call out all inputs must be available.
continue
}
idx.indexUnconfirmedAddresses(entry.ScriptPubKey(), tx)
}
// Index addresses of all created outputs.
for _, txOut := range tx.MsgTx().TxOut {
idx.indexUnconfirmedAddresses(txOut.ScriptPubKey, tx)
}
}
// RemoveUnconfirmedTx removes the passed transaction from the unconfirmed
// (memory-only) address index.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) RemoveUnconfirmedTx(txID *daghash.TxID) {
idx.unconfirmedLock.Lock()
defer idx.unconfirmedLock.Unlock()
// Remove all address references to the transaction from the address
// index and remove the entry for the address altogether if it no longer
// references any transactions.
for addrKey := range idx.addrsByTx[*txID] {
delete(idx.txnsByAddr[addrKey], *txID)
if len(idx.txnsByAddr[addrKey]) == 0 {
delete(idx.txnsByAddr, addrKey)
}
}
// Remove the entry from the transaction to address lookup map as well.
delete(idx.addrsByTx, *txID)
}
// UnconfirmedTxnsForAddress returns all transactions currently in the
// unconfirmed (memory-only) address index that involve the passed address.
// Unsupported address types are ignored and will result in no results.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) UnconfirmedTxnsForAddress(addr util.Address) []*util.Tx {
// Ignore unsupported address types.
addrKey, err := addrToKey(addr)
if err != nil {
return nil
}
// Protect concurrent access.
idx.unconfirmedLock.RLock()
defer idx.unconfirmedLock.RUnlock()
// Return a new slice with the results if there are any. This ensures
// safe concurrency.
if txns, exists := idx.txnsByAddr[addrKey]; exists {
addressTxns := make([]*util.Tx, 0, len(txns))
for _, tx := range txns {
addressTxns = append(addressTxns, tx)
}
return addressTxns
}
return nil
}
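// A minimal mempool-side usage sketch (illustrative; error handling
// elided):
//
//	addrIndex.AddUnconfirmedTx(tx, utxoSet)         // on mempool accept
//	txns := addrIndex.UnconfirmedTxnsForAddress(addr)
//	addrIndex.RemoveUnconfirmedTx(tx.ID())          // on mempool removal
//
// Because UnconfirmedTxnsForAddress returns a fresh slice, callers may
// retain the result without holding the index lock.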
// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
return errors.Errorf("addrindex was turned off for %d blocks and can't be recovered."+
" To resume working drop the addrindex with --dropaddrindex", lastKnownBlockID-currentBlockID)
}
// NewAddrIndex returns a new instance of an indexer that is used to create a
// mapping of all addresses in the blockchain to the respective transactions
// that involve them.
//
// It implements the Indexer interface which plugs into the IndexManager that in
// turn is used by the blockchain package. This allows the index to be
// seamlessly maintained along with the chain.
func NewAddrIndex(dagParams *dagconfig.Params) *AddrIndex {
return &AddrIndex{
dagParams: dagParams,
txnsByAddr: make(map[[addrKeySize]byte]map[daghash.TxID]*util.Tx),
addrsByTx: make(map[daghash.TxID]map[[addrKeySize]byte]struct{}),
}
}
// DropAddrIndex drops the address index from the provided database if it
// exists.
func DropAddrIndex(db database.DB, interrupt <-chan struct{}) error {
return dropIndex(db, addrIndexKey, addrIndexName, interrupt)
}


@@ -1,277 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package indexers
import (
"bytes"
"fmt"
"github.com/pkg/errors"
"testing"
"github.com/kaspanet/kaspad/wire"
)
// addrIndexBucket provides a mock address index database bucket by implementing
// the internalBucket interface.
type addrIndexBucket struct {
levels map[[levelKeySize]byte][]byte
}
// Clone returns a deep copy of the mock address index bucket.
func (b *addrIndexBucket) Clone() *addrIndexBucket {
levels := make(map[[levelKeySize]byte][]byte)
for k, v := range b.levels {
vCopy := make([]byte, len(v))
copy(vCopy, v)
levels[k] = vCopy
}
return &addrIndexBucket{levels: levels}
}
// Get returns the value associated with the key from the mock address index
// bucket.
//
// This is part of the internalBucket interface.
func (b *addrIndexBucket) Get(key []byte) []byte {
var levelKey [levelKeySize]byte
copy(levelKey[:], key)
return b.levels[levelKey]
}
// Put stores the provided key/value pair to the mock address index bucket.
//
// This is part of the internalBucket interface.
func (b *addrIndexBucket) Put(key []byte, value []byte) error {
var levelKey [levelKeySize]byte
copy(levelKey[:], key)
b.levels[levelKey] = value
return nil
}
// Delete removes the provided key from the mock address index bucket.
//
// This is part of the internalBucket interface.
func (b *addrIndexBucket) Delete(key []byte) error {
var levelKey [levelKeySize]byte
copy(levelKey[:], key)
delete(b.levels, levelKey)
return nil
}
// printLevels returns a string with a visual representation of the provided
// address key taking into account the max size of each level. It is useful
// when creating and debugging test cases.
func (b *addrIndexBucket) printLevels(addrKey [addrKeySize]byte) string {
highestLevel := uint8(0)
for k := range b.levels {
if !bytes.Equal(k[:levelOffset], addrKey[:]) {
continue
}
level := uint8(k[levelOffset])
if level > highestLevel {
highestLevel = level
}
}
var levelBuf bytes.Buffer
_, _ = levelBuf.WriteString("\n")
maxEntries := level0MaxEntries
for level := uint8(0); level <= highestLevel; level++ {
data := b.levels[keyForLevel(addrKey, level)]
numEntries := len(data) / txEntrySize
for i := 0; i < numEntries; i++ {
start := i * txEntrySize
num := byteOrder.Uint32(data[start:])
_, _ = levelBuf.WriteString(fmt.Sprintf("%02d ", num))
}
for i := numEntries; i < maxEntries; i++ {
_, _ = levelBuf.WriteString("_ ")
}
_, _ = levelBuf.WriteString("\n")
maxEntries *= 2
}
return levelBuf.String()
}
// sanityCheck ensures that all data stored in the bucket for the given address
// adheres to the level-based rules described by the address index
// documentation.
func (b *addrIndexBucket) sanityCheck(addrKey [addrKeySize]byte, expectedTotal int) error {
// Find the highest level for the key.
highestLevel := uint8(0)
for k := range b.levels {
if !bytes.Equal(k[:levelOffset], addrKey[:]) {
continue
}
level := uint8(k[levelOffset])
if level > highestLevel {
highestLevel = level
}
}
// Ensure the expected total number of entries is present and that
// all levels adhere to the rules described in the address index
// documentation.
var totalEntries int
maxEntries := level0MaxEntries
for level := uint8(0); level <= highestLevel; level++ {
// Level 0 can't have more entries than the max allowed, and
// it can't be empty when the levels after it have data. All
// other levels must either be half full or full.
data := b.levels[keyForLevel(addrKey, level)]
numEntries := len(data) / txEntrySize
totalEntries += numEntries
if level == 0 {
if (highestLevel != 0 && numEntries == 0) ||
numEntries > maxEntries {
return errors.Errorf("level %d has %d entries",
level, numEntries)
}
} else if numEntries != maxEntries && numEntries != maxEntries/2 {
return errors.Errorf("level %d has %d entries", level,
numEntries)
}
maxEntries *= 2
}
if totalEntries != expectedTotal {
return errors.Errorf("expected %d entries - got %d", expectedTotal,
totalEntries)
}
// Ensure all of the numbers are in order starting from the highest
// level moving to the lowest level.
expectedNum := uint32(0)
for level := highestLevel + 1; level > 0; level-- {
data := b.levels[keyForLevel(addrKey, level-1)]
numEntries := len(data) / txEntrySize
for i := 0; i < numEntries; i++ {
start := i * txEntrySize
num := byteOrder.Uint32(data[start:])
if num != expectedNum {
return errors.Errorf("level %d offset %d does "+
"not contain the expected number of "+
"%d - got %d", level, i, num,
expectedNum)
}
expectedNum++
}
}
return nil
}
// TestAddrIndexLevels ensures that adding and deleting entries to the address
// index creates multiple levels as described by the address index
// documentation.
func TestAddrIndexLevels(t *testing.T) {
t.Parallel()
tests := []struct {
name string
key [addrKeySize]byte
numInsert int
printLevels bool // Set to help debug a specific test.
}{
{
name: "level 0 not full",
numInsert: level0MaxEntries - 1,
},
{
name: "level 1 half",
numInsert: level0MaxEntries + 1,
},
{
name: "level 1 full",
numInsert: level0MaxEntries*2 + 1,
},
{
name: "level 2 half, level 1 half",
numInsert: level0MaxEntries*3 + 1,
},
{
name: "level 2 half, level 1 full",
numInsert: level0MaxEntries*4 + 1,
},
{
name: "level 2 full, level 1 half",
numInsert: level0MaxEntries*5 + 1,
},
{
name: "level 2 full, level 1 full",
numInsert: level0MaxEntries*6 + 1,
},
{
name: "level 3 half, level 2 half, level 1 half",
numInsert: level0MaxEntries*7 + 1,
},
{
name: "level 3 full, level 2 half, level 1 full",
numInsert: level0MaxEntries*12 + 1,
},
}
nextTest:
for testNum, test := range tests {
// Insert entries in order.
populatedBucket := &addrIndexBucket{
levels: make(map[[levelKeySize]byte][]byte),
}
for i := 0; i < test.numInsert; i++ {
txLoc := wire.TxLoc{TxStart: i * 2}
err := dbPutAddrIndexEntry(populatedBucket, test.key,
uint64(i), txLoc)
if err != nil {
t.Errorf("dbPutAddrIndexEntry #%d (%s) - "+
"unexpected error: %v", testNum,
test.name, err)
continue nextTest
}
}
if test.printLevels {
t.Log(populatedBucket.printLevels(test.key))
}
// Delete entries from the populated bucket until all entries
// have been deleted. The bucket is reset to the fully
// populated bucket on each iteration so every combination is
// tested. Notice the upper limit purposely exceeds the number
// of entries to ensure attempting to delete more entries than
// there are works correctly.
for numDelete := 0; numDelete <= test.numInsert+1; numDelete++ {
// Clone populated bucket to run each delete against.
bucket := populatedBucket.Clone()
// Remove the number of entries for this iteration.
err := dbRemoveAddrIndexEntries(bucket, test.key,
numDelete)
if err != nil {
if numDelete <= test.numInsert {
t.Errorf("dbRemoveAddrIndexEntries (%s) "+
" delete %d - unexpected error: "+
"%v", test.name, numDelete, err)
continue nextTest
}
}
if test.printLevels {
t.Log(bucket.printLevels(test.key))
}
// Sanity check the levels to ensure they adhere to all
// rules.
numExpected := test.numInsert
if numDelete <= test.numInsert {
numExpected -= numDelete
}
err = bucket.sanityCheck(test.key, numExpected)
if err != nil {
t.Errorf("sanity check fail (%s) delete %d: %v",
test.name, numDelete, err)
continue nextTest
}
}
}
}


@@ -1,112 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
/*
Package indexers implements optional block chain indexes.
*/
package indexers
import (
"encoding/binary"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
)
var (
// byteOrder is the preferred byte order used for serializing numeric
// fields for storage in the database.
byteOrder = binary.LittleEndian
// errInterruptRequested indicates that an operation was cancelled due
// to a user-requested interrupt.
errInterruptRequested = errors.New("interrupt requested")
)
// NeedsInputser provides a generic interface for an indexer to specify that it
// requires the ability to look up inputs for a transaction.
type NeedsInputser interface {
NeedsInputs() bool
}
// Indexer provides a generic interface for an indexer that is managed by an
// index manager such as the Manager type provided by this package.
type Indexer interface {
// Key returns the key of the index as a byte slice.
Key() []byte
// Name returns the human-readable name of the index.
Name() string
// Create is invoked when the indexer manager determines the index needs
// to be created for the first time.
Create(dbTx database.Tx) error
// Init is invoked when the index manager is first initializing the
// index. This differs from the Create method in that it is called on
// every load, including the case where the index was just created.
Init(db database.DB, dag *blockdag.BlockDAG) error
// ConnectBlock is invoked when the index manager is notified that a new
// block has been connected to the DAG.
ConnectBlock(dbTx database.Tx,
block *util.Block,
blockID uint64,
dag *blockdag.BlockDAG,
acceptedTxsData blockdag.MultiBlockTxsAcceptanceData,
virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error
// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error
}
// AssertError identifies an error that indicates an internal code consistency
// issue and should be treated as a critical and unrecoverable error.
type AssertError string
// Error returns the assertion error as a human-readable string and satisfies
// the error interface.
func (e AssertError) Error() string {
return "assertion failed: " + string(e)
}
// errDeserialize signifies that a problem was encountered when deserializing
// data.
type errDeserialize string
// Error implements the error interface.
func (e errDeserialize) Error() string {
return string(e)
}
// isDeserializeErr returns whether or not the passed error is an errDeserialize
// error.
func isDeserializeErr(err error) bool {
_, ok := err.(errDeserialize)
return ok
}
// internalBucket is an abstraction over a database bucket. It is used to make
// the code easier to test since it allows mock objects in the tests to only
// implement these functions instead of everything a database.Bucket supports.
type internalBucket interface {
Get(key []byte) []byte
Put(key []byte, value []byte) error
Delete(key []byte) error
}
// interruptRequested returns true when the provided channel has been closed.
// This simplifies early shutdown slightly since the caller can just use an if
// statement instead of a select.
func interruptRequested(interrupted <-chan struct{}) bool {
select {
case <-interrupted:
return true
default:
}
return false
}


@@ -1,389 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package indexers
import (
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
)
var (
// indexTipsBucketName is the name of the db bucket used to house the
// current tip of each index.
indexTipsBucketName = []byte("idxtips")
indexCurrentBlockIDBucketName = []byte("idxcurrentblockid")
)
// Manager defines an index manager that manages multiple optional indexes and
// implements the blockchain.IndexManager interface so it can be seamlessly
// plugged into normal chain processing.
type Manager struct {
db database.DB
enabledIndexes []Indexer
}
// Ensure the Manager type implements the blockchain.IndexManager interface.
var _ blockdag.IndexManager = (*Manager)(nil)
// indexDropKey returns the key for an index which indicates it is in the
// process of being dropped.
func indexDropKey(idxKey []byte) []byte {
dropKey := make([]byte, len(idxKey)+1)
dropKey[0] = 'd'
copy(dropKey[1:], idxKey)
return dropKey
}
// maybeFinishDrops determines if each of the enabled indexes is in the middle
// of being dropped and finishes dropping them when they are. This is necessary
// because dropping an index has to be done in several atomic steps rather than
// one big atomic step due to the massive number of entries.
func (m *Manager) maybeFinishDrops(interrupt <-chan struct{}) error {
indexNeedsDrop := make([]bool, len(m.enabledIndexes))
err := m.db.View(func(dbTx database.Tx) error {
// None of the indexes needs to be dropped if the index tips
// bucket hasn't been created yet.
indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
if indexesBucket == nil {
return nil
}
// Mark the indexer as requiring a drop if one is already in
// progress.
for i, indexer := range m.enabledIndexes {
dropKey := indexDropKey(indexer.Key())
if indexesBucket.Get(dropKey) != nil {
indexNeedsDrop[i] = true
}
}
return nil
})
if err != nil {
return err
}
if interruptRequested(interrupt) {
return errInterruptRequested
}
// Finish dropping any of the enabled indexes that are already in the
// middle of being dropped.
for i, indexer := range m.enabledIndexes {
if !indexNeedsDrop[i] {
continue
}
log.Infof("Resuming %s drop", indexer.Name())
err := dropIndex(m.db, indexer.Key(), indexer.Name(), interrupt)
if err != nil {
return err
}
}
return nil
}
// maybeCreateIndexes determines if each of the enabled indexes have already
// been created and creates them if not.
func (m *Manager) maybeCreateIndexes(dbTx database.Tx) error {
indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
for _, indexer := range m.enabledIndexes {
// Nothing to do if the index tip already exists.
idxKey := indexer.Key()
if indexesBucket.Get(idxKey) != nil {
continue
}
// The tip for the index does not exist, so create it and
// invoke the create callback for the index so it can perform
// any one-time initialization it requires.
if err := indexer.Create(dbTx); err != nil {
return err
}
// TODO (Mike): this is a temporary solution to prevent the node from
// refusing to start because it thinks the indexers are not initialized.
// The indexers, however, do not work properly, and a general solution
// for their operation is required.
if err := indexesBucket.Put(idxKey, []byte{0}); err != nil {
return err
}
}
return nil
}
// Init initializes the enabled indexes. This is called during chain
// initialization and primarily consists of catching up all indexes to the
// current best chain tip. This is necessary since each index can be disabled
// and re-enabled at any time and attempting to catch-up indexes at the same
// time new blocks are being downloaded would lead to an overall longer time to
// catch up due to the I/O contention.
//
// This is part of the blockchain.IndexManager interface.
func (m *Manager) Init(db database.DB, blockDAG *blockdag.BlockDAG, interrupt <-chan struct{}) error {
// Nothing to do when no indexes are enabled.
if len(m.enabledIndexes) == 0 {
return nil
}
if interruptRequested(interrupt) {
return errInterruptRequested
}
m.db = db
// Finish any drops that were previously interrupted.
if err := m.maybeFinishDrops(interrupt); err != nil {
return err
}
// Create the initial state for the indexes as needed.
err := m.db.Update(func(dbTx database.Tx) error {
// Create the bucket for the current tips as needed.
meta := dbTx.Metadata()
_, err := meta.CreateBucketIfNotExists(indexTipsBucketName)
if err != nil {
return err
}
if _, err := meta.CreateBucketIfNotExists(indexCurrentBlockIDBucketName); err != nil {
return err
}
return m.maybeCreateIndexes(dbTx)
})
if err != nil {
return err
}
// Initialize each of the enabled indexes.
for _, indexer := range m.enabledIndexes {
if err := indexer.Init(db, blockDAG); err != nil {
return err
}
}
return m.recoverIfNeeded()
}
// recoverIfNeeded checks if the node worked for some time
// without one of the currently enabled indexes, and, if that is the
// case, recovers the missing blocks in the index.
func (m *Manager) recoverIfNeeded() error {
return m.db.Update(func(dbTx database.Tx) error {
lastKnownBlockID := blockdag.DBFetchCurrentBlockID(dbTx)
for _, indexer := range m.enabledIndexes {
serializedCurrentIdxBlockID := dbTx.Metadata().Bucket(indexCurrentBlockIDBucketName).Get(indexer.Key())
currentIdxBlockID := uint64(0)
if serializedCurrentIdxBlockID != nil {
currentIdxBlockID = blockdag.DeserializeBlockID(serializedCurrentIdxBlockID)
}
if lastKnownBlockID > currentIdxBlockID {
err := indexer.Recover(dbTx, currentIdxBlockID, lastKnownBlockID)
if err != nil {
return err
}
}
}
return nil
})
}
// ConnectBlock must be invoked when a block is extending the main chain. It
// keeps track of the state of each index it is managing, performs some sanity
// checks, and invokes each indexer.
//
// This is part of the blockchain.IndexManager interface.
func (m *Manager) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
// Call each of the currently active optional indexes with the block
// being connected so they can update accordingly.
for _, index := range m.enabledIndexes {
// Notify the indexer with the connected block so it can index it.
if err := index.ConnectBlock(dbTx, block, blockID, dag, txsAcceptanceData, virtualTxsAcceptanceData); err != nil {
return err
}
}
// Update the current block ID stored for each index to the block being
// connected.
err := m.updateIndexersWithCurrentBlockID(dbTx, block.Hash(), blockID)
if err != nil {
return err
}
return nil
}
func (m *Manager) updateIndexersWithCurrentBlockID(dbTx database.Tx, blockHash *daghash.Hash, blockID uint64) error {
serializedBlockID := blockdag.SerializeBlockID(blockID)
for _, index := range m.enabledIndexes {
err := dbTx.Metadata().Bucket(indexCurrentBlockIDBucketName).Put(index.Key(), serializedBlockID)
if err != nil {
return err
}
}
return nil
}
// NewManager returns a new index manager with the provided indexes enabled.
//
// The manager returned satisfies the blockchain.IndexManager interface and thus
// cleanly plugs into the normal blockchain processing path.
func NewManager(enabledIndexes []Indexer) *Manager {
return &Manager{
enabledIndexes: enabledIndexes,
}
}
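// A minimal wiring sketch (illustrative; mirrors how the tests in this
// package construct a manager):
//
//	txIndex := NewTxIndex()
//	addrIndex := NewAddrIndex(dagParams)
//	indexManager := NewManager([]Indexer{txIndex, addrIndex})
//
// The manager is then handed to the blockdag configuration so that
// ConnectBlock is invoked for every block added to the DAG.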
// dropIndex drops the passed index from the database. Since indexes can be
// massive, it deletes the index in multiple database transactions in order to
// keep memory usage to reasonable levels. It also marks the drop in progress
// so that, if the drop is interrupted before it is done, it can be resumed
// before the index is used again.
func dropIndex(db database.DB, idxKey []byte, idxName string, interrupt <-chan struct{}) error {
// Nothing to do if the index doesn't already exist.
var needsDelete bool
err := db.View(func(dbTx database.Tx) error {
indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
if indexesBucket != nil && indexesBucket.Get(idxKey) != nil {
needsDelete = true
}
return nil
})
if err != nil {
return err
}
if !needsDelete {
log.Infof("Not dropping %s because it does not exist", idxName)
return nil
}
// Mark that the index is in the process of being dropped so that it
// can be resumed on the next start if interrupted before the process is
// complete.
log.Infof("Dropping all %s entries. This might take a while...",
idxName)
err = db.Update(func(dbTx database.Tx) error {
indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
return indexesBucket.Put(indexDropKey(idxKey), idxKey)
})
if err != nil {
return err
}
// Since the indexes can be so large, attempting to simply delete
// the bucket in a single database transaction would result in massive
// memory usage and likely crash many systems due to ulimits. In order
// to avoid this, use a cursor to delete a maximum number of entries out
// of the bucket at a time. Recurse buckets depth-first to delete any
// sub-buckets.
const maxDeletions = 2000000
var totalDeleted uint64
// Recurse through all buckets in the index, cataloging each for
// later deletion.
var subBuckets [][][]byte
var subBucketClosure func(database.Tx, []byte, [][]byte) error
subBucketClosure = func(dbTx database.Tx,
subBucket []byte, tlBucket [][]byte) error {
// Get full bucket name and append to subBuckets for later
// deletion.
var bucketName [][]byte
if len(tlBucket) == 0 {
bucketName = append(bucketName, subBucket)
} else {
bucketName = append(tlBucket, subBucket)
}
subBuckets = append(subBuckets, bucketName)
// Recurse sub-buckets to append to subBuckets slice.
bucket := dbTx.Metadata()
for _, subBucketName := range bucketName {
bucket = bucket.Bucket(subBucketName)
}
return bucket.ForEachBucket(func(k []byte) error {
return subBucketClosure(dbTx, k, bucketName)
})
}
// Call subBucketClosure with top-level bucket.
err = db.View(func(dbTx database.Tx) error {
return subBucketClosure(dbTx, idxKey, nil)
})
if err != nil {
return err
}
// Iterate through each sub-bucket in reverse, deepest-first, deleting
// all keys inside them and then dropping the buckets themselves.
for i := range subBuckets {
bucketName := subBuckets[len(subBuckets)-1-i]
// Delete maxDeletions key/value pairs at a time.
for numDeleted := maxDeletions; numDeleted == maxDeletions; {
numDeleted = 0
err := db.Update(func(dbTx database.Tx) error {
subBucket := dbTx.Metadata()
for _, subBucketName := range bucketName {
subBucket = subBucket.Bucket(subBucketName)
}
cursor := subBucket.Cursor()
for ok := cursor.First(); ok; ok = cursor.Next() &&
numDeleted < maxDeletions {
if err := cursor.Delete(); err != nil {
return err
}
numDeleted++
}
return nil
})
if err != nil {
return err
}
if numDeleted > 0 {
totalDeleted += uint64(numDeleted)
log.Infof("Deleted %d keys (%d total) from %s",
numDeleted, totalDeleted, idxName)
}
}
if interruptRequested(interrupt) {
return errInterruptRequested
}
// Drop the bucket itself.
err = db.Update(func(dbTx database.Tx) error {
bucket := dbTx.Metadata()
for j := 0; j < len(bucketName)-1; j++ {
bucket = bucket.Bucket(bucketName[j])
}
return bucket.DeleteBucket(bucketName[len(bucketName)-1])
})
}
// Remove the index tip, index bucket, and in-progress drop flag now
// that all index entries have been removed.
err = db.Update(func(dbTx database.Tx) error {
meta := dbTx.Metadata()
indexesBucket := meta.Bucket(indexTipsBucketName)
if err := indexesBucket.Delete(idxKey); err != nil {
return err
}
if err := meta.Bucket(indexCurrentBlockIDBucketName).Delete(idxKey); err != nil {
return err
}
return indexesBucket.Delete(indexDropKey(idxKey))
})
if err != nil {
return err
}
log.Infof("Dropped %s", idxName)
return nil
}


@@ -1,431 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package indexers
import (
"fmt"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
)
const (
// txIndexName is the human-readable name for the index.
txIndexName = "transaction index"
includingBlocksIndexKeyEntrySize = 8 // 4 bytes for offset + 4 bytes for transaction length
)
var (
includingBlocksIndexKey = []byte("includingblocksidx")
acceptingBlocksIndexKey = []byte("acceptingblocksidx")
)
// txsAcceptedByVirtual is the in-memory index of txIDs that were accepted
// by the current virtual
var txsAcceptedByVirtual map[daghash.TxID]bool
// -----------------------------------------------------------------------------
// The transaction index consists of an entry for every transaction in the DAG.
//
// There are two buckets used in total. The first bucket maps the hash of
// each transaction to its location in each block it's included in. The second bucket
// contains, for every block from whose viewpoint the transaction has been
// accepted (i.e. the transaction is found in its blue set without double
// spends), the blue block (or the block itself) that included the transaction.
//
// NOTE: Although it is technically possible for multiple transactions to have
// the same hash as long as the previous transaction with the same hash is fully
// spent, this code only stores the most recent one because doing otherwise
// would add a non-trivial amount of space and overhead for something that will
// realistically never happen per the probability and even if it did, the old
// one must be fully spent and so the most likely transaction a caller would
// want for a given hash is the most recent one anyway.
//
// The including blocks index contains a sub-bucket for each transaction hash (32 bytes each), whose serialized format is:
//
// <block id> = <start offset><tx length>
//
// Field Type Size
// block id uint64 8 bytes
// start offset uint32 4 bytes
// tx length uint32 4 bytes
// -----
// Total: 16 bytes
//
// The accepting blocks index contains a sub-bucket for each transaction hash (32 bytes each), whose serialized format is:
//
// <accepting block id> = <including block id>
//
// Field Type Size
// accepting block id uint64 8 bytes
// including block id uint64 8 bytes
// -----
// Total: 16 bytes
//
// -----------------------------------------------------------------------------
func putIncludingBlocksEntry(target []byte, txLoc wire.TxLoc) {
byteOrder.PutUint32(target, uint32(txLoc.TxStart))
byteOrder.PutUint32(target[4:], uint32(txLoc.TxLen))
}
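// The matching decode, as performed by dbFetchFirstTxRegion below, is
// simply the inverse; a round-trip sketch (illustrative values):
//
//	var buf [includingBlocksIndexKeyEntrySize]byte
//	putIncludingBlocksEntry(buf[:], wire.TxLoc{TxStart: 80, TxLen: 250})
//	offset := byteOrder.Uint32(buf[:4]) // 80
//	length := byteOrder.Uint32(buf[4:]) // 250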
func dbPutIncludingBlocksEntry(dbTx database.Tx, txID *daghash.TxID, blockID uint64, serializedData []byte) error {
bucket, err := dbTx.Metadata().Bucket(includingBlocksIndexKey).CreateBucketIfNotExists(txID[:])
if err != nil {
return err
}
return bucket.Put(blockdag.SerializeBlockID(blockID), serializedData)
}
func dbPutAcceptingBlocksEntry(dbTx database.Tx, txID *daghash.TxID, blockID uint64, serializedData []byte) error {
bucket, err := dbTx.Metadata().Bucket(acceptingBlocksIndexKey).CreateBucketIfNotExists(txID[:])
if err != nil {
return err
}
return bucket.Put(blockdag.SerializeBlockID(blockID), serializedData)
}
// dbFetchFirstTxRegion uses an existing database transaction to fetch the block
// region for the provided transaction hash from the transaction index. When
// there is no entry for the provided hash, nil will be returned for both
// the region and the error.
//
// Note: because the transaction can be found in multiple blocks, this function arbitrarily
// returns the first block region that is stored in the txindex.
func dbFetchFirstTxRegion(dbTx database.Tx, txID *daghash.TxID) (*database.BlockRegion, error) {
// Load the record from the database and return now if it doesn't exist.
txBucket := dbTx.Metadata().Bucket(includingBlocksIndexKey).Bucket(txID[:])
if txBucket == nil {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("No block region "+
"was found for %s", txID),
}
}
cursor := txBucket.Cursor()
if ok := cursor.First(); !ok {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("No block region "+
"was found for %s", txID),
}
}
serializedBlockID := cursor.Key()
serializedData := cursor.Value()
if len(serializedData) == 0 {
return nil, nil
}
// Ensure the serialized data has enough bytes to properly deserialize.
if len(serializedData) < includingBlocksIndexKeyEntrySize {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("corrupt transaction index "+
"entry for %s", txID),
}
}
// Load the block hash associated with the block ID.
hash, err := blockdag.DBFetchBlockHashBySerializedID(dbTx, serializedBlockID)
if err != nil {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("corrupt transaction index "+
"entry for %s: %s", txID, err),
}
}
// Deserialize the final entry.
region := database.BlockRegion{Hash: &daghash.Hash{}}
copy(region.Hash[:], hash[:])
region.Offset = byteOrder.Uint32(serializedData[:4])
region.Len = byteOrder.Uint32(serializedData[4:])
return &region, nil
}
// dbAddTxIndexEntries uses an existing database transaction to add a
// transaction index entry for every transaction in the passed block.
func dbAddTxIndexEntries(dbTx database.Tx, block *util.Block, blockID uint64, multiBlockTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
// The offset and length of the transactions within the serialized
// block.
txLocs, err := block.TxLoc()
if err != nil {
return err
}
// As an optimization, allocate a single slice big enough to hold all
// of the serialized transaction index entries for the block and
// serialize them directly into the slice. Then, pass the appropriate
// subslice to the database to be written. This approach significantly
// cuts down on the number of required allocations.
includingBlocksOffset := 0
serializedIncludingBlocksValues := make([]byte, len(block.Transactions())*includingBlocksIndexKeyEntrySize)
for i, tx := range block.Transactions() {
putIncludingBlocksEntry(serializedIncludingBlocksValues[includingBlocksOffset:], txLocs[i])
endOffset := includingBlocksOffset + includingBlocksIndexKeyEntrySize
err := dbPutIncludingBlocksEntry(dbTx, tx.ID(), blockID,
serializedIncludingBlocksValues[includingBlocksOffset:endOffset:endOffset])
if err != nil {
return err
}
includingBlocksOffset += includingBlocksIndexKeyEntrySize
}
for _, blockTxsAcceptanceData := range multiBlockTxsAcceptanceData {
var includingBlockID uint64
if blockTxsAcceptanceData.BlockHash.IsEqual(block.Hash()) {
includingBlockID = blockID
} else {
includingBlockID, err = blockdag.DBFetchBlockIDByHash(dbTx, &blockTxsAcceptanceData.BlockHash)
if err != nil {
return err
}
}
serializedIncludingBlockID := blockdag.SerializeBlockID(includingBlockID)
for _, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
err = dbPutAcceptingBlocksEntry(dbTx, txAcceptanceData.Tx.ID(), blockID, serializedIncludingBlockID)
if err != nil {
return err
}
}
}
return nil
}
func updateTxsAcceptedByVirtual(virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
// Initialize a new txsAcceptedByVirtual
entries := 0
for _, blockTxsAcceptanceData := range virtualTxsAcceptanceData {
entries += len(blockTxsAcceptanceData.TxAcceptanceData)
}
txsAcceptedByVirtual = make(map[daghash.TxID]bool, entries)
// Copy virtualTxsAcceptanceData to txsAcceptedByVirtual
for _, blockTxsAcceptanceData := range virtualTxsAcceptanceData {
for _, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
txsAcceptedByVirtual[*txAcceptanceData.Tx.ID()] = true
}
}
return nil
}
// TxIndex implements a transaction by hash index. That is to say, it supports
// querying all transactions by their hash.
type TxIndex struct {
db database.DB
}
// Ensure the TxIndex type implements the Indexer interface.
var _ Indexer = (*TxIndex)(nil)
// Init initializes the hash-based transaction index. In particular, it
// stores the database handle and initializes the in-memory index of
// transactions accepted by the current virtual block.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Init(db database.DB, dag *blockdag.BlockDAG) error {
idx.db = db
// Initialize the txsAcceptedByVirtual index
virtualTxsAcceptanceData, err := dag.TxsAcceptedByVirtual()
if err != nil {
return err
}
err = updateTxsAcceptedByVirtual(virtualTxsAcceptanceData)
if err != nil {
return err
}
return nil
}
// Key returns the database key to use for the index as a byte slice.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Key() []byte {
return includingBlocksIndexKey
}
// Name returns the human-readable name of the index.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Name() string {
return txIndexName
}
// Create is invoked when the indexer manager determines the index needs
// to be created for the first time. It creates the buckets for the hash-based
// transaction index and the internal block ID indexes.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Create(dbTx database.Tx) error {
meta := dbTx.Metadata()
if _, err := meta.CreateBucket(includingBlocksIndexKey); err != nil {
return err
}
_, err := meta.CreateBucket(acceptingBlocksIndexKey)
return err
}
// ConnectBlock is invoked by the index manager when a new block has been
// connected to the DAG. This indexer adds a hash-to-transaction mapping
// for every transaction in the passed block.
//
// This is part of the Indexer interface.
func (idx *TxIndex) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
acceptedTxsData blockdag.MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
if err := dbAddTxIndexEntries(dbTx, block, blockID, acceptedTxsData); err != nil {
return err
}
err := updateTxsAcceptedByVirtual(virtualTxsAcceptanceData)
if err != nil {
return err
}
return nil
}
// TxFirstBlockRegion returns the first block region for the provided transaction hash
// from the transaction index. The block region can in turn be used to load the
// raw transaction bytes. When there is no entry for the provided hash, nil
// will be returned for both the entry and the error.
//
// This function is safe for concurrent access.
func (idx *TxIndex) TxFirstBlockRegion(txID *daghash.TxID) (*database.BlockRegion, error) {
var region *database.BlockRegion
err := idx.db.View(func(dbTx database.Tx) error {
var err error
region, err = dbFetchFirstTxRegion(dbTx, txID)
return err
})
return region, err
}
// TxBlocks returns the hashes of the blocks where the transaction exists
func (idx *TxIndex) TxBlocks(txHash *daghash.Hash) ([]*daghash.Hash, error) {
blockHashes := make([]*daghash.Hash, 0)
err := idx.db.View(func(dbTx database.Tx) error {
var err error
blockHashes, err = dbFetchTxBlocks(dbTx, txHash)
if err != nil {
return err
}
return nil
})
return blockHashes, err
}
func dbFetchTxBlocks(dbTx database.Tx, txHash *daghash.Hash) ([]*daghash.Hash, error) {
blockHashes := make([]*daghash.Hash, 0)
bucket := dbTx.Metadata().Bucket(includingBlocksIndexKey).Bucket(txHash[:])
if bucket == nil {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("No including blocks "+
"were found for %s", txHash),
}
}
err := bucket.ForEach(func(serializedBlockID, _ []byte) error {
blockHash, err := blockdag.DBFetchBlockHashBySerializedID(dbTx, serializedBlockID)
if err != nil {
return err
}
blockHashes = append(blockHashes, blockHash)
return nil
})
if err != nil {
return nil, err
}
return blockHashes, nil
}
// BlockThatAcceptedTx returns the hash of the block where the transaction got accepted (from the virtual block point of view)
func (idx *TxIndex) BlockThatAcceptedTx(dag *blockdag.BlockDAG, txID *daghash.TxID) (*daghash.Hash, error) {
var acceptingBlock *daghash.Hash
err := idx.db.View(func(dbTx database.Tx) error {
var err error
acceptingBlock, err = dbFetchTxAcceptingBlock(dbTx, txID, dag)
return err
})
return acceptingBlock, err
}
func dbFetchTxAcceptingBlock(dbTx database.Tx, txID *daghash.TxID, dag *blockdag.BlockDAG) (*daghash.Hash, error) {
// If the transaction was accepted by the current virtual,
// return the zeroHash immediately
if _, ok := txsAcceptedByVirtual[*txID]; ok {
return &daghash.ZeroHash, nil
}
bucket := dbTx.Metadata().Bucket(acceptingBlocksIndexKey).Bucket(txID[:])
if bucket == nil {
return nil, nil
}
cursor := bucket.Cursor()
if !cursor.First() {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("Accepting blocks bucket is "+
"empty for %s", txID),
}
}
for ; cursor.Key() != nil; cursor.Next() {
blockHash, err := blockdag.DBFetchBlockHashBySerializedID(dbTx, cursor.Key())
if err != nil {
return nil, err
}
if dag.IsInSelectedParentChain(blockHash) {
return blockHash, nil
}
}
return nil, nil
}
// NewTxIndex returns a new instance of an indexer that is used to create a
// mapping of the hashes of all transactions in the block DAG to the respective
// block, location within the block, and size of the transaction.
//
// It implements the Indexer interface which plugs into the IndexManager that in
// turn is used by the blockdag package. This allows the index to be
// seamlessly maintained along with the DAG.
func NewTxIndex() *TxIndex {
return &TxIndex{}
}
// DropTxIndex drops the transaction index from the provided database if it
// exists. Since the address index relies on it, the address index will also be
// dropped when it exists.
func DropTxIndex(db database.DB, interrupt <-chan struct{}) error {
err := dropIndex(db, addrIndexKey, addrIndexName, interrupt)
if err != nil {
return err
}
err = dropIndex(db, includingBlocksIndexKey, txIndexName, interrupt)
if err != nil {
return err
}
return dropIndex(db, acceptingBlocksIndexKey, txIndexName, interrupt)
}
// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
return errors.Errorf("txindex was turned off for %d blocks and can't be recovered."+
" To resume working drop the txindex with --droptxindex", lastKnownBlockID-currentBlockID)
}
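The query methods above compose into a simple lookup flow. The following is a minimal sketch and not part of the original file: it assumes a TxIndex that was registered via NewManager and blockdag.Config before blocks were processed, and an already-known txID; lookupTx is a hypothetical helper and error handling is abbreviated.

func lookupTx(txIndex *TxIndex, dag *blockdag.BlockDAG, txID *daghash.TxID) error {
	// Locate the raw bytes of the transaction's first inclusion.
	region, err := txIndex.TxFirstBlockRegion(txID)
	if err != nil {
		return err
	}
	if region == nil {
		return errors.Errorf("no entry for %s", txID)
	}
	fmt.Printf("included in block %s at offset %d (%d bytes)\n",
		region.Hash, region.Offset, region.Len)
	// Find the chain block that accepted it. ZeroHash means the
	// transaction was accepted by the current virtual block.
	acceptingBlock, err := txIndex.BlockThatAcceptedTx(dag, txID)
	if err != nil {
		return err
	}
	if acceptingBlock != nil && acceptingBlock.IsEqual(&daghash.ZeroHash) {
		fmt.Println("accepted by the current virtual block")
	}
	return nil
}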

View File

@@ -1,144 +0,0 @@
package indexers
import (
"bytes"
"reflect"
"testing"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/mining"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
func createTransaction(t *testing.T, value uint64, originTx *wire.MsgTx, outputIndex uint32) *wire.MsgTx {
signatureScript, err := txscript.PayToScriptHashSignatureScript(blockdag.OpTrueScript, nil)
if err != nil {
t.Fatalf("Error creating signature script: %s", err)
}
txIn := &wire.TxIn{
PreviousOutpoint: wire.Outpoint{
TxID: *originTx.TxID(),
Index: outputIndex,
},
Sequence: wire.MaxTxInSequenceNum,
SignatureScript: signatureScript,
}
txOut := wire.NewTxOut(value, blockdag.OpTrueScript)
tx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut})
return tx
}
func TestTxIndexConnectBlock(t *testing.T) {
blocks := make(map[daghash.Hash]*util.Block)
txIndex := NewTxIndex()
indexManager := NewManager([]Indexer{txIndex})
params := dagconfig.SimNetParams
params.BlockCoinbaseMaturity = 0
params.K = 1
config := blockdag.Config{
IndexManager: indexManager,
DAGParams: &params,
}
dag, teardown, err := blockdag.DAGSetup("TestTxIndexConnectBlock", config)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: Failed to setup DAG instance: %v", err)
}
if teardown != nil {
defer teardown()
}
prepareAndProcessBlock := func(parentHashes []*daghash.Hash, transactions []*wire.MsgTx, blockName string) *wire.MsgBlock {
block, err := mining.PrepareBlockForTest(dag, &params, parentHashes, transactions, false)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: block %v got unexpected error from PrepareBlockForTest: %v", blockName, err)
}
utilBlock := util.NewBlock(block)
blocks[*block.BlockHash()] = utilBlock
isOrphan, delay, err := dag.ProcessBlock(utilBlock, blockdag.BFNoPoWCheck)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: dag.ProcessBlock got unexpected error for block %v: %v", blockName, err)
}
if delay != 0 {
t.Fatalf("TestTxIndexConnectBlock: block %s "+
"is too far in the future", blockName)
}
if isOrphan {
t.Fatalf("TestTxIndexConnectBlock: block %v was unexpectedly orphan", blockName)
}
return block
}
block1 := prepareAndProcessBlock([]*daghash.Hash{params.GenesisHash}, nil, "1")
block2Tx := createTransaction(t, block1.Transactions[0].TxOut[0].Value, block1.Transactions[0], 0)
block2 := prepareAndProcessBlock([]*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{block2Tx}, "2")
block3Tx := createTransaction(t, block2.Transactions[0].TxOut[0].Value, block2.Transactions[0], 0)
block3 := prepareAndProcessBlock([]*daghash.Hash{block2.BlockHash()}, []*wire.MsgTx{block3Tx}, "3")
block2TxID := block2Tx.TxID()
block2TxNewAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, block2TxID)
if err != nil {
t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
}
block3Hash := block3.BlockHash()
if !block2TxNewAcceptedBlock.IsEqual(block3Hash) {
t.Errorf("TestTxIndexConnectBlock: block2Tx should've "+
"been accepted in block %v but instead got accepted in block %v", block3Hash, block2TxNewAcceptedBlock)
}
block3TxID := block3Tx.TxID()
block3TxNewAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, block3TxID)
if err != nil {
t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
}
if !block3TxNewAcceptedBlock.IsEqual(&daghash.ZeroHash) {
t.Errorf("TestTxIndexConnectBlock: block3Tx should've "+
"been accepted by the virtual block but instead got accepted in block %v", block3TxNewAcceptedBlock)
}
block3A := prepareAndProcessBlock([]*daghash.Hash{block2.BlockHash()}, []*wire.MsgTx{block3Tx}, "3A")
block4 := prepareAndProcessBlock([]*daghash.Hash{block3.BlockHash()}, nil, "4")
prepareAndProcessBlock([]*daghash.Hash{block3A.BlockHash(), block4.BlockHash()}, nil, "5")
block2TxAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, block2TxID)
if err != nil {
t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
}
block3AHash := block3A.BlockHash()
if !block2TxAcceptedBlock.IsEqual(block3AHash) {
t.Errorf("TestTxIndexConnectBlock: block2Tx should've "+
"been accepted in block %v but instead got accepted in block %v", block3AHash, block2TxAcceptedBlock)
}
region, err := txIndex.TxFirstBlockRegion(block3TxID)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: no block region was found for block3Tx")
}
regionBlock, ok := blocks[*region.Hash]
if !ok {
t.Fatalf("TestTxIndexConnectBlock: couldn't find block with hash %v", region.Hash)
}
regionBlockBytes, err := regionBlock.Bytes()
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: Couldn't serialize block to bytes")
}
block3TxInBlock := regionBlockBytes[region.Offset : region.Offset+region.Len]
block3TxBuf := bytes.NewBuffer(make([]byte, 0, block3Tx.SerializeSize()))
err = block3Tx.BtcEncode(block3TxBuf, 0)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: Couldn't serialize block3Tx to bytes: %v", err)
}
blockTxBytes := block3TxBuf.Bytes()
if !reflect.DeepEqual(blockTxBytes, block3TxInBlock) {
t.Errorf("TestTxIndexConnectBlock: the block region that was in the bucket doesn't match block3Tx")
}
}

View File

@@ -1,218 +0,0 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"math"
"sort"
"sync"
"time"
)
const (
// maxAllowedOffsetSecs is the maximum number of seconds in either
// direction that local clock will be adjusted. When the median time
// of the network is outside of this range, no offset will be applied.
maxAllowedOffsetSecs = 70 * 60 // 1 hour 10 minutes
// similarTimeSecs is the number of seconds in either direction from the
// local clock that is used to determine that it is likely wrong and
// hence to show a warning.
similarTimeSecs = 5 * 60 // 5 minutes
)
var (
// maxMedianTimeEntries is the maximum number of entries allowed in the
// median time data. This is a variable as opposed to a constant so the
// test code can modify it.
maxMedianTimeEntries = 200
)
// MedianTimeSource provides a mechanism to add several time samples which are
// used to determine a median time which is then used as an offset to the local
// clock.
type MedianTimeSource interface {
// AdjustedTime returns the current time adjusted by the median time
// offset as calculated from the time samples added by AddTimeSample.
AdjustedTime() time.Time
// AddTimeSample adds a time sample that is used when determining the
// median time of the added samples.
AddTimeSample(id string, timeVal time.Time)
// Offset returns the number of seconds to adjust the local clock based
// upon the median of the time samples added by AddTimeData.
Offset() time.Duration
}
// int64Sorter implements sort.Interface to allow a slice of 64-bit integers to
// be sorted.
type int64Sorter []int64
// Len returns the number of 64-bit integers in the slice. It is part of the
// sort.Interface implementation.
func (s int64Sorter) Len() int {
return len(s)
}
// Swap swaps the 64-bit integers at the passed indices. It is part of the
// sort.Interface implementation.
func (s int64Sorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
// Less returns whether the 64-bit integer with index i should sort before the
// 64-bit integer with index j. It is part of the sort.Interface
// implementation.
func (s int64Sorter) Less(i, j int) bool {
return s[i] < s[j]
}
// medianTime provides an implementation of the MedianTimeSource interface.
// It is limited to maxMedianTimeEntries entries and includes the same buggy
// behavior as the time offset mechanism in Bitcoin Core. This is necessary
// because it is used in the consensus code.
type medianTime struct {
mtx sync.Mutex
knownIDs map[string]struct{}
offsets []int64
offsetSecs int64
invalidTimeChecked bool
}
// Ensure the medianTime type implements the MedianTimeSource interface.
var _ MedianTimeSource = (*medianTime)(nil)
// AdjustedTime returns the current time adjusted by the median time offset as
// calculated from the time samples added by AddTimeSample.
//
// This function is safe for concurrent access and is part of the
// MedianTimeSource interface implementation.
func (m *medianTime) AdjustedTime() time.Time {
m.mtx.Lock()
defer m.mtx.Unlock()
// Limit the adjusted time to 1 second precision.
now := time.Unix(time.Now().Unix(), 0)
return now.Add(time.Duration(m.offsetSecs) * time.Second)
}
// AddTimeSample adds a time sample that is used when determining the median
// time of the added samples.
//
// This function is safe for concurrent access and is part of the
// MedianTimeSource interface implementation.
func (m *medianTime) AddTimeSample(sourceID string, timeVal time.Time) {
m.mtx.Lock()
defer m.mtx.Unlock()
// Don't add time data from the same source.
if _, exists := m.knownIDs[sourceID]; exists {
return
}
m.knownIDs[sourceID] = struct{}{}
// Truncate the provided offset to seconds and append it to the slice
// of offsets while respecting the maximum number of allowed entries by
// replacing the oldest entry with the new entry once the maximum number
// of entries is reached.
now := time.Unix(time.Now().Unix(), 0)
offsetSecs := int64(timeVal.Sub(now).Seconds())
numOffsets := len(m.offsets)
if numOffsets == maxMedianTimeEntries && maxMedianTimeEntries > 0 {
m.offsets = m.offsets[1:]
numOffsets--
}
m.offsets = append(m.offsets, offsetSecs)
numOffsets++
// Sort the offsets so the median can be obtained as needed later.
sortedOffsets := make([]int64, numOffsets)
copy(sortedOffsets, m.offsets)
sort.Sort(int64Sorter(sortedOffsets))
offsetDuration := time.Duration(offsetSecs) * time.Second
log.Debugf("Added time sample of %s (total: %d)", offsetDuration,
numOffsets)
// NOTE: The following code intentionally has a bug to mirror the
// buggy behavior in Bitcoin Core since the median time is used in the
// consensus rules.
//
// In particular, the offset is only updated when the number of entries
// is odd, but the max number of entries is 200, an even number. Thus,
// the offset will never be updated again once the max number of entries
// is reached.
// The median offset is only updated when there are enough offsets and
// the number of offsets is odd so the middle value is the true median.
// Thus, there is nothing to do when those conditions are not met.
if numOffsets < 5 || numOffsets&0x01 != 1 {
return
}
// At this point the number of offsets in the list is odd, so the
// middle value of the sorted offsets is the median.
median := sortedOffsets[numOffsets/2]
// Set the new offset when the median offset is within the allowed
// offset range.
if math.Abs(float64(median)) < maxAllowedOffsetSecs {
m.offsetSecs = median
} else {
// The median offset of all added time data is larger than the
// maximum allowed offset, so don't use an offset. This
// effectively limits how far the local clock can be skewed.
m.offsetSecs = 0
if !m.invalidTimeChecked {
m.invalidTimeChecked = true
// Find if any time samples have a time that is close
// to the local time.
var remoteHasCloseTime bool
for _, offset := range sortedOffsets {
if math.Abs(float64(offset)) < similarTimeSecs {
remoteHasCloseTime = true
break
}
}
// Warn if none of the time samples are close.
if !remoteHasCloseTime {
log.Warnf("Please check your date and time " +
"are correct! btcd will not work " +
"properly with an invalid time")
}
}
}
medianDuration := time.Duration(m.offsetSecs) * time.Second
log.Debugf("New time offset: %d", medianDuration)
}
// Offset returns the number of seconds to adjust the local clock based upon the
// median of the time samples added by AddTimeData.
//
// This function is safe for concurrent access and is part of the
// MedianTimeSource interface implementation.
func (m *medianTime) Offset() time.Duration {
m.mtx.Lock()
defer m.mtx.Unlock()
return time.Duration(m.offsetSecs) * time.Second
}
// NewMedianTime returns a new instance of a concurrency-safe implementation
// of the MedianTimeSource interface. The returned implementation contains the
// rules necessary for proper time handling in the chain consensus rules and
// expects the time samples to be added from the timestamp field of the version
// message received from remote peers that successfully connect and negotiate.
func NewMedianTime() MedianTimeSource {
return &medianTime{
knownIDs: make(map[string]struct{}),
offsets: make([]int64, 0, maxMedianTimeEntries),
}
}
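A short usage sketch (illustrative only, not part of the original file; the peer IDs and offsets are made up). Because the median is only recomputed once there are at least 5 samples and the sample count is odd, the fifth sample below is the first one that can move the offset:

timeSource := NewMedianTime()
for i, secs := range []int64{-13, 57, -4, -23, -12} {
	peerID := fmt.Sprintf("peer-%d", i)
	sample := time.Now().Add(time.Duration(secs) * time.Second)
	timeSource.AddTimeSample(peerID, sample)
}
// The sorted offsets are [-23 -13 -12 -4 57], so the median is -12 seconds.
fmt.Println(timeSource.Offset())       // approximately -12s
fmt.Println(timeSource.AdjustedTime()) // the local clock shifted by the offset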

View File

@@ -1,104 +0,0 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"strconv"
"testing"
"time"
)
// TestMedianTime tests the medianTime implementation.
func TestMedianTime(t *testing.T) {
tests := []struct {
in []int64
wantOffset int64
useDupID bool
}{
// Not enough samples must result in an offset of 0.
{in: []int64{1}, wantOffset: 0},
{in: []int64{1, 2}, wantOffset: 0},
{in: []int64{1, 2, 3}, wantOffset: 0},
{in: []int64{1, 2, 3, 4}, wantOffset: 0},
// Various number of entries. The expected offset is only
// updated on odd number of elements.
{in: []int64{-13, 57, -4, -23, -12}, wantOffset: -12},
{in: []int64{55, -13, 61, -52, 39, 55}, wantOffset: 39},
{in: []int64{-62, -58, -30, -62, 51, -30, 15}, wantOffset: -30},
{in: []int64{29, -47, 39, 54, 42, 41, 8, -33}, wantOffset: 39},
{in: []int64{37, 54, 9, -21, -56, -36, 5, -11, -39}, wantOffset: -11},
{in: []int64{57, -28, 25, -39, 9, 63, -16, 19, -60, 25}, wantOffset: 9},
{in: []int64{-5, -4, -3, -2, -1}, wantOffset: -3, useDupID: true},
// The offset stops being updated once the max number of entries
// has been reached. This is actually a bug from Bitcoin Core,
// but since the time is ultimately used as a part of the
// consensus rules, it must be mirrored.
{in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52}, wantOffset: 17},
{in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52, 45}, wantOffset: 17},
{in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52, 45, 4}, wantOffset: 17},
// Offsets that are too far away from the local time should
// be ignored.
{in: []int64{-4201, 4202, -4203, 4204, -4205}, wantOffset: 0},
// Exercise the condition where the median offset is greater
// than the max allowed adjustment, but there is at least one
// sample that is close enough to the current time to avoid
// triggering a warning about an invalid local clock.
{in: []int64{4201, 4202, 4203, 4204, -299}, wantOffset: 0},
}
// Modify the max number of allowed median time entries for these tests.
maxMedianTimeEntries = 10
defer func() { maxMedianTimeEntries = 200 }()
for i, test := range tests {
filter := NewMedianTime()
for j, offset := range test.in {
id := strconv.Itoa(j)
now := time.Unix(time.Now().Unix(), 0)
tOffset := now.Add(time.Duration(offset) * time.Second)
filter.AddTimeSample(id, tOffset)
// Ensure the duplicate IDs are ignored.
if test.useDupID {
// Modify the offsets to ensure the final median
// would be different if the duplicate is added.
tOffset = tOffset.Add(time.Duration(offset) *
time.Second)
filter.AddTimeSample(id, tOffset)
}
}
// Since it is possible that the time.Now call in AddTimeSample
// and the time.Now calls here in the tests will be off by one
// second, allow a fudge factor to compensate.
gotOffset := filter.Offset()
wantOffset := time.Duration(test.wantOffset) * time.Second
wantOffset2 := time.Duration(test.wantOffset-1) * time.Second
if gotOffset != wantOffset && gotOffset != wantOffset2 {
t.Errorf("Offset #%d: unexpected offset -- got %v, "+
"want %v or %v", i, gotOffset, wantOffset,
wantOffset2)
continue
}
// Since it is possible that the time.Now call in AdjustedTime
// and the time.Now call here in the tests will be off by one
// second, allow a fudge factor to compensate.
adjustedTime := filter.AdjustedTime()
now := time.Unix(time.Now().Unix(), 0)
wantTime := now.Add(filter.Offset())
wantTime2 := now.Add(filter.Offset() - time.Second)
if !adjustedTime.Equal(wantTime) && !adjustedTime.Equal(wantTime2) {
t.Errorf("AdjustedTime #%d: unexpected result -- got %v, "+
"want %v or %v", i, adjustedTime, wantTime,
wantTime2)
continue
}
}
}

View File

@@ -1,105 +0,0 @@
package blockdag
import (
"github.com/kaspanet/kaspad/util/daghash"
)
// phantom calculates and returns the block's blue set, selected parent and blue score.
// The chain start is determined by going down the DAG through the selected path
// (following the selected parent of each block) k + 1 steps.
// The blue set of a block is the set of all blue blocks in its past.
// To optimize memory usage, for each block we store only the blue blocks in
// its selected parent's anticone that are in the future of the chain start,
// as well as the selected parent itself - the rest of the
// blue set can be restored by traversing the selected parent chain and combining
// the .blues of all blocks in it.
// The blue score is the number of blocks in this block's stored blue set plus
// the blue score of its selected parent (the blue score of the genesis block
// is defined as 0).
// The selected parent is the parent that gives this block the highest blue score.
func phantom(block *blockNode, k uint32) (blues []*blockNode, selectedParent *blockNode, score uint64) {
bestScore := uint64(0)
var bestParent *blockNode
var bestBlues []*blockNode
var bestHash *daghash.Hash
for _, parent := range block.parents {
chainStart := digToChainStart(parent, k)
candidates := blueCandidates(chainStart)
blues := traverseCandidates(block, candidates, parent)
score := uint64(len(blues)) + parent.blueScore
if score > bestScore || (score == bestScore && (bestHash == nil || daghash.Less(parent.hash, bestHash))) {
bestScore = score
bestBlues = blues
bestParent = parent
bestHash = parent.hash
}
}
return bestBlues, bestParent, bestScore
}
// digToChainStart digs down the selected parent chain and returns the block at depth k+1, stopping early at the genesis block
func digToChainStart(parent *blockNode, k uint32) *blockNode {
current := parent
for i := uint32(0); i < k; i++ {
if current.isGenesis() {
break
}
current = current.selectedParent
}
return current
}
func blueCandidates(chainStart *blockNode) blockSet {
candidates := newSet()
candidates.add(chainStart)
queue := []*blockNode{chainStart}
for len(queue) > 0 {
var current *blockNode
current, queue = queue[0], queue[1:]
children := current.children
for _, child := range children {
if !candidates.contains(child) {
candidates.add(child)
queue = append(queue, child)
}
}
}
return candidates
}
// traverseCandidates returns all the candidate blocks (blocks in the future of the chain start) that are in the anticone of the selected parent, with the selected parent itself appended last
func traverseCandidates(newBlock *blockNode, candidates blockSet, selectedParent *blockNode) []*blockNode {
blues := []*blockNode{}
selectedParentPast := newSet()
queue := newDownHeap()
visited := newSet()
for _, parent := range newBlock.parents {
queue.Push(parent)
}
for queue.Len() > 0 {
current := queue.pop()
if candidates.contains(current) {
if current == selectedParent || selectedParentPast.anyChildInSet(current) {
selectedParentPast.add(current)
} else {
blues = append(blues, current)
}
for _, parent := range current.parents {
if !visited.contains(parent) {
visited.add(parent)
queue.Push(parent)
}
}
}
}
return append(blues, selectedParent)
}
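Since each node stores only a slice of its blue set, the full set has to be reassembled on demand. The following is a sketch of that reconstruction, following the comment on phantom above; restoreBlueSet is a hypothetical helper, not part of the original file.

// restoreBlueSet collects the full blue set of node by walking down the
// selected parent chain and combining the .blues of every block on it.
// Each .blues slice already contains the block's selected parent, so the
// union covers the entire chain's contribution down to genesis.
func restoreBlueSet(node *blockNode) blockSet {
	blueSet := newSet()
	for current := node; current != nil; current = current.selectedParent {
		for _, blue := range current.blues {
			blueSet.add(blue)
		}
	}
	return blueSet
}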

View File

@@ -1,892 +0,0 @@
package blockdag
import (
"fmt"
"reflect"
"sort"
"testing"
"time"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/dagconfig"
)
type testBlockData struct {
parents []string
id string // id is a virtual entity that is used only for tests, so we can define relations between blocks without knowing their hashes
expectedScore uint64
expectedSelectedParent string
expectedBlues []string
}
// TestPhantom iterates over several DAG simulations and checks
// that the blue score, blue set and selected parent of each
// block are calculated as expected
func TestPhantom(t *testing.T) {
netParams := dagconfig.SimNetParams
blockVersion := int32(0x10000000)
tests := []struct {
k uint32
dagData []*testBlockData
virtualBlockID string
expectedReds []string
}{
{
// Block hash order: AKJIHGFEDCB
k: 1,
virtualBlockID: "K",
expectedReds: []string{"D"},
dagData: []*testBlockData{
{
parents: []string{"A"},
id: "B",
expectedScore: 1,
expectedSelectedParent: "A",
expectedBlues: []string{"A"},
},
{
parents: []string{"A"},
id: "C",
expectedScore: 1,
expectedSelectedParent: "A",
expectedBlues: []string{"A"},
},
{
parents: []string{"B"},
id: "D",
expectedScore: 2,
expectedSelectedParent: "B",
expectedBlues: []string{"B"},
},
{
parents: []string{"B"},
id: "E",
expectedScore: 2,
expectedSelectedParent: "B",
expectedBlues: []string{"B"},
},
{
parents: []string{"C"},
id: "F",
expectedScore: 2,
expectedSelectedParent: "C",
expectedBlues: []string{"C"},
},
{
parents: []string{"C", "D"},
id: "G",
expectedScore: 4,
expectedSelectedParent: "C",
expectedBlues: []string{"D", "B", "C"},
},
{
parents: []string{"C", "E"},
id: "H",
expectedScore: 4,
expectedSelectedParent: "C",
expectedBlues: []string{"E", "B", "C"},
},
{
parents: []string{"E", "G"},
id: "I",
expectedScore: 5,
expectedSelectedParent: "E",
expectedBlues: []string{"G", "D", "E"},
},
{
parents: []string{"F"},
id: "J",
expectedScore: 3,
expectedSelectedParent: "F",
expectedBlues: []string{"F"},
},
{
parents: []string{"H", "I", "J"},
id: "K",
expectedScore: 9,
expectedSelectedParent: "H",
expectedBlues: []string{"I", "G", "J", "F", "H"},
},
},
},
{
// Block hash order: AVUTSRQPONMLKJIHGFEDCB
k: 2,
virtualBlockID: "V",
expectedReds: []string{"D", "J", "P"},
dagData: []*testBlockData{
{
parents: []string{"A"},
id: "B",
expectedScore: 1,
expectedSelectedParent: "A",
expectedBlues: []string{"A"},
},
{
parents: []string{"A"},
id: "C",
expectedScore: 1,
expectedSelectedParent: "A",
expectedBlues: []string{"A"},
},
{
parents: []string{"B"},
id: "D",
expectedScore: 2,
expectedSelectedParent: "B",
expectedBlues: []string{"B"},
},
{
parents: []string{"B"},
id: "E",
expectedScore: 2,
expectedSelectedParent: "B",
expectedBlues: []string{"B"},
},
{
parents: []string{"C"},
id: "F",
expectedScore: 2,
expectedSelectedParent: "C",
expectedBlues: []string{"C"},
},
{
parents: []string{"C"},
id: "G",
expectedScore: 2,
expectedSelectedParent: "C",
expectedBlues: []string{"C"},
},
{
parents: []string{"G"},
id: "H",
expectedScore: 3,
expectedSelectedParent: "G",
expectedBlues: []string{"G"},
},
{
parents: []string{"E"},
id: "I",
expectedScore: 3,
expectedSelectedParent: "E",
expectedBlues: []string{"E"},
},
{
parents: []string{"E"},
id: "J",
expectedScore: 3,
expectedSelectedParent: "E",
expectedBlues: []string{"E"},
},
{
parents: []string{"I"},
id: "K",
expectedScore: 4,
expectedSelectedParent: "I",
expectedBlues: []string{"I"},
},
{
parents: []string{"K", "H"},
id: "L",
expectedScore: 5,
expectedSelectedParent: "K",
expectedBlues: []string{"K"},
},
{
parents: []string{"F", "L"},
id: "M",
expectedScore: 10,
expectedSelectedParent: "F",
expectedBlues: []string{"L", "K", "I", "H", "G", "E", "B", "F"},
},
{
parents: []string{"G", "K"},
id: "N",
expectedScore: 7,
expectedSelectedParent: "G",
expectedBlues: []string{"K", "I", "E", "B", "G"},
},
{
parents: []string{"J", "N"},
id: "O",
expectedScore: 8,
expectedSelectedParent: "N",
expectedBlues: []string{"N"},
},
{
parents: []string{"D"},
id: "P",
expectedScore: 3,
expectedSelectedParent: "D",
expectedBlues: []string{"D"},
},
{
parents: []string{"O", "P"},
id: "Q",
expectedScore: 10,
expectedSelectedParent: "P",
expectedBlues: []string{"O", "N", "K", "J", "I", "E", "P"},
},
{
parents: []string{"L", "Q"},
id: "R",
expectedScore: 11,
expectedSelectedParent: "Q",
expectedBlues: []string{"Q"},
},
{
parents: []string{"M", "R"},
id: "S",
expectedScore: 15,
expectedSelectedParent: "M",
expectedBlues: []string{"R", "Q", "O", "N", "M"},
},
{
parents: []string{"H", "F"},
id: "T",
expectedScore: 5,
expectedSelectedParent: "F",
expectedBlues: []string{"H", "G", "F"},
},
{
parents: []string{"M", "T"},
id: "U",
expectedScore: 12,
expectedSelectedParent: "M",
expectedBlues: []string{"T", "M"},
},
{
parents: []string{"S", "U"},
id: "V",
expectedScore: 18,
expectedSelectedParent: "S",
expectedBlues: []string{"U", "T", "S"},
},
},
},
{
// Block hash order: AXWVUTSRQPONMLKJIHGFEDCB
k: 1,
virtualBlockID: "X",
expectedReds: []string{"D", "F", "G", "H", "J", "K", "L", "N", "O", "Q", "R", "S", "U", "V"},
dagData: []*testBlockData{
{
parents: []string{"A"},
id: "B",
expectedScore: 1,
expectedSelectedParent: "A",
expectedBlues: []string{"A"},
},
{
parents: []string{"A"},
id: "C",
expectedScore: 1,
expectedSelectedParent: "A",
expectedBlues: []string{"A"},
},
{
parents: []string{"A"},
id: "D",
expectedScore: 1,
expectedSelectedParent: "A",
expectedBlues: []string{"A"},
},
{
parents: []string{"A"},
id: "E",
expectedScore: 1,
expectedSelectedParent: "A",
expectedBlues: []string{"A"},
},
{
parents: []string{"B"},
id: "F",
expectedScore: 2,
expectedSelectedParent: "B",
expectedBlues: []string{"B"},
},
{
parents: []string{"B"},
id: "G",
expectedScore: 2,
expectedSelectedParent: "B",
expectedBlues: []string{"B"},
},
{
parents: []string{"C"},
id: "H",
expectedScore: 2,
expectedSelectedParent: "C",
expectedBlues: []string{"C"},
},
{
parents: []string{"C"},
id: "I",
expectedScore: 2,
expectedSelectedParent: "C",
expectedBlues: []string{"C"},
},
{
parents: []string{"B"},
id: "J",
expectedScore: 2,
expectedSelectedParent: "B",
expectedBlues: []string{"B"},
},
{
parents: []string{"D"},
id: "K",
expectedScore: 2,
expectedSelectedParent: "D",
expectedBlues: []string{"D"},
},
{
parents: []string{"D"},
id: "L",
expectedScore: 2,
expectedSelectedParent: "D",
expectedBlues: []string{"D"},
},
{
parents: []string{"E"},
id: "M",
expectedScore: 2,
expectedSelectedParent: "E",
expectedBlues: []string{"E"},
},
{
parents: []string{"E"},
id: "N",
expectedScore: 2,
expectedSelectedParent: "E",
expectedBlues: []string{"E"},
},
{
parents: []string{"F", "G", "J"},
id: "O",
expectedScore: 5,
expectedSelectedParent: "F",
expectedBlues: []string{"J", "G", "F"},
},
{
parents: []string{"B", "M", "I"},
id: "P",
expectedScore: 6,
expectedSelectedParent: "B",
expectedBlues: []string{"M", "I", "E", "C", "B"},
},
{
parents: []string{"K", "E"},
id: "Q",
expectedScore: 4,
expectedSelectedParent: "E",
expectedBlues: []string{"K", "D", "E"},
},
{
parents: []string{"L", "N"},
id: "R",
expectedScore: 3,
expectedSelectedParent: "L",
expectedBlues: []string{"L"},
},
{
parents: []string{"I", "Q"},
id: "S",
expectedScore: 5,
expectedSelectedParent: "Q",
expectedBlues: []string{"Q"},
},
{
parents: []string{"K", "P"},
id: "T",
expectedScore: 7,
expectedSelectedParent: "P",
expectedBlues: []string{"P"},
},
{
parents: []string{"K", "L"},
id: "U",
expectedScore: 4,
expectedSelectedParent: "K",
expectedBlues: []string{"L", "K"},
},
{
parents: []string{"U", "R"},
id: "V",
expectedScore: 5,
expectedSelectedParent: "R",
expectedBlues: []string{"U", "R"},
},
{
parents: []string{"S", "U", "T"},
id: "W",
expectedScore: 8,
expectedSelectedParent: "T",
expectedBlues: []string{"T"},
},
{
parents: []string{"V", "W", "H"},
id: "X",
expectedScore: 9,
expectedSelectedParent: "W",
expectedBlues: []string{"W"},
},
},
},
{
// Secret mining attack: the attacker mines blocks B, C, D, E, F, G and T
// in secret without propagating them, so all of the secretly-mined blocks
// except T should be red, because they don't follow the PHANTOM rules that
// require a block to point to all the parents its miner knows of and to be
// propagated as soon as it's mined.
// Block hash order: AYXWVUTSRQPONMLKJIHGFEDCB
k: 1,
virtualBlockID: "Y",
expectedReds: []string{"B", "C", "D", "E", "F", "G", "L"},
dagData: []*testBlockData{
{
parents: []string{"A"},
id: "B",
expectedScore: 1,
expectedSelectedParent: "A",
expectedBlues: []string{"A"},
},
{
parents: []string{"B"},
id: "C",
expectedScore: 2,
expectedSelectedParent: "B",
expectedBlues: []string{"B"},
},
{
parents: []string{"C"},
id: "D",
expectedScore: 3,
expectedSelectedParent: "C",
expectedBlues: []string{"C"},
},
{
parents: []string{"D"},
id: "E",
expectedScore: 4,
expectedSelectedParent: "D",
expectedBlues: []string{"D"},
},
{
parents: []string{"E"},
id: "F",
expectedScore: 5,
expectedSelectedParent: "E",
expectedBlues: []string{"E"},
},
{
parents: []string{"F"},
id: "G",
expectedScore: 6,
expectedSelectedParent: "F",
expectedBlues: []string{"F"},
},
{
parents: []string{"A"},
id: "H",
expectedScore: 1,
expectedSelectedParent: "A",
expectedBlues: []string{"A"},
},
{
parents: []string{"A"},
id: "I",
expectedScore: 1,
expectedSelectedParent: "A",
expectedBlues: []string{"A"},
},
{
parents: []string{"H", "I"},
id: "J",
expectedScore: 3,
expectedSelectedParent: "H",
expectedBlues: []string{"I", "H"},
},
{
parents: []string{"H", "I"},
id: "K",
expectedScore: 3,
expectedSelectedParent: "H",
expectedBlues: []string{"I", "H"},
},
{
parents: []string{"I"},
id: "L",
expectedScore: 2,
expectedSelectedParent: "I",
expectedBlues: []string{"I"},
},
{
parents: []string{"J", "K", "L"},
id: "M",
expectedScore: 5,
expectedSelectedParent: "J",
expectedBlues: []string{"K", "J"},
},
{
parents: []string{"J", "K", "L"},
id: "N",
expectedScore: 5,
expectedSelectedParent: "J",
expectedBlues: []string{"K", "J"},
},
{
parents: []string{"N", "M"},
id: "O",
expectedScore: 7,
expectedSelectedParent: "M",
expectedBlues: []string{"N", "M"},
},
{
parents: []string{"N", "M"},
id: "P",
expectedScore: 7,
expectedSelectedParent: "M",
expectedBlues: []string{"N", "M"},
},
{
parents: []string{"N", "M"},
id: "Q",
expectedScore: 7,
expectedSelectedParent: "M",
expectedBlues: []string{"N", "M"},
},
{
parents: []string{"O", "P", "Q"},
id: "R",
expectedScore: 10,
expectedSelectedParent: "O",
expectedBlues: []string{"Q", "P", "O"},
},
{
parents: []string{"O", "P", "Q"},
id: "S",
expectedScore: 10,
expectedSelectedParent: "O",
expectedBlues: []string{"Q", "P", "O"},
},
{
parents: []string{"G", "S", "R"},
id: "T",
expectedScore: 12,
expectedSelectedParent: "R",
expectedBlues: []string{"S", "R"},
},
{
parents: []string{"S", "R"},
id: "U",
expectedScore: 12,
expectedSelectedParent: "R",
expectedBlues: []string{"S", "R"},
},
{
parents: []string{"T", "U"},
id: "V",
expectedScore: 14,
expectedSelectedParent: "T",
expectedBlues: []string{"U", "T"},
},
{
parents: []string{"T", "U"},
id: "W",
expectedScore: 14,
expectedSelectedParent: "T",
expectedBlues: []string{"U", "T"},
},
{
parents: []string{"U", "T"},
id: "X",
expectedScore: 14,
expectedSelectedParent: "T",
expectedBlues: []string{"U", "T"},
},
{
parents: []string{"V", "W", "X"},
id: "Y",
expectedScore: 17,
expectedSelectedParent: "V",
expectedBlues: []string{"X", "W", "V"},
},
},
},
{
// Censorship mining attack: the attacker mines blocks B, C, D, E, F and G
// in secret without propagating them, so all of the secretly-mined blocks
// except B and C should be red, because they don't follow the PHANTOM rule
// that requires a block to point to all the parents its miner knows of.
// Block hash order: AYXWVUTSRQPONMLKJIHGFEDCB
k: 1,
virtualBlockID: "Y",
expectedReds: []string{"D", "E", "F", "G", "L"},
dagData: []*testBlockData{
{
parents: []string{"A"},
id: "B",
expectedScore: 1,
expectedSelectedParent: "A",
expectedBlues: []string{"A"},
},
{
parents: []string{"B"},
id: "C",
expectedScore: 2,
expectedSelectedParent: "B",
expectedBlues: []string{"B"},
},
{
parents: []string{"C"},
id: "D",
expectedScore: 3,
expectedSelectedParent: "C",
expectedBlues: []string{"C"},
},
{
parents: []string{"D"},
id: "E",
expectedScore: 4,
expectedSelectedParent: "D",
expectedBlues: []string{"D"},
},
{
parents: []string{"E"},
id: "F",
expectedScore: 5,
expectedSelectedParent: "E",
expectedBlues: []string{"E"},
},
{
parents: []string{"F"},
id: "G",
expectedScore: 6,
expectedSelectedParent: "F",
expectedBlues: []string{"F"},
},
{
parents: []string{"A"},
id: "H",
expectedScore: 1,
expectedSelectedParent: "A",
expectedBlues: []string{"A"},
},
{
parents: []string{"A"},
id: "I",
expectedScore: 1,
expectedSelectedParent: "A",
expectedBlues: []string{"A"},
},
{
parents: []string{"H", "I", "B"},
id: "J",
expectedScore: 4,
expectedSelectedParent: "B",
expectedBlues: []string{"I", "H", "B"},
},
{
parents: []string{"H", "I", "B"},
id: "K",
expectedScore: 4,
expectedSelectedParent: "B",
expectedBlues: []string{"I", "H", "B"},
},
{
parents: []string{"I"},
id: "L",
expectedScore: 2,
expectedSelectedParent: "I",
expectedBlues: []string{"I"},
},
{
parents: []string{"J", "K", "L", "C"},
id: "M",
expectedScore: 7,
expectedSelectedParent: "J",
expectedBlues: []string{"K", "C", "J"},
},
{
parents: []string{"J", "K", "L", "C"},
id: "N",
expectedScore: 7,
expectedSelectedParent: "J",
expectedBlues: []string{"K", "C", "J"},
},
{
parents: []string{"N", "M", "D"},
id: "O",
expectedScore: 9,
expectedSelectedParent: "M",
expectedBlues: []string{"N", "M"},
},
{
parents: []string{"N", "M", "D"},
id: "P",
expectedScore: 9,
expectedSelectedParent: "M",
expectedBlues: []string{"N", "M"},
},
{
parents: []string{"N", "M", "D"},
id: "Q",
expectedScore: 9,
expectedSelectedParent: "M",
expectedBlues: []string{"N", "M"},
},
{
parents: []string{"O", "P", "Q", "E"},
id: "R",
expectedScore: 12,
expectedSelectedParent: "O",
expectedBlues: []string{"Q", "P", "O"},
},
{
parents: []string{"O", "P", "Q", "E"},
id: "S",
expectedScore: 12,
expectedSelectedParent: "O",
expectedBlues: []string{"Q", "P", "O"},
},
{
parents: []string{"G", "S", "R"},
id: "T",
expectedScore: 14,
expectedSelectedParent: "R",
expectedBlues: []string{"S", "R"},
},
{
parents: []string{"S", "R", "F"},
id: "U",
expectedScore: 14,
expectedSelectedParent: "R",
expectedBlues: []string{"S", "R"},
},
{
parents: []string{"T", "U"},
id: "V",
expectedScore: 16,
expectedSelectedParent: "T",
expectedBlues: []string{"U", "T"},
},
{
parents: []string{"T", "U"},
id: "W",
expectedScore: 16,
expectedSelectedParent: "T",
expectedBlues: []string{"U", "T"},
},
{
parents: []string{"T", "U"},
id: "X",
expectedScore: 16,
expectedSelectedParent: "T",
expectedBlues: []string{"U", "T"},
},
{
parents: []string{"V", "W", "X"},
id: "Y",
expectedScore: 19,
expectedSelectedParent: "V",
expectedBlues: []string{"X", "W", "V"},
},
},
},
}
for i, test := range tests {
netParams.K = test.k
// Generate enough synthetic blocks for the rest of the test
blockDAG := newTestDAG(&netParams)
genesisNode := blockDAG.genesis
blockTime := genesisNode.Header().Timestamp
blockByIDMap := make(map[string]*blockNode)
idByBlockMap := make(map[*blockNode]string)
blockByIDMap["A"] = genesisNode
idByBlockMap[genesisNode] = "A"
for _, blockData := range test.dagData {
blockTime = blockTime.Add(time.Second)
parents := blockSet{}
for _, parentID := range blockData.parents {
parent := blockByIDMap[parentID]
parents.add(parent)
}
node := newTestNode(parents, blockVersion, 0, blockTime, test.k)
node.hash = &daghash.Hash{} //It helps to predict hash order
for i, char := range blockData.id {
node.hash[i] = byte(char)
}
blockDAG.index.AddNode(node)
addNodeAsChildToParents(node)
blockByIDMap[blockData.id] = node
idByBlockMap[node] = blockData.id
bluesIDs := make([]string, 0, len(node.blues))
for _, blue := range node.blues {
bluesIDs = append(bluesIDs, idByBlockMap[blue])
}
selectedParentID := idByBlockMap[node.selectedParent]
fullDataStr := fmt.Sprintf("blues: %v, selectedParent: %v, score: %v",
bluesIDs, selectedParentID, node.blueScore)
if blockData.expectedScore != node.blueScore {
t.Errorf("Test %d: Block %v expected to have score %v but got %v (fulldata: %v)",
i, blockData.id, blockData.expectedScore, node.blueScore, fullDataStr)
}
if blockData.expectedSelectedParent != selectedParentID {
t.Errorf("Test %d: Block %v expected to have selected parent %v but got %v (fulldata: %v)",
i, blockData.id, blockData.expectedSelectedParent, selectedParentID, fullDataStr)
}
if !reflect.DeepEqual(blockData.expectedBlues, bluesIDs) {
t.Errorf("Test %d: Block %v expected to have blues %v but got %v (fulldata: %v)",
i, blockData.id, blockData.expectedBlues, bluesIDs, fullDataStr)
}
}
reds := make(map[string]bool)
for id := range blockByIDMap {
reds[id] = true
}
for tip := blockByIDMap[test.virtualBlockID]; tip.selectedParent != nil; tip = tip.selectedParent {
tipID := idByBlockMap[tip]
delete(reds, tipID)
for _, blue := range tip.blues {
blueID := idByBlockMap[blue]
delete(reds, blueID)
}
}
if !checkReds(test.expectedReds, reds) {
redsIDs := make([]string, 0, len(reds))
for id := range reds {
redsIDs = append(redsIDs, id)
}
sort.Strings(redsIDs)
sort.Strings(test.expectedReds)
t.Errorf("Test %d: Expected reds %v but got %v", i, test.expectedReds, redsIDs)
}
}
}
func checkReds(expectedReds []string, reds map[string]bool) bool {
if len(expectedReds) != len(reds) {
return false
}
for _, redID := range expectedReds {
if !reds[redID] {
return false
}
}
return true
}

View File

@@ -1,219 +0,0 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"fmt"
"time"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
)
// BehaviorFlags is a bitmask defining tweaks to the normal behavior when
// performing DAG processing and consensus rules checks.
type BehaviorFlags uint32
const (
// BFFastAdd may be set to indicate that several checks can be avoided
// for the block since it is already known to fit into the chain because
// it has already been proven to correctly link into the chain up to a
// known checkpoint. This is primarily used for headers-first mode.
BFFastAdd BehaviorFlags = 1 << iota
// BFNoPoWCheck may be set to indicate the proof of work check which
// ensures a block hashes to a value less than the required target will
// not be performed.
BFNoPoWCheck
// BFWasUnorphaned may be set to indicate that a block was just now
// unorphaned
BFWasUnorphaned
// BFAfterDelay may be set to indicate that a block had a timestamp too
// far in the future and has just finished its delay
BFAfterDelay
// BFIsSync may be set to indicate that the block was sent as part of the
// netsync process
BFIsSync
// BFWasStored is set to indicate that the block was previously stored
// in the block index but was never fully processed
BFWasStored
// BFNone is a convenience value to specifically indicate no flags.
BFNone BehaviorFlags = 0
)
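// Illustrative note (not part of the original file): flags are combined
// with bitwise OR and tested with bitwise AND, mirroring the checks in
// ProcessBlock below:
//
//	flags := BFNoPoWCheck | BFIsSync
//	isSync := flags&BFIsSync == BFIsSync             // true
//	isDelayed := flags&BFAfterDelay == BFAfterDelay  // false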
// BlockExists determines whether a block with the given hash exists in
// the DAG.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) BlockExists(hash *daghash.Hash) bool {
return dag.index.HaveBlock(hash)
}
// processOrphans determines if there are any orphans which depend on the passed
// block hash (they are no longer orphans if true) and potentially accepts them.
// It repeats the process for the newly accepted blocks (to detect further
// orphans which may no longer be orphans) until there are no more.
//
// The flags do not modify the behavior of this function directly, however they
// are needed to pass along to maybeAcceptBlock.
//
// This function MUST be called with the chain state lock held (for writes).
func (dag *BlockDAG) processOrphans(hash *daghash.Hash, flags BehaviorFlags) error {
// Start with processing at least the passed hash. Leave a little room
// for additional orphan blocks that need to be processed without
// needing to grow the array in the common case.
processHashes := make([]*daghash.Hash, 0, 10)
processHashes = append(processHashes, hash)
for len(processHashes) > 0 {
// Pop the first hash to process from the slice.
processHash := processHashes[0]
processHashes[0] = nil // Prevent GC leak.
processHashes = processHashes[1:]
// Look up all orphans that are parented by the block we just
// accepted. This will typically only be one, but it could
// be multiple if multiple blocks are mined and broadcast
// around the same time. The one with the most proof of work
// will eventually win out. An indexing for loop is
// intentionally used over a range here as range does not
// reevaluate the slice on each iteration nor does it adjust the
// index for the modified slice.
for i := 0; i < len(dag.prevOrphans[*processHash]); i++ {
orphan := dag.prevOrphans[*processHash][i]
if orphan == nil {
log.Warnf("Found a nil entry at index %d in the "+
"orphan dependency list for block %s", i,
processHash)
continue
}
// Skip this orphan if one or more of its parents are
// still missing.
_, err := lookupParentNodes(orphan.block, dag)
if err != nil {
if ruleErr, ok := err.(RuleError); ok && ruleErr.ErrorCode == ErrParentBlockUnknown {
continue
}
return err
}
// Remove the orphan from the orphan pool.
orphanHash := orphan.block.Hash()
dag.removeOrphanBlock(orphan)
i--
// Potentially accept the block into the block DAG.
err = dag.maybeAcceptBlock(orphan.block, flags|BFWasUnorphaned)
if err != nil {
// Since we don't want to reject the original block because of
// a bad unorphaned child, only return an error if it's not a RuleError.
if _, ok := err.(RuleError); !ok {
return err
}
log.Warnf("Verification failed for orphan block %s: %s", orphanHash, err)
}
// Add this block to the list of blocks to process so
// any orphan blocks that depend on this block are
// handled too.
processHashes = append(processHashes, orphanHash)
}
}
return nil
}
// ProcessBlock is the main workhorse for handling insertion of new blocks into
// the block DAG. It includes functionality such as rejecting duplicate
// blocks, ensuring blocks follow all rules, orphan handling, and insertion into
// the block DAG.
//
// When no errors occurred during processing, the first return value indicates
// whether or not the block is an orphan.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) ProcessBlock(block *util.Block, flags BehaviorFlags) (isOrphan bool, delay time.Duration, err error) {
dag.dagLock.Lock()
defer dag.dagLock.Unlock()
isDelayedBlock := flags&BFAfterDelay == BFAfterDelay
wasBlockStored := flags&BFWasStored == BFWasStored
blockHash := block.Hash()
log.Tracef("Processing block %s", blockHash)
// The block must not already exist in the DAG.
if dag.BlockExists(blockHash) && !wasBlockStored {
str := fmt.Sprintf("already have block %s", blockHash)
return false, 0, ruleError(ErrDuplicateBlock, str)
}
// The block must not already exist as an orphan.
if _, exists := dag.orphans[*blockHash]; exists {
str := fmt.Sprintf("already have block (orphan) %s", blockHash)
return false, 0, ruleError(ErrDuplicateBlock, str)
}
if !isDelayedBlock {
// Perform preliminary sanity checks on the block and its transactions.
delay, err := dag.checkBlockSanity(block, flags)
if err != nil {
return false, 0, err
}
if delay != 0 {
return false, delay, err
}
}
// Handle orphan blocks.
allParentsExist := true
for _, parentHash := range block.MsgBlock().Header.ParentHashes {
if !dag.BlockExists(parentHash) {
allParentsExist = false
}
}
if !allParentsExist {
// Some orphans during netsync are a normal part of the process, since the anticone
// of the chain-split is never explicitly requested.
// Therefore, if we are in netsync - don't report orphans to the default logs.
//
// The number K*2 was chosen because in peace time the anticone is limited to
// K blocks, while red blocks can make it somewhat bigger; much more than that
// indicates that something might be wrong with the netsync process.
if flags&BFIsSync == BFIsSync && uint32(len(dag.orphans)) < dag.dagParams.K*2 {
log.Debugf("Adding orphan block %s. This is normal part of netsync process", blockHash)
} else {
log.Infof("Adding orphan block %s", blockHash)
}
dag.addOrphanBlock(block)
return true, 0, nil
}
// The block has passed all context independent checks and appears sane
// enough to potentially accept it into the block DAG.
err = dag.maybeAcceptBlock(block, flags)
if err != nil {
return false, 0, err
}
// Accept any orphan blocks that depend on this block (they are
// no longer orphans) and repeat for those accepted blocks until
// there are no more.
err = dag.processOrphans(blockHash, flags)
if err != nil {
return false, 0, err
}
log.Debugf("Accepted block %s", blockHash)
return false, 0, nil
}
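A minimal caller sketch for ProcessBlock (illustrative only, not part of the original file; handleNewBlock is a hypothetical helper and the flag choice is arbitrary):

func handleNewBlock(dag *BlockDAG, block *util.Block) error {
	isOrphan, delay, err := dag.ProcessBlock(block, BFNone)
	if err != nil {
		// This includes rule errors such as ErrDuplicateBlock.
		return err
	}
	if delay != 0 {
		// The block's timestamp is too far in the future; per the
		// BFAfterDelay flag above, it is presumably reprocessed with
		// that flag set once the delay has passed.
		log.Infof("Block %s delayed for %s", block.Hash(), delay)
		return nil
	}
	if isOrphan {
		// Some parents are missing. The block sits in the orphan pool
		// and is retried by processOrphans once its parents arrive.
		return nil
	}
	return nil
}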

View File

@@ -1,131 +0,0 @@
package blockdag
import (
"bou.ke/monkey"
"fmt"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"path/filepath"
"testing"
"time"
)
func TestProcessBlock(t *testing.T) {
dag, teardownFunc, err := DAGSetup("TestProcessBlock", Config{
DAGParams: &dagconfig.SimNetParams,
})
if err != nil {
t.Errorf("Failed to setup dag instance: %v", err)
return
}
defer teardownFunc()
// Check that BFAfterDelay skips checkBlockSanity
called := false
guard := monkey.Patch((*BlockDAG).checkBlockSanity, func(_ *BlockDAG, _ *util.Block, _ BehaviorFlags) (time.Duration, error) {
called = true
return 0, nil
})
defer guard.Unpatch()
isOrphan, delay, err := dag.ProcessBlock(util.NewBlock(&Block100000), BFNoPoWCheck)
if err != nil {
t.Errorf("ProcessBlock: %s", err)
}
if delay != 0 {
t.Errorf("ProcessBlock: block is too far in the future")
}
if !isOrphan {
t.Errorf("ProcessBlock: unexpected returned non orphan block")
}
if !called {
t.Errorf("ProcessBlock: expected checkBlockSanity to be called")
}
Block100000Copy := Block100000
// Change nonce to change block hash
Block100000Copy.Header.Nonce++
called = false
isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(&Block100000Copy), BFAfterDelay|BFNoPoWCheck)
if err != nil {
t.Errorf("ProcessBlock: %s", err)
}
if delay != 0 {
t.Errorf("ProcessBlock: block is too far in the future")
}
if !isOrphan {
t.Errorf("ProcessBlock: unexpected returned non orphan block")
}
if called {
t.Errorf("ProcessBlock: Didn't expected checkBlockSanity to be called")
}
isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(dagconfig.SimNetParams.GenesisBlock), BFNone)
expectedErrMsg := fmt.Sprintf("already have block %s", dagconfig.SimNetParams.GenesisHash)
if err == nil || err.Error() != expectedErrMsg {
t.Errorf("ProcessBlock: Expected error \"%s\" but got \"%s\"", expectedErrMsg, err)
}
}
func TestProcessOrphans(t *testing.T) {
dag, teardownFunc, err := DAGSetup("TestProcessOrphans", Config{
DAGParams: &dagconfig.SimNetParams,
})
if err != nil {
t.Errorf("Failed to setup dag instance: %v", err)
return
}
defer teardownFunc()
dag.TestSetCoinbaseMaturity(0)
blocksFile := "blk_0_to_4.dat"
blocks, err := LoadBlocks(filepath.Join("testdata/", blocksFile))
if err != nil {
t.Fatalf("TestProcessOrphans: "+
"Error loading file '%s': %s\n", blocksFile, err)
}
// Get a reference to a parent block
parentBlock := blocks[1]
// Get a reference to a child block and mess with it so that:
// a. It gets added to the orphan pool
// b. It gets rejected once it's unorphaned
childBlock := blocks[2]
childBlock.MsgBlock().Header.UTXOCommitment = &daghash.ZeroHash
// Process the child block so that it gets added to the orphan pool
isOrphan, delay, err := dag.ProcessBlock(childBlock, BFNoPoWCheck)
if err != nil {
t.Fatalf("TestProcessOrphans: child block unexpectedly returned an error: %s", err)
}
if delay != 0 {
t.Fatalf("TestProcessOrphans: child block is too far in the future")
}
if !isOrphan {
t.Fatalf("TestProcessOrphans: incorrectly returned that child block is not an orphan")
}
// Process the parent block. Note that this will attempt to unorphan the child block
isOrphan, delay, err = dag.ProcessBlock(parentBlock, BFNone)
if err != nil {
t.Fatalf("TestProcessOrphans: parent block unexpectedly returned an error: %s", err)
}
if delay != 0 {
t.Fatalf("TestProcessOrphans: parent block is too far in the future")
}
if isOrphan {
t.Fatalf("TestProcessOrphans: incorrectly returned that parent block is an orphan")
}
// Make sure that the child block had been rejected
node := dag.index.LookupNode(childBlock.Hash())
if node == nil {
t.Fatalf("TestProcessOrphans: child block missing from block index")
}
if !dag.index.NodeStatus(node).KnownInvalid() {
t.Fatalf("TestProcessOrphans: child block erroneously not marked as invalid")
}
}

View File

@@ -1,254 +0,0 @@
package blockdag
// The functions in this file are not considered safe for regular use and should be used for test purposes only.
import (
"compress/bzip2"
"encoding/binary"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
"io"
"os"
"path/filepath"
"strings"
"sync"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/database"
_ "github.com/kaspanet/kaspad/database/ffldb" // blank import ffldb so that its init() function runs before tests
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
const (
// testDbType is the database backend type to use for the tests.
testDbType = "ffldb"
// testDbRoot is the root directory used to create all test databases.
testDbRoot = "testdbs"
// blockDataNet is the expected network in the test block data.
blockDataNet = wire.MainNet
)
// isSupportedDbType returns whether or not the passed database type is
// currently supported.
func isSupportedDbType(dbType string) bool {
supportedDrivers := database.SupportedDrivers()
for _, driver := range supportedDrivers {
if dbType == driver {
return true
}
}
return false
}
// FileExists returns whether or not the named file or directory exists.
func FileExists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}
// DAGSetup is used to create a new db and DAG instance with the genesis
// block already inserted. In addition to the new DAG instance, it returns
// a teardown function the caller should invoke when done testing to clean up.
func DAGSetup(dbName string, config Config) (*BlockDAG, func(), error) {
if !isSupportedDbType(testDbType) {
return nil, nil, errors.Errorf("unsupported db type %s", testDbType)
}
var teardown func()
// To make sure that the teardown function is not called before all goroutines have finished running,
// overwrite `spawn` to count the number of running goroutines
spawnWaitGroup := sync.WaitGroup{}
realSpawn := spawn
spawn = func(f func()) {
spawnWaitGroup.Add(1)
realSpawn(func() {
f()
spawnWaitGroup.Done()
})
}
if config.DB == nil {
// Create the root directory for test databases.
if !FileExists(testDbRoot) {
if err := os.MkdirAll(testDbRoot, 0700); err != nil {
err := errors.Errorf("unable to create test db "+
"root: %s", err)
return nil, nil, err
}
}
dbPath := filepath.Join(testDbRoot, dbName)
_ = os.RemoveAll(dbPath)
var err error
config.DB, err = database.Create(testDbType, dbPath, blockDataNet)
if err != nil {
return nil, nil, errors.Errorf("error creating db: %s", err)
}
// Setup a teardown function for cleaning up. This function is
// returned to the caller to be invoked when it is done testing.
teardown = func() {
spawnWaitGroup.Wait()
spawn = realSpawn
config.DB.Close()
os.RemoveAll(dbPath)
os.RemoveAll(testDbRoot)
}
} else {
teardown = func() {
spawnWaitGroup.Wait()
spawn = realSpawn
config.DB.Close()
}
}
config.TimeSource = NewMedianTime()
config.SigCache = txscript.NewSigCache(1000)
// Create the DAG instance.
dag, err := New(&config)
if err != nil {
teardown()
err := errors.Errorf("failed to create dag instance: %s", err)
return nil, nil, err
}
return dag, teardown, nil
}
// OpTrueScript is a script that returns TRUE
var OpTrueScript = []byte{txscript.OpTrue}
type txSubnetworkData struct {
subnetworkID *subnetworkid.SubnetworkID
Gas uint64
Payload []byte
}
func createTxForTest(numInputs uint32, numOutputs uint32, outputValue uint64, subnetworkData *txSubnetworkData) *wire.MsgTx {
txIns := []*wire.TxIn{}
txOuts := []*wire.TxOut{}
for i := uint32(0); i < numInputs; i++ {
txIns = append(txIns, &wire.TxIn{
PreviousOutpoint: *wire.NewOutpoint(&daghash.TxID{}, i),
SignatureScript: []byte{},
Sequence: wire.MaxTxInSequenceNum,
})
}
for i := uint32(0); i < numOutputs; i++ {
txOuts = append(txOuts, &wire.TxOut{
ScriptPubKey: OpTrueScript,
Value: outputValue,
})
}
if subnetworkData != nil {
return wire.NewSubnetworkMsgTx(wire.TxVersion, txIns, txOuts, subnetworkData.subnetworkID, subnetworkData.Gas, subnetworkData.Payload)
}
return wire.NewNativeMsgTx(wire.TxVersion, txIns, txOuts)
}
// VirtualForTest is an exported version of virtualBlock, so that it can be returned by exported test_util methods
type VirtualForTest *virtualBlock
// SetVirtualForTest replaces the dag's virtual block. This function is used for test purposes only
func SetVirtualForTest(dag *BlockDAG, virtual VirtualForTest) VirtualForTest {
oldVirtual := dag.virtual
dag.virtual = virtual
return VirtualForTest(oldVirtual)
}
// GetVirtualFromParentsForTest generates a virtual block with the given parents.
func GetVirtualFromParentsForTest(dag *BlockDAG, parentHashes []*daghash.Hash) (VirtualForTest, error) {
parents := newSet()
for _, hash := range parentHashes {
parent := dag.index.LookupNode(hash)
if parent == nil {
return nil, errors.Errorf("GetVirtualFromParentsForTest: didn't found node for hash %s", hash)
}
parents.add(parent)
}
virtual := newVirtualBlock(parents, dag.dagParams.K)
pastUTXO, _, err := dag.pastUTXO(&virtual.blockNode)
if err != nil {
return nil, err
}
diffUTXO := pastUTXO.clone().(*DiffUTXOSet)
err = diffUTXO.meldToBase()
if err != nil {
return nil, err
}
virtual.utxoSet = diffUTXO.base
return VirtualForTest(virtual), nil
}
// LoadBlocks reads files containing bitcoin block data (bzip2-compressed or
// raw, but otherwise in the format bitcoind writes) from disk and returns
// them as an array of util.Block. This is largely borrowed from the test
// code in btcdb.
func LoadBlocks(filename string) (blocks []*util.Block, err error) {
var network = wire.MainNet
var dr io.Reader
var fi io.ReadCloser
fi, err = os.Open(filename)
if err != nil {
return
}
if strings.HasSuffix(filename, ".bz2") {
dr = bzip2.NewReader(fi)
} else {
dr = fi
}
defer fi.Close()
var block *util.Block
err = nil
for height := uint64(0); err == nil; height++ {
var rintbuf uint32
err = binary.Read(dr, binary.LittleEndian, &rintbuf)
if err == io.EOF {
// hit end of file at expected offset: no warning
height--
err = nil
break
}
if err != nil {
break
}
if rintbuf != uint32(network) {
break
}
err = binary.Read(dr, binary.LittleEndian, &rintbuf)
blocklen := rintbuf
rbytes := make([]byte, blocklen)
// read the block body, failing on a short read
_, err = io.ReadFull(dr, rbytes)
if err != nil {
return
}
block, err = util.NewBlockFromBytes(rbytes)
if err != nil {
return
}
blocks = append(blocks, block)
}
return
}
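Taken together, these helpers form the standard test harness for this package. A sketch of a typical test built on them (the test name and transaction parameters are illustrative, not from the original file):

func TestSomething(t *testing.T) {
	dag, teardown, err := DAGSetup("TestSomething", Config{
		DAGParams: &dagconfig.SimNetParams,
	})
	if err != nil {
		t.Fatalf("Failed to setup DAG instance: %v", err)
	}
	if teardown != nil {
		defer teardown()
	}
	// Build a one-input, one-output transaction against the always-true
	// script; a real test would spend an existing coinbase output and
	// feed the transaction into a block, e.g. via mining.PrepareBlockForTest.
	tx := createTxForTest(1, 1, 100, nil)
	_ = tx
	_ = dag
}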

View File

@@ -1,56 +0,0 @@
package blockdag
import (
"github.com/pkg/errors"
"os"
"strings"
"testing"
"bou.ke/monkey"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
)
func TestIsSupportedDbType(t *testing.T) {
if !isSupportedDbType("ffldb") {
t.Errorf("ffldb should be a supported DB driver")
}
if isSupportedDbType("madeUpDb") {
t.Errorf("madeUpDb should not be a supported DB driver")
}
}
// TestDAGSetupErrors tests all error-cases in DAGSetup.
// The non-error-cases are tested in the more general tests.
func TestDAGSetupErrors(t *testing.T) {
os.RemoveAll(testDbRoot)
testDAGSetupErrorThroughPatching(t, "unable to create test db root: ", os.MkdirAll, func(path string, perm os.FileMode) error {
return errors.New("Made up error")
})
testDAGSetupErrorThroughPatching(t, "failed to create dag instance: ", New, func(config *Config) (*BlockDAG, error) {
return nil, errors.New("Made up error")
})
testDAGSetupErrorThroughPatching(t, "unsupported db type ", isSupportedDbType, func(dbType string) bool {
return false
})
testDAGSetupErrorThroughPatching(t, "error creating db: ", database.Create, func(dbType string, args ...interface{}) (database.DB, error) {
return nil, errors.New("Made up error")
})
}
func testDAGSetupErrorThroughPatching(t *testing.T, expectedErrorMessage string, targetFunction interface{}, replacementFunction interface{}) {
guard := monkey.Patch(targetFunction, replacementFunction)
defer guard.Unpatch()
_, tearDown, err := DAGSetup("TestDAGSetup", Config{
DAGParams: &dagconfig.MainNetParams,
})
if tearDown != nil {
defer tearDown()
}
if err == nil || !strings.HasPrefix(err.Error(), expectedErrorMessage) {
t.Errorf("DAGSetup: expected error to have prefix '%s' but got error '%v'", expectedErrorMessage, err)
}
}


@@ -1,356 +0,0 @@
// Copyright (c) 2016-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"fmt"
"github.com/kaspanet/kaspad/util/daghash"
)
// ThresholdState define the various threshold states used when voting on
// consensus changes.
type ThresholdState byte
// These constants are used to identify specific threshold states.
const (
// ThresholdDefined is the first state for each deployment and is the
// state the genesis block has by definition for all deployments.
ThresholdDefined ThresholdState = iota
// ThresholdStarted is the state for a deployment once its start time
// has been reached.
ThresholdStarted
// ThresholdLockedIn is the state for a deployment during the retarget
// period that follows the ThresholdStarted state period, once the number
// of blocks that have voted for the deployment equals or exceeds the
// required number of votes for the deployment.
ThresholdLockedIn
// ThresholdActive is the state for a deployment for all blocks after a
// retarget period in which the deployment was in the ThresholdLockedIn
// state.
ThresholdActive
// ThresholdFailed is the state for a deployment once its expiration
// time has been reached and it did not reach the ThresholdLockedIn
// state.
ThresholdFailed
// numThresholdsStates is the maximum number of threshold states used in
// tests.
numThresholdsStates
)
// thresholdStateStrings is a map of ThresholdState values back to their
// constant names for pretty printing.
var thresholdStateStrings = map[ThresholdState]string{
ThresholdDefined: "ThresholdDefined",
ThresholdStarted: "ThresholdStarted",
ThresholdLockedIn: "ThresholdLockedIn",
ThresholdActive: "ThresholdActive",
ThresholdFailed: "ThresholdFailed",
}
// String returns the ThresholdState as a human-readable name.
func (t ThresholdState) String() string {
if s := thresholdStateStrings[t]; s != "" {
return s
}
return fmt.Sprintf("Unknown ThresholdState (%d)", int(t))
}
// thresholdConditionChecker provides a generic interface that is invoked to
// determine when a consensus rule change threshold should be changed.
type thresholdConditionChecker interface {
// BeginTime returns the unix timestamp for the median block time after
// which voting on a rule change starts (at the next window).
BeginTime() uint64
// EndTime returns the unix timestamp for the median block time after
// which an attempted rule change fails if it has not already been
// locked in or activated.
EndTime() uint64
// RuleChangeActivationThreshold is the number of blocks for which the
// condition must be true in order to lock in a rule change.
RuleChangeActivationThreshold() uint64
// MinerConfirmationWindow is the number of blocks in each threshold
// state retarget window.
MinerConfirmationWindow() uint64
// Condition returns whether or not the rule change activation condition
// has been met. This typically involves checking whether or not the
// bit associated with the condition is set, but can be more complex as
// needed.
Condition(*blockNode) (bool, error)
}
// thresholdStateCache provides a type to cache the threshold states of each
// threshold window for a set of IDs.
type thresholdStateCache struct {
entries map[daghash.Hash]ThresholdState
}
// Lookup returns the threshold state associated with the given hash along with
// a boolean that indicates whether or not it is valid.
func (c *thresholdStateCache) Lookup(hash *daghash.Hash) (ThresholdState, bool) {
state, ok := c.entries[*hash]
return state, ok
}
// Update updates the cache to contain the provided hash to threshold state
// mapping.
func (c *thresholdStateCache) Update(hash *daghash.Hash, state ThresholdState) {
c.entries[*hash] = state
}
// newThresholdCaches returns a new array of caches to be used when calculating
// threshold states.
func newThresholdCaches(numCaches uint32) []thresholdStateCache {
caches := make([]thresholdStateCache, numCaches)
for i := 0; i < len(caches); i++ {
caches[i] = thresholdStateCache{
entries: make(map[daghash.Hash]ThresholdState),
}
}
return caches
}
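// Example (illustrative sketch): a thresholdStateCache round-trip. Update
// stores a state for a block hash, and Lookup retrieves it together with a
// boolean that reports whether the entry exists.
func exampleThresholdStateCache() bool {
cache := &newThresholdCaches(1)[0]
hash := &daghash.Hash{0x01}
cache.Update(hash, ThresholdStarted)
state, ok := cache.Lookup(hash)
return ok && state == ThresholdStarted // true
}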
// thresholdState returns the current rule change threshold state for the block
// AFTER the given node and deployment ID. The cache is used to ensure the
// threshold states for previous windows are only calculated once.
//
// This function MUST be called with the chain state lock held (for writes).
func (dag *BlockDAG) thresholdState(prevNode *blockNode, checker thresholdConditionChecker, cache *thresholdStateCache) (ThresholdState, error) {
// The threshold state for the window that contains the genesis block is
// ThresholdDefined by definition.
confirmationWindow := checker.MinerConfirmationWindow()
if prevNode == nil || (prevNode.chainHeight+1) < confirmationWindow {
return ThresholdDefined, nil
}
// Get the ancestor that is the last block of the previous confirmation
// window in order to get its threshold state. This can be done because
// the state is the same for all blocks within a given window.
prevNode = prevNode.SelectedAncestor(prevNode.chainHeight -
(prevNode.chainHeight+1)%confirmationWindow)
// Iterate backwards through each of the previous confirmation windows
// to find the most recently cached threshold state.
var neededStates []*blockNode
for prevNode != nil {
// Nothing more to do if the state of the block is already
// cached.
if _, ok := cache.Lookup(prevNode.hash); ok {
break
}
// The start and expiration times are based on the median block
// time, so calculate it now.
medianTime := prevNode.PastMedianTime(dag)
// The state is simply defined if the start time hasn't been
// reached yet.
if uint64(medianTime.Unix()) < checker.BeginTime() {
cache.Update(prevNode.hash, ThresholdDefined)
break
}
// Add this node to the list of nodes that need the state
// calculated and cached.
neededStates = append(neededStates, prevNode)
// Get the ancestor that is the last block of the previous
// confirmation window.
prevNode = prevNode.RelativeAncestor(confirmationWindow)
}
// Start with the threshold state for the most recent confirmation
// window that has a cached state.
state := ThresholdDefined
if prevNode != nil {
var ok bool
state, ok = cache.Lookup(prevNode.hash)
if !ok {
return ThresholdFailed, AssertError(fmt.Sprintf(
"thresholdState: cache lookup failed for %s",
prevNode.hash))
}
}
// Since each threshold state depends on the state of the previous
// window, iterate starting from the oldest unknown window.
for neededNum := len(neededStates) - 1; neededNum >= 0; neededNum-- {
prevNode := neededStates[neededNum]
switch state {
case ThresholdDefined:
// The deployment of the rule change fails if it expires
// before it is accepted and locked in.
medianTime := prevNode.PastMedianTime(dag)
medianTimeUnix := uint64(medianTime.Unix())
if medianTimeUnix >= checker.EndTime() {
state = ThresholdFailed
break
}
// The state for the rule moves to the started state
// once its start time has been reached (and it hasn't
// already expired per the above).
if medianTimeUnix >= checker.BeginTime() {
state = ThresholdStarted
}
case ThresholdStarted:
// The deployment of the rule change fails if it expires
// before it is accepted and locked in.
medianTime := prevNode.PastMedianTime(dag)
if uint64(medianTime.Unix()) >= checker.EndTime() {
state = ThresholdFailed
break
}
// At this point, the rule change is still being voted
// on by the miners, so iterate backwards through the
// confirmation window to count all of the votes in it.
var count uint64
countNode := prevNode
for i := uint64(0); i < confirmationWindow; i++ {
condition, err := checker.Condition(countNode)
if err != nil {
return ThresholdFailed, err
}
if condition {
count++
}
// Get the previous block node.
countNode = countNode.selectedParent
}
// The state is locked in if the number of blocks in the
// period that voted for the rule change meets the
// activation threshold.
if count >= checker.RuleChangeActivationThreshold() {
state = ThresholdLockedIn
}
case ThresholdLockedIn:
// The new rule becomes active when its previous state
// was locked in.
state = ThresholdActive
// Nothing to do if the previous state is active or failed since
// they are both terminal states.
case ThresholdActive:
case ThresholdFailed:
}
// Update the cache to avoid recalculating the state in the
// future.
cache.Update(prevNode.hash, state)
}
return state, nil
}
// ThresholdState returns the current rule change threshold state of the given
// deployment ID for the block AFTER the end of the current best chain.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) ThresholdState(deploymentID uint32) (ThresholdState, error) {
dag.dagLock.Lock()
state, err := dag.deploymentState(dag.selectedTip(), deploymentID)
dag.dagLock.Unlock()
return state, err
}
// IsDeploymentActive returns true if the target deploymentID is active, and
// false otherwise.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) IsDeploymentActive(deploymentID uint32) (bool, error) {
dag.dagLock.Lock()
state, err := dag.deploymentState(dag.selectedTip(), deploymentID)
dag.dagLock.Unlock()
if err != nil {
return false, err
}
return state == ThresholdActive, nil
}
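// Example (illustrative sketch): querying deployment state from calling
// code. deploymentID is assumed to be a valid index into
// dagParams.Deployments.
func exampleDeploymentQueries(dag *BlockDAG, deploymentID uint32) error {
state, err := dag.ThresholdState(deploymentID)
if err != nil {
return err
}
active, err := dag.IsDeploymentActive(deploymentID)
if err != nil {
return err
}
log.Infof("deployment %d: state=%s, active=%t", deploymentID, state, active)
return nil
}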
// deploymentState returns the current rule change threshold state for a given
// deploymentID. The state is evaluated from the point of view of the block
// node passed in as the first argument to this method.
//
// It is important to note that, as the variable name indicates, this function
// expects the block node prior to the block for which the deployment state is
// desired. In other words, the returned deployment state is for the block
// AFTER the passed node.
//
// This function MUST be called with the chain state lock held (for writes).
func (dag *BlockDAG) deploymentState(prevNode *blockNode, deploymentID uint32) (ThresholdState, error) {
if deploymentID >= uint32(len(dag.dagParams.Deployments)) {
return ThresholdFailed, DeploymentError(deploymentID)
}
deployment := &dag.dagParams.Deployments[deploymentID]
checker := deploymentChecker{deployment: deployment, chain: dag}
cache := &dag.deploymentCaches[deploymentID]
return dag.thresholdState(prevNode, checker, cache)
}
// initThresholdCaches initializes the threshold state caches for each warning
// bit and defined deployment and provides warnings if the chain is current per
// the warnUnknownVersions and warnUnknownRuleActivations functions.
func (dag *BlockDAG) initThresholdCaches() error {
// Initialize the warning and deployment caches by calculating the
// threshold state for each of them. This will ensure the caches are
// populated and any states that needed to be recalculated due to
// definition changes are computed now.
prevNode := dag.selectedTip().selectedParent
for bit := uint32(0); bit < vbNumBits; bit++ {
checker := bitConditionChecker{bit: bit, chain: dag}
cache := &dag.warningCaches[bit]
_, err := dag.thresholdState(prevNode, checker, cache)
if err != nil {
return err
}
}
for id := 0; id < len(dag.dagParams.Deployments); id++ {
deployment := &dag.dagParams.Deployments[id]
cache := &dag.deploymentCaches[id]
checker := deploymentChecker{deployment: deployment, chain: dag}
_, err := dag.thresholdState(prevNode, checker, cache)
if err != nil {
return err
}
}
// No warnings about unknown rules or versions until the chain is
// current.
if dag.isCurrent() {
// Warn if a high enough percentage of the last blocks have
// unexpected versions.
bestNode := dag.selectedTip()
if err := dag.warnUnknownVersions(bestNode); err != nil {
return err
}
// Warn if any unknown new rules are either about to activate or
// have already been activated.
if err := dag.warnUnknownRuleActivations(bestNode); err != nil {
return err
}
}
return nil
}


@@ -1,134 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"testing"
"github.com/kaspanet/kaspad/util/daghash"
)
// TestThresholdStateStringer tests the stringized output for the
// ThresholdState type.
func TestThresholdStateStringer(t *testing.T) {
t.Parallel()
tests := []struct {
in ThresholdState
want string
}{
{ThresholdDefined, "ThresholdDefined"},
{ThresholdStarted, "ThresholdStarted"},
{ThresholdLockedIn, "ThresholdLockedIn"},
{ThresholdActive, "ThresholdActive"},
{ThresholdFailed, "ThresholdFailed"},
{0xff, "Unknown ThresholdState (255)"},
}
// Detect additional threshold states that don't have the stringer added.
if len(tests)-1 != int(numThresholdsStates) {
t.Errorf("It appears a threshold statewas added without " +
"adding an associated stringer test")
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
result := test.in.String()
if result != test.want {
t.Errorf("String #%d\n got: %s want: %s", i, result,
test.want)
continue
}
}
}
// TestThresholdStateCache ensure the threshold state cache works as intended
// including adding entries, updating existing entries, and flushing.
func TestThresholdStateCache(t *testing.T) {
t.Parallel()
tests := []struct {
name string
numEntries int
state ThresholdState
}{
{name: "2 entries defined", numEntries: 2, state: ThresholdDefined},
{name: "7 entries started", numEntries: 7, state: ThresholdStarted},
{name: "10 entries active", numEntries: 10, state: ThresholdActive},
{name: "5 entries locked in", numEntries: 5, state: ThresholdLockedIn},
{name: "3 entries failed", numEntries: 3, state: ThresholdFailed},
}
nextTest:
for _, test := range tests {
cache := &newThresholdCaches(1)[0]
for i := 0; i < test.numEntries; i++ {
var hash daghash.Hash
hash[0] = uint8(i + 1)
// Ensure the hash isn't available in the cache already.
_, ok := cache.Lookup(&hash)
if ok {
t.Errorf("Lookup (%s): has entry for hash %v",
test.name, hash)
continue nextTest
}
// Ensure hash that was added to the cache reports it's
// available and the state is the expected value.
cache.Update(&hash, test.state)
state, ok := cache.Lookup(&hash)
if !ok {
t.Errorf("Lookup (%s): missing entry for hash "+
"%v", test.name, hash)
continue nextTest
}
if state != test.state {
t.Errorf("Lookup (%s): state mismatch - got "+
"%v, want %v", test.name, state,
test.state)
continue nextTest
}
// Ensure adding an existing hash with the same state
// doesn't break the existing entry.
cache.Update(&hash, test.state)
state, ok = cache.Lookup(&hash)
if !ok {
t.Errorf("Lookup (%s): missing entry after "+
"second add for hash %v", test.name,
hash)
continue nextTest
}
if state != test.state {
t.Errorf("Lookup (%s): state mismatch after "+
"second add - got %v, want %v",
test.name, state, test.state)
continue nextTest
}
// Ensure adding an existing hash with a different state
// updates the existing entry.
newState := ThresholdFailed
if newState == test.state {
newState = ThresholdStarted
}
cache.Update(&hash, newState)
state, ok = cache.Lookup(&hash)
if !ok {
t.Errorf("Lookup (%s): missing entry after "+
"state change for hash %v", test.name,
hash)
continue nextTest
}
if state != newState {
t.Errorf("Lookup (%s): state mismatch after "+
"state change - got %v, want %v",
test.name, state, newState)
continue nextTest
}
}
}
}


@@ -1,32 +0,0 @@
package blockdag
import (
"bytes"
"github.com/golang/groupcache/lru"
"github.com/kaspanet/kaspad/btcec"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
const ecmhCacheSize = 4_000_000
var (
utxoToECMHCache = lru.New(ecmhCacheSize)
)
func utxoMultiset(entry *UTXOEntry, outpoint *wire.Outpoint) (*btcec.Multiset, error) {
w := &bytes.Buffer{}
err := serializeUTXO(w, entry, outpoint)
if err != nil {
return nil, err
}
serializedUTXO := w.Bytes()
utxoHash := daghash.DoubleHashH(serializedUTXO)
if cachedMSPoint, ok := utxoToECMHCache.Get(utxoHash); ok {
return cachedMSPoint.(*btcec.Multiset), nil
}
msPoint := btcec.NewMultiset(btcec.S256()).Add(serializedUTXO)
utxoToECMHCache.Add(utxoHash, msPoint)
return msPoint, nil
}
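// Example (illustrative sketch): computing the multiset point for the same
// UTXO twice. The second call is served from utxoToECMHCache and returns
// the identical cached *btcec.Multiset.
func exampleUTXOMultisetCache() (*btcec.Multiset, error) {
entry := &UTXOEntry{amount: 1, scriptPubKey: []byte{0x01}}
outpoint := &wire.Outpoint{TxID: daghash.TxID{0x01}, Index: 0}
first, err := utxoMultiset(entry, outpoint)
if err != nil {
return nil, err
}
// The repeated call hits the cache rather than recomputing the point.
second, err := utxoMultiset(entry, outpoint)
if err != nil {
return nil, err
}
_ = first
return second, nil
}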


@@ -1,191 +0,0 @@
package blockdag
import (
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/locks"
"github.com/pkg/errors"
)
var multisetPointSize = 32
type blockUTXODiffData struct {
diff *UTXODiff
diffChild *blockNode
}
type utxoDiffStore struct {
dag *BlockDAG
dirty map[daghash.Hash]struct{}
loaded map[daghash.Hash]*blockUTXODiffData
mtx *locks.PriorityMutex
}
func newUTXODiffStore(dag *BlockDAG) *utxoDiffStore {
return &utxoDiffStore{
dag: dag,
dirty: make(map[daghash.Hash]struct{}),
loaded: make(map[daghash.Hash]*blockUTXODiffData),
mtx: locks.NewPriorityMutex(),
}
}
func (diffStore *utxoDiffStore) setBlockDiff(node *blockNode, diff *UTXODiff) error {
diffStore.mtx.HighPriorityWriteLock()
defer diffStore.mtx.HighPriorityWriteUnlock()
// load the diff data from DB to diffStore.loaded
_, exists, err := diffStore.diffDataByHash(node.hash)
if err != nil {
return err
}
if !exists {
diffStore.loaded[*node.hash] = &blockUTXODiffData{}
}
diffStore.loaded[*node.hash].diff = diff
diffStore.setBlockAsDirty(node.hash)
return nil
}
func (diffStore *utxoDiffStore) setBlockDiffChild(node *blockNode, diffChild *blockNode) error {
diffStore.mtx.HighPriorityWriteLock()
defer diffStore.mtx.HighPriorityWriteUnlock()
// load the diff data from DB to diffStore.loaded
_, exists, err := diffStore.diffDataByHash(node.hash)
if err != nil {
return err
}
if !exists {
return diffNotFoundError(node)
}
diffStore.loaded[*node.hash].diffChild = diffChild
diffStore.setBlockAsDirty(node.hash)
return nil
}
func (diffStore *utxoDiffStore) removeBlocksDiffData(dbTx database.Tx, blockHashes []*daghash.Hash) error {
for _, hash := range blockHashes {
err := diffStore.removeBlockDiffData(dbTx, hash)
if err != nil {
return err
}
}
return nil
}
func (diffStore *utxoDiffStore) removeBlockDiffData(dbTx database.Tx, blockHash *daghash.Hash) error {
diffStore.mtx.LowPriorityWriteLock()
defer diffStore.mtx.LowPriorityWriteUnlock()
delete(diffStore.loaded, *blockHash)
err := dbRemoveDiffData(dbTx, blockHash)
if err != nil {
return err
}
return nil
}
func (diffStore *utxoDiffStore) setBlockAsDirty(blockHash *daghash.Hash) {
diffStore.dirty[*blockHash] = struct{}{}
}
func (diffStore *utxoDiffStore) diffDataByHash(hash *daghash.Hash) (*blockUTXODiffData, bool, error) {
if diffData, ok := diffStore.loaded[*hash]; ok {
return diffData, true, nil
}
diffData, err := diffStore.diffDataFromDB(hash)
if err != nil {
return nil, false, err
}
exists := diffData != nil
if exists {
diffStore.loaded[*hash] = diffData
}
return diffData, exists, nil
}
func diffNotFoundError(node *blockNode) error {
return errors.Errorf("Couldn't find diff data for block %s", node.hash)
}
func (diffStore *utxoDiffStore) diffByNode(node *blockNode) (*UTXODiff, error) {
diffStore.mtx.HighPriorityReadLock()
defer diffStore.mtx.HighPriorityReadUnlock()
diffData, exists, err := diffStore.diffDataByHash(node.hash)
if err != nil {
return nil, err
}
if !exists {
return nil, diffNotFoundError(node)
}
return diffData.diff, nil
}
func (diffStore *utxoDiffStore) diffChildByNode(node *blockNode) (*blockNode, error) {
diffStore.mtx.HighPriorityReadLock()
defer diffStore.mtx.HighPriorityReadUnlock()
diffData, exists, err := diffStore.diffDataByHash(node.hash)
if err != nil {
return nil, err
}
if !exists {
return nil, diffNotFoundError(node)
}
return diffData.diffChild, nil
}
func (diffStore *utxoDiffStore) diffDataFromDB(hash *daghash.Hash) (*blockUTXODiffData, error) {
var diffData *blockUTXODiffData
err := diffStore.dag.db.View(func(dbTx database.Tx) error {
bucket := dbTx.Metadata().Bucket(utxoDiffsBucketName)
serializedBlockDiffData := bucket.Get(hash[:])
if serializedBlockDiffData != nil {
var err error
diffData, err = diffStore.deserializeBlockUTXODiffData(serializedBlockDiffData)
return err
}
return nil
})
if err != nil {
return nil, err
}
return diffData, nil
}
// flushToDB writes all dirty diff data to the database. The dirty set is
// cleared separately via clearDirtyEntries once the write succeeds.
func (diffStore *utxoDiffStore) flushToDB(dbTx database.Tx) error {
diffStore.mtx.HighPriorityWriteLock()
defer diffStore.mtx.HighPriorityWriteUnlock()
if len(diffStore.dirty) == 0 {
return nil
}
for hash := range diffStore.dirty {
diffData := diffStore.loaded[hash]
err := dbStoreDiffData(dbTx, &hash, diffData)
if err != nil {
return err
}
}
return nil
}
func (diffStore *utxoDiffStore) clearDirtyEntries() {
diffStore.dirty = make(map[daghash.Hash]struct{})
}
// dbStoreDiffData stores the UTXO diff data to the database.
// This overwrites the current entry if one exists.
func dbStoreDiffData(dbTx database.Tx, hash *daghash.Hash, diffData *blockUTXODiffData) error {
serializedDiffData, err := serializeBlockUTXODiffData(diffData)
if err != nil {
return err
}
return dbTx.Metadata().Bucket(utxoDiffsBucketName).Put(hash[:], serializedDiffData)
}
func dbRemoveDiffData(dbTx database.Tx, hash *daghash.Hash) error {
return dbTx.Metadata().Bucket(utxoDiffsBucketName).Delete(hash[:])
}


@@ -1,86 +0,0 @@
package blockdag
import (
"fmt"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"reflect"
"testing"
)
func TestUTXODiffStore(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestUTXODiffStore", Config{
DAGParams: &dagconfig.SimNetParams,
})
if err != nil {
t.Fatalf("TestUTXODiffStore: Failed to setup DAG instance: %v", err)
}
defer teardownFunc()
nodeCounter := byte(0)
createNode := func() *blockNode {
nodeCounter++
node := &blockNode{hash: &daghash.Hash{nodeCounter}}
dag.index.AddNode(node)
return node
}
// Check that an error is returned when asking for a non-existing node
nonExistingNode := createNode()
_, err = dag.utxoDiffStore.diffByNode(nonExistingNode)
expectedErrString := fmt.Sprintf("Couldn't find diff data for block %s", nonExistingNode.hash)
if err == nil || err.Error() != expectedErrString {
t.Errorf("diffByNode: expected error %s but got %s", expectedErrString, err)
}
// Add the node's diff data to the utxoDiffStore and check that it's stored correctly.
node := createNode()
diff := NewUTXODiff()
diff.toAdd.add(wire.Outpoint{TxID: daghash.TxID{0x01}, Index: 0}, &UTXOEntry{amount: 1, scriptPubKey: []byte{0x01}})
diff.toRemove.add(wire.Outpoint{TxID: daghash.TxID{0x02}, Index: 0}, &UTXOEntry{amount: 2, scriptPubKey: []byte{0x02}})
if err := dag.utxoDiffStore.setBlockDiff(node, diff); err != nil {
t.Fatalf("setBlockDiff: unexpected error: %s", err)
}
diffChild := createNode()
if err := dag.utxoDiffStore.setBlockDiffChild(node, diffChild); err != nil {
t.Fatalf("setBlockDiffChild: unexpected error: %s", err)
}
if storeDiff, err := dag.utxoDiffStore.diffByNode(node); err != nil {
t.Fatalf("diffByNode: unexpected error: %s", err)
} else if !reflect.DeepEqual(storeDiff, diff) {
t.Errorf("Expected diff and storeDiff to be equal")
}
if storeDiffChild, err := dag.utxoDiffStore.diffChildByNode(node); err != nil {
t.Fatalf("diffByNode: unexpected error: %s", err)
} else if !reflect.DeepEqual(storeDiffChild, diffChild) {
t.Errorf("Expected diff and storeDiff to be equal")
}
// Flush changes to db, delete them from the dag.utxoDiffStore.loaded
// map, and check if the diff data is re-fetched from the database.
err = dag.db.Update(func(dbTx database.Tx) error {
return dag.utxoDiffStore.flushToDB(dbTx)
})
if err != nil {
t.Fatalf("Error flushing utxoDiffStore data to DB: %s", err)
}
delete(dag.utxoDiffStore.loaded, *node.hash)
if storeDiff, err := dag.utxoDiffStore.diffByNode(node); err != nil {
t.Fatalf("diffByNode: unexpected error: %s", err)
} else if !reflect.DeepEqual(storeDiff, diff) {
t.Errorf("Expected diff and storeDiff to be equal")
}
// Check if diffByNode caches the result in dag.utxoDiffStore.loaded
if loadedDiffData, ok := dag.utxoDiffStore.loaded[*node.hash]; !ok {
t.Errorf("the diff data wasn't added to loaded map after requesting it")
} else if !reflect.DeepEqual(loadedDiffData.diff, diff) {
t.Errorf("Expected diff and loadedDiff to be equal")
}
}


@@ -1,314 +0,0 @@
package blockdag
import (
"bytes"
"encoding/binary"
"fmt"
"github.com/pkg/errors"
"io"
"math/big"
"github.com/kaspanet/kaspad/btcec"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
// serializeBlockUTXODiffData serializes diff data in the following format:
// Name | Data type | Description
// ------------ | --------- | -----------
// hasDiffChild | Boolean | Indicates if a diff child exists
// diffChild | Hash | The diffChild's hash. Omitted if hasDiffChild is false.
// diff | UTXODiff | The diff data's diff
func serializeBlockUTXODiffData(diffData *blockUTXODiffData) ([]byte, error) {
w := &bytes.Buffer{}
hasDiffChild := diffData.diffChild != nil
err := wire.WriteElement(w, hasDiffChild)
if err != nil {
return nil, err
}
if hasDiffChild {
err := wire.WriteElement(w, diffData.diffChild.hash)
if err != nil {
return nil, err
}
}
err = serializeUTXODiff(w, diffData.diff)
if err != nil {
return nil, err
}
return w.Bytes(), nil
}
// utxoEntryHeaderCode returns the calculated header code to be used when
// serializing the provided utxo entry.
func utxoEntryHeaderCode(entry *UTXOEntry) uint64 {
// As described in the serialization format comments, the header code
// encodes the blue score shifted over one bit and the block reward flag
// in the lowest bit.
headerCode := uint64(entry.BlockBlueScore()) << 1
if entry.IsCoinbase() {
headerCode |= 0x01
}
return headerCode
}
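// Worked example (illustrative sketch): encoding and decoding the header
// code for a coinbase entry with blue score 12.
func exampleUTXOEntryHeaderCode() {
entry := &UTXOEntry{blockBlueScore: 12, packedFlags: tfCoinbase}
headerCode := utxoEntryHeaderCode(entry) // (12 << 1) | 0x01 == 25
isCoinbase := headerCode&0x01 != 0 // true
blueScore := headerCode >> 1 // 12
_, _ = isCoinbase, blueScore
}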
func (diffStore *utxoDiffStore) deserializeBlockUTXODiffData(serializedDiffDataBytes []byte) (*blockUTXODiffData, error) {
diffData := &blockUTXODiffData{}
serializedDiffData := bytes.NewBuffer(serializedDiffDataBytes)
var hasDiffChild bool
err := wire.ReadElement(serializedDiffData, &hasDiffChild)
if err != nil {
return nil, err
}
if hasDiffChild {
hash := &daghash.Hash{}
err := wire.ReadElement(serializedDiffData, hash)
if err != nil {
return nil, err
}
diffData.diffChild = diffStore.dag.index.LookupNode(hash)
}
diffData.diff = &UTXODiff{
useMultiset: true,
}
diffData.diff.toAdd, err = deserializeDiffEntries(serializedDiffData)
if err != nil {
return nil, err
}
diffData.diff.toRemove, err = deserializeDiffEntries(serializedDiffData)
if err != nil {
return nil, err
}
diffData.diff.diffMultiset, err = deserializeMultiset(serializedDiffData)
if err != nil {
return nil, err
}
return diffData, nil
}
func deserializeDiffEntries(r io.Reader) (utxoCollection, error) {
count, err := wire.ReadVarInt(r)
if err != nil {
return nil, err
}
collection := utxoCollection{}
for i := uint64(0); i < count; i++ {
outpointSize, err := wire.ReadVarInt(r)
if err != nil {
return nil, err
}
serializedOutpoint := make([]byte, outpointSize)
err = binary.Read(r, byteOrder, serializedOutpoint)
if err != nil {
return nil, err
}
outpoint, err := deserializeOutpoint(serializedOutpoint)
if err != nil {
return nil, err
}
utxoEntrySize, err := wire.ReadVarInt(r)
if err != nil {
return nil, err
}
serializedEntry := make([]byte, utxoEntrySize)
err = binary.Read(r, byteOrder, serializedEntry)
if err != nil {
return nil, err
}
utxoEntry, err := deserializeUTXOEntry(serializedEntry)
if err != nil {
return nil, err
}
collection.add(*outpoint, utxoEntry)
}
return collection, nil
}
// deserializeMultiset deserializes an ECMH multiset.
// See serializeMultiset for more details.
func deserializeMultiset(r io.Reader) (*btcec.Multiset, error) {
xBytes := make([]byte, multisetPointSize)
yBytes := make([]byte, multisetPointSize)
err := binary.Read(r, byteOrder, xBytes)
if err != nil {
return nil, err
}
err = binary.Read(r, byteOrder, yBytes)
if err != nil {
return nil, err
}
var x, y big.Int
x.SetBytes(xBytes)
y.SetBytes(yBytes)
return btcec.NewMultisetFromPoint(btcec.S256(), &x, &y), nil
}
// serializeUTXODiff serializes UTXODiff by serializing
// UTXODiff.toAdd, UTXODiff.toRemove and UTXODiff.Multiset one after the other.
func serializeUTXODiff(w io.Writer, diff *UTXODiff) error {
if !diff.useMultiset {
return errors.New("Cannot serialize a UTXO diff without a multiset")
}
err := serializeUTXOCollection(w, diff.toAdd)
if err != nil {
return err
}
err = serializeUTXOCollection(w, diff.toRemove)
if err != nil {
return err
}
err = serializeMultiset(w, diff.diffMultiset)
if err != nil {
return err
}
return nil
}
// serializeUTXOCollection serializes utxoCollection by iterating over
// the utxo entries and serializing them and their corresponding outpoint
// prefixed by a varint that indicates their size.
func serializeUTXOCollection(w io.Writer, collection utxoCollection) error {
err := wire.WriteVarInt(w, uint64(len(collection)))
if err != nil {
return err
}
for outpoint, utxoEntry := range collection {
err := serializeUTXO(w, utxoEntry, &outpoint)
if err != nil {
return err
}
}
return nil
}
// serializeMultiset serializes an ECMH multiset. The serialization
// is done by taking the (x,y) coordinates of the multiset point and
// padding each of them to 32 bytes (they'll be 32 bytes in most
// cases anyway, unless one of the coordinates is zero) and writing
// them one after the other.
func serializeMultiset(w io.Writer, ms *btcec.Multiset) error {
x, y := ms.Point()
xBytes := make([]byte, multisetPointSize)
copy(xBytes, x.Bytes())
yBytes := make([]byte, multisetPointSize)
copy(yBytes, y.Bytes())
err := binary.Write(w, byteOrder, xBytes)
if err != nil {
return err
}
err = binary.Write(w, byteOrder, yBytes)
if err != nil {
return err
}
return nil
}
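// Example (illustrative sketch): round-tripping a multiset through its
// 64-byte (x, y) serialization.
func exampleMultisetRoundTrip() error {
ms := btcec.NewMultiset(btcec.S256()).Add([]byte{0x01, 0x02})
w := &bytes.Buffer{}
if err := serializeMultiset(w, ms); err != nil {
return err
}
deserialized, err := deserializeMultiset(w)
if err != nil {
return err
}
x1, y1 := ms.Point()
x2, y2 := deserialized.Point()
if x1.Cmp(x2) != 0 || y1.Cmp(y2) != 0 {
return errors.New("multiset round-trip mismatch")
}
return nil
}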
// serializeUTXO serializes a utxo entry-outpoint pair
func serializeUTXO(w io.Writer, entry *UTXOEntry, outpoint *wire.Outpoint) error {
serializedOutpoint := *outpointKey(*outpoint)
err := wire.WriteVarInt(w, uint64(len(serializedOutpoint)))
if err != nil {
return err
}
err = binary.Write(w, byteOrder, serializedOutpoint)
if err != nil {
return err
}
serializedUTXOEntry := serializeUTXOEntry(entry)
err = wire.WriteVarInt(w, uint64(len(serializedUTXOEntry)))
if err != nil {
return err
}
err = binary.Write(w, byteOrder, serializedUTXOEntry)
if err != nil {
return err
}
return nil
}
// serializeUTXOEntry returns the entry serialized to a format that is suitable
// for long-term storage. The format is described in detail above.
func serializeUTXOEntry(entry *UTXOEntry) []byte {
// Encode the header code.
headerCode := utxoEntryHeaderCode(entry)
// Calculate the size needed to serialize the entry.
size := serializeSizeVLQ(headerCode) +
compressedTxOutSize(uint64(entry.Amount()), entry.ScriptPubKey())
// Serialize the header code followed by the compressed unspent
// transaction output.
serialized := make([]byte, size)
offset := putVLQ(serialized, headerCode)
offset += putCompressedTxOut(serialized[offset:], uint64(entry.Amount()),
entry.ScriptPubKey())
return serialized
}
// deserializeOutpoint decodes an outpoint from the passed serialized byte
// slice into a new wire.Outpoint using a format that is suitable for long-
// term storage. This format is described in detail above.
func deserializeOutpoint(serialized []byte) (*wire.Outpoint, error) {
if len(serialized) <= daghash.HashSize {
return nil, errDeserialize("unexpected end of data")
}
txID := daghash.TxID{}
txID.SetBytes(serialized[:daghash.HashSize])
index, _ := deserializeVLQ(serialized[daghash.HashSize:])
return wire.NewOutpoint(&txID, uint32(index)), nil
}
// deserializeUTXOEntry decodes a UTXO entry from the passed serialized byte
// slice into a new UTXOEntry using a format that is suitable for long-term
// storage. The format is described in detail above.
func deserializeUTXOEntry(serialized []byte) (*UTXOEntry, error) {
// Deserialize the header code.
code, offset := deserializeVLQ(serialized)
if offset >= len(serialized) {
return nil, errDeserialize("unexpected end of data after header")
}
// Decode the header code.
//
// Bit 0 indicates whether the containing transaction is a coinbase.
// Bits 1-x encode blue score of the containing transaction.
isCoinbase := code&0x01 != 0
blockBlueScore := code >> 1
// Decode the compressed unspent transaction output.
amount, scriptPubKey, _, err := decodeCompressedTxOut(serialized[offset:])
if err != nil {
return nil, errDeserialize(fmt.Sprintf("unable to decode "+
"UTXO: %s", err))
}
entry := &UTXOEntry{
amount: amount,
scriptPubKey: scriptPubKey,
blockBlueScore: blockBlueScore,
packedFlags: 0,
}
if isCoinbase {
entry.packedFlags |= tfCoinbase
}
return entry, nil
}


@@ -1,294 +0,0 @@
// Copyright (c) 2016-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"math"
"github.com/kaspanet/kaspad/dagconfig"
)
const (
// vbTopBits defines the bits to set in the version to signal that the
// version bits scheme is being used.
vbTopBits = 0x20000000
// vbTopMask is the bitmask to use to determine whether or not the
// version bits scheme is in use.
vbTopMask = 0xe0000000
// vbNumBits is the total number of bits available for use with the
// version bits scheme.
vbNumBits = 29
// unknownVerNumToCheck is the number of previous blocks to consider
// when checking for a threshold of unknown block versions for the
// purposes of warning the user.
unknownVerNumToCheck = 100
// unknownVerWarnNum is the threshold of previous blocks that have an
// unknown version to use for the purposes of warning the user.
unknownVerWarnNum = unknownVerNumToCheck / 2
)
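// Example (illustrative sketch): testing whether a block version uses the
// version bits scheme and whether a given deployment bit is signaled.
func exampleVersionBits(version uint32, bit uint32) (usesScheme, signals bool) {
usesScheme = version&vbTopMask == vbTopBits
signals = usesScheme && version&(uint32(1)<<bit) != 0
return usesScheme, signals
}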
// bitConditionChecker provides a thresholdConditionChecker which can be used to
// test whether or not a specific bit is set when it's not supposed to be
// according to the expected version based on the known deployments and the
// current state of the chain. This is useful for detecting and warning about
// unknown rule activations.
type bitConditionChecker struct {
bit uint32
chain *BlockDAG
}
// Ensure the bitConditionChecker type implements the thresholdConditionChecker
// interface.
var _ thresholdConditionChecker = bitConditionChecker{}
// BeginTime returns the unix timestamp for the median block time after which
// voting on a rule change starts (at the next window).
//
// Since this implementation checks for unknown rules, it returns 0 so the rule
// is always treated as active.
//
// This is part of the thresholdConditionChecker interface implementation.
func (c bitConditionChecker) BeginTime() uint64 {
return 0
}
// EndTime returns the unix timestamp for the median block time after which an
// attempted rule change fails if it has not already been locked in or
// activated.
//
// Since this implementation checks for unknown rules, it returns the maximum
// possible timestamp so the rule is always treated as active.
//
// This is part of the thresholdConditionChecker interface implementation.
func (c bitConditionChecker) EndTime() uint64 {
return math.MaxUint64
}
// RuleChangeActivationThreshold is the number of blocks for which the condition
// must be true in order to lock in a rule change.
//
// This implementation returns the value defined by the chain params the checker
// is associated with.
//
// This is part of the thresholdConditionChecker interface implementation.
func (c bitConditionChecker) RuleChangeActivationThreshold() uint64 {
return c.chain.dagParams.RuleChangeActivationThreshold
}
// MinerConfirmationWindow is the number of blocks in each threshold state
// retarget window.
//
// This implementation returns the value defined by the chain params the checker
// is associated with.
//
// This is part of the thresholdConditionChecker interface implementation.
func (c bitConditionChecker) MinerConfirmationWindow() uint64 {
return c.chain.dagParams.MinerConfirmationWindow
}
// Condition returns true when the specific bit associated with the checker is
// set and it's not supposed to be according to the expected version based on
// the known deployments and the current state of the chain.
//
// This function MUST be called with the chain state lock held (for writes).
//
// This is part of the thresholdConditionChecker interface implementation.
func (c bitConditionChecker) Condition(node *blockNode) (bool, error) {
conditionMask := uint32(1) << c.bit
version := uint32(node.version)
if version&vbTopMask != vbTopBits {
return false, nil
}
if version&conditionMask == 0 {
return false, nil
}
expectedVersion, err := c.chain.calcNextBlockVersion(node.selectedParent)
if err != nil {
return false, err
}
return uint32(expectedVersion)&conditionMask == 0, nil
}
// deploymentChecker provides a thresholdConditionChecker which can be used to
// test a specific deployment rule. This is required for properly detecting
// and activating consensus rule changes.
type deploymentChecker struct {
deployment *dagconfig.ConsensusDeployment
chain *BlockDAG
}
// Ensure the deploymentChecker type implements the thresholdConditionChecker
// interface.
var _ thresholdConditionChecker = deploymentChecker{}
// BeginTime returns the unix timestamp for the median block time after which
// voting on a rule change starts (at the next window).
//
// This implementation returns the value defined by the specific deployment the
// checker is associated with.
//
// This is part of the thresholdConditionChecker interface implementation.
func (c deploymentChecker) BeginTime() uint64 {
return c.deployment.StartTime
}
// EndTime returns the unix timestamp for the median block time after which an
// attempted rule change fails if it has not already been locked in or
// activated.
//
// This implementation returns the value defined by the specific deployment the
// checker is associated with.
//
// This is part of the thresholdConditionChecker interface implementation.
func (c deploymentChecker) EndTime() uint64 {
return c.deployment.ExpireTime
}
// RuleChangeActivationThreshold is the number of blocks for which the condition
// must be true in order to lock in a rule change.
//
// This implementation returns the value defined by the chain params the checker
// is associated with.
//
// This is part of the thresholdConditionChecker interface implementation.
func (c deploymentChecker) RuleChangeActivationThreshold() uint64 {
return c.chain.dagParams.RuleChangeActivationThreshold
}
// MinerConfirmationWindow is the number of blocks in each threshold state
// retarget window.
//
// This implementation returns the value defined by the chain params the checker
// is associated with.
//
// This is part of the thresholdConditionChecker interface implementation.
func (c deploymentChecker) MinerConfirmationWindow() uint64 {
return c.chain.dagParams.MinerConfirmationWindow
}
// Condition returns true when the specific bit defined by the deployment
// associated with the checker is set.
//
// This is part of the thresholdConditionChecker interface implementation.
func (c deploymentChecker) Condition(node *blockNode) (bool, error) {
conditionMask := uint32(1) << c.deployment.BitNumber
version := uint32(node.version)
return (version&vbTopMask == vbTopBits) && (version&conditionMask != 0),
nil
}
// calcNextBlockVersion calculates the expected version of the block after the
// passed previous block node based on the state of started and locked in
// rule change deployments.
//
// This function differs from the exported CalcNextBlockVersion in that the
// exported version uses the current best chain as the previous block node
// while this function accepts any block node.
//
// This function MUST be called with the chain state lock held (for writes).
func (dag *BlockDAG) calcNextBlockVersion(prevNode *blockNode) (int32, error) {
// Set the appropriate bits for each actively defined rule deployment
// that is either in the process of being voted on, or locked in for the
// activation at the next threshold window change.
expectedVersion := uint32(vbTopBits)
for id := 0; id < len(dag.dagParams.Deployments); id++ {
deployment := &dag.dagParams.Deployments[id]
cache := &dag.deploymentCaches[id]
checker := deploymentChecker{deployment: deployment, chain: dag}
state, err := dag.thresholdState(prevNode, checker, cache)
if err != nil {
return 0, err
}
if state == ThresholdStarted || state == ThresholdLockedIn {
expectedVersion |= uint32(1) << deployment.BitNumber
}
}
return int32(expectedVersion), nil
}
// CalcNextBlockVersion calculates the expected version of the block after the
// end of the current best chain based on the state of started and locked in
// rule change deployments.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) CalcNextBlockVersion() (int32, error) {
version, err := dag.calcNextBlockVersion(dag.selectedTip())
return version, err
}
// warnUnknownRuleActivations displays a warning when any unknown new rules are
// either about to activate or have been activated. This will only happen once
// for newly activated rules and on every block for rules that are about to be
// activated.
//
// This function MUST be called with the chain state lock held (for writes)
func (dag *BlockDAG) warnUnknownRuleActivations(node *blockNode) error {
// Warn if any unknown new rules are either about to activate or have
// already been activated.
for bit := uint32(0); bit < vbNumBits; bit++ {
checker := bitConditionChecker{bit: bit, chain: dag}
cache := &dag.warningCaches[bit]
state, err := dag.thresholdState(node.selectedParent, checker, cache)
if err != nil {
return err
}
switch state {
case ThresholdActive:
if !dag.unknownRulesWarned {
log.Warnf("Unknown new rules activated (bit %d)",
bit)
dag.unknownRulesWarned = true
}
case ThresholdLockedIn:
window := checker.MinerConfirmationWindow()
activationChainHeight := window - (node.chainHeight % window)
log.Warnf("Unknown new rules are about to activate in "+
"%d blocks (bit %d)", activationChainHeight, bit)
}
}
return nil
}
// warnUnknownVersions logs a warning if a high enough percentage of the last
// blocks have unexpected versions.
//
// This function MUST be called with the chain state lock held (for writes)
func (dag *BlockDAG) warnUnknownVersions(node *blockNode) error {
// Nothing to do if already warned.
if dag.unknownVersionsWarned {
return nil
}
// Warn if enough previous blocks have unexpected versions.
numUpgraded := uint32(0)
for i := uint32(0); i < unknownVerNumToCheck && node != nil; i++ {
expectedVersion, err := dag.calcNextBlockVersion(node.selectedParent)
if err != nil {
return err
}
if (node.version & ^expectedVersion) != 0 {
numUpgraded++
}
node = node.selectedParent
}
if numUpgraded > unknownVerWarnNum {
log.Warn("Unknown block versions are being mined, so new " +
"rules might be in effect. Are you running the " +
"latest version of the software?")
dag.unknownVersionsWarned = true
}
return nil
}

btcd.go

@@ -1,350 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"fmt"
"net"
"net/http"
_ "net/http/pprof"
"os"
"path/filepath"
"runtime"
"runtime/debug"
"runtime/pprof"
"strings"
"github.com/kaspanet/kaspad/blockdag/indexers"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/database"
_ "github.com/kaspanet/kaspad/database/ffldb"
"github.com/kaspanet/kaspad/limits"
"github.com/kaspanet/kaspad/server"
"github.com/kaspanet/kaspad/signal"
"github.com/kaspanet/kaspad/util/fs"
"github.com/kaspanet/kaspad/util/panics"
"github.com/kaspanet/kaspad/version"
)
const (
// blockDbNamePrefix is the prefix for the block database name. The
// database type is appended to this value to form the full block
// database name.
blockDbNamePrefix = "blocks"
)
var (
cfg *config.Config
)
// winServiceMain is only invoked on Windows. It detects when btcd is running
// as a service and reacts accordingly.
var winServiceMain func() (bool, error)
// btcdMain is the real main function for btcd. It is necessary to work around
// the fact that deferred functions do not run when os.Exit() is called. The
// optional serverChan parameter is mainly used by the service code to be
// notified with the server once it is setup so it can gracefully stop it when
// requested from the service control manager.
func btcdMain(serverChan chan<- *server.Server) error {
// Load configuration and parse command line. This function also
// initializes logging and configures it accordingly.
err := config.LoadAndSetActiveConfig()
if err != nil {
return err
}
cfg = config.ActiveConfig()
defer panics.HandlePanic(btcdLog, nil, nil)
// Get a channel that will be closed when a shutdown signal has been
// triggered either from an OS signal such as SIGINT (Ctrl+C) or from
// another subsystem such as the RPC server.
interrupt := signal.InterruptListener()
defer btcdLog.Info("Shutdown complete")
// Show version at startup.
btcdLog.Infof("Version %s", version.Version())
// Enable http profiling server if requested.
if cfg.Profile != "" {
spawn(func() {
listenAddr := net.JoinHostPort("", cfg.Profile)
btcdLog.Infof("Profile server listening on %s", listenAddr)
profileRedirect := http.RedirectHandler("/debug/pprof",
http.StatusSeeOther)
http.Handle("/", profileRedirect)
btcdLog.Errorf("%s", http.ListenAndServe(listenAddr, nil))
})
}
// Write cpu profile if requested.
if cfg.CPUProfile != "" {
f, err := os.Create(cfg.CPUProfile)
if err != nil {
btcdLog.Errorf("Unable to create cpu profile: %s", err)
return err
}
pprof.StartCPUProfile(f)
defer f.Close()
defer pprof.StopCPUProfile()
}
// Perform upgrades to btcd as new versions require it.
if err := doUpgrades(); err != nil {
btcdLog.Errorf("%s", err)
return err
}
// Return now if an interrupt signal was triggered.
if signal.InterruptRequested(interrupt) {
return nil
}
if cfg.ResetDatabase {
err := removeDatabase()
if err != nil {
btcdLog.Errorf("%s", err)
return err
}
}
// Load the block database.
db, err := loadBlockDB()
if err != nil {
btcdLog.Errorf("%s", err)
return err
}
defer func() {
// Ensure the database is sync'd and closed on shutdown.
btcdLog.Infof("Gracefully shutting down the database...")
db.Close()
}()
// Return now if an interrupt signal was triggered.
if signal.InterruptRequested(interrupt) {
return nil
}
// Drop indexes and exit if requested.
//
// NOTE: The order is important here because dropping the tx index also
// drops the address index since it relies on it.
if cfg.DropAddrIndex {
if err := indexers.DropAddrIndex(db, interrupt); err != nil {
btcdLog.Errorf("%s", err)
return err
}
return nil
}
if cfg.DropTxIndex {
if err := indexers.DropTxIndex(db, interrupt); err != nil {
btcdLog.Errorf("%s", err)
return err
}
return nil
}
if cfg.DropAcceptanceIndex {
if err := indexers.DropAcceptanceIndex(db, interrupt); err != nil {
btcdLog.Errorf("%s", err)
return err
}
return nil
}
// Create server and start it.
server, err := server.NewServer(cfg.Listeners, db, config.ActiveConfig().NetParams(),
interrupt)
if err != nil {
// TODO: this logging could do with some beautifying.
btcdLog.Errorf("Unable to start server on %s: %s",
strings.Join(cfg.Listeners, ", "), err)
return err
}
defer func() {
btcdLog.Infof("Gracefully shutting down the server...")
server.Stop()
server.WaitForShutdown()
srvrLog.Infof("Server shutdown complete")
}()
server.Start()
if serverChan != nil {
serverChan <- server
}
// Wait until the interrupt signal is received from an OS signal or
// shutdown is requested through one of the subsystems such as the RPC
// server.
<-interrupt
return nil
}
func removeDatabase() error {
dbPath := blockDbPath(cfg.DbType)
return os.RemoveAll(dbPath)
}
// removeRegressionDB removes the existing regression test database if running
// in regression test mode and it already exists.
func removeRegressionDB(dbPath string) error {
// Don't do anything if not in regression test mode.
if !cfg.RegressionTest {
return nil
}
// Remove the old regression test database if it already exists.
fi, err := os.Stat(dbPath)
if err == nil {
btcdLog.Infof("Removing regression test database from '%s'", dbPath)
if fi.IsDir() {
err := os.RemoveAll(dbPath)
if err != nil {
return err
}
} else {
err := os.Remove(dbPath)
if err != nil {
return err
}
}
}
return nil
}
// dbPath returns the path to the block database given a database type.
func blockDbPath(dbType string) string {
// The database name is based on the database type.
dbName := blockDbNamePrefix + "_" + dbType
if dbType == "sqlite" {
dbName = dbName + ".db"
}
dbPath := filepath.Join(cfg.DataDir, dbName)
return dbPath
}
// warnMultipleDBs shows a warning if multiple block database types are detected.
// This is not a situation most users want. It is handy for development,
// however, to support multiple side-by-side databases.
func warnMultipleDBs() {
// This is intentionally not using the known db types which depend
// on the database types compiled into the binary since we want to
// detect legacy db types as well.
dbTypes := []string{"ffldb", "leveldb", "sqlite"}
duplicateDbPaths := make([]string, 0, len(dbTypes)-1)
for _, dbType := range dbTypes {
if dbType == cfg.DbType {
continue
}
// Store db path as a duplicate db if it exists.
dbPath := blockDbPath(dbType)
if fs.FileExists(dbPath) {
duplicateDbPaths = append(duplicateDbPaths, dbPath)
}
}
// Warn if there are extra databases.
if len(duplicateDbPaths) > 0 {
selectedDbPath := blockDbPath(cfg.DbType)
btcdLog.Warnf("WARNING: There are multiple block chain databases "+
"using different database types.\nYou probably don't "+
"want to waste disk space by having more than one.\n"+
"Your current database is located at [%s].\nThe "+
"additional database is located at %s", selectedDbPath,
strings.Join(duplicateDbPaths, ", "))
}
}
// loadBlockDB loads (or creates when needed) the block database taking into
// account the selected database backend and returns a handle to it. It also
// contains additional logic such warning the user if there are multiple
// databases which consume space on the file system and ensuring the regression
// test database is clean when in regression test mode.
func loadBlockDB() (database.DB, error) {
// The memdb backend does not have a file path associated with it, so
// handle it uniquely. We also don't want to worry about the multiple
// database type warnings when running with the memory database.
if cfg.DbType == "memdb" {
btcdLog.Infof("Creating block database in memory.")
db, err := database.Create(cfg.DbType)
if err != nil {
return nil, err
}
return db, nil
}
warnMultipleDBs()
// The database name is based on the database type.
dbPath := blockDbPath(cfg.DbType)
// The regression test is special in that it needs a clean database for
// each run, so remove it now if it already exists.
removeRegressionDB(dbPath)
btcdLog.Infof("Loading block database from '%s'", dbPath)
db, err := database.Open(cfg.DbType, dbPath, config.ActiveConfig().NetParams().Net)
if err != nil {
// Return the error if it's not because the database doesn't
// exist.
if dbErr, ok := err.(database.Error); !ok || dbErr.ErrorCode !=
database.ErrDbDoesNotExist {
return nil, err
}
// Create the db if it does not exist.
err = os.MkdirAll(cfg.DataDir, 0700)
if err != nil {
return nil, err
}
db, err = database.Create(cfg.DbType, dbPath, config.ActiveConfig().NetParams().Net)
if err != nil {
return nil, err
}
}
btcdLog.Info("Block database loaded")
return db, nil
}
func main() {
// Use all processor cores.
runtime.GOMAXPROCS(runtime.NumCPU())
// Block and transaction processing can cause bursty allocations. This
// limits the garbage collector from excessively overallocating during
// bursts. This value was arrived at with the help of profiling live
// usage.
debug.SetGCPercent(10)
// Up some limits.
if err := limits.SetLimits(); err != nil {
fmt.Fprintf(os.Stderr, "failed to set limits: %s\n", err)
os.Exit(1)
}
// Call serviceMain on Windows to handle running as a service. When
// the return isService flag is true, exit now since we ran as a
// service. Otherwise, just fall through to normal operation.
if runtime.GOOS == "windows" {
isService, err := winServiceMain()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
if isService {
os.Exit(0)
}
}
// Work around defer not working after os.Exit()
if err := btcdMain(nil); err != nil {
os.Exit(1)
}
}


@@ -1,68 +0,0 @@
btcec
=====
[![Build Status](https://travis-ci.org/btcsuite/btcd.png?branch=master)](https://travis-ci.org/btcsuite/btcec)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://godoc.org/github.com/kaspanet/kaspad/btcec?status.png)](http://godoc.org/github.com/kaspanet/kaspad/btcec)
Package btcec implements elliptic curve cryptography needed for working with
Bitcoin (secp256k1 only for now). It is designed so that it may be used with the
standard crypto/ecdsa packages provided with go. A comprehensive suite of tests
is provided to ensure proper functionality. Package btcec was originally based
on work from ThePiachu which is licensed under the same terms as Go, but it has
significantly diverged since then. The btcsuite developers' original is licensed
under the liberal ISC license.
Although this package was primarily written for btcd, it has intentionally been
designed so it can be used as a standalone package for any projects needing to
use secp256k1 elliptic curve cryptography.
## Installation and Updating
```bash
$ go get -u github.com/kaspanet/kaspad/btcec
```
## Examples
* [Sign Message](http://godoc.org/github.com/kaspanet/kaspad/btcec#example-package--SignMessage)
Demonstrates signing a message with a secp256k1 private key that is first
parsed from raw bytes, and serializing the generated signature.
* [Verify Signature](http://godoc.org/github.com/kaspanet/kaspad/btcec#example-package--VerifySignature)
Demonstrates verifying a secp256k1 signature against a public key that is
first parsed from raw bytes. The signature is also parsed from raw bytes.
* [Encryption](http://godoc.org/github.com/kaspanet/kaspad/btcec#example-package--EncryptMessage)
Demonstrates encrypting a message for a public key that is first parsed from
raw bytes, then decrypting it using the corresponding private key.
* [Decryption](http://godoc.org/github.com/kaspanet/kaspad/btcec#example-package--DecryptMessage)
Demonstrates decrypting a message using a private key that is first parsed
from raw bytes.
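The following is a minimal sketch (not taken from the linked examples) of
signing a double-SHA256 message hash and verifying the resulting signature; it
assumes the package's `PrivKeyFromBytes`, `Sign`, and `Verify` APIs and uses a
throwaway hard-coded key:
```go
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/kaspanet/kaspad/btcec"
	"github.com/kaspanet/kaspad/util/daghash"
)

func main() {
	// Throwaway private key for demonstration purposes only.
	pkBytes, err := hex.DecodeString("22a47fa09a223f2aa079edf85a7c2" +
		"d4f8720ee63e502ee2869afab7de234b80c")
	if err != nil {
		fmt.Println(err)
		return
	}
	privKey, pubKey := btcec.PrivKeyFromBytes(btcec.S256(), pkBytes)

	// Sign the double-SHA256 hash of the message.
	messageHash := daghash.DoubleHashB([]byte("test message"))
	signature, err := privKey.Sign(messageHash)
	if err != nil {
		fmt.Println(err)
		return
	}

	// Verify the signature against the public key.
	fmt.Println("verified:", signature.Verify(messageHash, pubKey))
}
```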
## GPG Verification Key
All official release tags are signed by Conformal so users can ensure the code
has not been tampered with and is coming from the btcsuite developers. To
verify the signature perform the following:
- Download the public key from the Conformal website at
https://opensource.conformal.com/GIT-GPG-KEY-conformal.txt
- Import the public key into your GPG keyring:
```bash
gpg --import GIT-GPG-KEY-conformal.txt
```
- Verify the release tag with the following command where `TAG_NAME` is a
placeholder for the specific tag:
```bash
git tag -v TAG_NAME
```
## License
Package btcec is licensed under the [copyfree](http://copyfree.org) ISC License
except for btcec.go and btcec_test.go which are under the same license as Go.


@@ -1,123 +0,0 @@
// Copyright 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package btcec
import "testing"
// BenchmarkAddJacobian benchmarks the secp256k1 curve addJacobian function with
// Z values of 1 so that the associated optimizations are used.
func BenchmarkAddJacobian(b *testing.B) {
b.StopTimer()
x1 := new(fieldVal).SetHex("34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6")
y1 := new(fieldVal).SetHex("0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232")
z1 := new(fieldVal).SetHex("1")
x2 := new(fieldVal).SetHex("34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6")
y2 := new(fieldVal).SetHex("0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232")
z2 := new(fieldVal).SetHex("1")
x3, y3, z3 := new(fieldVal), new(fieldVal), new(fieldVal)
curve := S256()
b.StartTimer()
for i := 0; i < b.N; i++ {
curve.addJacobian(x1, y1, z1, x2, y2, z2, x3, y3, z3)
}
}
// BenchmarkAddJacobianNotZOne benchmarks the secp256k1 curve addJacobian
// function with Z values other than one so the optimizations associated with
// Z=1 aren't used.
func BenchmarkAddJacobianNotZOne(b *testing.B) {
b.StopTimer()
x1 := new(fieldVal).SetHex("d3e5183c393c20e4f464acf144ce9ae8266a82b67f553af33eb37e88e7fd2718")
y1 := new(fieldVal).SetHex("5b8f54deb987ec491fb692d3d48f3eebb9454b034365ad480dda0cf079651190")
z1 := new(fieldVal).SetHex("2")
x2 := new(fieldVal).SetHex("91abba6a34b7481d922a4bd6a04899d5a686f6cf6da4e66a0cb427fb25c04bd4")
y2 := new(fieldVal).SetHex("03fede65e30b4e7576a2abefc963ddbf9fdccbf791b77c29beadefe49951f7d1")
z2 := new(fieldVal).SetHex("3")
x3, y3, z3 := new(fieldVal), new(fieldVal), new(fieldVal)
curve := S256()
b.StartTimer()
for i := 0; i < b.N; i++ {
curve.addJacobian(x1, y1, z1, x2, y2, z2, x3, y3, z3)
}
}
// BenchmarkScalarBaseMult benchmarks the secp256k1 curve ScalarBaseMult
// function.
func BenchmarkScalarBaseMult(b *testing.B) {
k := fromHex("d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575")
curve := S256()
for i := 0; i < b.N; i++ {
curve.ScalarBaseMult(k.Bytes())
}
}
// BenchmarkScalarBaseMultLarge benchmarks the secp256k1 curve ScalarBaseMult
// function with abnormally large k values.
func BenchmarkScalarBaseMultLarge(b *testing.B) {
k := fromHex("d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c005751111111011111110")
curve := S256()
for i := 0; i < b.N; i++ {
curve.ScalarBaseMult(k.Bytes())
}
}
// BenchmarkScalarMult benchmarks the secp256k1 curve ScalarMult function.
func BenchmarkScalarMult(b *testing.B) {
x := fromHex("34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6")
y := fromHex("0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232")
k := fromHex("d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575")
curve := S256()
for i := 0; i < b.N; i++ {
curve.ScalarMult(x, y, k.Bytes())
}
}
// BenchmarkNAF benchmarks the NAF function.
func BenchmarkNAF(b *testing.B) {
k := fromHex("d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575")
for i := 0; i < b.N; i++ {
NAF(k.Bytes())
}
}
// BenchmarkSigVerify benchmarks how long it takes the secp256k1 curve to
// verify signatures.
func BenchmarkSigVerify(b *testing.B) {
b.StopTimer()
// Randomly generated keypair.
// Private key: 9e0699c91ca1e3b7e3c9ba71eb71c89890872be97576010fe593fbf3fd57e66d
pubKey := PublicKey{
Curve: S256(),
X: fromHex("d2e670a19c6d753d1a6d8b20bd045df8a08fb162cf508956c31268c6d81ffdab"),
Y: fromHex("ab65528eefbb8057aa85d597258a3fbd481a24633bc9b47a9aa045c91371de52"),
}
// Double sha256 of []byte{0x01, 0x02, 0x03, 0x04}
msgHash := fromHex("8de472e2399610baaa7f84840547cd409434e31f5d3bd71e4d947f283874f9c0")
sig := Signature{
R: fromHex("fef45d2892953aa5bbcdb057b5e98b208f1617a7498af7eb765574e29b5d9c2c"),
S: fromHex("d47563f52aac6b04b55de236b7c515eb9311757db01e02cff079c3ca6efb063f"),
}
if !sig.Verify(msgHash.Bytes(), &pubKey) {
b.Errorf("Signature failed to verify")
return
}
b.StartTimer()
for i := 0; i < b.N; i++ {
sig.Verify(msgHash.Bytes(), &pubKey)
}
}
// BenchmarkFieldNormalize benchmarks how long it takes the internal field
// to perform normalization (which includes modular reduction).
func BenchmarkFieldNormalize(b *testing.B) {
// The normalize function is constant time so default value is fine.
f := new(fieldVal)
for i := 0; i < b.N; i++ {
f.Normalize()
}
}

File diff suppressed because it is too large


@@ -1,889 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Copyright 2011 ThePiachu. All rights reserved.
// Copyright 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package btcec
import (
"crypto/rand"
"fmt"
"math/big"
"testing"
)
// isJacobianOnS256Curve returns whether or not the point (x,y,z) is on the
// secp256k1 curve.
func isJacobianOnS256Curve(x, y, z *fieldVal) bool {
// Elliptic curve equation for secp256k1 is: y^2 = x^3 + 7
// In Jacobian coordinates, Y = y/z^3 and X = x/z^2
// Thus:
// (y/z^3)^2 = (x/z^2)^3 + 7
// y^2/z^6 = x^3/z^6 + 7
// y^2 = x^3 + 7*z^6
var y2, z2, x3, result fieldVal
y2.SquareVal(y).Normalize()
z2.SquareVal(z)
x3.SquareVal(x).Mul(x)
result.SquareVal(&z2).Mul(&z2).MulInt(7).Add(&x3).Normalize()
return y2.Equals(&result)
}
// TestAddJacobian tests addition of points projected in Jacobian coordinates.
func TestAddJacobian(t *testing.T) {
tests := []struct {
x1, y1, z1 string // Coordinates (in hex) of first point to add
x2, y2, z2 string // Coordinates (in hex) of second point to add
x3, y3, z3 string // Coordinates (in hex) of expected point
}{
// Addition with a point at infinity (left hand side).
// ∞ + P = P
{
"0",
"0",
"0",
"d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575",
"131c670d414c4546b88ac3ff664611b1c38ceb1c21d76369d7a7a0969d61d97d",
"1",
"d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575",
"131c670d414c4546b88ac3ff664611b1c38ceb1c21d76369d7a7a0969d61d97d",
"1",
},
// Addition with a point at infinity (right hand side).
// P + ∞ = P
{
"d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575",
"131c670d414c4546b88ac3ff664611b1c38ceb1c21d76369d7a7a0969d61d97d",
"1",
"0",
"0",
"0",
"d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575",
"131c670d414c4546b88ac3ff664611b1c38ceb1c21d76369d7a7a0969d61d97d",
"1",
},
// Addition with z1=z2=1 different x values.
{
"34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6",
"0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232",
"1",
"d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575",
"131c670d414c4546b88ac3ff664611b1c38ceb1c21d76369d7a7a0969d61d97d",
"1",
"0cfbc7da1e569b334460788faae0286e68b3af7379d5504efc25e4dba16e46a6",
"e205f79361bbe0346b037b4010985dbf4f9e1e955e7d0d14aca876bfa79aad87",
"44a5646b446e3877a648d6d381370d9ef55a83b666ebce9df1b1d7d65b817b2f",
},
// Addition with z1=z2=1 same x opposite y.
// P(x, y, z) + P(x, -y, z) = infinity
{
"34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6",
"0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232",
"1",
"34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6",
"f48e156428cf0276dc092da5856e182288d7569f97934a56fe44be60f0d359fd",
"1",
"0",
"0",
"0",
},
// Addition with z1=z2=1 same point.
// P(x, y, z) + P(x, y, z) = 2P
{
"34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6",
"0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232",
"1",
"34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6",
"0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232",
"1",
"ec9f153b13ee7bd915882859635ea9730bf0dc7611b2c7b0e37ee64f87c50c27",
"b082b53702c466dcf6e984a35671756c506c67c2fcb8adb408c44dd0755c8f2a",
"16e3d537ae61fb1247eda4b4f523cfbaee5152c0d0d96b520376833c1e594464",
},
// Addition with z1=z2 (!=1) different x values.
{
"d3e5183c393c20e4f464acf144ce9ae8266a82b67f553af33eb37e88e7fd2718",
"5b8f54deb987ec491fb692d3d48f3eebb9454b034365ad480dda0cf079651190",
"2",
"5d2fe112c21891d440f65a98473cb626111f8a234d2cd82f22172e369f002147",
"98e3386a0a622a35c4561ffb32308d8e1c6758e10ebb1b4ebd3d04b4eb0ecbe8",
"2",
"cfbc7da1e569b334460788faae0286e68b3af7379d5504efc25e4dba16e46a60",
"817de4d86ef80d1ac0ded00426176fd3e787a5579f43452b2a1db021e6ac3778",
"129591ad11b8e1de99235b4e04dc367bd56a0ed99baf3a77c6c75f5a6e05f08d",
},
// Addition with z1=z2 (!=1) same x opposite y.
// P(x, y, z) + P(x, -y, z) = infinity
{
"d3e5183c393c20e4f464acf144ce9ae8266a82b67f553af33eb37e88e7fd2718",
"5b8f54deb987ec491fb692d3d48f3eebb9454b034365ad480dda0cf079651190",
"2",
"d3e5183c393c20e4f464acf144ce9ae8266a82b67f553af33eb37e88e7fd2718",
"a470ab21467813b6e0496d2c2b70c11446bab4fcbc9a52b7f225f30e869aea9f",
"2",
"0",
"0",
"0",
},
// Addition with z1=z2 (!=1) same point.
// P(x, y, z) + P(x, y, z) = 2P
{
"d3e5183c393c20e4f464acf144ce9ae8266a82b67f553af33eb37e88e7fd2718",
"5b8f54deb987ec491fb692d3d48f3eebb9454b034365ad480dda0cf079651190",
"2",
"d3e5183c393c20e4f464acf144ce9ae8266a82b67f553af33eb37e88e7fd2718",
"5b8f54deb987ec491fb692d3d48f3eebb9454b034365ad480dda0cf079651190",
"2",
"9f153b13ee7bd915882859635ea9730bf0dc7611b2c7b0e37ee65073c50fabac",
"2b53702c466dcf6e984a35671756c506c67c2fcb8adb408c44dd125dc91cb988",
"6e3d537ae61fb1247eda4b4f523cfbaee5152c0d0d96b520376833c2e5944a11",
},
// Addition with z1!=z2 and z2=1 different x values.
{
"d3e5183c393c20e4f464acf144ce9ae8266a82b67f553af33eb37e88e7fd2718",
"5b8f54deb987ec491fb692d3d48f3eebb9454b034365ad480dda0cf079651190",
"2",
"d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575",
"131c670d414c4546b88ac3ff664611b1c38ceb1c21d76369d7a7a0969d61d97d",
"1",
"3ef1f68795a6ccd1181e23eab80a1b9a2cebdcde755413bf097936eb5b91b4f3",
"0bef26c377c068d606f6802130bb7e9f3c3d2abcfa1a295950ed81133561cb04",
"252b235a2371c3bd3246b69c09b86cf7aad41db3375e74ef8d8ebeb4dc0be11a",
},
// Addition with z1!=z2 and z2=1 same x opposite y.
// P(x, y, z) + P(x, -y, z) = infinity
{
"d3e5183c393c20e4f464acf144ce9ae8266a82b67f553af33eb37e88e7fd2718",
"5b8f54deb987ec491fb692d3d48f3eebb9454b034365ad480dda0cf079651190",
"2",
"34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6",
"f48e156428cf0276dc092da5856e182288d7569f97934a56fe44be60f0d359fd",
"1",
"0",
"0",
"0",
},
// Addition with z1!=z2 and z2=1 same point.
// P(x, y, z) + P(x, y, z) = 2P
{
"d3e5183c393c20e4f464acf144ce9ae8266a82b67f553af33eb37e88e7fd2718",
"5b8f54deb987ec491fb692d3d48f3eebb9454b034365ad480dda0cf079651190",
"2",
"34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6",
"0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232",
"1",
"9f153b13ee7bd915882859635ea9730bf0dc7611b2c7b0e37ee65073c50fabac",
"2b53702c466dcf6e984a35671756c506c67c2fcb8adb408c44dd125dc91cb988",
"6e3d537ae61fb1247eda4b4f523cfbaee5152c0d0d96b520376833c2e5944a11",
},
// Addition with z1!=z2 and z2!=1 different x values.
{
"d3e5183c393c20e4f464acf144ce9ae8266a82b67f553af33eb37e88e7fd2718",
"5b8f54deb987ec491fb692d3d48f3eebb9454b034365ad480dda0cf079651190",
"2",
"91abba6a34b7481d922a4bd6a04899d5a686f6cf6da4e66a0cb427fb25c04bd4",
"03fede65e30b4e7576a2abefc963ddbf9fdccbf791b77c29beadefe49951f7d1",
"3",
"3f07081927fd3f6dadd4476614c89a09eba7f57c1c6c3b01fa2d64eac1eef31e",
"949166e04ebc7fd95a9d77e5dfd88d1492ecffd189792e3944eb2b765e09e031",
"eb8cba81bcffa4f44d75427506737e1f045f21e6d6f65543ee0e1d163540c931",
},
// Addition with z1!=z2 and z2!=1 same x opposite y.
// P(x, y, z) + P(x, -y, z) = infinity
{
"d3e5183c393c20e4f464acf144ce9ae8266a82b67f553af33eb37e88e7fd2718",
"5b8f54deb987ec491fb692d3d48f3eebb9454b034365ad480dda0cf079651190",
"2",
"dcc3768780c74a0325e2851edad0dc8a566fa61a9e7fc4a34d13dcb509f99bc7",
"cafc41904dd5428934f7d075129c8ba46eb622d4fc88d72cd1401452664add18",
"3",
"0",
"0",
"0",
},
// Addition with z1!=z2 and z2!=1 same point.
// P(x, y, z) + P(x, y, z) = 2P
{
"d3e5183c393c20e4f464acf144ce9ae8266a82b67f553af33eb37e88e7fd2718",
"5b8f54deb987ec491fb692d3d48f3eebb9454b034365ad480dda0cf079651190",
"2",
"dcc3768780c74a0325e2851edad0dc8a566fa61a9e7fc4a34d13dcb509f99bc7",
"3503be6fb22abd76cb082f8aed63745b9149dd2b037728d32ebfebac99b51f17",
"3",
"9f153b13ee7bd915882859635ea9730bf0dc7611b2c7b0e37ee65073c50fabac",
"2b53702c466dcf6e984a35671756c506c67c2fcb8adb408c44dd125dc91cb988",
"6e3d537ae61fb1247eda4b4f523cfbaee5152c0d0d96b520376833c2e5944a11",
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Convert hex to field values.
x1 := new(fieldVal).SetHex(test.x1)
y1 := new(fieldVal).SetHex(test.y1)
z1 := new(fieldVal).SetHex(test.z1)
x2 := new(fieldVal).SetHex(test.x2)
y2 := new(fieldVal).SetHex(test.y2)
z2 := new(fieldVal).SetHex(test.z2)
x3 := new(fieldVal).SetHex(test.x3)
y3 := new(fieldVal).SetHex(test.y3)
z3 := new(fieldVal).SetHex(test.z3)
// Ensure the test data is using points that are actually on
// the curve (or the point at infinity).
if !z1.IsZero() && !isJacobianOnS256Curve(x1, y1, z1) {
t.Errorf("#%d first point is not on the curve -- "+
"invalid test data", i)
continue
}
if !z2.IsZero() && !isJacobianOnS256Curve(x2, y2, z2) {
t.Errorf("#%d second point is not on the curve -- "+
"invalid test data", i)
continue
}
if !z3.IsZero() && !isJacobianOnS256Curve(x3, y3, z3) {
t.Errorf("#%d expected point is not on the curve -- "+
"invalid test data", i)
continue
}
// Add the two points.
rx, ry, rz := new(fieldVal), new(fieldVal), new(fieldVal)
S256().addJacobian(x1, y1, z1, x2, y2, z2, rx, ry, rz)
// Ensure result matches expected.
if !rx.Equals(x3) || !ry.Equals(y3) || !rz.Equals(z3) {
t.Errorf("#%d wrong result\ngot: (%v, %v, %v)\n"+
"want: (%v, %v, %v)", i, rx, ry, rz, x3, y3, z3)
continue
}
}
}
// TestAddAffine tests addition of points in affine coordinates.
func TestAddAffine(t *testing.T) {
tests := []struct {
x1, y1 string // Coordinates (in hex) of first point to add
x2, y2 string // Coordinates (in hex) of second point to add
x3, y3 string // Coordinates (in hex) of expected point
}{
// Addition with a point at infinity (left hand side).
// ∞ + P = P
{
"0",
"0",
"d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575",
"131c670d414c4546b88ac3ff664611b1c38ceb1c21d76369d7a7a0969d61d97d",
"d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575",
"131c670d414c4546b88ac3ff664611b1c38ceb1c21d76369d7a7a0969d61d97d",
},
// Addition with a point at infinity (right hand side).
// P + ∞ = P
{
"d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575",
"131c670d414c4546b88ac3ff664611b1c38ceb1c21d76369d7a7a0969d61d97d",
"0",
"0",
"d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575",
"131c670d414c4546b88ac3ff664611b1c38ceb1c21d76369d7a7a0969d61d97d",
},
// Addition with different x values.
{
"34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6",
"0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232",
"d74bf844b0862475103d96a611cf2d898447e288d34b360bc885cb8ce7c00575",
"131c670d414c4546b88ac3ff664611b1c38ceb1c21d76369d7a7a0969d61d97d",
"fd5b88c21d3143518d522cd2796f3d726793c88b3e05636bc829448e053fed69",
"21cf4f6a5be5ff6380234c50424a970b1f7e718f5eb58f68198c108d642a137f",
},
// Addition with same x opposite y.
// P(x, y) + P(x, -y) = infinity
{
"34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6",
"0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232",
"34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6",
"f48e156428cf0276dc092da5856e182288d7569f97934a56fe44be60f0d359fd",
"0",
"0",
},
// Addition with same point.
// P(x, y) + P(x, y) = 2P
{
"34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6",
"0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232",
"34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6",
"0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232",
"59477d88ae64a104dbb8d31ec4ce2d91b2fe50fa628fb6a064e22582196b365b",
"938dc8c0f13d1e75c987cb1a220501bd614b0d3dd9eb5c639847e1240216e3b6",
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Convert hex to field values.
x1, y1 := fromHex(test.x1), fromHex(test.y1)
x2, y2 := fromHex(test.x2), fromHex(test.y2)
x3, y3 := fromHex(test.x3), fromHex(test.y3)
// Ensure the test data is using points that are actually on
// the curve (or the point at infinity).
if !(x1.Sign() == 0 && y1.Sign() == 0) && !S256().IsOnCurve(x1, y1) {
t.Errorf("#%d first point is not on the curve -- "+
"invalid test data", i)
continue
}
if !(x2.Sign() == 0 && y2.Sign() == 0) && !S256().IsOnCurve(x2, y2) {
t.Errorf("#%d second point is not on the curve -- "+
"invalid test data", i)
continue
}
if !(x3.Sign() == 0 && y3.Sign() == 0) && !S256().IsOnCurve(x3, y3) {
t.Errorf("#%d expected point is not on the curve -- "+
"invalid test data", i)
continue
}
// Add the two points.
rx, ry := S256().Add(x1, y1, x2, y2)
// Ensure result matches expected.
if rx.Cmp(x3) != 0 || ry.Cmp(y3) != 0 {
t.Errorf("#%d wrong result\ngot: (%x, %x)\n"+
"want: (%x, %x)", i, rx, ry, x3, y3)
continue
}
}
}
// TestDoubleJacobian tests doubling of points projected in Jacobian
// coordinates.
func TestDoubleJacobian(t *testing.T) {
tests := []struct {
x1, y1, z1 string // Coordinates (in hex) of point to double
x3, y3, z3 string // Coordinates (in hex) of expected point
}{
// Doubling a point at infinity is still infinity.
{
"0",
"0",
"0",
"0",
"0",
"0",
},
// Doubling with z1=1.
{
"34f9460f0e4f08393d192b3c5133a6ba099aa0ad9fd54ebccfacdfa239ff49c6",
"0b71ea9bd730fd8923f6d25a7a91e7dd7728a960686cb5a901bb419e0f2ca232",
"1",
"ec9f153b13ee7bd915882859635ea9730bf0dc7611b2c7b0e37ee64f87c50c27",
"b082b53702c466dcf6e984a35671756c506c67c2fcb8adb408c44dd0755c8f2a",
"16e3d537ae61fb1247eda4b4f523cfbaee5152c0d0d96b520376833c1e594464",
},
// Doubling with z1!=1.
{
"d3e5183c393c20e4f464acf144ce9ae8266a82b67f553af33eb37e88e7fd2718",
"5b8f54deb987ec491fb692d3d48f3eebb9454b034365ad480dda0cf079651190",
"2",
"9f153b13ee7bd915882859635ea9730bf0dc7611b2c7b0e37ee65073c50fabac",
"2b53702c466dcf6e984a35671756c506c67c2fcb8adb408c44dd125dc91cb988",
"6e3d537ae61fb1247eda4b4f523cfbaee5152c0d0d96b520376833c2e5944a11",
},
// From btcd issue #709.
{
"201e3f75715136d2f93c4f4598f91826f94ca01f4233a5bd35de9708859ca50d",
"bdf18566445e7562c6ada68aef02d498d7301503de5b18c6aef6e2b1722412e1",
"0000000000000000000000000000000000000000000000000000000000000001",
"4a5e0559863ebb4e9ed85f5c4fa76003d05d9a7626616e614a1f738621e3c220",
"00000000000000000000000000000000000000000000000000000001b1388778",
"7be30acc88bceac58d5b4d15de05a931ae602a07bcb6318d5dedc563e4482993",
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Convert hex to field values.
x1 := new(fieldVal).SetHex(test.x1)
y1 := new(fieldVal).SetHex(test.y1)
z1 := new(fieldVal).SetHex(test.z1)
x3 := new(fieldVal).SetHex(test.x3)
y3 := new(fieldVal).SetHex(test.y3)
z3 := new(fieldVal).SetHex(test.z3)
// Ensure the test data is using points that are actually on
// the curve (or the point at infinity).
if !z1.IsZero() && !isJacobianOnS256Curve(x1, y1, z1) {
t.Errorf("#%d first point is not on the curve -- "+
"invalid test data", i)
continue
}
if !z3.IsZero() && !isJacobianOnS256Curve(x3, y3, z3) {
t.Errorf("#%d expected point is not on the curve -- "+
"invalid test data", i)
continue
}
// Double the point.
rx, ry, rz := new(fieldVal), new(fieldVal), new(fieldVal)
S256().doubleJacobian(x1, y1, z1, rx, ry, rz)
// Ensure result matches expected.
if !rx.Equals(x3) || !ry.Equals(y3) || !rz.Equals(z3) {
t.Errorf("#%d wrong result\ngot: (%v, %v, %v)\n"+
"want: (%v, %v, %v)", i, rx, ry, rz, x3, y3, z3)
continue
}
}
}
// TestDoubleAffine tests doubling of points in affine coordinates.
func TestDoubleAffine(t *testing.T) {
tests := []struct {
x1, y1 string // Coordinates (in hex) of point to double
x3, y3 string // Coordinates (in hex) of expected point
}{
// Doubling a point at infinity is still infinity.
// 2*∞ = ∞ (point at infinity)
{
"0",
"0",
"0",
"0",
},
// Random points.
{
"e41387ffd8baaeeb43c2faa44e141b19790e8ac1f7ff43d480dc132230536f86",
"1b88191d430f559896149c86cbcb703193105e3cf3213c0c3556399836a2b899",
"88da47a089d333371bd798c548ef7caae76e737c1980b452d367b3cfe3082c19",
"3b6f659b09a362821dfcfefdbfbc2e59b935ba081b6c249eb147b3c2100b1bc1",
},
{
"b3589b5d984f03ef7c80aeae444f919374799edf18d375cab10489a3009cff0c",
"c26cf343875b3630e15bccc61202815b5d8f1fd11308934a584a5babe69db36a",
"e193860172998751e527bb12563855602a227fc1f612523394da53b746bb2fb1",
"2bfcf13d2f5ab8bb5c611fab5ebbed3dc2f057062b39a335224c22f090c04789",
},
{
"2b31a40fbebe3440d43ac28dba23eee71c62762c3fe3dbd88b4ab82dc6a82340",
"9ba7deb02f5c010e217607fd49d58db78ec273371ea828b49891ce2fd74959a1",
"2c8d5ef0d343b1a1a48aa336078eadda8481cb048d9305dc4fdf7ee5f65973a2",
"bb4914ac729e26d3cd8f8dc8f702f3f4bb7e0e9c5ae43335f6e94c2de6c3dc95",
},
{
"61c64b760b51981fab54716d5078ab7dffc93730b1d1823477e27c51f6904c7a",
"ef6eb16ea1a36af69d7f66524c75a3a5e84c13be8fbc2e811e0563c5405e49bd",
"5f0dcdd2595f5ad83318a0f9da481039e36f135005420393e72dfca985b482f4",
"a01c849b0837065c1cb481b0932c441f49d1cab1b4b9f355c35173d93f110ae0",
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Convert hex to field values.
x1, y1 := fromHex(test.x1), fromHex(test.y1)
x3, y3 := fromHex(test.x3), fromHex(test.y3)
// Ensure the test data is using points that are actually on
// the curve (or the point at infinity).
if !(x1.Sign() == 0 && y1.Sign() == 0) && !S256().IsOnCurve(x1, y1) {
t.Errorf("#%d first point is not on the curve -- "+
"invalid test data", i)
continue
}
if !(x3.Sign() == 0 && y3.Sign() == 0) && !S256().IsOnCurve(x3, y3) {
t.Errorf("#%d expected point is not on the curve -- "+
"invalid test data", i)
continue
}
// Double the point.
rx, ry := S256().Double(x1, y1)
// Ensure result matches expected.
if rx.Cmp(x3) != 0 || ry.Cmp(y3) != 0 {
t.Errorf("#%d wrong result\ngot: (%x, %x)\n"+
"want: (%x, %x)", i, rx, ry, x3, y3)
continue
}
}
}
func TestOnCurve(t *testing.T) {
s256 := S256()
if !s256.IsOnCurve(s256.Params().Gx, s256.Params().Gy) {
t.Errorf("FAIL S256")
}
}
type baseMultTest struct {
k string
x, y string
}
//TODO: add more test vectors
var s256BaseMultTests = []baseMultTest{
{
"AA5E28D6A97A2479A65527F7290311A3624D4CC0FA1578598EE3C2613BF99522",
"34F9460F0E4F08393D192B3C5133A6BA099AA0AD9FD54EBCCFACDFA239FF49C6",
"B71EA9BD730FD8923F6D25A7A91E7DD7728A960686CB5A901BB419E0F2CA232",
},
{
"7E2B897B8CEBC6361663AD410835639826D590F393D90A9538881735256DFAE3",
"D74BF844B0862475103D96A611CF2D898447E288D34B360BC885CB8CE7C00575",
"131C670D414C4546B88AC3FF664611B1C38CEB1C21D76369D7A7A0969D61D97D",
},
{
"6461E6DF0FE7DFD05329F41BF771B86578143D4DD1F7866FB4CA7E97C5FA945D",
"E8AECC370AEDD953483719A116711963CE201AC3EB21D3F3257BB48668C6A72F",
"C25CAF2F0EBA1DDB2F0F3F47866299EF907867B7D27E95B3873BF98397B24EE1",
},
{
"376A3A2CDCD12581EFFF13EE4AD44C4044B8A0524C42422A7E1E181E4DEECCEC",
"14890E61FCD4B0BD92E5B36C81372CA6FED471EF3AA60A3E415EE4FE987DABA1",
"297B858D9F752AB42D3BCA67EE0EB6DCD1C2B7B0DBE23397E66ADC272263F982",
},
{
"1B22644A7BE026548810C378D0B2994EEFA6D2B9881803CB02CEFF865287D1B9",
"F73C65EAD01C5126F28F442D087689BFA08E12763E0CEC1D35B01751FD735ED3",
"F449A8376906482A84ED01479BD18882B919C140D638307F0C0934BA12590BDE",
},
}
//TODO: test different curves as well?
func TestBaseMult(t *testing.T) {
s256 := S256()
for i, e := range s256BaseMultTests {
k, ok := new(big.Int).SetString(e.k, 16)
if !ok {
t.Errorf("%d: bad value for k: %s", i, e.k)
}
x, y := s256.ScalarBaseMult(k.Bytes())
if fmt.Sprintf("%X", x) != e.x || fmt.Sprintf("%X", y) != e.y {
t.Errorf("%d: bad output for k=%s: got (%X, %X), want (%s, %s)", i, e.k, x, y, e.x, e.y)
}
if testing.Short() && i > 5 {
break
}
}
}
func TestBaseMultVerify(t *testing.T) {
s256 := S256()
for bytes := 1; bytes < 40; bytes++ {
for i := 0; i < 30; i++ {
data := make([]byte, bytes)
_, err := rand.Read(data)
if err != nil {
t.Errorf("failed to read random data for %d", i)
continue
}
x, y := s256.ScalarBaseMult(data)
xWant, yWant := s256.ScalarMult(s256.Gx, s256.Gy, data)
if x.Cmp(xWant) != 0 || y.Cmp(yWant) != 0 {
t.Errorf("%d: bad output for %X: got (%X, %X), want (%X, %X)", i, data, x, y, xWant, yWant)
}
if testing.Short() && i > 2 {
break
}
}
}
}
func TestScalarMult(t *testing.T) {
tests := []struct {
x string
y string
k string
rx string
ry string
}{
// base mult, essentially.
{
"79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798",
"483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8",
"18e14a7b6a307f426a94f8114701e7c8e774e7f9a47e2c2035db29a206321725",
"50863ad64a87ae8a2fe83c1af1a8403cb53f53e486d8511dad8a04887e5b2352",
"2cd470243453a299fa9e77237716103abc11a1df38855ed6f2ee187e9c582ba6",
},
// From btcd issue #709.
{
"000000000000000000000000000000000000000000000000000000000000002c",
"420e7a99bba18a9d3952597510fd2b6728cfeafc21a4e73951091d4d8ddbe94e",
"a2e8ba2e8ba2e8ba2e8ba2e8ba2e8ba219b51835b55cc30ebfe2f6599bc56f58",
"a2112dcdfbcd10ae1133a358de7b82db68e0a3eb4b492cc8268d1e7118c98788",
"27fc7463b7bb3c5f98ecf2c84a6272bb1681ed553d92c69f2dfe25a9f9fd3836",
},
}
s256 := S256()
for i, test := range tests {
x, _ := new(big.Int).SetString(test.x, 16)
y, _ := new(big.Int).SetString(test.y, 16)
k, _ := new(big.Int).SetString(test.k, 16)
xWant, _ := new(big.Int).SetString(test.rx, 16)
yWant, _ := new(big.Int).SetString(test.ry, 16)
xGot, yGot := s256.ScalarMult(x, y, k.Bytes())
if xGot.Cmp(xWant) != 0 || yGot.Cmp(yWant) != 0 {
t.Fatalf("%d: bad output: got (%X, %X), want (%X, %X)", i, xGot, yGot, xWant, yWant)
}
}
}
func TestScalarMultRand(t *testing.T) {
// Strategy for this test:
// Start from the generator point and repeatedly multiply by a fresh random
// exponent; each iteration produces a new point that is fed into the next.
// Since scalar multiplications compose, the running product of all the
// exponents (mod N) applied to the generator via ScalarBaseMult must land
// on the same point, which is what we verify on every iteration.
s256 := S256()
x, y := s256.Gx, s256.Gy
exponent := big.NewInt(1)
for i := 0; i < 1024; i++ {
data := make([]byte, 32)
_, err := rand.Read(data)
if err != nil {
t.Fatalf("failed to read random data at %d", i)
break
}
x, y = s256.ScalarMult(x, y, data)
exponent.Mul(exponent, new(big.Int).SetBytes(data))
xWant, yWant := s256.ScalarBaseMult(exponent.Bytes())
if x.Cmp(xWant) != 0 || y.Cmp(yWant) != 0 {
t.Fatalf("%d: bad output for %X: got (%X, %X), want (%X, %X)", i, data, x, y, xWant, yWant)
break
}
}
}
func TestSplitK(t *testing.T) {
tests := []struct {
k string
k1, k2 string
s1, s2 int
}{
{
"6df2b5d30854069ccdec40ae022f5c948936324a4e9ebed8eb82cfd5a6b6d766",
"00000000000000000000000000000000b776e53fb55f6b006a270d42d64ec2b1",
"00000000000000000000000000000000d6cc32c857f1174b604eefc544f0c7f7",
-1, -1,
},
{
"6ca00a8f10632170accc1b3baf2a118fa5725f41473f8959f34b8f860c47d88d",
"0000000000000000000000000000000007b21976c1795723c1bfbfa511e95b84",
"00000000000000000000000000000000d8d2d5f9d20fc64fd2cf9bda09a5bf90",
1, -1,
},
{
"b2eda8ab31b259032d39cbc2a234af17fcee89c863a8917b2740b67568166289",
"00000000000000000000000000000000507d930fecda7414fc4a523b95ef3c8c",
"00000000000000000000000000000000f65ffb179df189675338c6185cb839be",
-1, -1,
},
{
"f6f00e44f179936f2befc7442721b0633f6bafdf7161c167ffc6f7751980e3a0",
"0000000000000000000000000000000008d0264f10bcdcd97da3faa38f85308d",
"0000000000000000000000000000000065fed1506eb6605a899a54e155665f79",
-1, -1,
},
{
"8679085ab081dc92cdd23091ce3ee998f6b320e419c3475fae6b5b7d3081996e",
"0000000000000000000000000000000089fbf24fbaa5c3c137b4f1cedc51d975",
"00000000000000000000000000000000d38aa615bd6754d6f4d51ccdaf529fea",
-1, -1,
},
{
"6b1247bb7931dfcae5b5603c8b5ae22ce94d670138c51872225beae6bba8cdb3",
"000000000000000000000000000000008acc2a521b21b17cfb002c83be62f55d",
"0000000000000000000000000000000035f0eff4d7430950ecb2d94193dedc79",
-1, -1,
},
{
"a2e8ba2e8ba2e8ba2e8ba2e8ba2e8ba219b51835b55cc30ebfe2f6599bc56f58",
"0000000000000000000000000000000045c53aa1bb56fcd68c011e2dad6758e4",
"00000000000000000000000000000000a2e79d200f27f2360fba57619936159b",
-1, -1,
},
}
s256 := S256()
for i, test := range tests {
k, ok := new(big.Int).SetString(test.k, 16)
if !ok {
t.Errorf("%d: bad value for k: %s", i, test.k)
}
k1, k2, k1Sign, k2Sign := s256.splitK(k.Bytes())
k1str := fmt.Sprintf("%064x", k1)
if test.k1 != k1str {
t.Errorf("%d: bad k1: got %v, want %v", i, k1str, test.k1)
}
k2str := fmt.Sprintf("%064x", k2)
if test.k2 != k2str {
t.Errorf("%d: bad k2: got %v, want %v", i, k2str, test.k2)
}
if test.s1 != k1Sign {
t.Errorf("%d: bad k1 sign: got %d, want %d", i, k1Sign, test.s1)
}
if test.s2 != k2Sign {
t.Errorf("%d: bad k2 sign: got %d, want %d", i, k2Sign, test.s2)
}
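// Reconstruct k = k1 + k2*λ (mod N) from the signed halves and make sure
// it matches the original scalar.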
k1Int := new(big.Int).SetBytes(k1)
k1SignInt := new(big.Int).SetInt64(int64(k1Sign))
k1Int.Mul(k1Int, k1SignInt)
k2Int := new(big.Int).SetBytes(k2)
k2SignInt := new(big.Int).SetInt64(int64(k2Sign))
k2Int.Mul(k2Int, k2SignInt)
gotK := new(big.Int).Mul(k2Int, s256.lambda)
gotK.Add(k1Int, gotK)
gotK.Mod(gotK, s256.N)
if k.Cmp(gotK) != 0 {
t.Errorf("%d: bad k: got %X, want %X", i, gotK.Bytes(), k.Bytes())
}
}
}
func TestSplitKRand(t *testing.T) {
s256 := S256()
for i := 0; i < 1024; i++ {
bytesK := make([]byte, 32)
_, err := rand.Read(bytesK)
if err != nil {
t.Fatalf("failed to read random data at %d", i)
break
}
k := new(big.Int).SetBytes(bytesK)
k1, k2, k1Sign, k2Sign := s256.splitK(bytesK)
k1Int := new(big.Int).SetBytes(k1)
k1SignInt := new(big.Int).SetInt64(int64(k1Sign))
k1Int.Mul(k1Int, k1SignInt)
k2Int := new(big.Int).SetBytes(k2)
k2SignInt := new(big.Int).SetInt64(int64(k2Sign))
k2Int.Mul(k2Int, k2SignInt)
gotK := new(big.Int).Mul(k2Int, s256.lambda)
gotK.Add(k1Int, gotK)
gotK.Mod(gotK, s256.N)
if k.Cmp(gotK) != 0 {
t.Errorf("%d: bad k: got %X, want %X", i, gotK.Bytes(), k.Bytes())
}
}
}
// Test this curve's usage with the ecdsa package.
func testKeyGeneration(t *testing.T, c *KoblitzCurve, tag string) {
priv, err := NewPrivateKey(c)
if err != nil {
t.Errorf("%s: error: %s", tag, err)
return
}
if !c.IsOnCurve(priv.PublicKey.X, priv.PublicKey.Y) {
t.Errorf("%s: public key invalid: %s", tag, err)
}
}
func TestKeyGeneration(t *testing.T) {
testKeyGeneration(t, S256(), "S256")
}
func testSignAndVerify(t *testing.T, c *KoblitzCurve, tag string) {
priv, _ := NewPrivateKey(c)
pub := priv.PubKey()
hashed := []byte("testing")
sig, err := priv.Sign(hashed)
if err != nil {
t.Errorf("%s: error signing: %s", tag, err)
return
}
if !sig.Verify(hashed, pub) {
t.Errorf("%s: Verify failed", tag)
}
hashed[0] ^= 0xff
if sig.Verify(hashed, pub) {
t.Errorf("%s: Verify always works!", tag)
}
}
func TestSignAndVerify(t *testing.T) {
testSignAndVerify(t, S256(), "S256")
}
func TestNAF(t *testing.T) {
tests := []string{
"6df2b5d30854069ccdec40ae022f5c948936324a4e9ebed8eb82cfd5a6b6d766",
"b776e53fb55f6b006a270d42d64ec2b1",
"d6cc32c857f1174b604eefc544f0c7f7",
"45c53aa1bb56fcd68c011e2dad6758e4",
"a2e79d200f27f2360fba57619936159b",
}
negOne := big.NewInt(-1)
one := big.NewInt(1)
two := big.NewInt(2)
for i, test := range tests {
want, _ := new(big.Int).SetString(test, 16)
nafPos, nafNeg := NAF(want.Bytes())
got := big.NewInt(0)
// Check that the NAF representation comes up with the right number
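// For example, the NAF of 7 is the signed-digit string (1, 0, 0, -1),
// i.e. 8 - 1: bit 3 set in nafPos and bit 0 set in nafNeg.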
for i := 0; i < len(nafPos); i++ {
bytePos := nafPos[i]
byteNeg := nafNeg[i]
for j := 7; j >= 0; j-- {
got.Mul(got, two)
if bytePos&0x80 == 0x80 {
got.Add(got, one)
} else if byteNeg&0x80 == 0x80 {
got.Add(got, negOne)
}
bytePos <<= 1
byteNeg <<= 1
}
}
if got.Cmp(want) != 0 {
t.Errorf("%d: Failed NAF got %X want %X", i, got, want)
}
}
}
func TestNAFRand(t *testing.T) {
negOne := big.NewInt(-1)
one := big.NewInt(1)
two := big.NewInt(2)
for i := 0; i < 1024; i++ {
data := make([]byte, 32)
_, err := rand.Read(data)
if err != nil {
t.Fatalf("failed to read random data at %d", i)
break
}
nafPos, nafNeg := NAF(data)
want := new(big.Int).SetBytes(data)
got := big.NewInt(0)
// Check that the NAF representation comes up with the right number
for i := 0; i < len(nafPos); i++ {
bytePos := nafPos[i]
byteNeg := nafNeg[i]
for j := 7; j >= 0; j-- {
got.Mul(got, two)
if bytePos&0x80 == 0x80 {
got.Add(got, one)
} else if byteNeg&0x80 == 0x80 {
got.Add(got, negOne)
}
bytePos <<= 1
byteNeg <<= 1
}
}
if got.Cmp(want) != 0 {
t.Errorf("%d: Failed NAF got %X want %X", i, got, want)
}
}
}


@@ -1,21 +0,0 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
/*
Package btcec implements support for the elliptic curves needed for bitcoin.
Bitcoin uses elliptic curve cryptography using Koblitz curves
(specifically secp256k1) for cryptographic functions. See
http://www.secg.org/collateral/sec2_final.pdf for details on the
standard.
This package provides the data structures and functions implementing the
crypto/elliptic Curve interface in order to permit using these curves
with the standard crypto/ecdsa package provided with Go. Helper
functionality is provided to parse signatures and public keys from
standard formats. It was designed for use with btcd, but should be
general enough for other uses of elliptic curve crypto. It was originally based
on some initial work by ThePiachu, but has significantly diverged since then.
*/
package btcec


@@ -1,152 +0,0 @@
package btcec
import (
"crypto/sha256"
"encoding/binary"
"math/big"
"github.com/kaspanet/kaspad/util/daghash"
)
// Multiset tracks the state of a multiset as used to calculate the ECMH
// (elliptic curve multiset hash) hash of an unordered set. The state is
// a point on the curve. New elements are hashed onto a point on the curve
// and then added to the current state. Hence elements can be added in any
// order and we can also remove elements to return to a prior hash.
type Multiset struct {
curve *KoblitzCurve
x *big.Int
y *big.Int
}
// NewMultiset returns an empty multiset. The hash of an empty set
// is the 32 byte value of zero.
func NewMultiset(curve *KoblitzCurve) *Multiset {
return &Multiset{curve: curve, x: big.NewInt(0), y: big.NewInt(0)}
}
// NewMultisetFromPoint initializes a new multiset with the given x, y
// coordinate.
func NewMultisetFromPoint(curve *KoblitzCurve, x, y *big.Int) *Multiset {
var copyX, copyY big.Int
if x != nil {
copyX.Set(x)
}
if y != nil {
copyY.Set(y)
}
return &Multiset{curve: curve, x: &copyX, y: &copyY}
}
// NewMultisetFromDataSlice takes a curve and a slice of byte
// slices, creates an empty multiset, hashes each data element,
// adds it to the multiset, and returns the resulting multiset.
func NewMultisetFromDataSlice(curve *KoblitzCurve, datas [][]byte) *Multiset {
ms := NewMultiset(curve)
for _, data := range datas {
x, y := hashToPoint(curve, data)
ms.addPoint(x, y)
}
return ms
}
// Clone returns a clone of this multiset.
func (ms *Multiset) Clone() *Multiset {
return NewMultisetFromPoint(ms.curve, ms.x, ms.y)
}
// Add hashes the data onto the curve and returns
// a multiset with the new resulting point.
func (ms *Multiset) Add(data []byte) *Multiset {
newMs := ms.Clone()
x, y := hashToPoint(ms.curve, data)
newMs.addPoint(x, y)
return newMs
}
func (ms *Multiset) addPoint(x, y *big.Int) {
ms.x, ms.y = ms.curve.Add(ms.x, ms.y, x, y)
}
// Remove hashes the data onto the curve, subtracts
// the point from the existing multiset, and returns
// a multiset with the new point. This function
// will execute regardless of whether or not the passed
// data was previously added to the set. Hence if you
// remove an element that was never added and also remove
// all the elements that were added, you will not get
// back to the point at infinity (empty set).
func (ms *Multiset) Remove(data []byte) *Multiset {
newMs := ms.Clone()
x, y := hashToPoint(ms.curve, data)
newMs.removePoint(x, y)
return newMs
}
func (ms *Multiset) removePoint(x, y *big.Int) {
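// The additive inverse of a point (x, y) on the curve is (x, -y mod P), so
// adding the negated point is equivalent to subtracting the original point.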
y.Neg(y).Mod(y, ms.curve.P)
ms.x, ms.y = ms.curve.Add(ms.x, ms.y, x, y)
}
// Union will add the point of the passed multiset instance to the point
// of this multiset and will return a multiset with the resulting point.
func (ms *Multiset) Union(otherMultiset *Multiset) *Multiset {
newMs := ms.Clone()
otherMsCopy := otherMultiset.Clone()
newMs.addPoint(otherMsCopy.x, otherMsCopy.y)
return newMs
}
// Subtract will remove the point of the passed multiset instance from the point
// of this multiset and will return a multiset with the resulting point.
func (ms *Multiset) Subtract(otherMultiset *Multiset) *Multiset {
newMs := ms.Clone()
otherMsCopy := otherMultiset.Clone()
newMs.removePoint(otherMsCopy.x, otherMsCopy.y)
return newMs
}
// Hash serializes and returns the hash of the multiset. The hash of an empty
// set is the 32 byte value of zero. The hash of a non-empty multiset is the
// sha256 hash of the 32 byte x value concatenated with the 32 byte y value.
func (ms *Multiset) Hash() *daghash.Hash {
if ms.x.Sign() == 0 && ms.y.Sign() == 0 {
return &daghash.Hash{}
}
hash := sha256.Sum256(append(ms.x.Bytes(), ms.y.Bytes()...))
castHash := daghash.Hash(hash)
return &castHash
}
// Point returns a copy of the x and y coordinates of the current multiset state.
func (ms *Multiset) Point() (x *big.Int, y *big.Int) {
var copyX, copyY big.Int
copyX.Set(ms.x)
copyY.Set(ms.y)
return &copyX, &copyY
}
// hashToPoint hashes the passed data into a point on the curve. The x value
// is sha256(n, sha256(data)) where n starts at zero. If the resulting x value
// is not in the field or x^3+7 is not a quadratic residue, then n is
// incremented and we try again. Each iteration succeeds with roughly a 50%
// chance, so the expected number of iterations is about two.
func hashToPoint(curve *KoblitzCurve, data []byte) (x *big.Int, y *big.Int) {
i := uint64(0)
var err error
h := sha256.Sum256(data)
n := make([]byte, 8)
for {
binary.LittleEndian.PutUint64(n, i)
h2 := sha256.Sum256(append(n, h[:]...))
x = new(big.Int).SetBytes(h2[:])
y, err = decompressPoint(curve, x, false)
if err == nil && x.Cmp(curve.N) < 0 {
break
}
i++
}
return x, y
}
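Since the multiset state is just a point on the curve and point addition is
commutative, insertion order never affects the resulting hash. A minimal
sketch of that property, using only the exported API above (the element byte
slices are arbitrary):
```go
package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/btcec"
)

func main() {
	a, b := []byte("alpha"), []byte("beta")

	// Adding the same elements in opposite orders yields the same hash.
	m1 := btcec.NewMultiset(btcec.S256()).Add(a).Add(b)
	m2 := btcec.NewMultiset(btcec.S256()).Add(b).Add(a)
	fmt.Println(m1.Hash().String() == m2.Hash().String()) // true

	// Removing an element returns the multiset to its prior hash.
	m3 := m1.Remove(b)
	m4 := btcec.NewMultiset(btcec.S256()).Add(a)
	fmt.Println(m3.Hash().String() == m4.Hash().String()) // true
}
```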


@@ -1,213 +0,0 @@
package btcec
import (
"bytes"
"encoding/hex"
"testing"
"github.com/kaspanet/kaspad/util/daghash"
)
var testVectors = []struct {
dataElementHex string
point [2]string
ecmhHash string
cumulativeHash string
}{
{
"982051fd1e4ba744bbbe680e1fee14677ba1a3c3540bf7b1cdb606e857233e0e00000000010000000100f2052a0100000043410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858eeac",
[2]string{"4f9a5dce69067bf28603e73a7af4c3650b16539b95bad05eee95dfc94d1efe2c", "346d5b777881f2729e7f89b2de4e8e79c7f2f42d1a0b25a8f10becb66e2d0f98"},
"9378d88aa60cfba3032cb19f27891886e26fc6de1afa340c1787a633591983f8",
"",
},
{
"d5fdcc541e25de1c7a5addedf24858b8bb665c9f36ef744ee42c316022c90f9b00000000020000000100f2052a010000004341047211a824f55b505228e4c3d5194c1fcfaa15a456abdf37f9b9d97a4040afc073dee6c89064984f03385237d92167c13e236446b417ab79a0fcae412ae3316b77ac",
[2]string{"68cf91eb2388a0287c13d46011c73fb8efb6be89c0867a47feccb2d11c390d2d", "f42ba72b1079d3d941881836f88b5dcd7c207a6a4839f129272c77ebb7194d42"},
"e2f3dc6f3aa867c50bd41b80aa3bdafcc9e1d13a6292ff8a5da95da123d185ef",
"afaa1f7ba0bd8a789422fdd6968639a4b8575baf7d54342a987073d038fdbafa",
},
{
"44f672226090d85db9a9f2fbfe5f0f9609b387af7be5b7fbb7a1767c831c9e9900000000030000000100f2052a0100000043410494b9d3e76c5b1629ecf97fff95d7a4bbdac87cc26099ada28066c6ff1eb9191223cd897194a08d0c2726c5747f1db49e8cf90e75dc3e3550ae9b30086f3cd5aaac",
[2]string{"359c6f59859d1d5af8e7081905cb6bb734c010be8680c14b5a89ee315694fc2b", "fb6ba531d4bd83b14c970ad1bec332a8ae9a05706cd5df7fd91a2f2cc32482fe"},
"ffed6804617a4a33b1037cdd26426e61fde0faa2c0cc045efffa17c00ff4adcf",
"e236a694532be6a4926ab8d5b1ff9cbfe638178e0008b0a8c5e87c3da2cdbc1c",
},
}
func TestHashToPoint(t *testing.T) {
for _, test := range testVectors {
data, err := hex.DecodeString(test.dataElementHex)
if err != nil {
t.Fatal(err)
}
x, y := hashToPoint(S256(), data)
if hex.EncodeToString(x.Bytes()) != test.point[0] || hex.EncodeToString(y.Bytes()) != test.point[1] {
t.Fatal("hashToPoint return incorrect point")
}
}
}
func TestMultiset_Hash(t *testing.T) {
for _, test := range testVectors {
data, err := hex.DecodeString(test.dataElementHex)
if err != nil {
t.Fatal(err)
}
x, y := hashToPoint(S256(), data)
m := NewMultisetFromPoint(S256(), x, y)
if m.Hash().String() != test.ecmhHash {
t.Fatal("Multiset-Hash returned incorrect hash serialization")
}
}
m := NewMultiset(S256())
emptySet := m.Hash()
zeroHash := daghash.Hash{}
if !bytes.Equal(emptySet[:], zeroHash[:]) {
t.Fatal("Empty set did not return zero hash")
}
}
func TestMultiset_AddRemove(t *testing.T) {
m := NewMultiset(S256())
for i, test := range testVectors {
data, err := hex.DecodeString(test.dataElementHex)
if err != nil {
t.Fatal(err)
}
m = m.Add(data)
if test.cumulativeHash != "" && m.Hash().String() != test.cumulativeHash {
t.Fatalf("Test #%d: Multiset-Add returned incorrect hash. Expected %s but got %s", i, test.cumulativeHash, m.Hash())
}
}
for i := len(testVectors) - 1; i > 0; i-- {
data, err := hex.DecodeString(testVectors[i].dataElementHex)
if err != nil {
t.Fatal(err)
}
m = m.Remove(data)
if testVectors[i-1].cumulativeHash != "" && m.Hash().String() != testVectors[i-1].cumulativeHash {
t.Fatalf("Test #%d: Multiset-Remove returned incorrect hash. Expected %s but got %s", i, testVectors[i-1].cumulativeHash, m.Hash())
}
}
}
func TestMultiset_UnionSubtract(t *testing.T) {
m1 := NewMultiset(S256())
zeroHash := m1.Hash().String()
for _, test := range testVectors {
data, err := hex.DecodeString(test.dataElementHex)
if err != nil {
t.Fatal(err)
}
m1 = m1.Add(data)
}
m2 := NewMultiset(S256())
for _, test := range testVectors {
data, err := hex.DecodeString(test.dataElementHex)
if err != nil {
t.Fatal(err)
}
m2 = m2.Remove(data)
}
m1m2Union := m1.Union(m2)
if m1m2Union.Hash().String() != zeroHash {
t.Fatalf("m1m2Union was expected to return to have zero hash, but was %s instead", m1m2Union.Hash())
}
m1Inverse := NewMultiset(S256()).Subtract(m1)
m1InverseM1Union := m1.Union(m1Inverse)
if m1InverseM1Union.Hash().String() != zeroHash {
t.Fatalf("m1InverseM1Union was expected to have zero hash, but got %s instead", m1InverseM1Union.Hash())
}
m1SubtractM1 := m1.Subtract(m1)
if m1SubtractM1.Hash().String() != zeroHash {
t.Fatalf("m1SubtractM1 was expected to have zero hash, but got %s instead", m1SubtractM1.Hash())
}
}
func TestMultiset_Commutativity(t *testing.T) {
m := NewMultiset(S256())
zeroHash := m.Hash().String()
// Check that if we subtract values from zero and then re-add them, we return to zero.
for _, test := range testVectors {
data, err := hex.DecodeString(test.dataElementHex)
if err != nil {
t.Fatal(err)
}
m = m.Remove(data)
}
for _, test := range testVectors {
data, err := hex.DecodeString(test.dataElementHex)
if err != nil {
t.Fatal(err)
}
m = m.Add(data)
}
if m.Hash().String() != zeroHash {
t.Fatalf("m was expected to be zero hash, but was %s instead", m.Hash())
}
// Here we first remove an element from an empty multiset and then add some
// other elements. We then create a second empty multiset, add the same
// elements to it, and finally remove the same element we removed from the
// first multiset. By commutativity, the two results should be identical.
removeIndex := 0
removeData, err := hex.DecodeString(testVectors[removeIndex].dataElementHex)
if err != nil {
t.Fatal(err)
}
m1 := NewMultiset(S256())
m1 = m1.Remove(removeData)
for i, test := range testVectors {
if i != removeIndex {
data, err := hex.DecodeString(test.dataElementHex)
if err != nil {
t.Fatal(err)
}
m1 = m1.Add(data)
}
}
m2 := NewMultiset(S256())
for i, test := range testVectors {
if i != removeIndex {
data, err := hex.DecodeString(test.dataElementHex)
if err != nil {
t.Fatal(err)
}
m2 = m2.Add(data)
}
}
m2 = m2.Remove(removeData)
if m1.Hash().String() != m2.Hash().String() {
t.Fatalf("m1 and m2 was exepcted to have the same hash, but got instead m1 %s and m2 %s", m1.Hash(), m2.Hash())
}
}
func TestMultiset_NewMultisetFromDataSlice(t *testing.T) {
m1 := NewMultiset(S256())
datas := make([][]byte, 0, len(testVectors))
for _, test := range testVectors {
data, err := hex.DecodeString(test.dataElementHex)
if err != nil {
t.Fatal(err)
}
datas = append(datas, data)
m1 = m1.Add(data)
}
m2 := NewMultisetFromDataSlice(S256(), datas)
if m1.Hash().String() != m2.Hash().String() {
t.Fatalf("m1 and m2 was exepcted to have the same hash, but got instead m1 %s and m2 %s", m1.Hash(), m2.Hash())
}
}


@@ -1,87 +0,0 @@
// Copyright (c) 2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package btcec_test
import (
"encoding/hex"
"fmt"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/btcec"
)
// This example demonstrates signing a message with a secp256k1 private key that
// is first parsed from raw bytes and serializing the generated signature.
func Example_signMessage() {
// Decode a hex-encoded private key.
pkBytes, err := hex.DecodeString("22a47fa09a223f2aa079edf85a7c2d4f87" +
"20ee63e502ee2869afab7de234b80c")
if err != nil {
fmt.Println(err)
return
}
privKey, pubKey := btcec.PrivKeyFromBytes(btcec.S256(), pkBytes)
// Sign a message using the private key.
message := "test message"
messageHash := daghash.DoubleHashB([]byte(message))
signature, err := privKey.Sign(messageHash)
if err != nil {
fmt.Println(err)
return
}
// Serialize and display the signature.
fmt.Printf("Serialized Signature: %x\n", signature.Serialize())
// Verify the signature for the message using the public key.
verified := signature.Verify(messageHash, pubKey)
fmt.Printf("Signature Verified? %v\n", verified)
// Output:
// Serialized Signature: 275d1c73a01b023377633cd1eaa6460e8cd0542ba089ad50450a8197246b3ef6d6836ef5a18226132c305e2b2060a699529cadc816fc98d63bc7d05771acec4d
// Signature Verified? true
}
// This example demonstrates verifying a secp256k1 signature against a public
// key that is first parsed from raw bytes. The signature is also parsed from
// raw bytes.
func Example_verifySignature() {
// Decode hex-encoded serialized public key.
pubKeyBytes, err := hex.DecodeString("02a673638cb9587cb68ea08dbef685c" +
"6f2d2a751a8b3c6f2a7e9a4999e6e4bfaf5")
if err != nil {
fmt.Println(err)
return
}
pubKey, err := btcec.ParsePubKey(pubKeyBytes, btcec.S256())
if err != nil {
fmt.Println(err)
return
}
// Decode hex-encoded serialized signature.
sigBytes, err := hex.DecodeString("275d1c73a01b023377633cd1eaa6460e8cd0542ba" +
"089ad50450a8197246b3ef6d6836ef5a18226132c305e2b2060a699529cadc816fc98d63bc7d05771acec4d")
if err != nil {
fmt.Println(err)
return
}
signature, err := btcec.ParseSignature(sigBytes)
if err != nil {
fmt.Println(err)
return
}
// Verify the signature for the message using the public key.
message := "test message"
messageHash := daghash.DoubleHashB([]byte(message))
verified := signature.Verify(messageHash, pubKey)
fmt.Println("Signature Verified?", verified)
// Output:
// Signature Verified? true
}

File diff suppressed because it is too large


@@ -1,822 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Copyright (c) 2013-2016 Dave Collins
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package btcec
import (
"reflect"
"testing"
)
// TestSetInt ensures that setting a field value to various native integers
// works as expected.
func TestSetInt(t *testing.T) {
tests := []struct {
in uint
raw [10]uint32
}{
{5, [10]uint32{5, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
// 2^26
{67108864, [10]uint32{67108864, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
// 2^26 + 1
{67108865, [10]uint32{67108865, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
// 2^32 - 1
{4294967295, [10]uint32{4294967295, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
f := new(fieldVal).SetInt(test.in)
if !reflect.DeepEqual(f.n, test.raw) {
t.Errorf("fieldVal.Set #%d wrong result\ngot: %v\n"+
"want: %v", i, f.n, test.raw)
continue
}
}
}
// TestZero ensures that zeroing a field value works as expected.
func TestZero(t *testing.T) {
f := new(fieldVal).SetInt(2)
f.Zero()
for idx, rawInt := range f.n {
if rawInt != 0 {
t.Errorf("internal field integer at index #%d is not "+
"zero - got %d", idx, rawInt)
}
}
}
// TestIsZero ensures that checking if a field IsZero works as expected.
func TestIsZero(t *testing.T) {
f := new(fieldVal)
if !f.IsZero() {
t.Errorf("new field value is not zero - got %v (rawints %x)", f,
f.n)
}
f.SetInt(1)
if f.IsZero() {
t.Errorf("field claims it's zero when it's not - got %v "+
"(raw rawints %x)", f, f.n)
}
f.Zero()
if !f.IsZero() {
t.Errorf("field claims it's not zero when it is - got %v "+
"(raw rawints %x)", f, f.n)
}
}
// TestStringer ensures the stringer returns the appropriate hex string.
func TestStringer(t *testing.T) {
tests := []struct {
in string
expected string
}{
{"0", "0000000000000000000000000000000000000000000000000000000000000000"},
{"1", "0000000000000000000000000000000000000000000000000000000000000001"},
{"a", "000000000000000000000000000000000000000000000000000000000000000a"},
{"b", "000000000000000000000000000000000000000000000000000000000000000b"},
{"c", "000000000000000000000000000000000000000000000000000000000000000c"},
{"d", "000000000000000000000000000000000000000000000000000000000000000d"},
{"e", "000000000000000000000000000000000000000000000000000000000000000e"},
{"f", "000000000000000000000000000000000000000000000000000000000000000f"},
{"f0", "00000000000000000000000000000000000000000000000000000000000000f0"},
// 2^26-1
{
"3ffffff",
"0000000000000000000000000000000000000000000000000000000003ffffff",
},
// 2^32-1
{
"ffffffff",
"00000000000000000000000000000000000000000000000000000000ffffffff",
},
// 2^64-1
{
"ffffffffffffffff",
"000000000000000000000000000000000000000000000000ffffffffffffffff",
},
// 2^96-1
{
"ffffffffffffffffffffffff",
"0000000000000000000000000000000000000000ffffffffffffffffffffffff",
},
// 2^128-1
{
"ffffffffffffffffffffffffffffffff",
"00000000000000000000000000000000ffffffffffffffffffffffffffffffff",
},
// 2^160-1
{
"ffffffffffffffffffffffffffffffffffffffff",
"000000000000000000000000ffffffffffffffffffffffffffffffffffffffff",
},
// 2^192-1
{
"ffffffffffffffffffffffffffffffffffffffffffffffff",
"0000000000000000ffffffffffffffffffffffffffffffffffffffffffffffff",
},
// 2^224-1
{
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
"00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
},
// 2^256-4294968273 (the secp256k1 prime, so should result in 0)
{
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f",
"0000000000000000000000000000000000000000000000000000000000000000",
},
// 2^256-4294968274 (the secp256k1 prime+1, so should result in 1)
{
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30",
"0000000000000000000000000000000000000000000000000000000000000001",
},
// Invalid hex
{"g", "0000000000000000000000000000000000000000000000000000000000000000"},
{"1h", "0000000000000000000000000000000000000000000000000000000000000000"},
{"i1", "0000000000000000000000000000000000000000000000000000000000000000"},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
f := new(fieldVal).SetHex(test.in)
result := f.String()
if result != test.expected {
t.Errorf("fieldVal.String #%d wrong result\ngot: %v\n"+
"want: %v", i, result, test.expected)
continue
}
}
}
// TestNormalize ensures that normalizing the internal field words works as
// expected.
func TestNormalize(t *testing.T) {
tests := []struct {
raw [10]uint32 // Intentionally denormalized value
normalized [10]uint32 // Normalized form of the raw value
}{
{
[10]uint32{0x00000005, 0, 0, 0, 0, 0, 0, 0, 0, 0},
[10]uint32{0x00000005, 0, 0, 0, 0, 0, 0, 0, 0, 0},
},
// 2^26
{
[10]uint32{0x04000000, 0x0, 0, 0, 0, 0, 0, 0, 0, 0},
[10]uint32{0x00000000, 0x1, 0, 0, 0, 0, 0, 0, 0, 0},
},
// 2^26 + 1
{
[10]uint32{0x04000001, 0x0, 0, 0, 0, 0, 0, 0, 0, 0},
[10]uint32{0x00000001, 0x1, 0, 0, 0, 0, 0, 0, 0, 0},
},
// 2^32 - 1
{
[10]uint32{0xffffffff, 0x00, 0, 0, 0, 0, 0, 0, 0, 0},
[10]uint32{0x03ffffff, 0x3f, 0, 0, 0, 0, 0, 0, 0, 0},
},
// 2^32
{
[10]uint32{0x04000000, 0x3f, 0, 0, 0, 0, 0, 0, 0, 0},
[10]uint32{0x00000000, 0x40, 0, 0, 0, 0, 0, 0, 0, 0},
},
// 2^32 + 1
{
[10]uint32{0x04000001, 0x3f, 0, 0, 0, 0, 0, 0, 0, 0},
[10]uint32{0x00000001, 0x40, 0, 0, 0, 0, 0, 0, 0, 0},
},
// 2^64 - 1
{
[10]uint32{0xffffffff, 0xffffffc0, 0xfc0, 0, 0, 0, 0, 0, 0, 0},
[10]uint32{0x03ffffff, 0x03ffffff, 0xfff, 0, 0, 0, 0, 0, 0, 0},
},
// 2^64
{
[10]uint32{0x04000000, 0x03ffffff, 0x0fff, 0, 0, 0, 0, 0, 0, 0},
[10]uint32{0x00000000, 0x00000000, 0x1000, 0, 0, 0, 0, 0, 0, 0},
},
// 2^64 + 1
{
[10]uint32{0x04000001, 0x03ffffff, 0x0fff, 0, 0, 0, 0, 0, 0, 0},
[10]uint32{0x00000001, 0x00000000, 0x1000, 0, 0, 0, 0, 0, 0, 0},
},
// 2^96 - 1
{
[10]uint32{0xffffffff, 0xffffffc0, 0xffffffc0, 0x3ffc0, 0, 0, 0, 0, 0, 0},
[10]uint32{0x03ffffff, 0x03ffffff, 0x03ffffff, 0x3ffff, 0, 0, 0, 0, 0, 0},
},
// 2^96
{
[10]uint32{0x04000000, 0x03ffffff, 0x03ffffff, 0x3ffff, 0, 0, 0, 0, 0, 0},
[10]uint32{0x00000000, 0x00000000, 0x00000000, 0x40000, 0, 0, 0, 0, 0, 0},
},
// 2^128 - 1
{
[10]uint32{0xffffffff, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffc0, 0, 0, 0, 0, 0},
[10]uint32{0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0xffffff, 0, 0, 0, 0, 0},
},
// 2^128
{
[10]uint32{0x04000000, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x0ffffff, 0, 0, 0, 0, 0},
[10]uint32{0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x1000000, 0, 0, 0, 0, 0},
},
// 2^256 - 4294968273 (secp256k1 prime)
{
[10]uint32{0xfffffc2f, 0xffffff80, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0x3fffc0},
[10]uint32{0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x000000},
},
// Prime larger than P where both first and second words are larger
// than P's first and second words
{
[10]uint32{0xfffffc30, 0xffffff86, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0x3fffc0},
[10]uint32{0x00000001, 0x00000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x000000},
},
// Prime larger than P where only the second word is larger
// than P's second word.
{
[10]uint32{0xfffffc2a, 0xffffff87, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0x3fffc0},
[10]uint32{0x03fffffb, 0x00000006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x000000},
},
// 2^256 - 1
{
[10]uint32{0xffffffff, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0xffffffc0, 0x3fffc0},
[10]uint32{0x000003d0, 0x00000040, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x000000},
},
// Prime with field representation such that the initial
// reduction does not result in a carry to bit 256.
//
// 2^256 - 4294968273 (secp256k1 prime)
{
[10]uint32{0x03fffc2f, 0x03ffffbf, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x003fffff},
[10]uint32{0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
},
// Prime larger than P that reduces to a value which is still
// larger than P when it has a magnitude of 1 due to its first
// word and does not result in a carry to bit 256.
//
// 2^256 - 4294968272 (secp256k1 prime + 1)
{
[10]uint32{0x03fffc30, 0x03ffffbf, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x003fffff},
[10]uint32{0x00000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
},
// Prime larger than P that reduces to a value which is still
// larger than P when it has a magnitude of 1 due to its second
// word and does not result in a carry to bit 256.
//
// 2^256 - 4227859409 (secp256k1 prime + 0x4000000)
{
[10]uint32{0x03fffc2f, 0x03ffffc0, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x003fffff},
[10]uint32{0x00000000, 0x00000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
},
// Prime larger than P that reduces to a value which is still
// larger than P when it has a magnitude of 1 due to a carry to
// bit 256, but would not be without the carry. These values
// come from the fact that P is 2^256 - 4294968273 and 977 is
// the low order word in the internal field representation.
//
// 2^256 * 5 - ((4294968273 - (977+1)) * 4)
{
[10]uint32{0x03ffffff, 0x03fffeff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x0013fffff},
[10]uint32{0x00001314, 0x00000040, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x000000000},
},
// Prime larger than P that reduces to a value which is still
// larger than P when it has a magnitude of 1 due to both a
// carry to bit 256 and the first word.
{
[10]uint32{0x03fffc30, 0x03ffffbf, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x07ffffff, 0x003fffff},
[10]uint32{0x00000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001},
},
// Prime larger than P that reduces to a value which is still
// larger than P when it has a magnitude of 1 due to both a
// carry to bit 256 and the second word.
//
{
[10]uint32{0x03fffc2f, 0x03ffffc0, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x07ffffff, 0x003fffff},
[10]uint32{0x00000000, 0x00000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001},
},
// Prime larger than P that reduces to a value which is still
// larger than P when it has a magnitude of 1 due to a carry to
// bit 256 and the first and second words.
//
{
[10]uint32{0x03fffc30, 0x03ffffc0, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x03ffffff, 0x07ffffff, 0x003fffff},
[10]uint32{0x00000001, 0x00000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001},
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
f := new(fieldVal)
f.n = test.raw
f.Normalize()
if !reflect.DeepEqual(f.n, test.normalized) {
t.Errorf("fieldVal.Normalize #%d wrong result\n"+
"got: %x\nwant: %x", i, f.n, test.normalized)
continue
}
}
}
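A note on the representation exercised by the raw/normalized arrays above: a fully normalized fieldVal stores a 256-bit value as ten little-endian limbs of 26 bits each, with limb 9 carrying the remaining top 22 bits. The following is a minimal sketch of that mapping (toLimbs is a hypothetical helper, not part of btcec, and assumes only the "math/big" import):
// toLimbs splits a 256-bit integer into the ten little-endian 26-bit
// limbs used by the test vectors above; limb 9 holds the top 22 bits.
func toLimbs(v *big.Int) [10]uint32 {
	var limbs [10]uint32
	mask := big.NewInt(1<<26 - 1)
	t := new(big.Int).Set(v)
	for i := range limbs {
		limbs[i] = uint32(new(big.Int).And(t, mask).Uint64())
		t.Rsh(t, 26)
	}
	return limbs
}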
// TestIsOdd ensures that checking whether a field value is odd via IsOdd works as expected.
func TestIsOdd(t *testing.T) {
tests := []struct {
in string // hex encoded value
expected bool // expected oddness
}{
{"0", false},
{"1", true},
{"2", false},
// 2^32 - 1
{"ffffffff", true},
// 2^64 - 2
{"fffffffffffffffe", false},
// secp256k1 prime
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", true},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
f := new(fieldVal).SetHex(test.in)
result := f.IsOdd()
if result != test.expected {
t.Errorf("fieldVal.IsOdd #%d wrong result\n"+
"got: %v\nwant: %v", i, result, test.expected)
continue
}
}
}
// TestEquals ensures that checking two field values for equality via Equals
// works as expected.
func TestEquals(t *testing.T) {
tests := []struct {
in1 string // hex encoded value
in2 string // hex encoded value
expected bool // expected equality
}{
{"0", "0", true},
{"0", "1", false},
{"1", "0", false},
// 2^32 - 1 == 2^32 - 1?
{"ffffffff", "ffffffff", true},
// 2^64 - 1 == 2^64 - 2?
{"ffffffffffffffff", "fffffffffffffffe", false},
// 0 == prime (mod prime)?
{"0", "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", true},
// 1 == prime+1 (mod prime)?
{"1", "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", true},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
f := new(fieldVal).SetHex(test.in1).Normalize()
f2 := new(fieldVal).SetHex(test.in2).Normalize()
result := f.Equals(f2)
if result != test.expected {
t.Errorf("fieldVal.Equals #%d wrong result\n"+
"got: %v\nwant: %v", i, result, test.expected)
continue
}
}
}
// TestNegate ensures that negating field values via Negate works as expected.
func TestNegate(t *testing.T) {
tests := []struct {
in string // hex encoded value
expected string // expected hex encoded value
}{
// secp256k1 prime (aka 0)
{"0", "0"},
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", "0"},
{"0", "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f"},
// secp256k1 prime-1
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e", "1"},
{"1", "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e"},
// secp256k1 prime-2
{"2", "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2d"},
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2d", "2"},
// Random sampling
{
"b3d9aac9c5e43910b4385b53c7e78c21d4cd5f8e683c633aed04c233efc2e120",
"4c2655363a1bc6ef4bc7a4ac381873de2b32a07197c39cc512fb3dcb103d1b0f",
},
{
"f8a85984fee5a12a7c8dd08830d83423c937d77c379e4a958e447a25f407733f",
"757a67b011a5ed583722f77cf27cbdc36c82883c861b56a71bb85d90bf888f0",
},
{
"45ee6142a7fda884211e93352ed6cb2807800e419533be723a9548823ece8312",
"ba119ebd5802577bdee16ccad12934d7f87ff1be6acc418dc56ab77cc131791d",
},
{
"53c2a668f07e411a2e473e1c3b6dcb495dec1227af27673761d44afe5b43d22b",
"ac3d59970f81bee5d1b8c1e3c49234b6a213edd850d898c89e2bb500a4bc2a04",
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
f := new(fieldVal).SetHex(test.in).Normalize()
expected := new(fieldVal).SetHex(test.expected).Normalize()
result := f.Negate(1).Normalize()
if !result.Equals(expected) {
t.Errorf("fieldVal.Negate #%d wrong result\n"+
"got: %v\nwant: %v", i, result, expected)
continue
}
}
}
// TestAddInt ensures that adding an integer to field values via AddInt works as
// expected.
func TestAddInt(t *testing.T) {
tests := []struct {
in1 string // hex encoded value
in2 uint // unsigned integer to add to the value above
expected string // expected hex encoded value
}{
{"0", 1, "1"},
{"1", 0, "1"},
{"1", 1, "2"},
// secp256k1 prime-1 + 1
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e", 1, "0"},
// secp256k1 prime + 1
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", 1, "1"},
// Random samples.
{
"ff95ad9315aff04ab4af0ce673620c7145dc85d03bab5ba4b09ca2c4dec2d6c1",
0x10f,
"ff95ad9315aff04ab4af0ce673620c7145dc85d03bab5ba4b09ca2c4dec2d7d0",
},
{
"44bdae6b772e7987941f1ba314e6a5b7804a4c12c00961b57d20f41deea9cecf",
0x2cf11d41,
"44bdae6b772e7987941f1ba314e6a5b7804a4c12c00961b57d20f41e1b9aec10",
},
{
"88c3ecae67b591935fb1f6a9499c35315ffad766adca665c50b55f7105122c9c",
0x4829aa2d,
"88c3ecae67b591935fb1f6a9499c35315ffad766adca665c50b55f714d3bd6c9",
},
{
"8523e9edf360ca32a95aae4e57fcde5a542b471d08a974d94ea0ee09a015e2a6",
0xa21265a5,
"8523e9edf360ca32a95aae4e57fcde5a542b471d08a974d94ea0ee0a4228484b",
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
f := new(fieldVal).SetHex(test.in1).Normalize()
expected := new(fieldVal).SetHex(test.expected).Normalize()
result := f.AddInt(test.in2).Normalize()
if !result.Equals(expected) {
t.Errorf("fieldVal.AddInt #%d wrong result\n"+
"got: %v\nwant: %v", i, result, expected)
continue
}
}
}
// TestAdd ensures that adding two field values together via Add works as
// expected.
func TestAdd(t *testing.T) {
tests := []struct {
in1 string // first hex encoded value
in2 string // second hex encoded value to add
expected string // expected hex encoded value
}{
{"0", "1", "1"},
{"1", "0", "1"},
{"1", "1", "2"},
// secp256k1 prime-1 + 1
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e", "1", "0"},
// secp256k1 prime + 1
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", "1", "1"},
// Random samples.
{
"2b2012f975404e5065b4292fb8bed0a5d315eacf24c74d8b27e73bcc5430edcc",
"2c3cefa4e4753e8aeec6ac4c12d99da4d78accefda3b7885d4c6bab46c86db92",
"575d029e59b58cdb547ad57bcb986e4aaaa0b7beff02c610fcadf680c0b7c95e",
},
{
"8131e8722fe59bb189692b96c9f38de92885730f1dd39ab025daffb94c97f79c",
"ff5454b765f0aab5f0977dcc629becc84cabeb9def48e79c6aadb2622c490fa9",
"80863d2995d646677a00a9632c8f7ab175315ead0d1c824c9088b21c78e10b16",
},
{
"c7c95e93d0892b2b2cdd77e80eb646ea61be7a30ac7e097e9f843af73fad5c22",
"3afe6f91a74dfc1c7f15c34907ee981656c37236d946767dd53ccad9190e437c",
"02c7ce2577d72747abf33b3116a4df00b881ec6785c47ffc74c105d158bba36f",
},
{
"fd1c26f6a23381e5d785ba889494ec059369b888ad8431cd67d8c934b580dbe1",
"a475aa5a31dcca90ef5b53c097d9133d6b7117474b41e7877bb199590fc0489c",
"a191d150d4104c76c6e10e492c6dff42fedacfcff8c61954e38a628ec541284e",
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
f := new(fieldVal).SetHex(test.in1).Normalize()
f2 := new(fieldVal).SetHex(test.in2).Normalize()
expected := new(fieldVal).SetHex(test.expected).Normalize()
result := f.Add(f2).Normalize()
if !result.Equals(expected) {
t.Errorf("fieldVal.Add #%d wrong result\n"+
"got: %v\nwant: %v", i, result, expected)
continue
}
}
}
// TestAdd2 ensures that adding two field values together via Add2 works as
// expected.
func TestAdd2(t *testing.T) {
tests := []struct {
in1 string // first hex encoded value
in2 string // second hex encoded value to add
expected string // expected hex encoded value
}{
{"0", "1", "1"},
{"1", "0", "1"},
{"1", "1", "2"},
// secp256k1 prime-1 + 1
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e", "1", "0"},
// secp256k1 prime + 1
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", "1", "1"},
// close but over the secp256k1 prime
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffff000000000", "f1ffff000", "1ffff3d1"},
// Random samples.
{
"ad82b8d1cc136e23e9fd77fe2c7db1fe5a2ecbfcbde59ab3529758334f862d28",
"4d6a4e95d6d61f4f46b528bebe152d408fd741157a28f415639347a84f6f574b",
"faed0767a2e98d7330b2a0bcea92df3eea060d12380e8ec8b62a9fdb9ef58473",
},
{
"f3f43a2540054a86e1df98547ec1c0e157b193e5350fb4a3c3ea214b228ac5e7",
"25706572592690ea3ddc951a1b48b504a4c83dc253756e1b96d56fdfb3199522",
"19649f97992bdb711fbc2d6e9a0a75e5fc79d1a7888522bf5abf912bd5a45eda",
},
{
"6915bb94eef13ff1bb9b2633d997e13b9b1157c713363cc0e891416d6734f5b8",
"11f90d6ac6fe1c4e8900b1c85fb575c251ec31b9bc34b35ada0aea1c21eded22",
"7b0ec8ffb5ef5c40449bd7fc394d56fdecfd8980cf6af01bc29c2b898922e2da",
},
{
"48b0c9eae622eed9335b747968544eb3e75cb2dc8128388f948aa30f88cabde4",
"0989882b52f85f9d524a3a3061a0e01f46d597839d2ba637320f4b9510c8d2d5",
"523a5216391b4e7685a5aea9c9f52ed32e324a601e53dec6c699eea4999390b9",
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
f := new(fieldVal).SetHex(test.in1).Normalize()
f2 := new(fieldVal).SetHex(test.in2).Normalize()
expected := new(fieldVal).SetHex(test.expected).Normalize()
result := f.Add2(f, f2).Normalize()
if !result.Equals(expected) {
t.Errorf("fieldVal.Add2 #%d wrong result\n"+
"got: %v\nwant: %v", i, result, expected)
continue
}
}
}
// TestMulInt ensures that multiplying field values by an integer via MulInt
// works as expected.
func TestMulInt(t *testing.T) {
tests := []struct {
in1 string // hex encoded value
in2 uint // unsigned integer to multiply with value above
expected string // expected hex encoded value
}{
{"0", 0, "0"},
{"1", 0, "0"},
{"0", 1, "0"},
{"1", 1, "1"},
// secp256k1 prime-1 * 2
{
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e",
2,
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2d",
},
// secp256k1 prime * 3
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", 3, "0"},
// secp256k1 prime-1 * 8
{
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e",
8,
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc27",
},
// Random samples for first value. The second value is limited
// to 8 since that is the maximum int used in the elliptic curve
// calculations.
{
"b75674dc9180d306c692163ac5e089f7cef166af99645c0c23568ab6d967288a",
6,
"4c06bd2b6904f228a76c8560a3433bced9a8681d985a2848d407404d186b0280",
},
{
"54873298ac2b5ba8591c125ae54931f5ea72040aee07b208d6135476fb5b9c0e",
3,
"fd9597ca048212f90b543710afdb95e1bf560c20ca17161a8239fd64f212d42a",
},
{
"7c30fbd363a74c17e1198f56b090b59bbb6c8755a74927a6cba7a54843506401",
5,
"6cf4eb20f2447c77657fccb172d38c0aa91ea4ac446dc641fa463a6b5091fba7",
},
{
"fb4529be3e027a3d1587d8a500b72f2d312e3577340ef5175f96d113be4c2ceb",
8,
"da294df1f013d1e8ac3ec52805b979698971abb9a077a8bafcb688a4f261820f",
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
f := new(fieldVal).SetHex(test.in1).Normalize()
expected := new(fieldVal).SetHex(test.expected).Normalize()
result := f.MulInt(test.in2).Normalize()
if !result.Equals(expected) {
t.Errorf("fieldVal.MulInt #%d wrong result\n"+
"got: %v\nwant: %v", i, result, expected)
continue
}
}
}
// TestMul ensures that multiplying two field values via Mul works as expected.
func TestMul(t *testing.T) {
tests := []struct {
in1 string // first hex encoded value
in2 string // second hex encoded value to multiply with
expected string // expected hex encoded value
}{
{"0", "0", "0"},
{"1", "0", "0"},
{"0", "1", "0"},
{"1", "1", "1"},
// slightly over prime
{
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffff1ffff",
"1000",
"1ffff3d1",
},
// secp256k1 prime-1 * 2
{
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e",
"2",
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2d",
},
// secp256k1 prime * 3
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", "3", "0"},
// secp256k1 prime-1 * 8
{
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e",
"8",
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc27",
},
// Random samples.
{
"cfb81753d5ef499a98ecc04c62cb7768c2e4f1740032946db1c12e405248137e",
"58f355ad27b4d75fb7db0442452e732c436c1f7c5a7c4e214fa9cc031426a7d3",
"1018cd2d7c2535235b71e18db9cd98027386328d2fa6a14b36ec663c4c87282b",
},
{
"26e9d61d1cdf3920e9928e85fa3df3e7556ef9ab1d14ec56d8b4fc8ed37235bf",
"2dfc4bbe537afee979c644f8c97b31e58be5296d6dbc460091eae630c98511cf",
"da85f48da2dc371e223a1ae63bd30b7e7ee45ae9b189ac43ff357e9ef8cf107a",
},
{
"5db64ed5afb71646c8b231585d5b2bf7e628590154e0854c4c29920b999ff351",
"279cfae5eea5d09ade8e6a7409182f9de40981bc31c84c3d3dfe1d933f152e9a",
"2c78fbae91792dd0b157abe3054920049b1879a7cc9d98cfda927d83be411b37",
},
{
"b66dfc1f96820b07d2bdbd559c19319a3a73c97ceb7b3d662f4fe75ecb6819e6",
"bf774aba43e3e49eb63a6e18037d1118152568f1a3ac4ec8b89aeb6ff8008ae1",
"c4f016558ca8e950c21c3f7fc15f640293a979c7b01754ee7f8b3340d4902ebb",
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
f := new(fieldVal).SetHex(test.in1).Normalize()
f2 := new(fieldVal).SetHex(test.in2).Normalize()
expected := new(fieldVal).SetHex(test.expected).Normalize()
result := f.Mul(f2).Normalize()
if !result.Equals(expected) {
t.Errorf("fieldVal.Mul #%d wrong result\n"+
"got: %v\nwant: %v", i, result, expected)
continue
}
}
}
// TestSquare ensures that squaring field values via Square works as expected.
func TestSquare(t *testing.T) {
tests := []struct {
in string // hex encoded value
expected string // expected hex encoded value
}{
// secp256k1 prime (aka 0)
{"0", "0"},
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", "0"},
{"0", "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f"},
// secp256k1 prime-1
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e", "1"},
// secp256k1 prime-2
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2d", "4"},
// Random sampling
{
"b0ba920360ea8436a216128047aab9766d8faf468895eb5090fc8241ec758896",
"133896b0b69fda8ce9f648b9a3af38f345290c9eea3cbd35bafcadf7c34653d3",
},
{
"c55d0d730b1d0285a1599995938b042a756e6e8857d390165ffab480af61cbd5",
"cd81758b3f5877cbe7e5b0a10cebfa73bcbf0957ca6453e63ee8954ab7780bee",
},
{
"e89c1f9a70d93651a1ba4bca5b78658f00de65a66014a25544d3365b0ab82324",
"39ffc7a43e5dbef78fd5d0354fb82c6d34f5a08735e34df29da14665b43aa1f",
},
{
"7dc26186079d22bcbe1614aa20ae627e62d72f9be7ad1e99cac0feb438956f05",
"bf86bcfc4edb3d81f916853adfda80c07c57745b008b60f560b1912f95bce8ae",
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
f := new(fieldVal).SetHex(test.in).Normalize()
expected := new(fieldVal).SetHex(test.expected).Normalize()
result := f.Square().Normalize()
if !result.Equals(expected) {
t.Errorf("fieldVal.Square #%d wrong result\n"+
"got: %v\nwant: %v", i, result, expected)
continue
}
}
}
// TestInverse ensures that finding the multiplicative inverse via Inverse works
// as expected.
func TestInverse(t *testing.T) {
tests := []struct {
in string // hex encoded value
expected string // expected hex encoded value
}{
// secp256k1 prime (aka 0)
{"0", "0"},
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", "0"},
{"0", "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f"},
// secp256k1 prime-1
{
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e",
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e",
},
// secp256k1 prime-2
{
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2d",
"7fffffffffffffffffffffffffffffffffffffffffffffffffffffff7ffffe17",
},
// Random sampling
{
"16fb970147a9acc73654d4be233cc48b875ce20a2122d24f073d29bd28805aca",
"987aeb257b063df0c6d1334051c47092b6d8766c4bf10c463786d93f5bc54354",
},
{
"69d1323ce9f1f7b3bd3c7320b0d6311408e30281e273e39a0d8c7ee1c8257919",
"49340981fa9b8d3dad72de470b34f547ed9179c3953797d0943af67806f4bb6",
},
{
"e0debf988ae098ecda07d0b57713e97c6d213db19753e8c95aa12a2fc1cc5272",
"64f58077b68af5b656b413ea366863f7b2819f8d27375d9c4d9804135ca220c2",
},
{
"dcd394f91f74c2ba16aad74a22bb0ed47fe857774b8f2d6c09e28bfb14642878",
"fb848ec64d0be572a63c38fe83df5e7f3d032f60bf8c969ef67d36bf4ada22a9",
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
f := new(fieldVal).SetHex(test.in).Normalize()
expected := new(fieldVal).SetHex(test.expected).Normalize()
result := f.Inverse().Normalize()
if !result.Equals(expected) {
t.Errorf("fieldVal.Inverse #%d wrong result\n"+
"got: %v\nwant: %v", i, result, expected)
continue
}
}
}
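The inverse under test is the multiplicative inverse in GF(p). Since p is prime, Fermat's little theorem gives, for any a \not\equiv 0:

a^{-1} \equiv a^{p-2} \pmod{p}

(with 0 conventionally mapping to 0, matching the first three rows). As a spot check of the prime-2 row: p - 2 \equiv -2 \pmod{p}, and (-2)^{-1} \equiv -(p+1)/2 \equiv (p-1)/2 \pmod{p}, which is exactly the 7fffffff...7ffffe17 value expected above.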


@@ -1,63 +0,0 @@
// Copyright 2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
// This file is ignored during the regular build due to the following build tag.
// It is called by go generate and used to automatically generate pre-computed
// tables used to accelerate operations.
// +build ignore
package main
import (
"bytes"
"compress/zlib"
"encoding/base64"
"fmt"
"log"
"os"
"github.com/kaspanet/kaspad/btcec"
)
func main() {
fi, err := os.Create("secp256k1.go")
if err != nil {
log.Fatal(err)
}
defer fi.Close()
// Compress the serialized byte points.
serialized := btcec.S256().SerializedBytePoints()
var compressed bytes.Buffer
w := zlib.NewWriter(&compressed)
if _, err := w.Write(serialized); err != nil {
fmt.Println(err)
os.Exit(1)
}
w.Close()
// Encode the compressed byte points with base64.
encoded := make([]byte, base64.StdEncoding.EncodedLen(compressed.Len()))
base64.StdEncoding.Encode(encoded, compressed.Bytes())
fmt.Fprintln(fi, "// Copyright (c) 2015 The btcsuite developers")
fmt.Fprintln(fi, "// Use of this source code is governed by an ISC")
fmt.Fprintln(fi, "// license that can be found in the LICENSE file.")
fmt.Fprintln(fi)
fmt.Fprintln(fi, "package btcec")
fmt.Fprintln(fi)
fmt.Fprintln(fi, "// Auto-generated file (see genprecomps.go)")
fmt.Fprintln(fi, "// DO NOT EDIT")
fmt.Fprintln(fi)
fmt.Fprintf(fi, "var secp256k1BytePoints = %q\n", string(encoded))
a1, b1, a2, b2 := btcec.S256().EndomorphismVectors()
fmt.Println("The following values are the computed linearly " +
"independent vectors needed to make use of the secp256k1 " +
"endomorphism:")
fmt.Printf("a1: %x\n", a1)
fmt.Printf("b1: %x\n", b1)
fmt.Printf("a2: %x\n", a2)
fmt.Printf("b2: %x\n", b2)
}


@@ -1,203 +0,0 @@
// Copyright (c) 2014-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
// This file is ignored during the regular build due to the following build tag.
// This build tag is set during go generate.
// +build gensecp256k1
package btcec
// References:
// [GECC]: Guide to Elliptic Curve Cryptography (Hankerson, Menezes, Vanstone)
import (
"encoding/binary"
"math/big"
)
// secp256k1BytePoints are dummy points used so the code which generates the
// real values can compile.
var secp256k1BytePoints = ""
// getDoublingPoints returns all the possible G^(2^i) for i in
// 0..n-1, where n is the curve's bit size (256 in the case of secp256k1);
// the coordinates are recorded as Jacobian coordinates.
func (curve *KoblitzCurve) getDoublingPoints() [][3]fieldVal {
doublingPoints := make([][3]fieldVal, curve.BitSize)
// initialize px, py, pz to the Jacobian coordinates for the base point
px, py := curve.bigAffineToField(curve.Gx, curve.Gy)
pz := new(fieldVal).SetInt(1)
for i := 0; i < curve.BitSize; i++ {
doublingPoints[i] = [3]fieldVal{*px, *py, *pz}
// P = 2*P
curve.doubleJacobian(px, py, pz, px, py, pz)
}
return doublingPoints
}
// SerializedBytePoints returns a serialized byte slice which contains all of
// the possible points per 8-bit window. This is used when generating
// secp256k1.go.
func (curve *KoblitzCurve) SerializedBytePoints() []byte {
doublingPoints := curve.getDoublingPoints()
// Segregate the bits into byte-sized windows
serialized := make([]byte, curve.byteSize*256*3*10*4)
offset := 0
for byteNum := 0; byteNum < curve.byteSize; byteNum++ {
// Grab the 8 bits that make up this byte from doublingPoints.
startingBit := 8 * (curve.byteSize - byteNum - 1)
computingPoints := doublingPoints[startingBit : startingBit+8]
// Compute all points in this window and serialize them.
for i := 0; i < 256; i++ {
px, py, pz := new(fieldVal), new(fieldVal), new(fieldVal)
for j := 0; j < 8; j++ {
if i>>uint(j)&1 == 1 {
curve.addJacobian(px, py, pz, &computingPoints[j][0],
&computingPoints[j][1], &computingPoints[j][2], px, py, pz)
}
}
for i := 0; i < 10; i++ {
binary.LittleEndian.PutUint32(serialized[offset:], px.n[i])
offset += 4
}
for i := 0; i < 10; i++ {
binary.LittleEndian.PutUint32(serialized[offset:], py.n[i])
offset += 4
}
for i := 0; i < 10; i++ {
binary.LittleEndian.PutUint32(serialized[offset:], pz.n[i])
offset += 4
}
}
}
return serialized
}
// sqrt returns the square root of the provided big integer using Newton's
// method. It's only compiled and used during generation of pre-computed
// values, so speed is not a huge concern.
func sqrt(n *big.Int) *big.Int {
// Initial guess = 2^(log_2(n)/2)
guess := big.NewInt(2)
guess.Exp(guess, big.NewInt(int64(n.BitLen()/2)), nil)
// Now refine using Newton's method.
big2 := big.NewInt(2)
prevGuess := big.NewInt(0)
for {
prevGuess.Set(guess)
guess.Add(guess, new(big.Int).Div(n, guess))
guess.Div(guess, big2)
if guess.Cmp(prevGuess) == 0 {
break
}
}
return guess
}
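A quick sanity check of the iteration above, under the same gensecp256k1 build tag so the unexported sqrt is visible (a perfect square is the clean case, since Newton's iteration reaches the exact root and then repeats it, triggering the equality exit):
func checkSqrt() bool {
	// 10^20 is a perfect square, so sqrt should return exactly 10^10.
	n := new(big.Int).Exp(big.NewInt(10), big.NewInt(20), nil)
	g := sqrt(n)
	return new(big.Int).Mul(g, g).Cmp(n) == 0
}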
// EndomorphismVectors runs the first 3 steps of algorithm 3.74 from [GECC] to
// generate the linearly independent vectors needed to generate a balanced
// length-two representation of a multiplier such that k = k1 + k2λ (mod N) and
// returns them. Since the values will always be the same given the fact that N
// and λ are fixed, the final results can be accelerated by storing the
// precomputed values with the curve.
func (curve *KoblitzCurve) EndomorphismVectors() (a1, b1, a2, b2 *big.Int) {
bigMinus1 := big.NewInt(-1)
// This section uses an extended Euclidean algorithm to generate a
// sequence of equations:
// s[i] * N + t[i] * λ = r[i]
nSqrt := sqrt(curve.N)
u, v := new(big.Int).Set(curve.N), new(big.Int).Set(curve.lambda)
x1, y1 := big.NewInt(1), big.NewInt(0)
x2, y2 := big.NewInt(0), big.NewInt(1)
q, r := new(big.Int), new(big.Int)
qu, qx1, qy1 := new(big.Int), new(big.Int), new(big.Int)
s, t := new(big.Int), new(big.Int)
ri, ti := new(big.Int), new(big.Int)
a1, b1, a2, b2 = new(big.Int), new(big.Int), new(big.Int), new(big.Int)
found, oneMore := false, false
for u.Sign() != 0 {
// q = v/u
q.Div(v, u)
// r = v - q*u
qu.Mul(q, u)
r.Sub(v, qu)
// s = x2 - q*x1
qx1.Mul(q, x1)
s.Sub(x2, qx1)
// t = y2 - q*y1
qy1.Mul(q, y1)
t.Sub(y2, qy1)
// v = u, u = r, x2 = x1, x1 = s, y2 = y1, y1 = t
v.Set(u)
u.Set(r)
x2.Set(x1)
x1.Set(s)
y2.Set(y1)
y1.Set(t)
// As soon as the remainder is less than the sqrt of n, the
// values of a1 and b1 are known.
if !found && r.Cmp(nSqrt) < 0 {
// When this condition executes ri and ti represent the
// r[i] and t[i] values such that i is the greatest
// index for which r >= sqrt(n). Meanwhile, the current
// r and t values are r[i+1] and t[i+1], respectively.
// a1 = r[i+1], b1 = -t[i+1]
a1.Set(r)
b1.Mul(t, bigMinus1)
found = true
oneMore = true
// Skip to the next iteration so ri and ti are not
// modified.
continue
} else if oneMore {
// When this condition executes ri and ti still
// represent the r[i] and t[i] values while the current
// r and t are r[i+2] and t[i+2], respectively.
// sum1 = r[i]^2 + t[i]^2
rSquared := new(big.Int).Mul(ri, ri)
tSquared := new(big.Int).Mul(ti, ti)
sum1 := new(big.Int).Add(rSquared, tSquared)
// sum2 = r[i+2]^2 + t[i+2]^2
r2Squared := new(big.Int).Mul(r, r)
t2Squared := new(big.Int).Mul(t, t)
sum2 := new(big.Int).Add(r2Squared, t2Squared)
// if (r[i]^2 + t[i]^2) <= (r[i+2]^2 + t[i+2]^2)
if sum1.Cmp(sum2) <= 0 {
// a2 = r[i], b2 = -t[i]
a2.Set(ri)
b2.Mul(ti, bigMinus1)
} else {
// a2 = r[i+2], b2 = -t[i+2]
a2.Set(r)
b2.Mul(t, bigMinus1)
}
// All done.
break
}
ri.Set(r)
ti.Set(t)
}
return a1, b1, a2, b2
}
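These vectors feed the standard step 4 of algorithm 3.74 in [GECC], which is not part of this file: given a multiplier k, compute

c_1 = \lfloor b_2 k / N \rceil, \qquad c_2 = \lfloor -b_1 k / N \rceil
k_1 = k - c_1 a_1 - c_2 a_2, \qquad k_2 = -c_1 b_1 - c_2 b_2

so that k \equiv k_1 + k_2 \lambda \pmod{N} with |k_1|, |k_2| \approx \sqrt{N}. A scalar multiplication kP can then be computed as k_1 P + k_2 \phi(P) using two half-length scalars, where \phi is the cheap secp256k1 endomorphism.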


@@ -1,67 +0,0 @@
// Copyright 2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package btcec
import (
"compress/zlib"
"encoding/base64"
"encoding/binary"
"io/ioutil"
"strings"
)
//go:generate go run -tags gensecp256k1 genprecomps.go
// loadS256BytePoints decompresses and deserializes the pre-computed byte points
// used to accelerate scalar base multiplication for the secp256k1 curve. This
// approach is used since it allows compilation to use significantly less RAM
// and complete much faster than hard-coding the final in-memory data structure
// would. At the same time, it is quite fast to generate the in-memory data
// structure at init time with this approach versus computing the table.
func loadS256BytePoints() error {
// There will be no byte points to load when generating them.
bp := secp256k1BytePoints
if len(bp) == 0 {
return nil
}
// Decompress the pre-computed table used to accelerate scalar base
// multiplication.
decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(bp))
r, err := zlib.NewReader(decoder)
if err != nil {
return err
}
serialized, err := ioutil.ReadAll(r)
if err != nil {
return err
}
// Deserialize the precomputed byte points and set the curve to them.
offset := 0
var bytePoints [32][256][3]fieldVal
for byteNum := 0; byteNum < 32; byteNum++ {
// All points in this window.
for i := 0; i < 256; i++ {
px := &bytePoints[byteNum][i][0]
py := &bytePoints[byteNum][i][1]
pz := &bytePoints[byteNum][i][2]
for i := 0; i < 10; i++ {
px.n[i] = binary.LittleEndian.Uint32(serialized[offset:])
offset += 4
}
for i := 0; i < 10; i++ {
py.n[i] = binary.LittleEndian.Uint32(serialized[offset:])
offset += 4
}
for i := 0; i < 10; i++ {
pz.n[i] = binary.LittleEndian.Uint32(serialized[offset:])
offset += 4
}
}
}
secp256k1.bytePoints = &bytePoints
return nil
}
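Once loaded, the table turns scalar base multiplication into 32 lookups and point additions, one per byte of the scalar, most significant byte first, matching the window order used by the SerializedBytePoints generator above. A minimal sketch, assuming package-internal access to the bytePoints field and the addJacobian helper:
// scalarBaseSketch accumulates the precomputed window point for each
// byte of a 32-byte big-endian scalar, yielding k*G in Jacobian form.
func scalarBaseSketch(curve *KoblitzCurve, k [32]byte) (x, y, z *fieldVal) {
	table := curve.bytePoints // *[32][256][3]fieldVal, loaded above
	x, y, z = new(fieldVal), new(fieldVal), new(fieldVal)
	for i, b := range k {
		// Window i holds all 256 precomputed multiples for scalar byte i.
		p := table[i][b]
		curve.addJacobian(x, y, z, &p[0], &p[1], &p[2], x, y, z)
	}
	return x, y, z
}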


@@ -1,72 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package btcec
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"math/big"
)
// PrivateKey wraps an ecdsa.PrivateKey as a convenience mainly for signing
// things with the private key without having to directly import the ecdsa
// package.
type PrivateKey ecdsa.PrivateKey
// PrivKeyFromBytes returns a private and public key for `curve' based on the
// private key passed as an argument as a byte slice.
func PrivKeyFromBytes(curve elliptic.Curve, pk []byte) (*PrivateKey,
*PublicKey) {
x, y := curve.ScalarBaseMult(pk)
priv := &ecdsa.PrivateKey{
PublicKey: ecdsa.PublicKey{
Curve: curve,
X: x,
Y: y,
},
D: new(big.Int).SetBytes(pk),
}
return (*PrivateKey)(priv), (*PublicKey)(&priv.PublicKey)
}
// NewPrivateKey is a wrapper for ecdsa.GenerateKey that returns a PrivateKey
// instead of the normal ecdsa.PrivateKey.
func NewPrivateKey(curve elliptic.Curve) (*PrivateKey, error) {
key, err := ecdsa.GenerateKey(curve, rand.Reader)
if err != nil {
return nil, err
}
return (*PrivateKey)(key), nil
}
// PubKey returns the PublicKey corresponding to this private key.
func (p *PrivateKey) PubKey() *PublicKey {
return (*PublicKey)(&p.PublicKey)
}
// ToECDSA returns the private key as a *ecdsa.PrivateKey.
func (p *PrivateKey) ToECDSA() *ecdsa.PrivateKey {
return (*ecdsa.PrivateKey)(p)
}
// Sign generates a Schnorr signature for the provided hash (which should be
// the result of hashing a larger message) using the private key. The produced
// signature is deterministic (same message and same key yield the same signature).
func (p *PrivateKey) Sign(hash []byte) (*Signature, error) {
return sign(p, hash)
}
// PrivKeyBytesLen defines the length in bytes of a serialized private key.
const PrivKeyBytesLen = 32
// Serialize returns the private key number d as a big-endian binary-encoded
// number, padded to a length of 32 bytes.
func (p *PrivateKey) Serialize() []byte {
b := make([]byte, 0, PrivKeyBytesLen)
return paddedAppend(PrivKeyBytesLen, b, p.ToECDSA().D.Bytes())
}
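A minimal usage sketch of this API (assumes an additional "crypto/sha256" import; the message name is illustrative):
func exampleSignVerify() (bool, error) {
	priv, err := NewPrivateKey(S256())
	if err != nil {
		return false, err
	}
	// Sign the SHA-256 hash of the message, per the Sign contract above.
	hash := sha256.Sum256([]byte("example message"))
	sig, err := priv.Sign(hash[:])
	if err != nil {
		return false, err
	}
	// Verification uses the corresponding public key.
	return sig.Verify(hash[:], priv.PubKey()), nil
}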


@@ -1,55 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package btcec
import (
"bytes"
"testing"
)
func TestPrivKeys(t *testing.T) {
tests := []struct {
name string
key []byte
}{
{
name: "check curve",
key: []byte{
0xea, 0xf0, 0x2c, 0xa3, 0x48, 0xc5, 0x24, 0xe6,
0x39, 0x26, 0x55, 0xba, 0x4d, 0x29, 0x60, 0x3c,
0xd1, 0xa7, 0x34, 0x7d, 0x9d, 0x65, 0xcf, 0xe9,
0x3c, 0xe1, 0xeb, 0xff, 0xdc, 0xa2, 0x26, 0x94,
},
},
}
for _, test := range tests {
priv, pub := PrivKeyFromBytes(S256(), test.key)
_, err := ParsePubKey(pub.SerializeUncompressed(), S256())
if err != nil {
t.Errorf("%s privkey: %v", test.name, err)
continue
}
hash := []byte{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9}
sig, err := priv.Sign(hash)
if err != nil {
t.Errorf("%s could not sign: %v", test.name, err)
continue
}
if !sig.Verify(hash, pub) {
t.Errorf("%s could not verify: %v", test.name, err)
continue
}
serializedKey := priv.Serialize()
if !bytes.Equal(serializedKey, test.key) {
t.Errorf("%s unexpected serialized bytes - got: %x, "+
"want: %x", test.name, serializedKey, test.key)
}
}
}


@@ -1,189 +0,0 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package btcec
import (
"crypto/ecdsa"
"github.com/pkg/errors"
"math/big"
)
// These constants define the lengths of serialized public keys.
const (
PubKeyBytesLenCompressed = 33
PubKeyBytesLenUncompressed = 65
PubKeyBytesLenHybrid = 65
)
func isOdd(a *big.Int) bool {
return a.Bit(0) == 1
}
// decompressPoint decompresses a point on the given curve given the x
// coordinate and which of the two possible solutions (the y parity) to use.
func decompressPoint(curve *KoblitzCurve, x *big.Int, ybit bool) (*big.Int, error) {
// TODO: This will probably only work for secp256k1 due to
// optimizations.
// Y = +-sqrt(x^3 + B)
x3 := new(big.Int).Mul(x, x)
x3.Mul(x3, x)
x3.Add(x3, curve.Params().B)
x3.Mod(x3, curve.Params().P)
// Now calculate sqrt mod p of x^3 + B
// This code used to do a full sqrt based on tonelli/shanks,
// but this was replaced by the algorithms referenced in
// https://bitcointalk.org/index.php?topic=162805.msg1712294#msg1712294
y := new(big.Int).Exp(x3, curve.QPlus1Div4(), curve.Params().P)
// Check that y is a square root of x^3 + B.
y2 := new(big.Int).Mul(y, y)
y2.Mod(y2, curve.Params().P)
if y2.Cmp(x3) != 0 {
return nil, errors.Errorf("invalid square root")
}
// Verify that y-coord has expected parity.
if ybit != isOdd(y) {
y.Sub(curve.Params().P, y)
}
if ybit != isOdd(y) {
return nil, errors.Errorf("ybit doesn't match oddness")
}
return y, nil
}
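The exponent (p+1)/4 works because the secp256k1 prime satisfies p \equiv 3 \pmod{4}. When w = x^3 + B is a quadratic residue, Euler's criterion gives w^{(p-1)/2} \equiv 1 \pmod{p}, so

y = w^{(p+1)/4} \implies y^2 = w^{(p+1)/2} = w \cdot w^{(p-1)/2} = w

When w is a non-residue, y^2 = -w instead, which is exactly what the explicit y^2 == x^3 + B check above rejects.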
const (
pubkeyCompressed byte = 0x2 // y_bit + x coord
pubkeyUncompressed byte = 0x4 // x coord + y coord
pubkeyHybrid byte = 0x6 // y_bit + x coord + y coord
)
// IsCompressedPubKey returns true if the passed serialized public key has
// been encoded in compressed format, and false otherwise.
func IsCompressedPubKey(pubKey []byte) bool {
// The public key is only compressed if it is the correct length and
// the format (first byte) is one of the compressed pubkey values.
return len(pubKey) == PubKeyBytesLenCompressed &&
(pubKey[0]&^byte(0x1) == pubkeyCompressed)
}
// ParsePubKey parses a public key for a koblitz curve from a bytestring into
// an ecdsa.PublicKey, verifying that it is valid. It supports compressed,
// uncompressed and hybrid key formats.
func ParsePubKey(pubKeyStr []byte, curve *KoblitzCurve) (key *PublicKey, err error) {
pubkey := PublicKey{}
pubkey.Curve = curve
if len(pubKeyStr) == 0 {
return nil, errors.New("pubkey string is empty")
}
format := pubKeyStr[0]
ybit := (format & 0x1) == 0x1
format &= ^byte(0x1)
switch len(pubKeyStr) {
case PubKeyBytesLenUncompressed:
if format != pubkeyUncompressed && format != pubkeyHybrid {
return nil, errors.Errorf("invalid magic in pubkey str: "+
"%d", pubKeyStr[0])
}
pubkey.X = new(big.Int).SetBytes(pubKeyStr[1:33])
pubkey.Y = new(big.Int).SetBytes(pubKeyStr[33:])
// hybrid keys have extra information, make use of it.
if format == pubkeyHybrid && ybit != isOdd(pubkey.Y) {
return nil, errors.Errorf("ybit doesn't match oddness")
}
case PubKeyBytesLenCompressed:
// format is 0x2 | solution, <X coordinate>
// solution determines which solution of the curve we use.
// y^2 = x^3 + Curve.B
if format != pubkeyCompressed {
return nil, errors.Errorf("invalid magic in compressed "+
"pubkey string: %d", pubKeyStr[0])
}
pubkey.X = new(big.Int).SetBytes(pubKeyStr[1:33])
pubkey.Y, err = decompressPoint(curve, pubkey.X, ybit)
if err != nil {
return nil, err
}
default: // wrong!
return nil, errors.Errorf("invalid pub key length %d",
len(pubKeyStr))
}
if pubkey.X.Cmp(pubkey.Curve.Params().P) >= 0 {
return nil, errors.Errorf("pubkey X parameter is >= to P")
}
if pubkey.Y.Cmp(pubkey.Curve.Params().P) >= 0 {
return nil, errors.Errorf("pubkey Y parameter is >= to P")
}
if !pubkey.Curve.IsOnCurve(pubkey.X, pubkey.Y) {
return nil, errors.Errorf("pubkey isn't on secp256k1 curve")
}
return &pubkey, nil
}
// PublicKey is an ecdsa.PublicKey with additional functions to
// serialize in uncompressed, compressed, and hybrid formats.
type PublicKey ecdsa.PublicKey
// ToECDSA returns the public key as a *ecdsa.PublicKey.
func (p *PublicKey) ToECDSA() *ecdsa.PublicKey {
return (*ecdsa.PublicKey)(p)
}
// SerializeUncompressed serializes a public key in a 65-byte uncompressed
// format.
func (p *PublicKey) SerializeUncompressed() []byte {
b := make([]byte, 0, PubKeyBytesLenUncompressed)
b = append(b, pubkeyUncompressed)
b = paddedAppend(32, b, p.X.Bytes())
return paddedAppend(32, b, p.Y.Bytes())
}
// SerializeCompressed serializes a public key in a 33-byte compressed format.
func (p *PublicKey) SerializeCompressed() []byte {
b := make([]byte, 0, PubKeyBytesLenCompressed)
format := pubkeyCompressed
if isOdd(p.Y) {
format |= 0x1
}
b = append(b, format)
return paddedAppend(32, b, p.X.Bytes())
}
// SerializeHybrid serializes a public key in a 65-byte hybrid format.
func (p *PublicKey) SerializeHybrid() []byte {
b := make([]byte, 0, PubKeyBytesLenHybrid)
format := pubkeyHybrid
if isOdd(p.Y) {
format |= 0x1
}
b = append(b, format)
b = paddedAppend(32, b, p.X.Bytes())
return paddedAppend(32, b, p.Y.Bytes())
}
// IsEqual compares this PublicKey instance to the one passed, returning true if
// both PublicKeys are equivalent. A PublicKey is equivalent to another, if they
// both have the same X and Y coordinate.
func (p *PublicKey) IsEqual(otherPubKey *PublicKey) bool {
return p.X.Cmp(otherPubKey.X) == 0 &&
p.Y.Cmp(otherPubKey.Y) == 0
}
// paddedAppend appends the src byte slice to dst, returning the new slice.
// If the length of the source is smaller than the passed size, leading zero
// bytes are appended to the dst slice before appending src.
func paddedAppend(size uint, dst, src []byte) []byte {
for i := 0; i < int(size)-len(src); i++ {
dst = append(dst, 0)
}
return append(dst, src...)
}


@@ -1,296 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package btcec
import (
"bytes"
"testing"
"github.com/davecgh/go-spew/spew"
)
type pubKeyTest struct {
name string
key []byte
format byte
isValid bool
}
var pubKeyTests = []pubKeyTest{
// pubkey from bitcoin blockchain tx
// 0437cd7f8525ceed2324359c2d0ba26006d92d85
{
name: "uncompressed ok",
key: []byte{0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0,
0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64,
0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9,
0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56,
0xb4, 0x12, 0xa3,
},
isValid: true,
format: pubkeyUncompressed,
},
{
name: "uncompressed x changed",
key: []byte{0x04, 0x15, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0,
0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64,
0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9,
0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56,
0xb4, 0x12, 0xa3,
},
isValid: false,
},
{
name: "uncompressed y changed",
key: []byte{0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0,
0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64,
0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9,
0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56,
0xb4, 0x12, 0xa4,
},
isValid: false,
},
{
name: "uncompressed claims compressed",
key: []byte{0x03, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0,
0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64,
0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9,
0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56,
0xb4, 0x12, 0xa3,
},
isValid: false,
},
{
name: "uncompressed as hybrid ok",
key: []byte{0x07, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0,
0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64,
0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9,
0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56,
0xb4, 0x12, 0xa3,
},
isValid: true,
format: pubkeyHybrid,
},
{
name: "uncompressed as hybrid wrong",
key: []byte{0x06, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0,
0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64,
0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9,
0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56,
0xb4, 0x12, 0xa3,
},
isValid: false,
},
// from tx 0b09c51c51ff762f00fb26217269d2a18e77a4fa87d69b3c363ab4df16543f20
{
name: "compressed ok (ybit = 0)",
key: []byte{0x02, 0xce, 0x0b, 0x14, 0xfb, 0x84, 0x2b, 0x1b,
0xa5, 0x49, 0xfd, 0xd6, 0x75, 0xc9, 0x80, 0x75, 0xf1,
0x2e, 0x9c, 0x51, 0x0f, 0x8e, 0xf5, 0x2b, 0xd0, 0x21,
0xa9, 0xa1, 0xf4, 0x80, 0x9d, 0x3b, 0x4d,
},
isValid: true,
format: pubkeyCompressed,
},
// from tx fdeb8e72524e8dab0da507ddbaf5f88fe4a933eb10a66bc4745bb0aa11ea393c
{
name: "compressed ok (ybit = 1)",
key: []byte{0x03, 0x26, 0x89, 0xc7, 0xc2, 0xda, 0xb1, 0x33,
0x09, 0xfb, 0x14, 0x3e, 0x0e, 0x8f, 0xe3, 0x96, 0x34,
0x25, 0x21, 0x88, 0x7e, 0x97, 0x66, 0x90, 0xb6, 0xb4,
0x7f, 0x5b, 0x2a, 0x4b, 0x7d, 0x44, 0x8e,
},
isValid: true,
format: pubkeyCompressed,
},
{
name: "compressed claims uncompressed (ybit = 0)",
key: []byte{0x04, 0xce, 0x0b, 0x14, 0xfb, 0x84, 0x2b, 0x1b,
0xa5, 0x49, 0xfd, 0xd6, 0x75, 0xc9, 0x80, 0x75, 0xf1,
0x2e, 0x9c, 0x51, 0x0f, 0x8e, 0xf5, 0x2b, 0xd0, 0x21,
0xa9, 0xa1, 0xf4, 0x80, 0x9d, 0x3b, 0x4d,
},
isValid: false,
},
{
name: "compressed claims uncompressed (ybit = 1)",
key: []byte{0x05, 0x26, 0x89, 0xc7, 0xc2, 0xda, 0xb1, 0x33,
0x09, 0xfb, 0x14, 0x3e, 0x0e, 0x8f, 0xe3, 0x96, 0x34,
0x25, 0x21, 0x88, 0x7e, 0x97, 0x66, 0x90, 0xb6, 0xb4,
0x7f, 0x5b, 0x2a, 0x4b, 0x7d, 0x44, 0x8e,
},
isValid: false,
},
{
name: "wrong length)",
key: []byte{0x05},
isValid: false,
},
{
name: "X == P",
key: []byte{0x04, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFC, 0x2F, 0xb2, 0xe0,
0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64,
0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9,
0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56,
0xb4, 0x12, 0xa3,
},
isValid: false,
},
{
name: "X > P",
key: []byte{0x04, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFD, 0x2F, 0xb2, 0xe0,
0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64,
0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9,
0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56,
0xb4, 0x12, 0xa3,
},
isValid: false,
},
{
name: "Y == P",
key: []byte{0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF,
0xFF, 0xFC, 0x2F,
},
isValid: false,
},
{
name: "Y > P",
key: []byte{0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF,
0xFF, 0xFD, 0x2F,
},
isValid: false,
},
{
name: "hybrid",
key: []byte{0x06, 0x79, 0xbe, 0x66, 0x7e, 0xf9, 0xdc, 0xbb,
0xac, 0x55, 0xa0, 0x62, 0x95, 0xce, 0x87, 0x0b, 0x07,
0x02, 0x9b, 0xfc, 0xdb, 0x2d, 0xce, 0x28, 0xd9, 0x59,
0xf2, 0x81, 0x5b, 0x16, 0xf8, 0x17, 0x98, 0x48, 0x3a,
0xda, 0x77, 0x26, 0xa3, 0xc4, 0x65, 0x5d, 0xa4, 0xfb,
0xfc, 0x0e, 0x11, 0x08, 0xa8, 0xfd, 0x17, 0xb4, 0x48,
0xa6, 0x85, 0x54, 0x19, 0x9c, 0x47, 0xd0, 0x8f, 0xfb,
0x10, 0xd4, 0xb8,
},
format: pubkeyHybrid,
isValid: true,
},
}
func TestPubKeys(t *testing.T) {
for _, test := range pubKeyTests {
pk, err := ParsePubKey(test.key, S256())
if err != nil {
if test.isValid {
t.Errorf("%s pubkey failed when shouldn't %v",
test.name, err)
}
continue
}
if !test.isValid {
t.Errorf("%s counted as valid when it should fail",
test.name)
continue
}
var pkStr []byte
switch test.format {
case pubkeyUncompressed:
pkStr = (*PublicKey)(pk).SerializeUncompressed()
case pubkeyCompressed:
pkStr = (*PublicKey)(pk).SerializeCompressed()
case pubkeyHybrid:
pkStr = (*PublicKey)(pk).SerializeHybrid()
}
if !bytes.Equal(test.key, pkStr) {
t.Errorf("%s pubkey: serialized keys do not match.",
test.name)
spew.Dump(test.key)
spew.Dump(pkStr)
}
}
}
func TestPublicKeyIsEqual(t *testing.T) {
pubKey1, err := ParsePubKey(
[]byte{0x03, 0x26, 0x89, 0xc7, 0xc2, 0xda, 0xb1, 0x33,
0x09, 0xfb, 0x14, 0x3e, 0x0e, 0x8f, 0xe3, 0x96, 0x34,
0x25, 0x21, 0x88, 0x7e, 0x97, 0x66, 0x90, 0xb6, 0xb4,
0x7f, 0x5b, 0x2a, 0x4b, 0x7d, 0x44, 0x8e,
},
S256(),
)
if err != nil {
t.Fatalf("failed to parse raw bytes for pubKey1: %v", err)
}
pubKey2, err := ParsePubKey(
[]byte{0x02, 0xce, 0x0b, 0x14, 0xfb, 0x84, 0x2b, 0x1b,
0xa5, 0x49, 0xfd, 0xd6, 0x75, 0xc9, 0x80, 0x75, 0xf1,
0x2e, 0x9c, 0x51, 0x0f, 0x8e, 0xf5, 0x2b, 0xd0, 0x21,
0xa9, 0xa1, 0xf4, 0x80, 0x9d, 0x3b, 0x4d,
},
S256(),
)
if err != nil {
t.Fatalf("failed to parse raw bytes for pubKey2: %v", err)
}
if !pubKey1.IsEqual(pubKey1) {
t.Fatalf("value of IsEqual is incorrect, %v is "+
"equal to %v", pubKey1, pubKey1)
}
if pubKey1.IsEqual(pubKey2) {
t.Fatalf("value of IsEqual is incorrect, %v is not "+
"equal to %v", pubKey1, pubKey2)
}
}
func TestIsCompressed(t *testing.T) {
for _, test := range pubKeyTests {
isCompressed := IsCompressedPubKey(test.key)
wantCompressed := (test.format == pubkeyCompressed)
if isCompressed != wantCompressed {
t.Fatalf("%s (%x) pubkey: unexpected compressed result, "+
"got %v, want %v", test.name, test.key,
isCompressed, wantCompressed)
}
}
}

File diff suppressed because one or more lines are too long


@@ -1,267 +0,0 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package btcec
import (
"bytes"
"crypto/elliptic"
"crypto/hmac"
"crypto/sha256"
"github.com/pkg/errors"
"hash"
"math/big"
)
// Errors returned by canonicalPadding.
var (
errNegativeValue = errors.New("value may be interpreted as negative")
errExcessivelyPaddedValue = errors.New("value is excessively padded")
)
// Signature is a type representing a Schnorr signature.
type Signature struct {
R *big.Int
S *big.Int
}
var (
// Used in RFC6979 implementation when testing the nonce for correctness
one = big.NewInt(1)
// oneInitializer is used to fill a byte slice with byte 0x01. It is provided
// here to avoid the need to create it multiple times.
oneInitializer = []byte{0x01}
)
// Serialize returns a serialized signature (R and S concatenated).
func (sig *Signature) Serialize() []byte {
return append(bigIntTo32Bytes(sig.R), bigIntTo32Bytes(sig.S)...)
}
// Verify verifies digital signatures. It returns true if the signature
// is valid, false otherwise.
func (sig *Signature) Verify(hash []byte, pubKey *PublicKey) bool {
return verifySchnorr(pubKey, hash, sig.R, sig.S)
}
// verifySchnorr verifies the schnorr signature of the hash using the public key.
// It returns true if the signature is valid, false otherwise.
func verifySchnorr(pubKey *PublicKey, hash []byte, r *big.Int, s *big.Int) bool {
// This schnorr specification is specific to the secp256k1 curve, so if the
// provided curve is not a KoblitzCurve then we'll just return false.
curve, ok := pubKey.Curve.(*KoblitzCurve)
if !ok {
return false
}
// Signature is invalid if s >= order or r >= p.
if s.Cmp(curve.Params().N) >= 0 || r.Cmp(curve.Params().P) >= 0 {
return false
}
// Compute scalar e = Hash(r || compressed(P) || m) mod N
eBytes := sha256.Sum256(append(append(bigIntTo32Bytes(r), pubKey.SerializeCompressed()...), hash...))
e := new(big.Int).SetBytes(eBytes[:])
e.Mod(e, curve.Params().N)
// Negate e
e.Neg(e).Mod(e, curve.Params().N)
// Compute point R = s * G - e * P.
sgx, sgy, sgz := curve.scalarBaseMultJacobian(s.Bytes())
epx, epy, epz := curve.scalarMultJacobian(pubKey.X, pubKey.Y, e.Bytes())
rx, ry, rz := new(fieldVal), new(fieldVal), new(fieldVal)
curve.addJacobian(sgx, sgy, sgz, epx, epy, epz, rx, ry, rz)
// Check that R is not infinity
if rz.Equals(new(fieldVal).SetInt(0)) {
return false
}
// Check if R.y is quadratic residue
yz := ry.Mul(rz).Normalize()
b := yz.Bytes()
if big.Jacobi(new(big.Int).SetBytes(b[:]), curve.P) != 1 {
return false
}
// Check that the R values match:
// rx == rz^2 * r (mod p)
fieldR := new(fieldVal).SetByteSlice(r.Bytes())
return rx.Normalize().Equals(rz.Square().Mul(fieldR).Normalize())
}
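In equation form, the routine above accepts a signature (r, s) on message m under public key P exactly when

e = \mathrm{int}(\mathrm{SHA256}(r \,\|\, \mathrm{ser}_c(P) \,\|\, m)) \bmod N, \qquad R = sG - eP

and R \ne \infty, R_y is a quadratic residue mod p, and R_x \equiv r \pmod{p}. The code works in Jacobian coordinates to avoid a field inversion: the affine coordinates are R_x = X/Z^2 and R_y = Y/Z^3, so Y \cdot Z has the same Jacobi symbol as Y/Z^3, and the final comparison checks X = r \cdot Z^2 instead of X/Z^2 = r.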
// IsEqual compares this Signature instance to the one passed, returning true
// if both Signatures are equivalent. A signature is equivalent to another, if
// they both have the same scalar value for R and S.
func (sig *Signature) IsEqual(otherSig *Signature) bool {
return sig.R.Cmp(otherSig.R) == 0 &&
sig.S.Cmp(otherSig.S) == 0
}
// ParseSignature parses a 64 byte schnorr signature into a Signature type.
func ParseSignature(sigStr []byte) (*Signature, error) {
if len(sigStr) != 64 {
return nil, errors.New("malformed schnorr signature: not 64 bytes")
}
bigR := new(big.Int).SetBytes(sigStr[:32])
bigS := new(big.Int).SetBytes(sigStr[32:64])
return &Signature{
R: bigR,
S: bigS,
}, nil
}
// bigIntTo32Bytes pads a big int's bytes with leading zeros if they are
// missing, bringing the length up to exactly 32 bytes.
func bigIntTo32Bytes(val *big.Int) []byte {
b := val.Bytes()
pad := bytes.Repeat([]byte{0x00}, 32-len(b))
return append(pad, b...)
}
// hashToInt converts a hash value to an integer. There is some disagreement
// about how this is done. [NSA] suggests that this is done in the obvious
// manner, but [SECG] truncates the hash to the bit-length of the curve order
// first. We follow [SECG] because that's what OpenSSL does. Additionally,
// OpenSSL right shifts excess bits from the number if the hash is too large
// and we mirror that too.
// This is borrowed from crypto/ecdsa.
func hashToInt(hash []byte, c elliptic.Curve) *big.Int {
orderBits := c.Params().N.BitLen()
orderBytes := (orderBits + 7) / 8
if len(hash) > orderBytes {
hash = hash[:orderBytes]
}
ret := new(big.Int).SetBytes(hash)
excess := len(hash)*8 - orderBits
if excess > 0 {
ret.Rsh(ret, uint(excess))
}
return ret
}
// sign signs the hash using the schnorr signature algorithm.
func sign(privateKey *PrivateKey, hash []byte) (*Signature, error) {
// The rfc6979 nonce derivation function accepts additional entropy.
// See https://github.com/bitcoincashorg/bitcoincash.org/blob/master/spec/2019-05-15-schnorr.md#recommended-practices-for-secure-signature-generation
additionalData := []byte{'S', 'c', 'h', 'n', 'o', 'r', 'r', '+', 'S', 'H', 'A', '2', '5', '6', ' ', ' '}
k := nonceRFC6979(privateKey.D, hash, additionalData)
// Compute point R = k * G
rx, ry := privateKey.Curve.ScalarBaseMult(k.Bytes())
// Negate nonce if R.y is not a quadratic residue.
if big.Jacobi(ry, privateKey.Params().P) != 1 {
k = k.Neg(k)
}
// Compute scalar e = Hash(R.x || compressed(P) || m) mod N
eBytes := sha256.Sum256(append(append(bigIntTo32Bytes(rx), privateKey.PubKey().SerializeCompressed()...), hash...))
e := new(big.Int).SetBytes(eBytes[:])
e.Mod(e, privateKey.Params().N)
// Compute scalar s = (k + e * x) mod N
x := new(big.Int).SetBytes(privateKey.Serialize())
s := e.Mul(e, x)
s.Add(s, k)
s.Mod(s, privateKey.Params().N)
return &Signature{
R: rx,
S: s,
}, nil
}
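The signing equations implemented above, for private key d with public key P = dG:

k = \mathrm{RFC6979}(d, m), \quad R = kG, \quad \text{negate } k \text{ if } \left(\tfrac{R_y}{p}\right) \ne 1
e = \mathrm{int}(\mathrm{SHA256}(R_x \,\|\, \mathrm{ser}_c(P) \,\|\, m)) \bmod N, \quad s = (k + e \cdot d) \bmod N

and the signature is (R_x, s). Forcing R_y to be a quadratic residue (by negating k when it is not) is what lets verification test the Jacobi symbol instead of transmitting the sign of R_y.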
// nonceRFC6979 generates a nonce (`k`) deterministically according to RFC 6979.
// It takes a 32-byte hash as an input and returns a 32-byte nonce to be used
// in the digital signature algorithm.
func nonceRFC6979(privkey *big.Int, hash []byte, additionalData []byte) *big.Int {
// Step A
curve := S256()
q := curve.Params().N
x := privkey
alg := sha256.New
qlen := q.BitLen()
holen := alg().Size()
rolen := (qlen + 7) >> 3
bx := append(int2octets(x, rolen), bits2octets(hash, curve, rolen)...)
// Step B
v := bytes.Repeat(oneInitializer, holen)
// Step C (Go zeroes all allocated memory)
k := make([]byte, holen)
// Step D
k = mac(alg, k, append(append(append(v, 0x00), bx...), additionalData...))
// Step E
v = mac(alg, k, v)
// Step F
k = mac(alg, k, append(append(append(v, 0x01), bx...), additionalData...))
// Step G
v = mac(alg, k, v)
// Step H
for {
// Step H1
var t []byte
// Step H2
for len(t)*8 < qlen {
v = mac(alg, k, v)
t = append(t, v...)
}
// Step H3
secret := hashToInt(t, curve)
if secret.Cmp(one) >= 0 && secret.Cmp(q) < 0 {
return secret
}
k = mac(alg, k, append(v, 0x00))
v = mac(alg, k, v)
}
}
// mac returns an HMAC of the given key and message.
func mac(alg func() hash.Hash, k, m []byte) []byte {
h := hmac.New(alg, k)
h.Write(m)
return h.Sum(nil)
}
// https://tools.ietf.org/html/rfc6979#section-2.3.3
func int2octets(v *big.Int, rolen int) []byte {
out := v.Bytes()
// left pad with zeros if it's too short
if len(out) < rolen {
out2 := make([]byte, rolen)
copy(out2[rolen-len(out):], out)
return out2
}
// drop most significant bytes if it's too long
if len(out) > rolen {
out2 := make([]byte, rolen)
copy(out2, out[len(out)-rolen:])
return out2
}
return out
}
func bits2octets(in []byte, curve elliptic.Curve, rolen int) []byte {
// https://tools.ietf.org/html/rfc6979#section-2.3.4
z1 := hashToInt(in, curve)
z2 := new(big.Int).Sub(z1, curve.Params().N)
if z2.Sign() < 0 {
return int2octets(z1, rolen)
}
return int2octets(z2, rolen)
}


@@ -1,269 +0,0 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package btcec
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"testing"
)
type signatureTest struct {
name string
sig []byte
der bool
isValid bool
}
// decodeHex decodes the passed hex string and returns the resulting bytes. It
// panics if an error occurs. This is only used in the tests as a helper since
// the only way it can fail is if there is an error in the test source code.
func decodeHex(hexStr string) []byte {
b, err := hex.DecodeString(hexStr)
if err != nil {
panic("invalid hex string in test source: err " + err.Error() +
", hex: " + hexStr)
}
return b
}
func TestRFC6979(t *testing.T) {
// Test vectors matching Trezor and CoreBitcoin implementations.
// - https://github.com/trezor/trezor-crypto/blob/9fea8f8ab377dc514e40c6fd1f7c89a74c1d8dc6/tests.c#L432-L453
// - https://github.com/oleganza/CoreBitcoin/blob/e93dd71207861b5bf044415db5fa72405e7d8fbc/CoreBitcoin/BTCKey%2BTests.m#L23-L49
tests := []struct {
key string
msg string
nonce string
signature string
}{
{
"cca9fbcc1b41e5a95d369eaa6ddcff73b61a4efaa279cfc6567e8daa39cbaf50",
"sample",
"bbed2b98f40ded8587b0615b8413d2aaf520215f369b098cea4ceec119b3f722",
"e8d6af55a53a1ac260c89aa247cc677a6219132f0d031530c75c22bffde442d894a5edc9cfffa4362f0b9f2af84f4cd13ae67b0b742d18c46eb85dac8eea6171",
},
{
// This signature hits the case when S is higher than halforder.
// If S is not canonicalized (lowered by halforder), this test will fail.
"0000000000000000000000000000000000000000000000000000000000000001",
"Satoshi Nakamoto",
"c2b802cb01789ed38de5cffd81a56374c39b7ee6f31bb36244650141bb3425fe",
"2f78a0720cf85bef9a24aef691fce02002c59381133ee543055d24222e2797cc78531f684122d541bbb4afe536e0b19236ad83e2e97a6277626aa5e0d1428f64",
},
{
"fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140",
"Satoshi Nakamoto",
"b962c5b6537356a8b9362d6a275614df9711dff7b94d803b9ba5b5f2d4d90f8c",
"bebecd554a4c22ad202d074db855af4c974a55a5b9ef5ecf009eff232c806c63b9b37ef1d984feb2aa500777562d66f2ba468fab0491202ede12e76aa69d194a",
},
{
"f8b8af8ce3c7cca5e300d33939540c10d45ce001b8f252bfbc57ba0342904181",
"Alan Turing",
"65e72825561dd93f7a0ab53cdce97d5e57b2235c056e8b8396857425a6a8ff54",
"864487c21bc3a7823a9dbc99fc2a6de67cf7d7cf5f2feb6585319256393ff0f2192c3292df9979a01bf26f96017bc7e5f4a2e01fa5b9e1007af1a19a58f767d0",
},
{
"0000000000000000000000000000000000000000000000000000000000000001",
"All those moments will be lost in time, like tears in rain. Time to die...",
"60eb74b1fbc2db67be649f56246cee8c2225803155c86ff72f29641958de471a",
"66e3a89b2842c05cf57e5315bc2d063b1054979a4f0e5718ec5e0d68bde268296f855a1595aaa0625c822b526226a0e248fc0a1eae6f160cc97a783b8646b755",
},
{
"e91671c46231f833a6406ccbea0e3e392c76c167bac1cb013f6f1013980455c2",
"There is a computer disease that anybody who works with computers knows about. It's a very serious disease and it interferes completely with the work. The trouble with computers is that you 'play' with them!",
"a1b518d7f2c6d9b68672f5e2b66fc872ef67a9b0b422d8b70ce0292017a62f60",
"c5290b3ecadc0ed0d674501ffc880f2f44b2ed829bfd101676f6fb55def15f23236ace4b651a52e9d590f421668e2b1b495c9039d27db848eb3edf1d1eb60d50",
},
}
for i, test := range tests {
privKey, _ := PrivKeyFromBytes(S256(), decodeHex(test.key))
hash := sha256.Sum256([]byte(test.msg))
// Ensure deterministically generated nonce is the expected value.
additionalData := []byte{'S', 'c', 'h', 'n', 'o', 'r', 'r', '+', 'S', 'H', 'A', '2', '5', '6', ' ', ' '}
gotNonce := nonceRFC6979(privKey.D, hash[:], additionalData).Bytes()
wantNonce := decodeHex(test.nonce)
if !bytes.Equal(gotNonce, wantNonce) {
t.Errorf("NonceRFC6979 #%d (%s): Nonce is incorrect: "+
"%x (expected %x)", i, test.msg, gotNonce,
wantNonce)
continue
}
// Ensure deterministically generated signature is the expected value.
gotSig, err := privKey.Sign(hash[:])
if err != nil {
t.Errorf("Sign #%d (%s): unexpected error: %v", i,
test.msg, err)
continue
}
gotSigBytes := gotSig.Serialize()
wantSigBytes := decodeHex(test.signature)
if !bytes.Equal(gotSigBytes, wantSigBytes) {
t.Errorf("Sign #%d (%s): mismatched signature: %x "+
"(expected %x)", i, test.msg, gotSigBytes,
wantSigBytes)
continue
}
}
}
func TestSignatureIsEqual(t *testing.T) {
sig1 := &Signature{
R: fromHex("0082235e21a2300022738dabb8e1bbd9d19cfb1e7ab8c30a23b0afbb8d178abcf3"),
S: fromHex("24bf68e256c534ddfaf966bf908deb944305596f7bdcc38d69acad7f9c868724"),
}
sig2 := &Signature{
R: fromHex("4e45e16932b8af514961a1d3a1a25fdf3f4f7732e9d624c6c61548ab5fb8cd41"),
S: fromHex("181522ec8eca07de4860a4acdd12909d831cc56cbbac4622082221a8768d1d09"),
}
if !sig1.IsEqual(sig1) {
t.Fatalf("value of IsEqual is incorrect, %v is "+
"equal to %v", sig1, sig1)
}
if sig1.IsEqual(sig2) {
t.Fatalf("value of IsEqual is incorrect, %v is not "+
"equal to %v", sig1, sig2)
}
}
func TestSchnorrSignatureVerify(t *testing.T) {
// Test vectors taken from https://github.com/sipa/bips/blob/bip-schnorr/bip-schnorr/test-vectors.csv
tests := []struct {
pubKey []byte
message []byte
signature []byte
valid bool
}{
{
decodeHex("0279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798"),
decodeHex("0000000000000000000000000000000000000000000000000000000000000000"),
decodeHex("787A848E71043D280C50470E8E1532B2DD5D20EE912A45DBDD2BD1DFBF187EF67031A98831859DC34DFFEEDDA86831842CCD0079E1F92AF177F7F22CC1DCED05"),
true,
},
{
decodeHex("02DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659"),
decodeHex("243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89"),
decodeHex("2A298DACAE57395A15D0795DDBFD1DCB564DA82B0F269BC70A74F8220429BA1D1E51A22CCEC35599B8F266912281F8365FFC2D035A230434A1A64DC59F7013FD"),
true,
},
{
decodeHex("03FAC2114C2FBB091527EB7C64ECB11F8021CB45E8E7809D3C0938E4B8C0E5F84B"),
decodeHex("5E2D58D8B3BCDF1ABADEC7829054F90DDA9805AAB56C77333024B9D0A508B75C"),
decodeHex("00DA9B08172A9B6F0466A2DEFD817F2D7AB437E0D253CB5395A963866B3574BE00880371D01766935B92D2AB4CD5C8A2A5837EC57FED7660773A05F0DE142380"),
true,
},
{
decodeHex("03DEFDEA4CDB677750A420FEE807EACF21EB9898AE79B9768766E4FAA04A2D4A34"),
decodeHex("4DF3C3F68FCC83B27E9D42C90431A72499F17875C81A599B566C9889B9696703"),
decodeHex("00000000000000000000003B78CE563F89A0ED9414F5AA28AD0D96D6795F9C6302A8DC32E64E86A333F20EF56EAC9BA30B7246D6D25E22ADB8C6BE1AEB08D49D"),
true,
},
{
decodeHex("031B84C5567B126440995D3ED5AABA0565D71E1834604819FF9C17F5E9D5DD078F"),
decodeHex("0000000000000000000000000000000000000000000000000000000000000000"),
decodeHex("52818579ACA59767E3291D91B76B637BEF062083284992F2D95F564CA6CB4E3530B1DA849C8E8304ADC0CFE870660334B3CFC18E825EF1DB34CFAE3DFC5D8187"),
true,
},
{
decodeHex("03FAC2114C2FBB091527EB7C64ECB11F8021CB45E8E7809D3C0938E4B8C0E5F84B"),
decodeHex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"),
decodeHex("570DD4CA83D4E6317B8EE6BAE83467A1BF419D0767122DE409394414B05080DCE9EE5F237CBD108EABAE1E37759AE47F8E4203DA3532EB28DB860F33D62D49BD"),
true,
},
{
decodeHex("02DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659"),
decodeHex("243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89"),
decodeHex("2A298DACAE57395A15D0795DDBFD1DCB564DA82B0F269BC70A74F8220429BA1DFA16AEE06609280A19B67A24E1977E4697712B5FD2943914ECD5F730901B4AB7"),
false,
},
{
decodeHex("03FAC2114C2FBB091527EB7C64ECB11F8021CB45E8E7809D3C0938E4B8C0E5F84B"),
decodeHex("5E2D58D8B3BCDF1ABADEC7829054F90DDA9805AAB56C77333024B9D0A508B75C"),
decodeHex("00DA9B08172A9B6F0466A2DEFD817F2D7AB437E0D253CB5395A963866B3574BED092F9D860F1776A1F7412AD8A1EB50DACCC222BC8C0E26B2056DF2F273EFDEC"),
false,
},
{
decodeHex("0279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798"),
decodeHex("0000000000000000000000000000000000000000000000000000000000000000"),
decodeHex("787A848E71043D280C50470E8E1532B2DD5D20EE912A45DBDD2BD1DFBF187EF68FCE5677CE7A623CB20011225797CE7A8DE1DC6CCD4F754A47DA6C600E59543C"),
false,
},
{
decodeHex("03DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659"),
decodeHex("243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89"),
decodeHex("2A298DACAE57395A15D0795DDBFD1DCB564DA82B0F269BC70A74F8220429BA1D1E51A22CCEC35599B8F266912281F8365FFC2D035A230434A1A64DC59F7013FD"),
false,
},
{
decodeHex("03DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659"),
decodeHex("243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89"),
decodeHex("00000000000000000000000000000000000000000000000000000000000000009E9D01AF988B5CEDCE47221BFA9B222721F3FA408915444A4B489021DB55775F"),
false,
},
{
decodeHex("03DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659"),
decodeHex("243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89"),
decodeHex("0000000000000000000000000000000000000000000000000000000000000001D37DDF0254351836D84B1BD6A795FD5D523048F298C4214D187FE4892947F728"),
false,
},
{
decodeHex("03DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659"),
decodeHex("243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89"),
decodeHex("4A298DACAE57395A15D0795DDBFD1DCB564DA82B0F269BC70A74F8220429BA1D1E51A22CCEC35599B8F266912281F8365FFC2D035A230434A1A64DC59F7013FD"),
false,
},
{
decodeHex("03DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659"),
decodeHex("243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89"),
decodeHex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFC2F1E51A22CCEC35599B8F266912281F8365FFC2D035A230434A1A64DC59F7013FD"),
false,
},
{
decodeHex("03DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659"),
decodeHex("243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89"),
decodeHex("2A298DACAE57395A15D0795DDBFD1DCB564DA82B0F269BC70A74F8220429BA1DFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141"),
false,
},
}
for i, test := range tests {
pubkey, err := ParsePubKey(test.pubKey, S256())
if err != nil {
t.Fatal(err)
}
sig, err := ParseSignature(test.signature)
if err != nil {
t.Fatal(err)
}
valid := sig.Verify(test.message, pubkey)
if valid != test.valid {
t.Errorf("Schnorr test vector %d didn't produce correct result", i)
}
}
}
func TestDeterministicSchnorrSignatureGen(t *testing.T) {
// Test vector from Bitcoin-ABC
privKeyBytes := decodeHex("12b004fff7f4b69ef8650e767f18f11ede158148b425660723b9f9a66e61f747")
privKey, _ := PrivKeyFromBytes(S256(), privKeyBytes)
h1 := sha256.Sum256([]byte("Very deterministic message"))
h2 := sha256.Sum256(h1[:])
sig, err := privKey.Sign(h2[:])
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(sig.R.Bytes(), decodeHex("2c56731ac2f7a7e7f11518fc7722a166b02438924ca9d8b4d111347b81d07175")) ||
!bytes.Equal(sig.S.Bytes(), decodeHex("71846de67ad3d913a8fdf9d8f3f73161a4c48ae81cb183b214765feb86e255ce")) {
t.Error("Failed to generate deterministic schnorr signature")
}
}


@@ -1,70 +0,0 @@
btcjson
=======
[![Build Status](https://travis-ci.org/btcsuite/btcd.png?branch=master)](https://travis-ci.org/btcsuite/btcd)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad/btcjson)
Package btcjson implements concrete types for marshalling to and from the
bitcoin JSON-RPC API. A comprehensive suite of tests is provided to ensure
proper functionality.
Although this package was primarily written for the btcsuite, it has
intentionally been designed so it can be used as a standalone package for any
projects needing to marshal to and from bitcoin JSON-RPC requests and responses.
Note that although it's possible to use this package directly to implement an
RPC client, it is not recommended since it is only intended as an infrastructure
package. Instead, RPC clients should use the
[btcrpcclient](https://github.com/btcsuite/btcrpcclient) package which provides
a full blown RPC client with many features such as automatic connection
management, websocket support, automatic notification re-registration on
reconnect, and conversion from the raw underlying RPC types (strings, floats,
ints, etc) to higher-level types with many nice and useful properties.
## Installation and Updating
```bash
$ go get -u github.com/kaspanet/kaspad/btcjson
```
## Examples
* [Marshal Command](http://godoc.org/github.com/kaspanet/kaspad/btcjson#example-MarshalCmd)
Demonstrates how to create and marshal a command into a JSON-RPC request.
* [Unmarshal Command](http://godoc.org/github.com/kaspanet/kaspad/btcjson#example-UnmarshalCmd)
Demonstrates how to unmarshal a JSON-RPC request and then unmarshal the
concrete request into a concrete command.
* [Marshal Response](http://godoc.org/github.com/kaspanet/kaspad/btcjson#example-MarshalResponse)
Demonstrates how to marshal a JSON-RPC response.
* [Unmarshal Response](http://godoc.org/github.com/kaspanet/kaspad/btcjson#example-package--UnmarshalResponse)
Demonstrates how to unmarshal a JSON-RPC response and then unmarshal the
result field in the response to a concrete type.
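For a quick orientation, the round trip looks roughly like the following minimal sketch (method and helper names taken from this repository's tests; error handling elided for brevity):
```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/kaspanet/kaspad/btcjson"
)

func main() {
	// Marshal a getSelectedTip command into a JSON-RPC request.
	cmd, _ := btcjson.NewCmd("getSelectedTip")
	marshalled, _ := btcjson.MarshalCmd(1, cmd)
	fmt.Println(string(marshalled))
	// {"jsonrpc":"1.0","method":"getSelectedTip","params":[],"id":1}

	// Unmarshal the request back into a concrete command.
	var request btcjson.Request
	_ = json.Unmarshal(marshalled, &request)
	concrete, _ := btcjson.UnmarshalCmd(&request)
	fmt.Printf("%T\n", concrete) // *btcjson.GetSelectedTipCmd
}
```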
## GPG Verification Key
All official release tags are signed by Conformal so users can ensure the code
has not been tampered with and is coming from the btcsuite developers. To
verify the signature perform the following:
- Download the public key from the Conformal website at
https://opensource.conformal.com/GIT-GPG-KEY-conformal.txt
- Import the public key into your GPG keyring:
```bash
gpg --import GIT-GPG-KEY-conformal.txt
```
- Verify the release tag with the following command where `TAG_NAME` is a
placeholder for the specific tag:
```bash
git tag -v TAG_NAME
```
## License
Package btcjson is licensed under the [copyfree](http://copyfree.org) ISC
License.


@@ -1,158 +0,0 @@
// Copyright (c) 2014-2016 The btcsuite developers
// Copyright (c) 2015-2016 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
// NOTE: This file is intended to house the RPC commands that are supported by
// a dag server with btcd extensions.
package btcjson
// NodeSubCmd defines the type used in the `node` JSON-RPC command for the
// sub command field.
type NodeSubCmd string
const (
// NConnect indicates the specified host should be connected to.
NConnect NodeSubCmd = "connect"
// NRemove indicates the specified peer should be removed as a
// persistent peer.
NRemove NodeSubCmd = "remove"
// NDisconnect indicates the specified peer should be disconnected.
NDisconnect NodeSubCmd = "disconnect"
)
// NodeCmd defines the node JSON-RPC command.
type NodeCmd struct {
SubCmd NodeSubCmd `jsonrpcusage:"\"connect|remove|disconnect\""`
Target string
ConnectSubCmd *string `jsonrpcusage:"\"perm|temp\""`
}
// NewNodeCmd returns a new instance which can be used to issue a `node`
// JSON-RPC command.
//
// The parameters which are pointers indicate they are optional. Passing nil
// for optional parameters will use the default value.
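//
// As an illustrative sketch (mirroring the tests in this changeset), both of
// the following are valid; passing nil leaves ConnectSubCmd at its default:
//
//	NewNodeCmd(NConnect, "1.1.1.1", nil)
//	NewNodeCmd(NConnect, "1.1.1.1", String("perm"))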
func NewNodeCmd(subCmd NodeSubCmd, target string, connectSubCmd *string) *NodeCmd {
return &NodeCmd{
SubCmd: subCmd,
Target: target,
ConnectSubCmd: connectSubCmd,
}
}
// DebugLevelCmd defines the debugLevel JSON-RPC command. This command is not a
// standard Bitcoin command. It is an extension for btcd.
type DebugLevelCmd struct {
LevelSpec string
}
// NewDebugLevelCmd returns a new DebugLevelCmd which can be used to issue a
// debugLevel JSON-RPC command. This command is not a standard Bitcoin command.
// It is an extension for btcd.
func NewDebugLevelCmd(levelSpec string) *DebugLevelCmd {
return &DebugLevelCmd{
LevelSpec: levelSpec,
}
}
// GenerateCmd defines the generate JSON-RPC command.
type GenerateCmd struct {
NumBlocks uint32
}
// NewGenerateCmd returns a new instance which can be used to issue a generate
// JSON-RPC command.
func NewGenerateCmd(numBlocks uint32) *GenerateCmd {
return &GenerateCmd{
NumBlocks: numBlocks,
}
}
// GetSelectedTipCmd defines the getSelectedTip JSON-RPC command.
type GetSelectedTipCmd struct {
Verbose *bool `jsonrpcdefault:"true"`
VerboseTx *bool `jsonrpcdefault:"false"`
}
// NewGetSelectedTipCmd returns a new instance which can be used to issue a
// getSelectedTip JSON-RPC command.
func NewGetSelectedTipCmd(verbose, verboseTx *bool) *GetSelectedTipCmd {
return &GetSelectedTipCmd{
Verbose: verbose,
VerboseTx: verboseTx,
}
}
// GetCurrentNetCmd defines the getCurrentNet JSON-RPC command.
type GetCurrentNetCmd struct{}
// NewGetCurrentNetCmd returns a new instance which can be used to issue a
// getCurrentNet JSON-RPC command.
func NewGetCurrentNetCmd() *GetCurrentNetCmd {
return &GetCurrentNetCmd{}
}
// GetTopHeadersCmd defines the getTopHeaders JSON-RPC command.
type GetTopHeadersCmd struct {
StartHash *string `json:"startHash"`
}
// NewGetTopHeadersCmd returns a new instance which can be used to issue a
// getTopHeaders JSON-RPC command.
func NewGetTopHeadersCmd(startHash *string) *GetTopHeadersCmd {
return &GetTopHeadersCmd{
StartHash: startHash,
}
}
// GetHeadersCmd defines the getHeaders JSON-RPC command.
//
// NOTE: This is a btcsuite extension ported from
// github.com/decred/dcrd/dcrjson.
type GetHeadersCmd struct {
StartHash string `json:"startHash"`
StopHash string `json:"stopHash"`
}
// NewGetHeadersCmd returns a new instance which can be used to issue a
// getHeaders JSON-RPC command.
//
// NOTE: This is a btcsuite extension ported from
// github.com/decred/dcrd/dcrjson.
func NewGetHeadersCmd(startHash, stopHash string) *GetHeadersCmd {
return &GetHeadersCmd{
StartHash: startHash,
StopHash: stopHash,
}
}
// VersionCmd defines the version JSON-RPC command.
//
// NOTE: This is a btcsuite extension ported from
// github.com/decred/dcrd/dcrjson.
type VersionCmd struct{}
// NewVersionCmd returns a new instance which can be used to issue a JSON-RPC
// version command.
//
// NOTE: This is a btcsuite extension ported from
// github.com/decred/dcrd/dcrjson.
func NewVersionCmd() *VersionCmd { return new(VersionCmd) }
func init() {
// No special flags for commands in this file.
flags := UsageFlag(0)
MustRegisterCmd("debugLevel", (*DebugLevelCmd)(nil), flags)
MustRegisterCmd("node", (*NodeCmd)(nil), flags)
MustRegisterCmd("generate", (*GenerateCmd)(nil), flags)
MustRegisterCmd("getSelectedTip", (*GetSelectedTipCmd)(nil), flags)
MustRegisterCmd("getCurrentNet", (*GetCurrentNetCmd)(nil), flags)
MustRegisterCmd("getHeaders", (*GetHeadersCmd)(nil), flags)
MustRegisterCmd("getTopHeaders", (*GetTopHeadersCmd)(nil), flags)
MustRegisterCmd("version", (*VersionCmd)(nil), flags)
}


@@ -1,282 +0,0 @@
// Copyright (c) 2014-2016 The btcsuite developers
// Copyright (c) 2015-2016 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package btcjson_test
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"testing"
"github.com/kaspanet/kaspad/btcjson"
)
// TestBtcdExtCmds tests that all of the btcd extended commands marshal and
// unmarshal into valid results, including handling of optional fields being
// omitted in the marshalled command, while optional fields with defaults have
// the default assigned on unmarshalled commands.
func TestBtcdExtCmds(t *testing.T) {
t.Parallel()
testID := 1
tests := []struct {
name string
newCmd func() (interface{}, error)
staticCmd func() interface{}
marshalled string
unmarshalled interface{}
}{
{
name: "debugLevel",
newCmd: func() (interface{}, error) {
return btcjson.NewCmd("debugLevel", "trace")
},
staticCmd: func() interface{} {
return btcjson.NewDebugLevelCmd("trace")
},
marshalled: `{"jsonrpc":"1.0","method":"debugLevel","params":["trace"],"id":1}`,
unmarshalled: &btcjson.DebugLevelCmd{
LevelSpec: "trace",
},
},
{
name: "node",
newCmd: func() (interface{}, error) {
return btcjson.NewCmd("node", btcjson.NRemove, "1.1.1.1")
},
staticCmd: func() interface{} {
return btcjson.NewNodeCmd("remove", "1.1.1.1", nil)
},
marshalled: `{"jsonrpc":"1.0","method":"node","params":["remove","1.1.1.1"],"id":1}`,
unmarshalled: &btcjson.NodeCmd{
SubCmd: btcjson.NRemove,
Target: "1.1.1.1",
},
},
{
name: "node",
newCmd: func() (interface{}, error) {
return btcjson.NewCmd("node", btcjson.NDisconnect, "1.1.1.1")
},
staticCmd: func() interface{} {
return btcjson.NewNodeCmd("disconnect", "1.1.1.1", nil)
},
marshalled: `{"jsonrpc":"1.0","method":"node","params":["disconnect","1.1.1.1"],"id":1}`,
unmarshalled: &btcjson.NodeCmd{
SubCmd: btcjson.NDisconnect,
Target: "1.1.1.1",
},
},
{
name: "node",
newCmd: func() (interface{}, error) {
return btcjson.NewCmd("node", btcjson.NConnect, "1.1.1.1", "perm")
},
staticCmd: func() interface{} {
return btcjson.NewNodeCmd("connect", "1.1.1.1", btcjson.String("perm"))
},
marshalled: `{"jsonrpc":"1.0","method":"node","params":["connect","1.1.1.1","perm"],"id":1}`,
unmarshalled: &btcjson.NodeCmd{
SubCmd: btcjson.NConnect,
Target: "1.1.1.1",
ConnectSubCmd: btcjson.String("perm"),
},
},
{
name: "node",
newCmd: func() (interface{}, error) {
return btcjson.NewCmd("node", btcjson.NConnect, "1.1.1.1", "temp")
},
staticCmd: func() interface{} {
return btcjson.NewNodeCmd("connect", "1.1.1.1", btcjson.String("temp"))
},
marshalled: `{"jsonrpc":"1.0","method":"node","params":["connect","1.1.1.1","temp"],"id":1}`,
unmarshalled: &btcjson.NodeCmd{
SubCmd: btcjson.NConnect,
Target: "1.1.1.1",
ConnectSubCmd: btcjson.String("temp"),
},
},
{
name: "generate",
newCmd: func() (interface{}, error) {
return btcjson.NewCmd("generate", 1)
},
staticCmd: func() interface{} {
return btcjson.NewGenerateCmd(1)
},
marshalled: `{"jsonrpc":"1.0","method":"generate","params":[1],"id":1}`,
unmarshalled: &btcjson.GenerateCmd{
NumBlocks: 1,
},
},
{
name: "getSelectedTip",
newCmd: func() (interface{}, error) {
return btcjson.NewCmd("getSelectedTip")
},
staticCmd: func() interface{} {
return btcjson.NewGetSelectedTipCmd(nil, nil)
},
marshalled: `{"jsonrpc":"1.0","method":"getSelectedTip","params":[],"id":1}`,
unmarshalled: &btcjson.GetSelectedTipCmd{
Verbose: btcjson.Bool(true),
VerboseTx: btcjson.Bool(false),
},
},
{
name: "getCurrentNet",
newCmd: func() (interface{}, error) {
return btcjson.NewCmd("getCurrentNet")
},
staticCmd: func() interface{} {
return btcjson.NewGetCurrentNetCmd()
},
marshalled: `{"jsonrpc":"1.0","method":"getCurrentNet","params":[],"id":1}`,
unmarshalled: &btcjson.GetCurrentNetCmd{},
},
{
name: "getHeaders",
newCmd: func() (interface{}, error) {
return btcjson.NewCmd("getHeaders", "", "")
},
staticCmd: func() interface{} {
return btcjson.NewGetHeadersCmd(
"",
"",
)
},
marshalled: `{"jsonrpc":"1.0","method":"getHeaders","params":["",""],"id":1}`,
unmarshalled: &btcjson.GetHeadersCmd{
StartHash: "",
StopHash: "",
},
},
{
name: "getHeaders - with arguments",
newCmd: func() (interface{}, error) {
return btcjson.NewCmd("getHeaders", "000000000000000001f1739002418e2f9a84c47a4fd2a0eb7a787a6b7dc12f16", "000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7")
},
staticCmd: func() interface{} {
return btcjson.NewGetHeadersCmd(
"000000000000000001f1739002418e2f9a84c47a4fd2a0eb7a787a6b7dc12f16",
"000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7",
)
},
marshalled: `{"jsonrpc":"1.0","method":"getHeaders","params":["000000000000000001f1739002418e2f9a84c47a4fd2a0eb7a787a6b7dc12f16","000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7"],"id":1}`,
unmarshalled: &btcjson.GetHeadersCmd{
StartHash: "000000000000000001f1739002418e2f9a84c47a4fd2a0eb7a787a6b7dc12f16",
StopHash: "000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7",
},
},
{
name: "getTopHeaders",
newCmd: func() (interface{}, error) {
return btcjson.NewCmd("getTopHeaders")
},
staticCmd: func() interface{} {
return btcjson.NewGetTopHeadersCmd(
nil,
)
},
marshalled: `{"jsonrpc":"1.0","method":"getTopHeaders","params":[],"id":1}`,
unmarshalled: &btcjson.GetTopHeadersCmd{},
},
{
name: "getTopHeaders - with start hash",
newCmd: func() (interface{}, error) {
return btcjson.NewCmd("getTopHeaders", "000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7")
},
staticCmd: func() interface{} {
return btcjson.NewGetTopHeadersCmd(
btcjson.String("000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7"),
)
},
marshalled: `{"jsonrpc":"1.0","method":"getTopHeaders","params":["000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7"],"id":1}`,
unmarshalled: &btcjson.GetTopHeadersCmd{
StartHash: btcjson.String("000000000000000000ba33b33e1fad70b69e234fc24414dd47113bff38f523f7"),
},
},
{
name: "version",
newCmd: func() (interface{}, error) {
return btcjson.NewCmd("version")
},
staticCmd: func() interface{} {
return btcjson.NewVersionCmd()
},
marshalled: `{"jsonrpc":"1.0","method":"version","params":[],"id":1}`,
unmarshalled: &btcjson.VersionCmd{},
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Marshal the command as created by the new static command
// creation function.
marshalled, err := btcjson.MarshalCmd(testID, test.staticCmd())
if err != nil {
t.Errorf("MarshalCmd #%d (%s) unexpected error: %v", i,
test.name, err)
continue
}
if !bytes.Equal(marshalled, []byte(test.marshalled)) {
t.Errorf("Test #%d (%s) unexpected marshalled data - "+
"got %s, want %s", i, test.name, marshalled,
test.marshalled)
continue
}
// Ensure the command is created without error via the generic
// new command creation function.
cmd, err := test.newCmd()
if err != nil {
t.Errorf("Test #%d (%s) unexpected NewCmd error: %v ",
i, test.name, err)
}
// Marshal the command as created by the generic new command
// creation function.
marshalled, err = btcjson.MarshalCmd(testID, cmd)
if err != nil {
t.Errorf("MarshalCmd #%d (%s) unexpected error: %v", i,
test.name, err)
continue
}
if !bytes.Equal(marshalled, []byte(test.marshalled)) {
t.Errorf("Test #%d (%s) unexpected marshalled data - "+
"got %s, want %s", i, test.name, marshalled,
test.marshalled)
continue
}
var request btcjson.Request
if err := json.Unmarshal(marshalled, &request); err != nil {
t.Errorf("Test #%d (%s) unexpected error while "+
"unmarshalling JSON-RPC request: %v", i,
test.name, err)
continue
}
cmd, err = btcjson.UnmarshalCmd(&request)
if err != nil {
t.Errorf("UnmarshalCmd #%d (%s) unexpected error: %v", i,
test.name, err)
continue
}
if !reflect.DeepEqual(cmd, test.unmarshalled) {
t.Errorf("Test #%d (%s) unexpected unmarshalled command "+
"- got %s, want %s", i, test.name,
fmt.Sprintf("(%T) %+[1]v", cmd),
fmt.Sprintf("(%T) %+[1]v\n", test.unmarshalled))
continue
}
}
}


@@ -1,20 +0,0 @@
// Copyright (c) 2016-2017 The btcsuite developers
// Copyright (c) 2015-2017 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package btcjson
// VersionResult models objects included in the version response. In the actual
// result, these objects are keyed by the program or API name.
//
// NOTE: This is a btcsuite extension ported from
// github.com/decred/dcrd/dcrjson.
type VersionResult struct {
VersionString string `json:"versionString"`
Major uint32 `json:"major"`
Minor uint32 `json:"minor"`
Patch uint32 `json:"patch"`
Prerelease string `json:"prerelease"`
BuildMetadata string `json:"buildMetadata"`
}
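// Illustrative sketch of how these results are keyed in a full version
// response (the "kaspad" key here is a hypothetical example):
//
//	map[string]VersionResult{
//		"kaspad": {VersionString: "1.0.0", Major: 1, Minor: 0, Patch: 0},
//	}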

View File

@@ -1,55 +0,0 @@
// Copyright (c) 2016-2017 The btcsuite developers
// Copyright (c) 2015-2016 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package btcjson_test
import (
"encoding/json"
"testing"
"github.com/kaspanet/kaspad/btcjson"
)
// TestBtcdExtCustomResults ensures any results that have custom marshalling
// work as intended.
func TestBtcdExtCustomResults(t *testing.T) {
t.Parallel()
tests := []struct {
name string
result interface{}
expected string
}{
{
name: "versionresult",
result: &btcjson.VersionResult{
VersionString: "1.0.0",
Major: 1,
Minor: 0,
Patch: 0,
Prerelease: "pr",
BuildMetadata: "bm",
},
expected: `{"versionString":"1.0.0","major":1,"minor":0,"patch":0,"prerelease":"pr","buildMetadata":"bm"}`,
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
marshalled, err := json.Marshal(test.result)
if err != nil {
t.Errorf("Test #%d (%s) unexpected error: %v", i,
test.name, err)
continue
}
if string(marshalled) != test.expected {
t.Errorf("Test #%d (%s) unexpected marhsalled data - "+
"got %s, want %s", i, test.name, marshalled,
test.expected)
continue
}
}
}

File diff suppressed because it is too large


@@ -6,59 +6,25 @@ package main
import (
"os"
"path/filepath"
"runtime"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/limits"
"github.com/kaspanet/kaspad/logs"
"github.com/kaspanet/kaspad/infrastructure/limits"
"github.com/kaspanet/kaspad/infrastructure/logs"
"github.com/kaspanet/kaspad/util/panics"
)
const (
// blockDbNamePrefix is the prefix for the btcd block database.
blockDbNamePrefix = "blocks"
// blockDBNamePrefix is the prefix for the kaspad block database.
blockDBNamePrefix = "blocks"
)
var (
cfg *ConfigFlags
log logs.Logger
spawn func(func())
log *logs.Logger
spawn func(string, func())
)
// loadBlockDB opens the block database and returns a handle to it.
func loadBlockDB() (database.DB, error) {
// The database name is based on the database type.
dbName := blockDbNamePrefix + "_" + cfg.DbType
dbPath := filepath.Join(cfg.DataDir, dbName)
log.Infof("Loading block database from '%s'", dbPath)
db, err := database.Open(cfg.DbType, dbPath, ActiveConfig().NetParams().Net)
if err != nil {
// Return the error if it's not because the database doesn't
// exist.
if dbErr, ok := err.(database.Error); !ok || dbErr.ErrorCode !=
database.ErrDbDoesNotExist {
return nil, err
}
// Create the db if it does not exist.
err = os.MkdirAll(cfg.DataDir, 0700)
if err != nil {
return nil, err
}
db, err = database.Create(cfg.DbType, dbPath, ActiveConfig().NetParams().Net)
if err != nil {
return nil, err
}
}
log.Info("Block database loaded")
return db, nil
}
// realMain is the real main function for the utility. It is necessary to work
// around the fact that deferred functions do not run when os.Exit() is called.
func realMain() error {
// Load configuration and parse command line.
@@ -74,14 +40,6 @@ func realMain() error {
log = backendLogger.Logger("MAIN")
spawn = panics.GoroutineWrapperFunc(log)
// Load the block database.
db, err := loadBlockDB()
if err != nil {
log.Errorf("Failed to load database: %s", err)
return err
}
defer db.Close()
fi, err := os.Open(cfg.InFile)
if err != nil {
log.Errorf("Failed to open file %s: %s", cfg.InFile, err)
@@ -92,14 +50,14 @@ func realMain() error {
// Create a block importer for the database and input file and start it.
// The done channel returned from start will contain an error if
// anything went wrong.
importer, err := newBlockImporter(db, fi)
importer, err := newBlockImporter(fi)
if err != nil {
log.Errorf("Failed create block importer: %s", err)
return err
}
// Perform the import asynchronously. This allows blocks to be
// processed and read in parallel. The results channel returned from
// Import contains the statistics about the import including an error
// if something went wrong.
log.Info("Starting import")


@@ -6,28 +6,22 @@ package main
import (
"fmt"
"github.com/kaspanet/kaspad/config"
flags "github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
"os"
"path/filepath"
"strings"
flags "github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/database"
_ "github.com/kaspanet/kaspad/database/ffldb"
"github.com/kaspanet/kaspad/util"
)
const (
defaultDbType = "ffldb"
defaultDataFile = "bootstrap.dat"
defaultProgress = 10
)
var (
btcdHomeDir = util.AppDataDir("btcd", false)
defaultDataDir = filepath.Join(btcdHomeDir, "data")
knownDbTypes = database.SupportedDrivers()
kaspadHomeDir = util.AppDataDir("kaspad", false)
defaultDataDir = filepath.Join(kaspadHomeDir, "data")
activeConfig *ConfigFlags
)
@@ -36,16 +30,14 @@ func ActiveConfig() *ConfigFlags {
return activeConfig
}
// ConfigFlags defines the configuration options for findcheckpoint.
// ConfigFlags defines the configuration options for addblock.
//
// See loadConfig for details on the configuration load process.
type ConfigFlags struct {
DataDir string `short:"b" long:"datadir" description:"Location of the btcd data directory"`
DbType string `long:"dbtype" description:"Database backend to use for the Block Chain"`
InFile string `short:"i" long:"infile" description:"File containing the block(s)"`
TxIndex bool `long:"txindex" description:"Build a full hash-based transaction index which makes all transactions available via the getrawtransaction RPC"`
AddrIndex bool `long:"addrindex" description:"Build a full address-based transaction index which makes the searchrawtransactions RPC available"`
Progress int `short:"p" long:"progress" description:"Show a progress message each time this number of seconds have passed -- Use 0 to disable progress announcements"`
DataDir string `short:"b" long:"datadir" description:"Location of the kaspad data directory"`
InFile string `short:"i" long:"infile" description:"File containing the block(s)"`
Progress int `short:"p" long:"progress" description:"Show a progress message each time this number of seconds have passed -- Use 0 to disable progress announcements"`
AcceptanceIndex bool `long:"acceptanceindex" description:"Maintain a full hash-based acceptance index which makes the getChainFromBlock RPC available"`
config.NetworkFlags
}
@@ -59,23 +51,11 @@ func fileExists(name string) bool {
return true
}
// validDbType returns whether or not dbType is a supported database type.
func validDbType(dbType string) bool {
for _, knownType := range knownDbTypes {
if dbType == knownType {
return true
}
}
return false
}
// loadConfig initializes and parses the config using command line options.
func loadConfig() (*ConfigFlags, []string, error) {
// Default config.
activeConfig = &ConfigFlags{
DataDir: defaultDataDir,
DbType: defaultDbType,
InFile: defaultDataFile,
Progress: defaultProgress,
}
@@ -84,7 +64,8 @@ func loadConfig() (*ConfigFlags, []string, error) {
parser := flags.NewParser(&activeConfig, flags.Default)
remainingArgs, err := parser.Parse()
if err != nil {
if e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp {
var flagsErr *flags.Error
if ok := errors.As(err, &flagsErr); !ok || flagsErr.Type != flags.ErrHelp {
parser.WriteHelp(os.Stderr)
}
return nil, nil, err
@@ -95,18 +76,8 @@ func loadConfig() (*ConfigFlags, []string, error) {
return nil, nil, err
}
// Validate database type.
if !validDbType(activeConfig.DbType) {
str := "%s: The specified database type [%s] is invalid -- " +
"supported types %s"
err := errors.Errorf(str, "loadConfig", activeConfig.DbType, strings.Join(knownDbTypes, ", "))
fmt.Fprintln(os.Stderr, err)
parser.WriteHelp(os.Stderr)
return nil, nil, err
}
// Append the network type to the data directory so it is "namespaced"
// per network. In addition to the block database, there are other
// pieces of data that are saved to disk such as address manager state.
// All data is specific to a network, so namespacing the data directory
// means each individual piece of serialized data does not have to


@@ -6,16 +6,16 @@ package main
import (
"encoding/binary"
"github.com/kaspanet/kaspad/domain/blockdag/indexers"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/pkg/errors"
"io"
"sync"
"time"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/blockdag/indexers"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/network/domainmessage"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
)
// importResults houses the stats and result of an import operation.
@@ -28,7 +28,6 @@ type importResults struct {
// blockImporter houses information about an ongoing import from a block data
// file to the block database.
type blockImporter struct {
db database.DB
dag *blockdag.BlockDAG
r io.ReadSeeker
processQueue chan []byte
@@ -41,8 +40,8 @@ type blockImporter struct {
receivedLogBlocks int64
receivedLogTx int64
lastHeight int64
lastBlockTime time.Time
lastLogTime time.Time
lastBlockTime mstime.Time
lastLogTime mstime.Time
}
// readBlock reads the next block from the input file.
@@ -69,10 +68,10 @@ func (bi *blockImporter) readBlock() ([]byte, error) {
if err := binary.Read(bi.r, binary.LittleEndian, &blockLen); err != nil {
return nil, err
}
if blockLen > wire.MaxMessagePayload {
if blockLen > domainmessage.MaxMessagePayload {
return nil, errors.Errorf("block payload of %d bytes is larger "+
"than the max allowed %d bytes", blockLen,
wire.MaxMessagePayload)
domainmessage.MaxMessagePayload)
}
serializedBlock := make([]byte, blockLen)
@@ -83,12 +82,11 @@ func (bi *blockImporter) readBlock() ([]byte, error) {
return serializedBlock, nil
}
// processBlock potentially imports the block into the database. It first
// deserializes the raw block while checking for errors. Already known blocks
// are skipped and orphan blocks are considered errors. Finally, it runs the
// block through the DAG rules to ensure it follows all rules and matches
// up to the known checkpoint. Returns whether the block was imported along
// with any potential errors.
// processBlock potentially imports the block into the database. It first
// deserializes the raw block while checking for errors. Already known blocks
// are skipped and orphan blocks are considered errors. Finally, it runs the
// block through the DAG rules to ensure it follows all rules.
// Returns whether the block was imported along with any potential errors.
func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
// Deserialize the block which includes checks for malformed blocks.
block, err := util.NewBlockFromBytes(serializedBlock)
@@ -102,28 +100,27 @@ func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
// Skip blocks that already exist.
blockHash := block.Hash()
if bi.dag.HaveBlock(blockHash) {
if bi.dag.IsKnownBlock(blockHash) {
return false, nil
}
// Don't bother trying to process orphans.
parentHashes := block.MsgBlock().Header.ParentHashes
if len(parentHashes) > 0 {
if !bi.dag.HaveBlocks(parentHashes) {
if !bi.dag.AreKnownBlocks(parentHashes) {
return false, errors.Errorf("import file contains block "+
"%v which does not link to the available "+
"block DAG", parentHashes)
}
}
// Ensure the block follows all of the chain rules and matches up to the
// known checkpoints.
isOrphan, delay, err := bi.dag.ProcessBlock(block,
// Ensure the block follows all of the DAG rules.
isOrphan, isDelayed, err := bi.dag.ProcessBlock(block,
blockdag.BFFastAdd)
if err != nil {
return false, err
}
if delay != 0 {
if isDelayed {
return false, errors.Errorf("import file contains a block that is too far in the future")
}
if isOrphan {
@@ -168,13 +165,13 @@ out:
bi.wg.Done()
}
// logProgress logs block progress as an information message. In order to
// prevent spam, it limits logging to one message every cfg.Progress seconds
// with duration and totals included.
func (bi *blockImporter) logProgress() {
bi.receivedLogBlocks++
now := time.Now()
now := mstime.Now()
duration := now.Sub(bi.lastLogTime)
if duration < time.Second*time.Duration(cfg.Progress) {
return
@@ -202,7 +199,7 @@ func (bi *blockImporter) logProgress() {
bi.lastLogTime = now
}
// processHandler is the main handler for processing blocks. This allows block
// processing to take place in parallel with block reads from the import file.
// It must be run as a goroutine.
func (bi *blockImporter) processHandler() {
@@ -237,7 +234,7 @@ out:
}
// statusHandler waits for updates from the import operation and notifies
// the passed doneChan with the results of the import. It also causes all
// goroutines to exit if an error is reported from any of them.
func (bi *blockImporter) statusHandler(resultsChan chan *importResults) {
select {
@@ -262,18 +259,18 @@ func (bi *blockImporter) statusHandler(resultsChan chan *importResults) {
}
// Import is the core function which handles importing the blocks from the file
// associated with the block importer to the database. It returns a channel
// on which the results will be returned when the operation has completed.
func (bi *blockImporter) Import() chan *importResults {
// Start up the read and process handling goroutines. This setup allows
// blocks to be read from disk in parallel while being processed.
bi.wg.Add(2)
spawn(bi.readHandler)
spawn(bi.processHandler)
spawn("blockImporter.readHandler", bi.readHandler)
spawn("blockImporter.processHandler", bi.processHandler)
// Wait for the import to finish in a separate goroutine and signal
// the status handler when done.
spawn(func() {
spawn("blockImporter.sendToDoneChan", func() {
bi.wg.Wait()
bi.doneChan <- true
})
@@ -281,7 +278,7 @@ func (bi *blockImporter) Import() chan *importResults {
// Start the status handler and return the result channel that it will
// send the results on when the import is done.
resultChan := make(chan *importResults)
spawn(func() {
spawn("blockImporter.statusHandler", func() {
bi.statusHandler(resultChan)
})
return resultChan
@@ -289,29 +286,12 @@ func (bi *blockImporter) Import() chan *importResults {
// newBlockImporter returns a new importer for the provided file reader seeker
// and database.
func newBlockImporter(db database.DB, r io.ReadSeeker) (*blockImporter, error) {
// Create the transaction and address indexes if needed.
//
// CAUTION: the txindex needs to be first in the indexes array because
// the addrindex uses data from the txindex during catchup. If the
// addrindex is run first, it may not have the transactions from the
// current block indexed.
func newBlockImporter(r io.ReadSeeker) (*blockImporter, error) {
// Create the acceptance index if needed.
var indexes []indexers.Indexer
if cfg.TxIndex || cfg.AddrIndex {
// Enable transaction index if address index is enabled since it
// requires it.
if !cfg.TxIndex {
log.Infof("Transaction index enabled because it is " +
"required by the address index")
cfg.TxIndex = true
} else {
log.Info("Transaction index is enabled")
}
indexes = append(indexes, indexers.NewTxIndex())
}
if cfg.AddrIndex {
log.Info("Address index is enabled")
indexes = append(indexes, indexers.NewAddrIndex(ActiveConfig().NetParams()))
if cfg.AcceptanceIndex {
log.Info("Acceptance index is enabled")
indexes = append(indexes, indexers.NewAcceptanceIndex())
}
// Create an index manager if any of the optional indexes are enabled.
@@ -321,9 +301,8 @@ func newBlockImporter(db database.DB, r io.ReadSeeker) (*blockImporter, error) {
}
dag, err := blockdag.New(&blockdag.Config{
DB: db,
DAGParams: ActiveConfig().NetParams(),
TimeSource: blockdag.NewMedianTime(),
TimeSource: blockdag.NewTimeSource(),
IndexManager: indexManager,
})
if err != nil {
@@ -331,13 +310,12 @@ func newBlockImporter(db database.DB, r io.ReadSeeker) (*blockImporter, error) {
}
return &blockImporter{
db: db,
r: r,
processQueue: make(chan []byte, 2),
doneChan: make(chan bool),
errChan: make(chan error),
quit: make(chan struct{}),
dag: dag,
lastLogTime: time.Now(),
lastLogTime: mstime.Now(),
}, nil
}


@@ -1,87 +0,0 @@
package main
import (
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/btcjson"
"github.com/kaspanet/kaspad/rpcclient"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/pkg/errors"
"time"
)
const (
getSubnetworkRetryDelay = 5 * time.Second
maxGetSubnetworkRetries = 12
)
func main() {
cfg, err := parseConfig()
if err != nil {
panic(errors.Errorf("error parsing command-line arguments: %s", err))
}
privateKey, addrPubKeyHash, err := decodeKeys(cfg)
if err != nil {
panic(errors.Errorf("error decoding public key: %s", err))
}
client, err := connect(cfg)
if err != nil {
panic(errors.Errorf("could not connect to RPC server: %s", err))
}
log.Infof("Connected to server %s", cfg.RPCServer)
fundingOutpoint, fundingTx, err := findUnspentTXO(cfg, client, addrPubKeyHash)
if err != nil {
panic(errors.Errorf("error finding unspent transactions: %s", err))
}
if fundingOutpoint == nil || fundingTx == nil {
panic(errors.Errorf("could not find any unspent transactions this for key"))
}
log.Infof("Found transaction to spend: %s:%d", fundingOutpoint.TxID, fundingOutpoint.Index)
registryTx, err := buildSubnetworkRegistryTx(cfg, fundingOutpoint, fundingTx, privateKey)
if err != nil {
panic(errors.Errorf("error building subnetwork registry tx: %s", err))
}
_, err = client.SendRawTransaction(registryTx, true)
if err != nil {
panic(errors.Errorf("failed sending subnetwork registry tx: %s", err))
}
log.Infof("Successfully sent subnetwork registry transaction")
subnetworkID, err := blockdag.TxToSubnetworkID(registryTx)
if err != nil {
panic(errors.Errorf("could not build subnetwork ID: %s", err))
}
err = waitForSubnetworkToBecomeAccepted(client, subnetworkID)
if err != nil {
panic(errors.Errorf("error waiting for subnetwork to become accepted: %s", err))
}
log.Infof("Subnetwork '%s' was successfully registered.", subnetworkID)
}
func waitForSubnetworkToBecomeAccepted(client *rpcclient.Client, subnetworkID *subnetworkid.SubnetworkID) error {
retries := 0
for {
_, err := client.GetSubnetwork(subnetworkID.String())
if err != nil {
if rpcError, ok := err.(*btcjson.RPCError); ok && rpcError.Code == btcjson.ErrRPCSubnetworkNotFound {
log.Infof("Subnetwork not found")
retries++
if retries == maxGetSubnetworkRetries {
return errors.Errorf("failed to get subnetwork %d times: %s", maxGetSubnetworkRetries, err)
}
log.Infof("Waiting %d seconds...", int(getSubnetworkRetryDelay.Seconds()))
<-time.After(getSubnetworkRetryDelay)
continue
}
return errors.Errorf("failed getting subnetwork: %s", err)
}
return nil
}
}


@@ -1,71 +0,0 @@
package main
import (
"github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/config"
"github.com/pkg/errors"
)
var activeConfig *ConfigFlags
// ActiveConfig returns the active configuration struct
func ActiveConfig() *ConfigFlags {
return activeConfig
}
// ConfigFlags holds the configurations set by the command line argument
type ConfigFlags struct {
PrivateKey string `short:"k" long:"private-key" description:"Private key" required:"true"`
RPCUser string `short:"u" long:"rpcuser" description:"RPC username" required:"true"`
RPCPassword string `short:"P" long:"rpcpass" default-mask:"-" description:"RPC password" required:"true"`
RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to" required:"true"`
RPCCert string `short:"c" long:"rpccert" description:"RPC server certificate chain for validation"`
DisableTLS bool `long:"notls" description:"Disable TLS"`
GasLimit uint64 `long:"gaslimit" description:"The gas limit of the new subnetwork"`
RegistryTxFee uint64 `long:"regtxfee" description:"The fee for the subnetwork registry transaction"`
config.NetworkFlags
}
const (
defaultSubnetworkGasLimit = 1000
defaultRegistryTxFee = 3000
)
func parseConfig() (*ConfigFlags, error) {
activeConfig = &ConfigFlags{}
parser := flags.NewParser(activeConfig, flags.PrintErrors|flags.HelpFlag)
_, err := parser.Parse()
if err != nil {
return nil, err
}
if activeConfig.RPCCert == "" && !activeConfig.DisableTLS {
return nil, errors.New("--notls has to be disabled if --cert is used")
}
if activeConfig.RPCCert != "" && activeConfig.DisableTLS {
return nil, errors.New("--cert should be omitted if --notls is used")
}
err = activeConfig.ResolveNetwork(parser)
if err != nil {
return nil, err
}
// GasLimit and RegistryTxFee are uint64, so they can never be negative;
// only apply the defaults when the flags were not provided.
if activeConfig.GasLimit == 0 {
activeConfig.GasLimit = defaultSubnetworkGasLimit
}
if activeConfig.RegistryTxFee == 0 {
activeConfig.RegistryTxFee = defaultRegistryTxFee
}
return activeConfig, nil
}


@@ -1,37 +0,0 @@
package main
import (
"github.com/kaspanet/kaspad/rpcclient"
"github.com/pkg/errors"
"io/ioutil"
)
func connect(cfg *ConfigFlags) (*rpcclient.Client, error) {
var cert []byte
if !cfg.DisableTLS {
var err error
cert, err = ioutil.ReadFile(cfg.RPCCert)
if err != nil {
return nil, errors.Errorf("error reading certificates file: %s", err)
}
}
connCfg := &rpcclient.ConnConfig{
Host: cfg.RPCServer,
Endpoint: "ws",
User: cfg.RPCUser,
Pass: cfg.RPCPassword,
DisableTLS: cfg.DisableTLS,
}
if !cfg.DisableTLS {
connCfg.Certificates = cert
}
client, err := rpcclient.New(connCfg, nil)
if err != nil {
return nil, errors.Errorf("error connecting to address %s: %s", cfg.RPCServer, err)
}
return client, nil
}


@@ -1,19 +0,0 @@
package main
import (
"github.com/kaspanet/kaspad/btcec"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/base58"
)
func decodeKeys(cfg *ConfigFlags) (*btcec.PrivateKey, *util.AddressPubKeyHash, error) {
privateKeyBytes := base58.Decode(cfg.PrivateKey)
privateKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), privateKeyBytes)
serializedPublicKey := privateKey.PubKey().SerializeCompressed()
addr, err := util.NewAddressPubKeyHashFromPublicKey(serializedPublicKey, ActiveConfig().NetParams().Prefix)
if err != nil {
return nil, nil, err
}
return privateKey, addr, nil
}


@@ -1,10 +0,0 @@
package main
import (
"github.com/kaspanet/kaspad/logs"
)
var (
backendLog = logs.NewBackend()
log = backendLog.Logger("ASUB")
)


@@ -1,29 +0,0 @@
package main
import (
"github.com/kaspanet/kaspad/btcec"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
)
func buildSubnetworkRegistryTx(cfg *ConfigFlags, fundingOutpoint *wire.Outpoint, fundingTx *wire.MsgTx, privateKey *btcec.PrivateKey) (*wire.MsgTx, error) {
txIn := &wire.TxIn{
PreviousOutpoint: *fundingOutpoint,
Sequence: wire.MaxTxInSequenceNum,
}
txOut := &wire.TxOut{
ScriptPubKey: fundingTx.TxOut[fundingOutpoint.Index].ScriptPubKey,
Value: fundingTx.TxOut[fundingOutpoint.Index].Value - cfg.RegistryTxFee,
}
registryTx := wire.NewRegistryMsgTx(1, []*wire.TxIn{txIn}, []*wire.TxOut{txOut}, cfg.GasLimit)
signatureScript, err := txscript.SignatureScript(registryTx, 0, fundingTx.TxOut[fundingOutpoint.Index].ScriptPubKey,
txscript.SigHashAll, privateKey, true)
if err != nil {
return nil, errors.Errorf("failed to build signature script: %s", err)
}
txIn.SignatureScript = signatureScript
return registryTx, nil
}


@@ -1,112 +0,0 @@
package main
import (
"bytes"
"encoding/hex"
"github.com/kaspanet/kaspad/btcjson"
"github.com/kaspanet/kaspad/rpcclient"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
)
const (
resultsCount = 1000
minConfirmations = 10
)
func findUnspentTXO(cfg *ConfigFlags, client *rpcclient.Client, addrPubKeyHash *util.AddressPubKeyHash) (*wire.Outpoint, *wire.MsgTx, error) {
txs, err := collectTransactions(client, addrPubKeyHash)
if err != nil {
return nil, nil, err
}
utxos := buildUTXOs(txs)
for outpoint, tx := range utxos {
// Skip TXOs that can't pay for registration
if tx.TxOut[outpoint.Index].Value < cfg.RegistryTxFee {
continue
}
return &outpoint, tx, nil
}
return nil, nil, nil
}
func collectTransactions(client *rpcclient.Client, addrPubKeyHash *util.AddressPubKeyHash) ([]*wire.MsgTx, error) {
txs := make([]*wire.MsgTx, 0)
skip := 0
for {
results, err := client.SearchRawTransactionsVerbose(addrPubKeyHash, skip, resultsCount, true, false, nil)
if err != nil {
// Break when there are no further txs
if rpcError, ok := err.(*btcjson.RPCError); ok && rpcError.Code == btcjson.ErrRPCNoTxInfo {
break
}
return nil, err
}
for _, result := range results {
// Mempool transactions bring about unnecessary complexity, so
// simply don't bother processing them
if result.IsInMempool {
continue
}
tx, err := parseRawTransactionResult(result)
if err != nil {
return nil, errors.Errorf("failed to process SearchRawTransactionResult: %s", err)
}
if tx == nil {
continue
}
if !isTxMatured(tx, *result.Confirmations) {
continue
}
txs = append(txs, tx)
}
skip += resultsCount
}
return txs, nil
}
func parseRawTransactionResult(result *btcjson.SearchRawTransactionsResult) (*wire.MsgTx, error) {
txBytes, err := hex.DecodeString(result.Hex)
if err != nil {
return nil, errors.Errorf("failed to decode transaction bytes: %s", err)
}
var tx wire.MsgTx
reader := bytes.NewReader(txBytes)
err = tx.Deserialize(reader)
if err != nil {
return nil, errors.Errorf("failed to deserialize transaction: %s", err)
}
return &tx, nil
}
func isTxMatured(tx *wire.MsgTx, confirmations uint64) bool {
if !tx.IsCoinBase() {
return confirmations >= minConfirmations
}
return confirmations >= ActiveConfig().NetParams().BlockCoinbaseMaturity
}
func buildUTXOs(txs []*wire.MsgTx) map[wire.Outpoint]*wire.MsgTx {
utxos := make(map[wire.Outpoint]*wire.MsgTx)
for _, tx := range txs {
for i := range tx.TxOut {
outpoint := wire.NewOutpoint(tx.TxID(), uint32(i))
utxos[*outpoint] = tx
}
}
for _, tx := range txs {
for _, input := range tx.TxIn {
delete(utxos, input.PreviousOutpoint)
}
}
return utxos
}


@@ -1,75 +0,0 @@
// Copyright (c) 2013 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"fmt"
"strings"
)
// semanticAlphabet
const semanticAlphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-"
// These constants define the application version and follow the semantic
// versioning 2.0.0 spec (http://semver.org/).
const (
appMajor uint = 0
appMinor uint = 12
appPatch uint = 0
// appPreRelease MUST only contain characters from semanticAlphabet
// per the semantic versioning spec.
appPreRelease = "beta"
)
// appBuild is defined as a variable so it can be overridden during the build
// process with '-ldflags "-X main.appBuild foo"' if needed. It MUST only
// contain characters from semanticAlphabet per the semantic versioning spec.
var appBuild string
// version returns the application version as a properly formed string per the
// semantic versioning 2.0.0 spec (http://semver.org/).
func version() string {
// Start with the major, minor, and patch versions.
version := fmt.Sprintf("%d.%d.%d", appMajor, appMinor, appPatch)
// Append pre-release version if there is one. The hyphen called for
// by the semantic versioning spec is automatically appended and should
// not be contained in the pre-release string. The pre-release version
// is not appended if it contains invalid characters.
preRelease := normalizeVerString(appPreRelease)
if preRelease != "" {
version = fmt.Sprintf("%s-%s", version, preRelease)
}
// Append build metadata if there is any. The plus called for
// by the semantic versioning spec is automatically appended and should
// not be contained in the build metadata string. The build metadata
// string is not appended if it contains invalid characters.
build := normalizeVerString(appBuild)
if build != "" {
version = fmt.Sprintf("%s+%s", version, build)
}
return version
}
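// For example, with the constants above and appBuild set to "foo" at link
// time, version() returns "0.12.0-beta+foo".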
// normalizeVerString returns the passed string stripped of all characters which
// are not valid according to the semantic versioning guidelines for pre-release
// version and build metadata strings. In particular they MUST only contain
// characters in semanticAlphabet.
func normalizeVerString(str string) string {
var result bytes.Buffer
for _, r := range str {
if strings.ContainsRune(semanticAlphabet, r) {
// Ignoring the error here since it can only fail if
// the system is out of memory and there are much
// bigger issues at that point.
_, _ = result.WriteRune(r)
}
}
return result.String()
}


@@ -1,116 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"fmt"
"github.com/kaspanet/kaspad/config"
"github.com/pkg/errors"
"os"
"path/filepath"
"strings"
flags "github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/database"
_ "github.com/kaspanet/kaspad/database/ffldb"
"github.com/kaspanet/kaspad/util"
)
const (
minCandidates = 1
maxCandidates = 20
defaultNumCandidates = 5
defaultDbType = "ffldb"
)
var (
btcdHomeDir = util.AppDataDir("btcd", false)
defaultDataDir = filepath.Join(btcdHomeDir, "data")
knownDbTypes = database.SupportedDrivers()
activeConfig *ConfigFlags
)
// ActiveConfig returns the active configuration struct
func ActiveConfig() *ConfigFlags {
return activeConfig
}
// ConfigFlags defines the configuration options for findcheckpoint.
//
// See loadConfig for details on the configuration load process.
type ConfigFlags struct {
DataDir string `short:"b" long:"datadir" description:"Location of the btcd data directory"`
DbType string `long:"dbtype" description:"Database backend to use for the Block Chain"`
NumCandidates int `short:"n" long:"numcandidates" description:"Max num of checkpoint candidates to show {1-20}"`
UseGoOutput bool `short:"g" long:"gooutput" description:"Display the candidates using Go syntax that is ready to insert into the btcchain checkpoint list"`
config.NetworkFlags
}
// validDbType returns whether or not dbType is a supported database type.
func validDbType(dbType string) bool {
for _, knownType := range knownDbTypes {
if dbType == knownType {
return true
}
}
return false
}
// loadConfig initializes and parses the config using command line options.
func loadConfig() (*ConfigFlags, []string, error) {
// Default config.
activeConfig = &ConfigFlags{
DataDir: defaultDataDir,
DbType: defaultDbType,
NumCandidates: defaultNumCandidates,
}
// Parse command line options.
parser := flags.NewParser(&activeConfig, flags.Default)
remainingArgs, err := parser.Parse()
if err != nil {
if e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp {
parser.WriteHelp(os.Stderr)
}
return nil, nil, err
}
funcName := "loadConfig"
err = activeConfig.ResolveNetwork(parser)
if err != nil {
return nil, nil, err
}
// Validate database type.
if !validDbType(activeConfig.DbType) {
str := "%s: The specified database type [%s] is invalid -- " +
"supported types %s"
err := errors.Errorf(str, funcName, activeConfig.DbType, strings.Join(knownDbTypes, ", "))
fmt.Fprintln(os.Stderr, err)
parser.WriteHelp(os.Stderr)
return nil, nil, err
}
// Append the network type to the data directory so it is "namespaced"
// per network. In addition to the block database, there are other
// pieces of data that are saved to disk such as address manager state.
// All data is specific to a network, so namespacing the data directory
// means each individual piece of serialized data does not have to
// worry about changing names per network and such.
activeConfig.DataDir = filepath.Join(activeConfig.DataDir, activeConfig.NetParams().Name)
// Validate the number of candidates.
if activeConfig.NumCandidates < minCandidates || activeConfig.NumCandidates > maxCandidates {
str := "%s: The specified number of candidates is out of " +
"range -- parsed [%d]"
err = errors.Errorf(str, funcName, activeConfig.NumCandidates)
fmt.Fprintln(os.Stderr, err)
parser.WriteHelp(os.Stderr)
return nil, nil, err
}
return activeConfig, remainingArgs, nil
}


@@ -1,186 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"fmt"
"github.com/pkg/errors"
"os"
"path/filepath"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/daghash"
)
const blockDbNamePrefix = "blocks"
var (
cfg *ConfigFlags
)
// loadBlockDB opens the block database and returns a handle to it.
func loadBlockDB() (database.DB, error) {
// The database name is based on the database type.
dbName := blockDbNamePrefix + "_" + cfg.DbType
dbPath := filepath.Join(cfg.DataDir, dbName)
fmt.Printf("Loading block database from '%s'\n", dbPath)
db, err := database.Open(cfg.DbType, dbPath, ActiveConfig().NetParams().Net)
if err != nil {
return nil, err
}
return db, nil
}
// findCandidates searches the DAG backwards for checkpoint candidates and
// returns a slice of found candidates, if any. It also stops searching for
// candidates at the last checkpoint that is already hard coded since there
// is no point in finding candidates before already existing checkpoints.
func findCandidates(dag *blockdag.BlockDAG, highestTipHash *daghash.Hash) ([]*dagconfig.Checkpoint, error) {
// Start with the selected tip.
block, err := dag.BlockByHash(highestTipHash)
if err != nil {
return nil, err
}
// Get the latest known checkpoint.
latestCheckpoint := dag.LatestCheckpoint()
if latestCheckpoint == nil {
// Set the latest checkpoint to the genesis block if there isn't
// already one.
latestCheckpoint = &dagconfig.Checkpoint{
Hash: ActiveConfig().NetParams().GenesisHash,
ChainHeight: 0,
}
}
// The latest known block must be at least the last known checkpoint
// plus required checkpoint confirmations.
checkpointConfirmations := uint64(blockdag.CheckpointConfirmations)
requiredChainHeight := latestCheckpoint.ChainHeight + checkpointConfirmations
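// For example (hypothetical numbers): a latest checkpoint at chain height
// 1000 with CheckpointConfirmations of 2016 requires the DAG to reach chain
// height 3016 before any candidate is considered.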
if block.ChainHeight() < requiredChainHeight {
return nil, errors.Errorf("the block database is only at chain "+
"height %d which is less than the latest checkpoint chain height "+
"of %d plus required confirmations of %d",
block.ChainHeight(), latestCheckpoint.ChainHeight,
checkpointConfirmations)
}
// For the first checkpoint, the required height is any block after the
// genesis block, so long as the DAG has at least the required number
// of confirmations (which is enforced above).
if len(ActiveConfig().NetParams().Checkpoints) == 0 {
requiredChainHeight = 1
}
// Indeterminate progress setup.
numBlocksToTest := block.ChainHeight() - requiredChainHeight
progressInterval := (numBlocksToTest / 100) + 1 // min 1
fmt.Print("Searching for candidates")
defer fmt.Println()
// Loop backwards through the DAG to find checkpoint candidates.
candidates := make([]*dagconfig.Checkpoint, 0, cfg.NumCandidates)
numTested := uint64(0)
for len(candidates) < cfg.NumCandidates && block.ChainHeight() > requiredChainHeight {
// Display progress.
if numTested%progressInterval == 0 {
fmt.Print(".")
}
// Determine if this block is a checkpoint candidate.
isCandidate, err := dag.IsCheckpointCandidate(block)
if err != nil {
return nil, err
}
// All checks passed, so this block seems like a reasonable
// checkpoint candidate.
if isCandidate {
checkpoint := dagconfig.Checkpoint{
ChainHeight: block.ChainHeight(),
Hash: block.Hash(),
}
candidates = append(candidates, &checkpoint)
}
parentHashes := block.MsgBlock().Header.ParentHashes
selectedBlockHash := parentHashes[0]
block, err = dag.BlockByHash(selectedBlockHash)
if err != nil {
return nil, err
}
numTested++
}
return candidates, nil
}
// showCandidate displays a checkpoint candidate using an output format
// determined by the configuration parameters. The Go syntax output
// uses the format the btcchain code expects for checkpoints added to the list.
func showCandidate(candidateNum int, checkpoint *dagconfig.Checkpoint) {
if cfg.UseGoOutput {
fmt.Printf("Candidate %d -- {%d, newShaHashFromStr(\"%s\")},\n",
candidateNum, checkpoint.ChainHeight, checkpoint.Hash)
return
}
fmt.Printf("Candidate %d -- ChainHeight: %d, Hash: %s\n", candidateNum,
checkpoint.ChainHeight, checkpoint.Hash)
}
func main() {
// Load configuration and parse command line.
tcfg, _, err := loadConfig()
if err != nil {
return
}
cfg = tcfg
// Load the block database.
db, err := loadBlockDB()
if err != nil {
fmt.Fprintln(os.Stderr, "failed to load database:", err)
return
}
defer db.Close()
// Set up the DAG. Ignore notifications since they aren't needed for this
// utility.
dag, err := blockdag.New(&blockdag.Config{
DB: db,
DAGParams: ActiveConfig().NetParams(),
TimeSource: blockdag.NewMedianTime(),
})
if err != nil {
fmt.Fprintf(os.Stderr, "failed to initialize chain: %s\n", err)
return
}
// Get the latest block hash and height from the database and report
// status.
fmt.Printf("Block database loaded with block chain height %d\n", dag.ChainHeight())
// Find checkpoint candidates.
selectedTipHash := dag.SelectedTipHash()
candidates, err := findCandidates(dag, selectedTipHash)
if err != nil {
fmt.Fprintln(os.Stderr, "Unable to identify candidates:", err)
return
}
// No candidates.
if len(candidates) == 0 {
fmt.Println("No candidates found.")
return
}
// Show the candidates.
for i, checkpoint := range candidates {
showCandidate(i+1, checkpoint)
}
}
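The loop in findCandidates above always descends through parentHashes[0], i.e. the selected parent, so it traverses a single chain from the tip back toward genesis rather than the whole DAG. A stripped-down sketch of that walk, using a toy block type in place of the real blockdag API (all names here are hypothetical):

package main

import "fmt"

// toyBlock stands in for util.Block; it carries only what the walk needs.
type toyBlock struct {
	hash         string
	chainHeight  uint64
	parentHashes []string // parentHashes[0] is the selected parent
}

func main() {
	blocks := map[string]*toyBlock{
		"genesis": {hash: "genesis", chainHeight: 0},
		"a":       {hash: "a", chainHeight: 1, parentHashes: []string{"genesis"}},
		"b":       {hash: "b", chainHeight: 2, parentHashes: []string{"a"}},
		"tip":     {hash: "tip", chainHeight: 3, parentHashes: []string{"b", "a"}},
	}
	// Walk backwards along the selected-parent chain, as findCandidates
	// does; the walk ends when a block (genesis) has no parents.
	for block := blocks["tip"]; len(block.parentHashes) > 0; block = blocks[block.parentHashes[0]] {
		fmt.Printf("visited %s at chain height %d\n", block.hash, block.chainHeight)
	}
}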


@@ -1,35 +0,0 @@
package main
import (
"encoding/hex"
"fmt"
"os"
"github.com/kaspanet/kaspad/btcec"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/base58"
)
func main() {
activeNetParams := &dagconfig.DevNetParams
privateKey, err := btcec.NewPrivateKey(btcec.S256())
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to generate private key: %s", err)
os.Exit(1)
}
fmt.Printf("\nPrivate key (base-58): %s\n", base58.Encode(privateKey.Serialize()))
wif, err := util.NewWIF(privateKey, activeNetParams.PrivateKeyID, true)
if err != nil {
panic(fmt.Sprintf("error generating wif: %s", err))
}
fmt.Printf("\nPrivate key wif: %s\n", wif)
addr, err := util.NewAddressPubKeyHashFromPublicKey(privateKey.PubKey().SerializeCompressed(), activeNetParams.Prefix)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to generate p2pkh address: %s", err)
os.Exit(1)
}
fmt.Printf("Address: %s\n", addr)
hash160 := addr.Hash160()[:]
fmt.Printf("Hash160 of address (hex): %s\n\n", hex.EncodeToString(hash160))
}
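Since the tool prints the key in two encodings, a quick sanity check is to round-trip the raw serialization and confirm it parses back to the same key. A sketch of that check, assuming kaspad's btcec fork keeps the PrivKeyFromBytes signature from upstream btcd:

package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/kaspanet/kaspad/btcec"
)

func main() {
	privateKey, err := btcec.NewPrivateKey(btcec.S256())
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to generate private key: %s", err)
		os.Exit(1)
	}
	// Serialize the key and parse it back; both serializations must match.
	// PrivKeyFromBytes is assumed here from upstream btcd's btcec.
	reparsed, _ := btcec.PrivKeyFromBytes(btcec.S256(), privateKey.Serialize())
	if !bytes.Equal(reparsed.Serialize(), privateKey.Serialize()) {
		fmt.Fprintln(os.Stderr, "round-trip mismatch")
		os.Exit(1)
	}
	fmt.Println("round-trip OK")
}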


@@ -6,6 +6,7 @@ package main
import (
"fmt"
"github.com/pkg/errors"
"io/ioutil"
"os"
"path/filepath"
@@ -32,7 +33,8 @@ func main() {
parser := flags.NewParser(&cfg, flags.Default)
_, err := parser.Parse()
if err != nil {
if e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp {
var flagsErr *flags.Error
if ok := errors.As(err, &flagsErr); !ok || flagsErr.Type != flags.ErrHelp {
parser.WriteHelp(os.Stderr)
}
return

Some files were not shown because too many files have changed in this diff.