Compare commits

...

638 Commits

Author SHA1 Message Date
Ori Newman
53706c2f9f bignet debug 2020-09-02 15:43:00 +03:00
Ori Newman
917fa11706 bignet debug 2020-09-02 14:53:38 +03:00
Ori Newman
a32a9011c7 [NOD-1305] Close client connection on disconnect (#909) 2020-08-31 15:57:11 +03:00
stasatdaglabs
5da957f16e Update to version 0.6.8 2020-08-30 11:31:43 +03:00
Ori Newman
505d264603 [NOD-1322] Fix compilation on windows (#905) 2020-08-27 18:04:54 +03:00
Ori Newman
883361fea3 [NOD-1323] Always save new block reachability data (#906) 2020-08-27 18:03:50 +03:00
stasatdaglabs
13a6872a45 Update to version 0.6.7 2020-08-26 12:13:43 +03:00
Elichai Turkel
c82a951a24 [NOD-1316] Refactor TestGHOSTDAG to enable arbitrary DAGs (#899)
* Add VirtualBlueHashes to BlockDAG

* Refactor TestGHOSTDAG to read DAGs from json files

* Added a new DAG for the ghostdag test suite

* Pass BehaviorFlags to delayed blocks
2020-08-25 14:00:43 +03:00
Ori Newman
bbb9dfa4cd [NOD-1318] Check if relay block is known before requesting it (#900) 2020-08-25 09:18:03 +03:00
stasatdaglabs
86d51fa1cb [NOD-1307] Fix duplicate connections (#897)
* [NOD-1307] Lock peersMutex in methods that don't.

* [NOD-1307] Fix duplicate connections.

* [NOD-1307] Use RLock instead of Lock.

* [NOD-1307] Simplify IsEqual.
2020-08-24 16:11:32 +03:00
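
The [NOD-1307] commit above switches read-only peer-set accesses from Lock to RLock. A minimal sketch of that locking pattern, assuming a hypothetical peerSet type rather than kaspad's actual structures:

```go
package connmanager

import "sync"

// peerSet is an illustrative stand-in for the data guarded by peersMutex.
type peerSet struct {
	peersMutex sync.RWMutex
	peers      map[string]struct{}
}

// has only reads the map, so a read lock suffices and concurrent readers
// do not block one another.
func (ps *peerSet) has(address string) bool {
	ps.peersMutex.RLock()
	defer ps.peersMutex.RUnlock()

	_, ok := ps.peers[address]
	return ok
}

// add mutates the map, so it must take the exclusive write lock.
func (ps *peerSet) add(address string) {
	ps.peersMutex.Lock()
	defer ps.peersMutex.Unlock()

	ps.peers[address] = struct{}{}
}
```
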
Ori Newman
8dd7b95423 [NOD-1308] Don't call wg.done() on handshake if flow failed (#896) 2020-08-24 14:03:58 +03:00
stasatdaglabs
b668d98942 [NOD-1095] Fix data races in gRPCConnection.stream. (#895) 2020-08-24 12:56:19 +03:00
stasatdaglabs
e9602cc777 [NOD-1304] Fix nil dereference originating in HandleHandshake. (#894) 2020-08-24 11:45:33 +03:00
stasatdaglabs
5fd164bf66 [NOD-1095] RLock the dagLock in SelectedTipHeader. (#893) 2020-08-24 11:31:12 +03:00
Ori Newman
83e7c9e8e4 [NOD-1303] Fix concurrent access to UTXO set from RPC (#892) 2020-08-23 18:54:03 +03:00
Ori Newman
a6b8eea369 [NOD-1301] Add MsgReject to protowire mapping (#891) 2020-08-23 18:29:41 +03:00
stasatdaglabs
15b545ee2b [NOD-592] Remove TODOs and XXXs from the codebase (#890)
* [NOD-592] Remove TODOs related to fake nonces.

* [NOD-592] Remove irrelevant TODOs from handleRescanBlocks and parseTxAcceptedVerboseNtfnParams.

* [NOD-592] Fix TODO in handleGetTxOut.

* [NOD-592] Remove irrelevant TODO from updateAddress.

* [NOD-592] Move StandardVerifyFlags to a separate file.

* [NOD-592] Remove TODOs in sign.go.

* [NOD-592] Remove TODO in scriptval_test.go.

* [NOD-592] Remove TODO in reachabilitystore.go.

* [NOD-592] Remove XXXs.

* [NOD-592] Fix a comment.

* [NOD-557] Move AddAddressByIP out of AddressManager since it's used only for tests.

* [NOD-557] Remove rescan blocks.

* [NOD-592] Fix handleGetTxOut.
2020-08-23 17:17:06 +03:00
stasatdaglabs
667b2d46e9 [NOD-557] Remove RegTest (#889)
* [NOD-557] Remove regTest network.

* [NOD-557] Remove remaining references to regTest.

* [NOD-557] Move newHashFromStr from params.go to params_test.go.

* [NOD-557] Rename test to network in register_test.go.

* [NOD-557] Replaced removed tests in TestDecodeAddressErrorConditions.
2020-08-23 15:38:27 +03:00
stasatdaglabs
53ab906ea8 [NOD-1279] Handle ruleErrors properly in processIBDBlock. (#887) 2020-08-23 13:42:21 +03:00
stasatdaglabs
5d20772f94 [NOD-1293] Fix kaspad sending 127.0.0.1 in its msgVersion (#886)
* [NOD-1293] Use addressManager's GetBestLocalAddress.

* [NOD-1293] Copy the initListeners function from the old p2p to the address manager.

* [NOD-1293] Remove debug logs.

* [NOD-1293] Remove unused import.

* [NOD-1293] Fix a comment.
2020-08-23 13:11:48 +03:00
stasatdaglabs
d4728bd9b6 Update to version 0.6.6 2020-08-23 11:22:13 +03:00
Ori Newman
4dbd64478c [NOD-1294] In TestTxRelay return after tx is found in the mempool (#885) 2020-08-20 19:05:53 +03:00
stasatdaglabs
7756baf9a9 [NOD-1290] Add blocklogger.LogBlock to IBD. (#884) 2020-08-20 12:29:11 +03:00
Ori Newman
c331293a2e [NOD-1289] Check if connection exists before establishing another one with the same address (#883) 2020-08-20 11:50:29 +03:00
Ori Newman
fcae491e6d [NOD-1286] Close router from netConnection.Disconnect (#881)
* [NOD-1286] Close router from netConnection.Disconnect

* [NOD-1286] Close router in grpc errors as well

* [NOD-1286] Fix typo

* [NOD-1286] Rename isConnected->isRouterClosed
2020-08-19 17:28:01 +03:00
stasatdaglabs
5a4cafe342 Update to version 0.6.5 2020-08-19 15:00:12 +03:00
Ori Newman
8dae378bd9 [NOD-1285] Fix deadlock on connection manager (#880) 2020-08-19 13:24:20 +03:00
stasatdaglabs
8dd409dc1c [NOD-1223] Rename executables package back to cmd. (#879) 2020-08-19 11:45:11 +03:00
Ori Newman
74110a2e49 [NOD-1282] Remove peer after disconnect (#878) 2020-08-19 11:10:10 +03:00
Ori Newman
ce876a7c44 Merge remote-tracking branch 'origin/v0.6.3-dev' into v0.6.4-dev 2020-08-18 19:03:52 +03:00
stasatdaglabs
d14809694f [NOD-1223] Reorganize directory structure (#874)
* [NOD-1223] Delete unused files/packages.

* [NOD-1223] Move signal and limits to the os package.

* [NOD-1223] Put database and dbaccess into the db package.

* [NOD-1223] Fold the logs package into the logger package.

* [NOD-1223] Rename domainmessage to appmessage.

* [NOD-1223] Rename to/from DomainMessage to AppMessage.

* [NOD-1223] Move appmessage to the app package.

* [NOD-1223] Move protocol to the app package.

* [NOD-1223] Move the network package to the infrastructure package.

* [NOD-1223] Rename cmd to executables.

* [NOD-1223] Fix go.doc in the logger package.
2020-08-18 10:26:39 +03:00
stasatdaglabs
450ff81f86 [NOD-1275] Fix onNewBlock not being called from RPC submitBlock (#873)
* [NOD-1275] Fix onNewBlock not being called from from RPC submitBlock.

* [NOD-1275] Rename tx to txID.
2020-08-17 15:24:00 +03:00
Ori Newman
1f04f30ea7 [NOD-1273] Order parents in PrepareBlockForTest (#872) 2020-08-17 14:26:56 +03:00
Ori Newman
3e4e8d8b6b Merge remote-tracking branch 'origin/v0.6.2-dev' into v0.6.3-dev 2020-08-16 18:13:46 +03:00
Ori Newman
31c0399484 Update to version 0.6.4 2020-08-16 17:55:49 +03:00
Ori Newman
8cac582f6d Update to version 0.6.4 2020-08-16 17:30:09 +03:00
Ori Newman
f2a3ccd9ab [NOD-1271] Move version package to the top level (#871)
* [NOD-1271] Move version package to the top level

* [NOD-1271] Fix imports
2020-08-16 17:16:11 +03:00
Ori Newman
31b5cd8d28 Fix merge errors from v0.6.2-rc2 to v0.6.3-dev 2020-08-16 15:35:47 +03:00
Ori Newman
96bd1fa99b [NOD-1262] Add network name to MinimalNetAdapter handshake (#867) 2020-08-16 15:30:04 +03:00
Svarog
48d498e820 [NOD-1259] Do not panic on non-protocol errors from RPC (#863)
* [NOD-1259] All rule-errors should be protocol-errors

* [NOD-1259] Handle submitting of coinbase transactions properly

* Revert "[NOD-1259] All rule-errors should be protocol-errors"

This reverts commit 2fd30c1856.

* [NOD-1259] Don't panic on non-protocol errors in ProtocolManager.AddTransaction/AddBlock

* [NOD-1259] Implement subnetworkid.IsBuiltInOrNative and use where appropriate
2020-08-16 15:29:23 +03:00
Ori Newman
32c5cfeaf5 [NOD-1204] Add timestamp and message number to domain messages (#854) 2020-08-16 15:26:02 +03:00
stasatdaglabs
d55f4e8164 [NOD-1220] Add network string field to Version message (#852)
* [NOD-1220] Add network name to the version message.

* [NOD-1220] Ban peers from the wrong network.

* [NOD-1220] Add the network parameter to protowire.

* [NOD-1220] Add "kaspa-" to network names.
2020-08-16 15:25:25 +03:00
stasatdaglabs
1927e81202 [NOD-1129] Fix NewBlockTemplate creating incestuous blocks (#870)
* [NOD-1129] Implement TestIncestousNewBlockTemplate.

* [NOD-1129] Add some debug logs to TestIncestousNewBlockTemplate.

* [NOD-1129] Fix merge errors.

* [NOD-1129] Narrow down on the failure.

* [NOD-1129] Fix bad initial value for child.interval in reachabilityTreeNode.addChild.

* [NOD-1129] Rewrite the test to be specific to reachability.
2020-08-16 13:14:44 +03:00
stasatdaglabs
8a4ece1101 [NOD-1223] Reorganize project (#868)
* [NOD-1223] Move all network stuff into a new network package.

* [NOD-1223] Delete the unused package testutil.

* [NOD-1223] Move infrastructure stuff into a new infrastructure package.

* [NOD-1223] Move domain stuff into a new domain package.
2020-08-13 17:27:25 +03:00
Elichai Turkel
0bf1052abf [NOD-1101] Hash data without serializing into a buffer first (#779)
* Add Hash Writers

* Add the hash writers to the tests

* Add the DoubleHash Writer to the benchmarks

* Remove buffers from hashing by using the Hash Writer

* Replace empty slice with nil in mempool test payload
2020-08-13 15:40:54 +03:00
Ori Newman
2af03c1ccf [NOD-1207] Send reject messages (#855)
* [NOD-1207] Send reject messages

* [NOD-1207] Empty outgoing route before disconnecting

* [NOD-1207] Renumber fields in RejectMessage

* [NOD-1207] Use more accurate log messages

* [NOD-1207] Call registerRejectsFlow

* [NOD-1207] Panic if outgoingRoute.Enqueue returns unexpected error

* [NOD-1207] Fix comment and rename variables

* [NOD-1207] Fix comment

* [NOD-1207] add baseMessage to MsgReject

* [NOD-1207] Fix comments and add block hash to error if it's rejected
2020-08-13 15:32:41 +03:00
Ori Newman
a2aa58c8a4 [NOD-1201] Panic if callbacks are not set (#856)
* [NOD-1201] Panic if necessary callback are not set in gRPCConnection and gRPCServer

* [NOD-1201] Fix comment and change return order

* [NOD-1201] Return nil instead of error on gRPCServer.Start

* [NOD-1201] Fix typo
2020-08-13 15:21:52 +03:00
oudeis
7e74fc0b2b [NOD-1248] netadapter unit test (#865)
* [NOD-1246/NOD-1248] Add unit test for NetAdapter

* [NOD-1246/NOD-1248] Do not ignore OK

* [NOD-1248] Lint code

- Move `t *testing.T` to be first parameter in test-helper function
- Rename `getRouterInitializer` to `routerInitializerForTest`
- Make test data constants

Co-authored-by: Yaroslav Reshetnyk <yaroslav.r@it-dimension.com>
2020-08-13 15:07:20 +03:00
stasatdaglabs
0653e59e16 [NOD-1190] Refactor process.go (#858)
* [NOD-1190] Move non-processBlock stuff out of process.go.

* [NOD-1190] Move everything out of accept.go.

* [NOD-1190] Move all processBlock functions to process.go.

* [NOD-1190] Move orphan stuff to orphan.go.

* [NOD-1190] Remove thresholdstate stuff.

* [NOD-1190] Move isSynced to sync_rate.go.

* [NOD-1190] Move delayed block stuff to delayed_blocks.go.

* [NOD-1190] Rename orphans.go to orphaned_blocks.go.

* [NOD-1190] Move non-BlockDAG structs out of dag.go.

* [NOD-1190] Remove unused fields.

* [NOD-1190] Fixup BlockDAG.New a bit.

* [NOD-1190] Move sequence lock stuff to sequence_lock.go

* [NOD-1190] Move some multiset stuff out of dag.go.

* [NOD-1190] Move finality stuff out of dag.go.

* [NOD-1190] Move blocklocator stuff out of dag.go.

* [NOD-1190] Move confirmation stuff out of dag.go.

* [NOD-1190] Move utxo and selected parent chain stuff out of dag.go.

* [NOD-1190] Move BlockDAG lock functions to the beginning of dag.go.

* [NOD-1190] Move verifyAndBuildUTXO out of process.go.

* [NOD-1190] Extract handleProcessBlockError to a function.

* [NOD-1190] Remove daglock unlock in notifyBlockAccepted.

* [NOD-1190] Extract checkDuplicateBlock to a method.

* [NOD-1190] Fix merge errors.

* [NOD-1190] Remove unused parameter from CalcSequenceLock.

* [NOD-1190] Extract processBlock contents into functions.

* [NOD-1190] Fix parent delayed blocks not marking their children as delayed

* [NOD-1190] Fix TestProcessDelayedBlocks.

* [NOD-1190] Extract stuff in maybeAcceptBlock to separate functions.

* [NOD-1190] Rename handleProcessBlockError to handleConnectBlockError.

* [NOD-1190] Remove some comments.

* [NOD-1190] Use lowercase in error messages.

* [NOD-1190] Rename createNewBlockNode to createBlockNodeFromBlock.

* [NOD-1190] Rename orphaned_blocks.go to orpan_blocks.go.

* [NOD-1190] Extract validateUTXOCommitment to a separate function.

* [NOD-1190] Fix a bug in validateUTXOCommitment.

* [NOD-1190] Rename checkBlockTxsFinalized to checkBlockTransactionsFinalized.

* [NOD-1190] Add a comment over createBlockNodeFromBlock.

* [NOD-1190] Fold validateAllTxsFinalized into checkBlockTransactionsFinalized.

* [NOD-1190] Return parents from checkBlockParents.

* [NOD-1190] Remove the processBlock prefix from the functions that had it.

* [NOD-1190] Begin extracting functions out of checkTransactionSanity.

* [NOD-1190] Finish extracting functions out of checkTransactionSanity.

* [NOD-1190] Remove an unused parameter.

* [NOD-1190] Fix merge errors.

* [NOD-1190] Added an explanation as to why we change the nonce in TestProcessDelayedBlocks.

* [NOD-1190] Fix a comment.

* [NOD-1190] Fix a comment.

* [NOD-1190] Fix a typo.

* [NOD-1190] Replace checkBlockParents with handleLookupParentNodesError.
2020-08-13 13:33:43 +03:00
oudeis
32463ce906 [NOD-1247] Add check for routerInitializer presence (#864)
Co-authored-by: Yaroslav Reshetnyk <yaroslav.r@it-dimension.com>
2020-08-13 12:04:43 +03:00
stasatdaglabs
23a3594c18 [NOD-1233] Go over all TODO(libp2p)s and either fix them or create tickets for them (#860)
* [NOD-1233] Remove HandleNewBlockOld.

* [NOD-1233] Make ErrRouteClosed not a protocol error.

* [NOD-1233] Fix ambiguous comments.

* [NOD-1233] Remove a no-longer-relevant comment.

* [NOD-1233] Remove some of the TODOs.

* [NOD-1233] Replace fakeSourceAddress with a real sourceAddress.

* [NOD-1233] Remove a no-longer-relevant TODO.

* [NOD-1233] Remove TODO from handleGetNetTotals.

* [NOD-1233] Remove a no-longer-relevant TODO.

* [NOD-1233] Disconnect if connected to wrong partial/full type.

* [NOD-1233] Get rid of mempool tags.

* [NOD-1233] Remove TODOs.

* [NOD-1233] Simplify a test.

* [NOD-1190] Remove getNetTotals.
2020-08-13 09:41:02 +03:00
Ori Newman
ffe153efa7 [NOD-1262] Add network name to MinimalNetAdapter handshake (#867) 2020-08-12 17:55:58 +03:00
Ori Newman
ca3172dad0 [NOD-1239] Delete app.WaitForShutdown() (#866) 2020-08-12 16:38:52 +03:00
Ori Newman
22dc3f998f [NOD-1256] Optimize PrepareBlockForTest (#861)
* [NOD-1256] Optimize PrepareBlockForTest

* [NOD-1256] Remove redundant comment
2020-08-12 16:25:42 +03:00
Svarog
91f4ed9825 [NOD-1259] Do not panic on non-protocol errors from RPC (#863)
* [NOD-1259] All rule-errors should be protocol-errors

* [NOD-1259] Handle submitting of coinbase transactions properly

* Revert "[NOD-1259] All rule-errors should be protocol-errors"

This reverts commit 2fd30c1856.

* [NOD-1259] Don't panic on non-protocol errors in ProtocolManager.AddTransaction/AddBlock

* [NOD-1259] Implement subnetworkid.IsBuiltInOrNative and use where appropriate
2020-08-12 12:29:58 +03:00
Ori Newman
aa9556aa59 [NOD-1257] Disable difficulty adjustment on simnet (#862)
* [NOD-1257] Disable difficulty adjustment on simnet

* [NOD-1257] Explicitly set DisableDifficultyAdjustment everywhere
2020-08-12 12:24:37 +03:00
stasatdaglabs
91f0fe5740 [NOD-1238] Fix acceptance index never being initialized. (#859) 2020-08-11 16:53:34 +03:00
Ori Newman
b0fecc9f87 [NOD-1204] Add timestamp and message number to domain messages (#854) 2020-08-10 12:55:24 +03:00
stasatdaglabs
53cccd405f [NOD-1220] Add network string field to Version message (#852)
* [NOD-1220] Add network name to the version message.

* [NOD-1220] Ban peers from the wrong network.

* [NOD-1220] Add the network parameter to protowire.

* [NOD-1220] Add "kaspa-" to network names.
2020-08-09 18:11:13 +03:00
stasatdaglabs
5b84184921 [NOD-1221] Explicitly add a maximum message size in gRPC (#851)
* [NOD-1221] Explicitly add a maximum message size in gRPC.

* [NOD-1221] Limit sent message size and print a debug log on start.
2020-08-09 17:56:26 +03:00
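
The [NOD-1221] commit above sets the gRPC message-size limits explicitly on both send and receive. A hedged sketch of how such limits are typically configured with google.golang.org/grpc; the constant name and value are illustrative, not kaspad's actual numbers:

```go
package grpcserver

import "google.golang.org/grpc"

// maxMessageSize is an illustrative limit, not the value the commit chose.
const maxMessageSize = 32 * 1024 * 1024 // 32 MiB

// newServer creates a gRPC server with explicit receive and send limits
// instead of relying on the library defaults.
func newServer() *grpc.Server {
	return grpc.NewServer(
		grpc.MaxRecvMsgSize(maxMessageSize),
		grpc.MaxSendMsgSize(maxMessageSize),
	)
}

// dial mirrors the same limits on the client side via default call options.
func dial(address string) (*grpc.ClientConn, error) {
	return grpc.Dial(address,
		grpc.WithInsecure(),
		grpc.WithDefaultCallOptions(
			grpc.MaxCallRecvMsgSize(maxMessageSize),
			grpc.MaxCallSendMsgSize(maxMessageSize),
		),
	)
}
```
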
Yuval Shaul
af1df425a2 update to version v0.6.2 2020-08-09 15:16:21 +03:00
Ori Newman
8e170cf327 [NOD-1225] Rename wire to domainmessage and get rid of InvType (#853)
* [NOD-1225] Rename wire to domainmessage

* [NOD-1225] Get rid of references to package wire in the code, and get rid of InvType
2020-08-09 12:39:15 +03:00
stasatdaglabs
b55cfee8c8 [NOD-1229] Fix node crashing if AntiPastHashesBetween lowHash or highHash are not found in the DAG (#849)
* [NOD-1229] Fix node crashing if AntiPastHashesBetween lowHash or highHash are not found in the DAG

* [NOD-1229] Rename InvalidParameterError to ErrInvalidParameter.

* [NOD-1229] Lowercasify errors.
2020-08-09 09:36:29 +03:00
stasatdaglabs
420c3d4258 [NOD-1222] Turn on gzip in gRPC. (#850) 2020-08-06 17:28:40 +03:00
Mike Zak
b92943a98c Update to version v0.6.1 2020-08-06 15:16:05 +03:00
Mike Zak
e1318aa326 [NOD-1208] Labels should be lower case 2020-08-06 11:34:22 +03:00
Mike Zak
2bd4a71913 [NOD-1208] Continue the correct loop 2020-08-06 11:11:44 +03:00
Mike Zak
5b206f4c9d [NOD-1208] Use lower case for errors + omit hard-coded numbers 2020-08-06 10:17:20 +03:00
Mike Zak
3f969a2921 [NOD-1208] Add comment to handlePingPong to explain it's one-sided. 2020-08-06 10:10:31 +03:00
Mike Zak
90be14fd57 [NOD-1208] Added comment explaining why Version.Address is optional 2020-08-06 10:00:25 +03:00
Mike Zak
1a5d9fc65c [NOD-1208] Renamed NetAdapterMock to standalone.MinimalNetAdapter 2020-08-05 15:59:23 +03:00
Mike Zak
ec03a094e5 [NOD-1208] Use ID netAdapter generates in netAdapterMock handshake 2020-08-05 15:50:46 +03:00
Mike Zak
9d60bb1ee7 [NOD-1208] Use netConnection.Disconnect in Routes.Disconnect 2020-08-05 15:46:32 +03:00
Mike Zak
cd10de2dce [NOD-1208] Add NetAdapterMock 2020-08-05 10:40:33 +03:00
Mike Zak
658fb08c02 [NOD-1208] Do not ban if DisableBanning was turned on 2020-08-05 10:40:33 +03:00
Mike Zak
3b40488877 [NOD-1208] Allow sending Version message without an address 2020-08-05 10:40:33 +03:00
Svarog
d3d0ad0cf3 [NOD-1226] Properly handle ErrIsClosed during handshake (#843)
* [NOD-1226] Consider ErrIsClosed as protocol error during handshake

* Revert "[NOD-1226] Consider ErrIsClosed as protocol error during handshake"

This reverts commit 74bb07b6cd.

* [NOD-1226] handle errors separately for handshake

* [NOD-1226] Wrap ErrRouteClosed as protocol error in handshake

* [NOD-1226] return if ErrRouteClosed

* [NOD-1226] Use atomic

* [NOD-1226] Don't wrap with protocol error

* [NOD-1226] Fix comment

* [NOD-1226] Fix comment
2020-08-05 10:37:19 +03:00
Svarog
473cc37a75 [NOD-1224] Fix duplicate connections and duplicate blocks bugs (#842)
* [NOD-1224] Make block already existing a ruleError

* [NOD-1224] Remove block from pendingBlocks list only after it was processed

* [NOD-1224] AddToPeers should have a Write Lock, not Read Lock

* [NOD-1224] Check for unrequested before processing
2020-08-04 11:07:21 +03:00
Svarog
966cba4a4e [NOD-1218] KaspadMessage_Pong.toWireMessage should return MsgPong, not MsgPing (#841) 2020-08-03 18:18:06 +03:00
Svarog
da90755530 [NOD-1215] Added integration test for address exchange (#839)
* [NOD-1215] Added integration test for address exchange

* [NOD-1215] Fix error message
2020-08-03 10:34:21 +03:00
Svarog
fa58623815 [NOD-1214] Added test for 64 incoming connections to single node (#838)
* [NOD-1214] Added test for 64 incoming connections to single node

* [NOD-1214] Expand comments, and a small rename

* [NOD-1214] Make sure no bully reports blockAdded twice
2020-08-03 10:30:38 +03:00
Svarog
26af4da507 [NOD-1217] MethodUsageText should take a write lock, to protect methodToInfo (#840) 2020-08-03 10:29:04 +03:00
Svarog
b527470153 [NOD-1211] Transaction relay integration test + fixes to flow (#836)
* [NOD-1162] Separate kaspad into its own package, so that I can use it from the integration test

* [NOD-1162] Begin integration tests

* [NOD-1162] [FIX] Assign cfg to RPCServer

* [NOD-1162] Basic integration test ready

* [NOD-1162] Wait for connection for real

* [NOD-1162] [FIX] Connection manager should run the moment it adds a request

* [NOD-1162] Make connect something that can be invoked in middle of test

* [NOD-1162] Complete first integration test

* [NOD-1162] Undo refactor error

* [NOD-1162] Rename Kaspad to App

* [NOD-1162] Convert checking connection to polling

* [NOD-1162] [FIX] Set peerID on handshake

* [NOD-1162] [FIX] Broadcast should send to outgoing route, not incoming

* [NOD-1162] [FIX] Add CmdInvRelayBlock to MakeEmptyMessage

* [NOD-1162] [FIX] Initialize Hash before decoding MsgInvRelayBlock

* [NOD-1162] [FIX] Invert condition

* [NOD-1162] [FIX] Fixes to encoding of MsgGetRelayBlocks

* [NOD-1162] [FIX] Add MsgGetRelayBlocks to MakeEmptyMessage

* [NOD-1162] [FIX] Connection manager should run the moment it adds a request

* [NOD-1162] [FIX] Set peerID on handshake

* [NOD-1162] [FIX] Broadcast should send to outgoing route, not incoming

* [NOD-1162] [FIX] Add CmdInvRelayBlock to MakeEmptyMessage

* [NOD-1162] [FIX] Initialize Hash before decoding MsgInvRelayBlock

* [NOD-1162] [FIX] Invert condition

* [NOD-1162] [FIX] Fixes to encoding of MsgGetRelayBlocks

* [NOD-1162] [FIX] Add MsgGetRelayBlocks to MakeEmptyMessage

* [NOD-1162] Add comment

* [NOD-1162] Added support for 3 nodes and clients in integration tests

* [NOD-1162] Add third node to integration test

* [NOD-1192] Use lock-less functions in TxPool.HandleNewBlock

* [NOD-1192] Broadcast transactions only if there's more than 0

* [NOD-1162] Removed double waitTillNextIteration

* [NOD-1192] Rename: broadcastTransactions -> broadcastTransactionsAfterBlockAdded

* [NOD-1162] Call NotifyBlocks on client3 as well

* [NOD-1162] ErrTimeout and ErrRouteClosed should be ProtocolErrors

* [NOD-1162] Added comment and removed redundant type PeerAddedCallback

* [NOD-1162] Revert overly eager rename

* [NOD-1162] Move DisableTLS to common config + minimize call for ioutil.TempDir()

* [NOD-1162] Add some clarifications in code

* [NOD-1193] Skip closed connections in NetAdapter.Broadcast

* [NOD-1193] Make sure to protect connectionsToRouters from concurrent access

* [NOD-1162] Add _test to all files in integration package

* [NOD-1162] Introduced appHarness to better encapsulate a single node

* [NOD-1162] Removed onChainChanged handler

* [NOD-1162] Remove redundant closure

* [NOD-1162] Correctly mark integration_test config as Simnet

* [NOD-1162] Rename app.ID -> app.P2PNodeID

* [NOD-1162] Move TestIntegrationBasicSync to basic_sync_test.go

* [NOD-1210] Made it possible to setup any number of harnesses needed

* [NOD-1210] Rename appHarness1/2 to incoming/outgoing in connect function

* [NOD-1210] Add the 117-incoming-connections integration test

* [NOD-1210] Delete 117-incoming-connections test because it opens too many files

* [NOD-1210] Added function to notify of blocks conveniently

* [NOD-1210] Added function to mine a block from-A-to-Z

* [NOD-1210] Added IBD integration test

* [NOD-1210] Finish test for IBD and fix bug where
requestSelectedTipsIfRequired ran in handshake's goroutine

* [NOD-1210] Set log level to debug

* [NOD-1211] Add test for transaction relay

* [NOD-1211] Fix incorrect comparison in KaspadMessage_RequestTransactions.fromWireMessage

* [NOD-1211] Return ok instead of err from FetchTxDesc and FetchTransaction

* [NOD-1211] Added MsgTransactionNotFound type

* [NOD-1211] Added HandlRequestedTransactions flow

* [NOD-1211] Wait for blocks to be accepted before moving forward

* [NOD-1211] Rename CmdNotFound to CmdTransactionNotFound

* [NOD-1211] Rename: requestAndSolveTemplate -> mineNextBlock

* [NOD-1211] Renamed incoming/outgoing to appHarness1/appHarness2 in isConnected

* [NOD-1211] Move check of Hash == nil to outside wireHashToProto

* [NOD-1211] Instantiate payloadHash before *x
2020-08-02 16:11:16 +03:00
Ori Newman
e70561141d [NOD-1212] Request IBD blocks in batches (#835)
* [NOD-1212] Request IBD blocks in batches

* [NOD-1212] Fix condition

* [NOD-1212] Remove redundant functions

* [NOD-1212] gofmt

* [NOD-1212] Fix condition

* [NOD-1212] Fix off by one error and add messages to messages.proto

* [NOD-1212] Refactor downloadBlocks

* [NOD-1212] Fix comment

* [NOD-1212] Return DefaultTimeout to original value
2020-08-02 13:46:07 +03:00
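
The [NOD-1212] commit above changes IBD to request blocks in batches rather than one by one. A simplified sketch of the batching idea only, with a hypothetical requestBatch callback and plain string hashes standing in for the real downloadBlocks flow:

```go
package ibd

// downloadInBatches splits the hashes to download into fixed-size chunks and
// issues one request per chunk instead of one request per block.
func downloadInBatches(hashes []string, batchSize int,
	requestBatch func(batch []string) error) error {

	for offset := 0; offset < len(hashes); offset += batchSize {
		end := offset + batchSize
		if end > len(hashes) {
			end = len(hashes)
		}
		if err := requestBatch(hashes[offset:end]); err != nil {
			return err
		}
	}
	return nil
}
```
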
Ori Newman
20b547984e [NOD-1191] Fix erroneous condition (#837)
* [NOD-1191] Convert wire protocol to 100% protobuf

* [NOD-1191] Simplify wire interface and remove redundant messages

* [NOD-1191] Map all proto to wire conversions

* [NOD-1203] Create netadapter outside of protocol manager

* [NOD-1191] Fix nil errors

* [NOD-1191] Fix comments

* [NOD-1191] Add converter interface

* [NOD-1191] Add missing GetBlockLocator message

* [NOD-1191] Change message names that starts with 'get' to 'request'

* [NOD-1191] Change message commands values

* [NOD-1191] Remove redundant methods

* [NOD-1191] Rename message constructors

* [NOD-1191] Change message commands to use iota

* [NOD-1191] Add missing outputs to protobuf conversion

* [NOD-1191] Make block header a required field

* [NOD-1191] Rename variables

* [NOD-1212] Fix test names

* [NOD-1191] Rename flow names

* [NOD-1191] Fix infinite loop

* [NOD-1191] Fix wrong condition
2020-08-02 11:10:56 +03:00
Svarog
16a658a5be [NOD-1210] Add integration test for IBD and fix bug where requestSelectedTipsIfRequired ran in handshake's goroutine (#834)
* [NOD-1162] Separate kaspad into its own package, so that I can use it from the integration test

* [NOD-1162] Begin integration tests

* [NOD-1162] [FIX] Assign cfg to RPCServer

* [NOD-1162] Basic integration test ready

* [NOD-1162] Wait for connection for real

* [NOD-1162] [FIX] Connection manager should run the moment it adds a request

* [NOD-1162] Make connect something that can be invoked in middle of test

* [NOD-1162] Complete first integration test

* [NOD-1162] Undo refactor error

* [NOD-1162] Rename Kaspad to App

* [NOD-1162] Convert checking connection to polling

* [NOD-1162] [FIX] Set peerID on handshake

* [NOD-1162] [FIX] Broadcast should send to outgoing route, not incoming

* [NOD-1162] [FIX] Add CmdInvRelayBlock to MakeEmptyMessage

* [NOD-1162] [FIX] Initialize Hash before decoding MsgInvRelayBlock

* [NOD-1162] [FIX] Invert condition

* [NOD-1162] [FIX] Fixes to encoding of MsgGetRelayBlocks

* [NOD-1162] [FIX] Add MsgGetRelayBlocks to MakeEmptyMessage

* [NOD-1162] [FIX] Connection manager should run the moment it adds a request

* [NOD-1162] [FIX] Set peerID on handshake

* [NOD-1162] [FIX] Broadcast should send to outgoing route, not incoming

* [NOD-1162] [FIX] Add CmdInvRelayBlock to MakeEmptyMessage

* [NOD-1162] [FIX] Initialize Hash before decoding MsgInvRelayBlock

* [NOD-1162] [FIX] Invert condition

* [NOD-1162] [FIX] Fixes to encoding of MsgGetRelayBlocks

* [NOD-1162] [FIX] Add MsgGetRelayBlocks to MakeEmptyMessage

* [NOD-1162] Add comment

* [NOD-1162] Added support for 3 nodes and clients in integration tests

* [NOD-1162] Add third node to integration test

* [NOD-1192] Use lock-less functions in TxPool.HandleNewBlock

* [NOD-1192] Broadcast transactions only if there's more than 0

* [NOD-1162] Removed double waitTillNextIteration

* [NOD-1192] Rename: broadcastTransactions -> broadcastTransactionsAfterBlockAdded

* [NOD-1162] Call NotifyBlocks on client3 as well

* [NOD-1162] ErrTimeout and ErrRouteClosed should be ProtocolErrors

* [NOD-1162] Added comment and removed redundant type PeerAddedCallback

* [NOD-1162] Revert overly eager rename

* [NOD-1162] Move DisableTLS to common config + minimize call for ioutil.TempDir()

* [NOD-1162] Add some clarifications in code

* [NOD-1193] Skip closed connections in NetAdapter.Broadcast

* [NOD-1193] Make sure to protect connectionsToRouters from concurrent access

* [NOD-1162] Add _test to all files in integration package

* [NOD-1162] Introduced appHarness to better encapsulate a single node

* [NOD-1162] Removed onChainChanged handler

* [NOD-1162] Remove redundant closure

* [NOD-1162] Correctly mark integration_test config as Simnet

* [NOD-1162] Rename app.ID -> app.P2PNodeID

* [NOD-1162] Move TestIntegrationBasicSync to basic_sync_test.go

* [NOD-1210] Made it possible to setup any number of harnesses needed

* [NOD-1210] Rename appHarness1/2 to incoming/outgoing in connect function

* [NOD-1210] Add the 117-incoming-connections integration test

* [NOD-1210] Delete 117-incoming-connections test because it opens too many files

* [NOD-1210] Added function to notify of blocks conveniently

* [NOD-1210] Added function to mine a block from-A-to-Z

* [NOD-1210] Added IBD integration test

* [NOD-1210] Finish test for IBD and fix bug where
requestSelectedTipsIfRequired ran in handshake's goroutine

* [NOD-1210] Set log level to debug

* [NOD-1210] A bunch of renamings
2020-08-02 09:42:27 +03:00
Ori Newman
42e50e6dc2 [NOD-1191] Convert wire protocol to proto (#831)
* [NOD-1191] Convert wire protocol to 100% protobuf

* [NOD-1191] Simplify wire interface and remove redundant messages

* [NOD-1191] Map all proto to wire conversions

* [NOD-1203] Create netadapter outside of protocol manager

* [NOD-1191] Fix nil errors

* [NOD-1191] Fix comments

* [NOD-1191] Add converter interface

* [NOD-1191] Add missing GetBlockLocator message

* [NOD-1191] Change message names that starts with 'get' to 'request'

* [NOD-1191] Change message commands values

* [NOD-1191] Remove redundant methods

* [NOD-1191] Rename message constructors

* [NOD-1191] Change message commands to use iota

* [NOD-1191] Add missing outputs to protobuf conversion

* [NOD-1191] Make block header a required field

* [NOD-1191] Rename variables

* [NOD-1212] Fix test names

* [NOD-1191] Rename flow names

* [NOD-1191] Fix infinite loop
2020-07-30 18:19:55 +03:00
Svarog
3d942ce355 [NOD-1162] Integration test (#822)
* [NOD-1162] Separate kaspad into its own package, so that I can use it from the integration test

* [NOD-1162] Begin integration tests

* [NOD-1162] [FIX] Assign cfg to RPCServer

* [NOD-1162] Basic integration test ready

* [NOD-1162] Wait for connection for real

* [NOD-1162] [FIX] Connection manager should run the moment it adds a request

* [NOD-1162] Make connect something that can be invoked in middle of test

* [NOD-1162] Complete first integration test

* [NOD-1162] Undo refactor error

* [NOD-1162] Rename Kaspad to App

* [NOD-1162] Convert checking connection to polling

* [NOD-1162] [FIX] Set peerID on handshake

* [NOD-1162] [FIX] Broadcast should send to outgoing route, not incoming

* [NOD-1162] [FIX] Add CmdInvRelayBlock to MakeEmptyMessage

* [NOD-1162] [FIX] Initialize Hash before decoding MsgInvRelayBlock

* [NOD-1162] [FIX] Invert condition

* [NOD-1162] [FIX] Fixes to encoding of MsgGetRelayBlocks

* [NOD-1162] [FIX] Add MsgGetRelayBlocks to MakeEmptyMessage

* [NOD-1162] [FIX] Connection manager should run the moment it adds a request

* [NOD-1162] [FIX] Set peerID on handshake

* [NOD-1162] [FIX] Broadcast should send to outgoing route, not incoming

* [NOD-1162] [FIX] Add CmdInvRelayBlock to MakeEmptyMessage

* [NOD-1162] [FIX] Initialize Hash before decoding MsgInvRelayBlock

* [NOD-1162] [FIX] Invert condition

* [NOD-1162] [FIX] Fixes to encoding of MsgGetRelayBlocks

* [NOD-1162] [FIX] Add MsgGetRelayBlocks to MakeEmptyMessage

* [NOD-1162] Add comment

* [NOD-1162] Added support for 3 nodes and clients in integration tests

* [NOD-1162] Add third node to integration test

* [NOD-1192] Use lock-less functions in TxPool.HandleNewBlock

* [NOD-1192] Broadcast transactions only if there's more than 0

* [NOD-1162] Removed double waitTillNextIteration

* [NOD-1192] Rename: broadcastTransactions -> broadcastTransactionsAfterBlockAdded

* [NOD-1162] Call NotifyBlocks on client3 as well

* [NOD-1162] ErrTimeout and ErrRouteClosed should be ProtocolErrors

* [NOD-1162] Added comment and removed redundant type PeerAddedCallback

* [NOD-1162] Revert overly eager rename

* [NOD-1162] Move DisableTLS to common config + minimize call for ioutil.TempDir()

* [NOD-1162] Add some clarifications in code

* [NOD-1193] Skip closed connections in NetAdapter.Broadcast

* [NOD-1193] Make sure to protect connectionsToRouters from concurrent access

* [NOD-1162] Add _test to all files in integration package

* [NOD-1162] Introduced appHarness to better encapsulate a single node

* [NOD-1162] Removed onChainChanged handler

* [NOD-1162] Remove redundant closure

* [NOD-1162] Correctly mark integration_test config as Simnet

* [NOD-1162] Rename app.ID -> app.P2PNodeID
2020-07-30 10:47:56 +03:00
Ori Newman
94f617b06a [NOD-1206] Call peer.StartIBD in new goroutine (#833) 2020-07-29 12:03:59 +03:00
Svarog
211c4d05e8 [NOD-1120] Separate registration of routes, and the starting of flows (#832)
* [NOD-1120] Separate flow registration and running

* [NOD-1120] Extract executeFunc to separate function

* [NOD-1120] Move the registration of flows out of goroutine

* [NOD-1120] Return after handleError

* [NOD-1120] Rename: addXXXFlow -> registerXXXFlow

* Rename: stop -> errChan

* [NOD-1120] Fix name of goroutine
2020-07-29 12:02:04 +03:00
Ori Newman
a9f3bdf4ab [NOD-1203] Create netadapter outside of protocol manager (#830) 2020-07-29 10:17:13 +03:00
Svarog
2303aecab4 [NOD-1198] Make router a property of netConnection, and remove map from connection to router in netAdapter (#829)
* [NOD-1198] Make router a property of netConnection, and remove map from connection to router in netAdapter

* [NOD-1198] Moved all router logic from netAdapter to netConnection

* [NOD-1198] Move disconnect to NetConnection

* [NOD-1198] Unexport netConnection.start

* [NOD-1198] Remove error from Disconnect functions

* [NOD-1198] Make sure OnDisconnectedHandler doesn't run when it shouldn't
2020-07-28 11:27:48 +03:00
Svarog
7655841e9f [NOD-1194] Make error handling more centralized, and ignore ErrRouteClosed (#828)
* [NOD-1194] Make error handling more centralized, and ignore ErrRouteClosed

* [NOD-1194] Ignore ErrRouteClosed in connection_loops as well

* [NOD-1194] Enhance comment

* [NOD-1194] Return after any HandleError

* [NOD-1194] Rephrased comment
2020-07-27 15:07:28 +03:00
stasatdaglabs
c4bbcf9de6 [NOD-1181] Mark banned peers in address manager and persist bans to disk (#826)
* [NOD-1079] Fix block rejects over "Already have block" (#783)

* [NOD-1079] Return regular error instead of ruleError on already-have-block in ProcessBlock.

* [NOD-1079] Fix bad implementation of IsSelectedTipKnown.

* [NOD-1079] In shouldQueryPeerSelectedTips use selected DAG tip timestamp instead of past median time.

* [NOD-1079] Remove redundant (and possibly buggy) clearing of sm.requestedBlocks.

* [NOD-684] change simnet block rate to block per ms (#782)

* [NOD-684] Get rid of dag.targetTimePerBlock and use finality duration in dag params

* [NOD-684] Fix regtest genesis block

* [NOD-684] Set simnet's TargetTimePerBlock to 1ms

* [NOD-684] Shorten simnet finality duration

* [NOD-684] Change isDAGCurrentMaxDiff to be written as number of blocks

* [NOD-684] Fix NextBlockMinimumTime to add one millisecond after past median time

* [NOD-1004] Make AddrManager.getAddress use only 1 loop to check all address chances and pick one of them (#741)

* [NOD-1004] Remove code duplication in Good().

* [NOD-1004] Remove some more code duplication in Good().

* [NOD-1004] Remove some more code duplication in Good().

* [NOD-1004] Remove code duplication in GetAddress().

* [NOD-1004] Remove code duplication in updateAddress.

* [NOD-1004] Remove some more code duplication in updateAddress.

* [NOD-1004] Remove redundant check in expireNew.

* [NOD-1004] Remove superfluous existence check from updateAddress.

* [NOD-1004] Make triedBucket use a slice instead of a list.

* [NOD-1004] Remove code duplication in getAddress.

* [NOD-1004] Remove infinite loops out of getAddress.

* [NOD-1004] Made impossible branch panic.

* [NOD-1004] Remove a mystery comment.

* [NOD-1004] Remove an unnecessary sort.

* [NOD-1004] Make AddressKey a type alias.

* [NOD-1004] Added comment for AddressKey

* [NOD-1004] Fix merge errors.

* [NOD-1004] Fix merge errors.

* [NOD-1004] Do some renaming.

* [NOD-1004] Do some more renaming.

* [NOD-1004] Rename AddrManager to AddressManager.

* [NOD-1004] Rename AddrManager to AddressManager.

* [NOD-1004] Do some more renaming.

* [NOD-1004] Rename bucket to addressBucketArray.

* [NOD-1004] Fix a comment.

* [NOD-1004] Rename na to netAddress.

* [NOD-1004] Bring back an existence check.

* [NOD-1004] Fix an error message.

* [NOD-1004] Fix a comment.

* [NOD-1004] Use a boolean instead of -1.

* [NOD-1004] Use a boolean instead of -1 in another place.

Co-authored-by: Mike Zak <feanorr@gmail.com>

* Fix merge errors.

* [NOD-1181] Move isBanned logic into addressManager.

* [NOD-1181] Persist bans to disk.

* [NOD-1181] Add comments.

* [NOD-1181] Add an additional exit condition to the connection loop.

* [NOD-1181] Add a TODO.

* [NOD-1181] Wrap not-found errors in addressManager.

* [NOD-1181] Fix a comment.

* [NOD-1181] Rename banned to isBanned.

* [NOD-1181] Fix bad error handling in routerInitializer.

* [NOD-1181] Remove a TODO.

Co-authored-by: Ori Newman <orinewman1@gmail.com>
Co-authored-by: Mike Zak <feanorr@gmail.com>
2020-07-27 14:45:18 +03:00
stasatdaglabs
0cec1ce23e [NOD-1189] Exit early if we've filtered out all the hashes in block relay. (#827) 2020-07-27 12:23:43 +03:00
Svarog
089fe828aa [NOD-1193] Skip closed connections in NetAdapter.Broadcast (#825)
* [NOD-1193] Skip closed connections in NetAdapter.Broadcast

* [NOD-1193] Make sure to protect connectionsToRouters from concurrent access
2020-07-27 10:32:07 +03:00
stasatdaglabs
24a09fb3df Merge 0.6.0-dev into 0.6.0-libp2p (#824)
* [NOD-1079] Fix block rejects over "Already have block" (#783)

* [NOD-1079] Return regular error instead of ruleError on already-have-block in ProcessBlock.

* [NOD-1079] Fix bad implementation of IsSelectedTipKnown.

* [NOD-1079] In shouldQueryPeerSelectedTips use selected DAG tip timestamp instead of past median time.

* [NOD-1079] Remove redundant (and possibly buggy) clearing of sm.requestedBlocks.

* [NOD-684] change simnet block rate to block per ms (#782)

* [NOD-684] Get rid of dag.targetTimePerBlock and use finality duration in dag params

* [NOD-684] Fix regtest genesis block

* [NOD-684] Set simnet's TargetTimePerBlock to 1ms

* [NOD-684] Shorten simnet finality duration

* [NOD-684] Change isDAGCurrentMaxDiff to be written as number of blocks

* [NOD-684] Fix NextBlockMinimumTime to add one millisecond after past median time

* [NOD-1004] Make AddrManager.getAddress use only 1 loop to check all address chances and pick one of them (#741)

* [NOD-1004] Remove code duplication in Good().

* [NOD-1004] Remove some more code duplication in Good().

* [NOD-1004] Remove some more code duplication in Good().

* [NOD-1004] Remove code duplication in GetAddress().

* [NOD-1004] Remove code duplication in updateAddress.

* [NOD-1004] Remove some more code duplication in updateAddress.

* [NOD-1004] Remove redundant check in expireNew.

* [NOD-1004] Remove superfluous existence check from updateAddress.

* [NOD-1004] Make triedBucket use a slice instead of a list.

* [NOD-1004] Remove code duplication in getAddress.

* [NOD-1004] Remove infinite loops out of getAddress.

* [NOD-1004] Made impossible branch panic.

* [NOD-1004] Remove a mystery comment.

* [NOD-1004] Remove an unnecessary sort.

* [NOD-1004] Make AddressKey a type alias.

* [NOD-1004] Added comment for AddressKey

* [NOD-1004] Fix merge errors.

* [NOD-1004] Fix merge errors.

* [NOD-1004] Do some renaming.

* [NOD-1004] Do some more renaming.

* [NOD-1004] Rename AddrManager to AddressManager.

* [NOD-1004] Rename AddrManager to AddressManager.

* [NOD-1004] Do some more renaming.

* [NOD-1004] Rename bucket to addressBucketArray.

* [NOD-1004] Fix a comment.

* [NOD-1004] Rename na to netAddress.

* [NOD-1004] Bring back an existence check.

* [NOD-1004] Fix an error message.

* [NOD-1004] Fix a comment.

* [NOD-1004] Use a boolean instead of -1.

* [NOD-1004] Use a boolean instead of -1 in another place.

Co-authored-by: Mike Zak <feanorr@gmail.com>

* Fix merge errors.

Co-authored-by: Ori Newman <orinewman1@gmail.com>
Co-authored-by: Mike Zak <feanorr@gmail.com>
2020-07-26 15:23:18 +03:00
Svarog
b2901454d6 [NOD-1192] Use lock-less functions in TxPool.HandleNewBlock (#823)
* [NOD-1192] Use lock-less functions in TxPool.HandleNewBlock

* [NOD-1192] Broadcast transactions only if there's more than 0

* [NOD-1192] Rename: broadcastTransactions -> broadcastTransactionsAfterBlockAdded
2020-07-26 14:42:59 +03:00
stasatdaglabs
6cf589dc9b [NOD-1145] Normalize panics in flows (#819)
* [NOD-1145] Remove panics from regular flows.

* [NOD-1145] Remove panics from the handshake flow.

* [NOD-1045] Fix merge errors.

* [NOD-1045] Remove a comment.

* [NOD-1045] Handle errors properly in AddTransaction and AddBlock.

* [NOD-1045] Remove a comment.

* [NOD-1045] Wrap ErrPeerWithSameIDExists with ProtocolError.

* [NOD-1145] Add TODOs.
2020-07-26 13:46:59 +03:00
stasatdaglabs
683ceda3a7 [NOD-1152] Move banning from netAdapter to connectionManager (#820)
* [NOD-1152] Move banning out of netadapter.

* [NOD-1152] Add a comment.

* [NOD-1152] Fix a comment.
2020-07-26 13:42:48 +03:00
Svarog
6a18b56587 [NOD-1162] Fixes from integration test (#821)
* [NOD-1162] [FIX] Connection manager should run the moment it adds a request

* [NOD-1162] [FIX] Set peerID on handshake

* [NOD-1162] [FIX] Broadcast should send to outgoing route, not incoming

* [NOD-1162] [FIX] Add CmdInvRelayBlock to MakeEmptyMessage

* [NOD-1162] [FIX] Initialize Hash before decoding MsgInvRelayBlock

* [NOD-1162] [FIX] Invert condition

* [NOD-1162] [FIX] Fixes to encoding of MsgGetRelayBlocks

* [NOD-1162] [FIX] Add MsgGetRelayBlocks to MakeEmptyMessage

* [NOD-1162] Add comment
2020-07-26 11:44:16 +03:00
Ori Newman
2c9e5be816 [NOD-1177] In handleGetConnectedPeerInfo, populate the missing result fields (#818) 2020-07-23 13:35:30 +03:00
stasatdaglabs
5d5a0ef335 [NOD-1153] Remove redundant maps from NetAdapter (#817)
* [NOD-1153] Remove redundant maps from NetAdapter.

* [NOD-1153] Fix a comment.

* [NOD-1153] Fix a comment.
2020-07-23 12:07:53 +03:00
Ori Newman
428f16ffef [NOD-1185] Broadcast blocks submitted through RPC (#816)
* [NOD-1185] Broadcast blocks submitted through RPC

* [NOD-1185] Send inv instead of block

* [NOD-1185] Fix go.sum and go.mod
2020-07-23 11:50:44 +03:00
stasatdaglabs
f93e54b63c [NOD-1184] Protect incomingRoutes from concurrent read/write. (#815) 2020-07-23 11:17:52 +03:00
Ori Newman
c30b350e8e [NOD-1183] Fix rpc server config (#814) 2020-07-23 10:49:46 +03:00
stasatdaglabs
8fdb5aa024 [NOD-1123] Implement banning (#812)
* [NOD-1123] Bubble bad-message errors up to the protocol level.

* [NOD-1123] Implement Banning.

* [NOD-1123] Properly use &stopped.

* [NOD-1123] Ban by IP rather than IP and port.

* [NOD-1123] Don't initiate connections to banned peers.

* [NOD-1123] Fix infinite loop in checkOutgoingConnections.

* [NOD-1123] Fix bannedAddresses key.

* [NOD-1123] Rename onBadMessageHandler to onInvalidMessageHandler.
2020-07-23 10:47:28 +03:00
Ori Newman
83a3c30d01 [NOD-1176] Implement a struct for each flow to share flow data (#811)
* [NOD-1176] Implement a struct for each flow to share flow data

* [NOD-1178] Add empty contexts to flow structs for consistency
2020-07-22 15:12:54 +03:00
stasatdaglabs
63646c8c92 [NOD-1175] Implement AddBlock (#809)
* [NOD-1175] Get rid of something weird.

* [NOD-1175] Implement AddBlock.

* [NOD-1175] Implement BFDisallowOrphans.

* [NOD-1175] Pass flags into AddBlock.

* [NOD-1175] Remove isOrphan and isDelayed handling from AddBlock.

* [NOD-1175] Use default return values in error.

* [NOD-1175] Bring back a comment.

* [NOD-1175] Add ErrOrphanBlockIsNotAllowed to errorCodeStrings.
2020-07-22 13:47:38 +03:00
stasatdaglabs
097e7ab42a [NOD-1155] Always use NetConnection for disconnection. (#810) 2020-07-22 12:18:08 +03:00
stasatdaglabs
3d45c8de50 [NOD-1130] Integrate RPC with the new architecture (#807)
* [NOD-1130] Delete rpcadapters.go.

* [NOD-1130] Delete p2p. Move rpc to top level.

* [NOD-1130] Remove DAGParams from rpcserverConfig.

* [NOD-1130] Remove rpcserverPeer, rpcserverConnManager, rpcserverSyncManager, and rpcserverConfig.

* [NOD-1130] Remove wallet RPC commands.

* [NOD-1130] Remove wallet RPC commands.

* [NOD-1130] Remove connmgr and peer.

* [NOD-1130] Move rpcmodel into rpc.

* [NOD-1130] Implement ConnectionCount.

* [NOD-1130] Remove ping and node RPC commands.

* [NOD-1130] Dummify handleGetNetTotals.

* [NOD-1130] Add NetConnection to Peer.

* [NOD-1130] Fix merge errors.

* [NOD-1130] Implement Peers.

* [NOD-1130] Fix HandleGetConnectedPeerInfo.

* [NOD-1130] Fix SendRawTransaction.

* [NOD-1130] Rename addManualNode to connect and removeManualNode to disconnect.

* [NOD-1130] Add a stub for AddBlock.

* [NOD-1130] Fix tests.

* [NOD-1130] Replace half-baked contents of RemoveConnection with a stub.

* [NOD-1130] Fix merge errors.

* [NOD-1130] Make golint happy.

* [NOD-1130] Get rid of something weird.

* [NOD-1130] Rename minerClient back to client.

* [NOD-1130] Add a few fields to GetConnectedPeerInfoResult.

* [NOD-1130] Rename oneTry to isPermanent.

* [NOD-1130] Implement ConnectionCount in NetAdapter.

* [NOD-1130] Move RawMempoolVerbose out of mempool.

* [NOD-1130] Move isSynced into the mining package.

* [NOD-1130] Fix a compilation error.

* [NOD-1130] Make golint happy.

* [NOD-1130] Fix merge errors.
2020-07-22 10:26:39 +03:00
Ori Newman
8e1958c20b [NOD-1168] Add context interfaces for flows (#808)
* [NOD-1168] Add context interfaces to flows

* [NOD-1168] Move IBD state to protocol manager

* [NOD-1168] Move ready peers to protocol manager

* [NOD-1168] Add comments

* [NOD-1168] Separate context interfaces for send and receive pings

* [NOD-1168] Add protocol shared state to FlowContext

* [NOD-1168] Fix comment

* [NOD-1168] Rename Context->HandleHandshakeContext

* [NOD-1168] Initialize readyPeers and transactionsToRebroadcast

* [NOD-1168] Rename readyPeers -> peers
2020-07-21 18:02:33 +03:00
Ori Newman
3e6c1792ef [NOD-1170] Return a custom error when a route is closed (#805)
* [NOD-1170] Return a custom error when a route is closed

* [NOD-1170] Return ErrRouteClosed directly from route methods

* [NOD-1170] Fix comment location
2020-07-21 12:06:11 +03:00
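
The [NOD-1170] commit above makes route methods return a dedicated error once the route has been closed. A minimal sketch of that sentinel-error pattern with illustrative names; the real type lives in kaspad's router package and is more involved:

```go
package router

import "errors"

// ErrRouteClosed is returned directly from route methods after the route is
// closed, so callers can tell an orderly shutdown apart from other failures.
var ErrRouteClosed = errors.New("route is closed")

// Route is an illustrative, heavily simplified route.
type Route struct {
	channel chan string
	closed  bool
}

// Enqueue reports ErrRouteClosed instead of a generic error when the route
// is no longer usable.
func (r *Route) Enqueue(message string) error {
	if r.closed {
		return ErrRouteClosed
	}
	r.channel <- message
	return nil
}
```

Callers can then check errors.Is(err, ErrRouteClosed) and treat the condition as a normal disconnect, which is how other commits in this list ([NOD-1194], [NOD-1233]) describe handling it.
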
Svarog
6b5b4bfb2a [NOD-1164] Remove the singleton from dbaccess, to enable multiple db connections in same run (#806)
* [NOD-1164] Defined DatabaseContext as the basic object of dbaccess

* [NOD-1164] Update everything to use databaseContext

* [NOD-1164] Fix tests

* [NOD-1164] Add comments

* [NOD-1164] Removed databaseContext from blockNode

* [NOD-1164] Enforce DatabaseContext != nil

* [NOD-1164] Remove redundant and wrong comment line
2020-07-21 12:02:44 +03:00
Ori Newman
b797436884 [NOD-1127] Implement transaction propagation (#803)
* [NOD-1128] Add all flows to a directory names flows

* [NOD-1128] Make everything in protocol package a manager method

* [NOD-1128] Add AddTransaction mechanism to protocol manager

* [NOD-1128] Add mempool related flows

* [NOD-1128] Add mempool related flows

* [NOD-1128] Add mempool related flows

* [NOD-1127] Fix router message types

* [NOD-1127] Inline updateQueues

* [NOD-1127] Rename acceptedTxs->transactionsAcceptedToMempool

* [NOD-1127] Add TODOs to notify transactions to RPC

* [NOD-1127] Fix comment

* [NOD-1127] Rename acceptedTxs->transactionsAcceptedToMempool

* [NOD-1127] Rename MsgTxInv->MsgInvTransaction

* [NOD-1127] Rename MsgTxInv.TXIDs->TxIDS

* [NOD-1127] Change flow name

* [NOD-1127] Call m.addTransactionRelayFlow

* [NOD-1127] Remove redundant line

* [NOD-1127] Use common.DefaultTimeout

* [NOD-1127] Return early if len(idsToRequest) == 0

* [NOD-1127] Add NewBlockHandler to IBD
2020-07-20 16:01:35 +03:00
Svarog
2de3c1d0d4 [NOD-1160] Convert *config.Config from singleton to an object that is being passed around (#802)
* [NOD-1160] remove activeConfig from config package + update main

* [NOD-1160] Update main and addrmanager

* [NOD-1160] Update netAdapater

* [NOD-1160] Update connmanager

* [NOD-1160] Fix connmgr package

* [NOD-1160] Fixed DNSSeed functions

* [NOD-1160] Fixed protocol package and subpackages

* [NOD-1160] Fix p2p package

* [NOD-1160] Fix rpc package

* [NOD-1160] Fix kaspad a final time

* [NOD-1160] Make dnsseed.SeedFromDNS callable outside kaspad

* [NOD-1160] Fix tests

* [NOD-1160] Pass cfg to kaspad

* [NOD-1160] Add comment and remove redundant object

* [NOD-1160] Fix typo
2020-07-20 14:33:35 +03:00
Ori Newman
7e81757e2f [NOD-1161] Name goroutines and log them by the name (#804)
* [NOD-1161] Name goroutines and log them by the name

* [NOD-1161] Fix some goroutine names
2020-07-20 13:00:23 +03:00
stasatdaglabs
4773f87875 [NOD-1125] Implement the IBD flow (#800)
* [NOD-1125] Write a skeleton for starting IBD.

* [NOD-1125] Add WaitForIBDStart to Peer.

* [NOD-1125] Move functions around.

* [NOD-1125] Fix merge errors.

* [NOD-1125] Fix a comment.

* [NOD-1125] Implement sendGetBlockLocator.

* [NOD-1125] Begin implementing findIBDLowHash.

* [NOD-1125] Finish implementing findIBDLowHash.

* [NOD-1125] Rename findIBDLowHash to findHighestSharedBlockHash.

* [NOD-1125] Implement downloadBlocks.

* [NOD-1125] Implement msgIBDBlock.

* [NOD-1125] Implement msgIBDBlock.

* [NOD-1125] Fix message types for HandleIBD.

* [NOD-1125] Write a skeleton for requesting selected tip hashes.

* [NOD-1125] Write a skeleton for the rest of the IBD requests.

* [NOD-1125] Implement HandleGetBlockLocator.

* [NOD-1125] Fix wrong timeout.

* [NOD-1125] Fix compilation error.

* [NOD-1125] Implement HandleGetBlocks.

* [NOD-1125] Fix compilation errors.

* [NOD-1125] Fix merge errors.

* [NOD-1125] Implement selectPeerForIBD.

* [NOD-1125] Implement RequestSelectedTip.

* [NOD-1125] Implement HandleGetSelectedTip.

* [NOD-1125] Make go lint happy.

* [NOD-1125] Add minGetSelectedTipInterval.

* [NOD-1125] Call StartIBDIfRequired where needed.

* [NOD-1125] Fix merge errors.

* [NOD-1125] Remove a redundant line.

* [NOD-1125] Rename shouldContinue to shouldStop.

* [NOD-1125] Lowercasify an error message.

* [NOD-1125] Shuffle statements around in findHighestSharedBlockHash.

* [NOD-1125] Rename hasRecentlyReceivedBlock to isDAGTimeCurrent.

* [NOD-1125] Scope minGetSelectedTipInterval.

* [NOD-1125] Handle an unhandled error.

* [NOD-1125] Use AddUint32 instead of LoadUint32 + StoreUint32.

* [NOD-1125] Use AddUint32 instead of LoadUint32 + StoreUint32.

* [NOD-1125] Use SwapUint32 instead of AddUint32.

* [NOD-1125] Remove error from requestSelectedTips.

* [NOD-1125] Actually stop IBD when it should stop.

* [NOD-1125] Actually stop RequestSelectedTip when it should stop.

* [NOD-1125] Don't ban peers that send us delayed blocks during IBD.

* [NOD-1125] Make unexpected message type messages nicer.

* [NOD-1125] Remove Peer.ready and make HandleHandshake return it to guarantee we never operate on a non-initialized peer.

* [NOD-1125] Remove errors associated with Peer.ready.

* [NOD-1125] Extract maxHashesInMsgIBDBlocks to a const.

* [NOD-1125] Move the ibd package into flows.

* [NOD-1125] Start IBD if required after getting an unknown block inv.

* [NOD-1125] Don't request blocks during relay if we're in the middle of IBD.

* [NOD-1125] Remove AddBlockLocatorHash.

* [NOD-1125] Extract runIBD to a separate function.

* [NOD-1125] Extract runSelectedTipRequest to a separate function.

* [NOD-1125] Remove EnqueueWithTimeout.

* [NOD-1125] Increase the capacity of the outgoingRoute.

* [NOD-1125] Fix some bad names.

* [NOD-1125] Fix a comment.

* [NOD-1125] Simplify a comment.

* [NOD-1125] Move WaitFor... functions into their respective run... functions.

* [NOD-1125] Return default values in case of error.

* [NOD-1125] Use CmdXXX in error messages.

* [NOD-1125] Use MaxInvPerMsg in outgoingRouteMaxMessages instead of MaxBlockLocatorsPerMsg.

* [NOD-1125] Fix a comment.

* [NOD-1125] Disconnect a peer that sends us a delayed block during IBD.

* [NOD-1125] Use StoreUint32 instead of SwapUint32.

* [NOD-1125] Add a comment.

* [NOD-1125] Don't ban peers that send us delayed blocks.
2020-07-20 12:52:23 +03:00
Svarog
aa5bc34280 [NOD-1148] P2P stabilization (#798)
* [NOD-1148] Add lock around route's close operation

* [NOD-1148] Added tracing of incoming and outgoing messages

* [NOD-1148] Cast to MsgPing should have been to MsgPong

* [NOD-1148] Check for NeedMoreAddresses before sending GetAddr message
and invert condition
2020-07-19 14:57:34 +03:00
Svarog
b9a25c1141 [NOD-1163] Combine separated flows into single packages (#801)
* [NOD-1163] Combine separated flows into single packages

* [NOD-1163] Move handshake.go to handshake package

* [NOD-1163] Use single logger prefix for everything under protocol

* [NOD-1163] Add comment

* [NOD-1163] Fix refactor error
2020-07-19 11:24:25 +03:00
Svarog
b42b8b16fd [NOD-1120] Connection Manager (#796)
* [NOD-1120] Removed closure in NetAdapter.onConnectedHanlder

* [NOD-1120] Implement all connection manager methods

* [NOD-1120] Integrated connmanager into kaspad + added call for dnsseeder

* [NOD-1120] Allow buffer to not be bytes.Buffer

* [NOD-1120] Added timeout to connect

* [NOD-1120] Don't enter the connections-to-add loop if none are needed

* [NOD-1120] Add call for addressManager.Good

* [NOD-1120] Minor bug fixes

* [NOD-1120] Remove errChan from grpcConnection

* [NOD-1120] Add comments to exported methods

* [NOD-1120] cancel the context for DialContext in gRPCServer.Connect

* [NOD-1120] Don't try to remove from connSet a connection that doesn't exist

* [NOD-1120] add ok bool to connectionSet.get

* [NOD-1120] Remove overuse of if-else in checkConnectionRequests

* [NOD-1120] Made some order in ConnectionManager

* [NOD-1120] Moved checkIncomingConnections to it's own file

* [NOD-1120] cleanup in checkOutgoingConnections

* [NOD-1120] Cleanup in SeedDNS, and move call outside of connection manager

* [NOD-1120] Add check that both --connect and --addpeer aren't used

* [NOD-1120] Move dial timeout to constant

* [NOD-1120] Enhance comment

* [NOD-1120] Log connection failure out of initiateConnection

* [NOD-1148] Reshuffle checkRequestedConnections to make more sense

* [NOD-1120] Move continue to correct place + reshuffle logging code

* [NOD-1120] Don't expose server.Connection outside netAdapter - expose a wrapper instead

* [NOD-1120] Add comments

* [NOD-1120] Don't return the connection from netAdapter.Connect()

* [NOD-1120] Use .Address as key for connectionSet

* [NOD-1120] Fix minRetryDuration usage

* [NOD-1120] Remove the correct number of incoming connections

* [NOD-1120] Add comment

* [NOD-1120] Rename connSet -> incomingConnectionSet

* [NOD-1120] fix grammar
2020-07-16 17:15:58 +03:00
Ori Newman
e0aac68759 [NOD-1128] Convert message type to uint32 (#799)
* [NOD-1128] Change message command to uint32

* [NOD-1128] Don't use iota

* [NOD-1128] Remove redundant line
2020-07-16 17:11:05 +03:00
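
The [NOD-1128] commit above converts the message command to uint32 with explicitly assigned values rather than iota. A sketch with illustrative names and values (not kaspad's real assignments); note that a later commit in this list, [NOD-1191], switched the commands back to iota:

```go
package appmessage

// MessageCommand identifies a message type on the wire as a uint32.
type MessageCommand uint32

// Explicit values instead of iota, so adding or moving a command never
// changes the number already assigned to an existing one.
const (
	CmdVersion       MessageCommand = 0
	CmdVerAck        MessageCommand = 1
	CmdPing          MessageCommand = 2
	CmdPong          MessageCommand = 3
	CmdInvRelayBlock MessageCommand = 4
)
```
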
Ori Newman
9939671ccc [NOD-1147] Implement address exchange (#795)
* [NOD-1147] Implement address exchange

* [NOD-1147] Put placeholder for source address

* [NOD-1147] Fix tests

* [NOD-1147] Add comment

* [NOD-1147] Remove needAddresses from MsgGetAddr

* [NOD-1147] Use rand.Shuffle

* [NOD-1147] Remove redundant const

* [NOD-1147] Move defer to its correct place

* [NOD-1147] Fix typo

* [NOD-1147] Use EnqueueWithTimeout for outgoingRoute

* [NOD-1147] Rename MsgGetAddr->MsgGetAddresses

* [NOD-1147] Rename MsgGetAddr->MsgGetAddresses

* [NOD-1147] Rename MsgAddr->MsgAddresses

* [NOD-1147] Rename fakeSrcAddr->fakeSourceAddress

* [NOD-1147] Remove redundant files

* [NOD-1147] CmdAddr -> CmdAddress

* [NOD-1147] Rename addr to address in protocol package
2020-07-15 17:19:46 +03:00
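
The [NOD-1147] commit above implements address exchange and mentions using rand.Shuffle. A small sketch of the shuffle-and-truncate step only, with hypothetical names and plain string addresses instead of kaspad's address types:

```go
package addressexchange

import "math/rand"

// pickAddressesToSend returns up to maxAddresses entries in random order,
// as a response to a GetAddresses request might do.
func pickAddressesToSend(addresses []string, maxAddresses int) []string {
	shuffled := make([]string, len(addresses))
	copy(shuffled, addresses)

	rand.Shuffle(len(shuffled), func(i, j int) {
		shuffled[i], shuffled[j] = shuffled[j], shuffled[i]
	})

	if len(shuffled) > maxAddresses {
		shuffled = shuffled[:maxAddresses]
	}
	return shuffled
}
```
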
Ori Newman
eaa8515442 [NOD-1150] Add in netadapter function to disconnect router (#797)
* [NOD-1150] Add in netadapter function to disconnect router

* [NOD-1150] Fix comment
2020-07-15 14:42:17 +03:00
Ori Newman
04b578cee1 [NOD-1137] Implement handshake protocol (#792)
* [NOD-1126] Implement block relay flow

* [NOD-1126] Implement block relay flow

* [NOD-1126] Add StartGetRelayBlocksListener

* [NOD-1126] Integrate with new interface

* [NOD-1126] Fix comments

* [NOD-1126] Refactor protocol.go

* [NOD-1126] Split long lines

* [NOD-1126] Fix comment

* [NOD-1126] move sharedRequestedBlocks to a separate file

* [NOD-1126] Fix error message

* [NOD-1126] Move handleInv to StartBlockRelay

* [NOD-1126] Create hashesQueueSet type

* [NOD-1126] Make deleteFromRequestedBlocks a method

* [NOD-1126] Fix comment

* [NOD-1126] Add block logger

* [NOD-1126] Rename advertisedProtoVer->advertisedProtocolVer

* [NOD-1126] Fix comment and an error message

* [NOD-1126] Remove redundant loop

* [NOD-1126] Move requestBlocks upper

* [NOD-1126] Remove existing blocks in requestedBlocks from hashesToRequest

* [NOD-1126] Change comment

* [NOD-1126] Rename stallResponseTimeout->timeout

* [NOD-1126] Use switch inside readMsgBlock

* [NOD-1126] Fix error message and remove redundant log

* [NOD-1126] Rename package names

* [NOD-1126] Fix comment

* [NOD-1126] Change file names

* [NOD-1126] Convert block to partial if needed

* [NOD-1126] Remove function redeclaration

* [NOD-1126] continue instead of return

* [NOD-1126] Rename LogBlockBlueScore->LogBlock

* [NOD-1126] Add minimum functions to utils

* [NOD-1126] Flip condition on readInv

* [NOD-1126] Rename utilMath->mathUtil

* [NOD-1126] Fix comment

* [NOD-1137] Implement handshake

* [NOD-1137] Replace version's nonce with ID

* [NOD-1137] Remove redundant function

* [NOD-1137] Move handshake to a separate file

* [NOD-1137] Add todo

* [NOD-1137] Replace peer internal id with global peer ID

* [NOD-1137] Add serializer/deserializer to ID

* [NOD-1137] Remove validation from AddUserAgent

* [NOD-1137] Add missing id package

* [NOD-1137] Rename variables

* [NOD-1137] Add comment

* [NOD-1137] Implement GetBestLocalAddress

* [NOD-1137] Implement TODOs

* [NOD-1137] Rename variables

* [NOD-1137] Move errors.Is inside err!=nil branch

* [NOD-1137] Fix erroneous condition on Dequeue

* [NOD-1137] Fix bug in GetReadyPeerIDs

* [NOD-1137] Handle external IP on GetBestLocalAddress

* [NOD-1137] Remove version and verack message types when handshake is over

* [NOD-1137] Add FromBytes to id package

* [NOD-1137] Add protocol error

* [NOD-1137] Add ErrTimeout

* [NOD-1137] Log error only if exists

* [NOD-1137] Replace idFromBytes->id.FromBytes

* [NOD-1137] Add comments

* [NOD-1137] Remove ErrTimeout

* [NOD-1137] Unremove ErrTimeout

* [NOD-1137] Change comment

* [NOD-1137] Use EnqueueWithTimeout everywhere in protocol
2020-07-14 17:20:29 +03:00
stasatdaglabs
f8e53d309c [NOD-1142] Implement EnqueueWithTimeout and DequeueWithTimeout (#794)
* [NOD-1142] Implement EnqueueWithTimeout and DequeueWithTimeout.

* [NOD-1142] Use DequeueWithTimeout in readMsgBlock.

* [NOD-1142] Add comments about the new methods.
2020-07-14 16:14:27 +03:00
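The entry above adds EnqueueWithTimeout and DequeueWithTimeout to routes. A minimal sketch of that idea on a channel-backed route using time.After; the route type here is a toy stand-in, not the netadapter implementation:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// route is a toy stand-in for a channel-backed message route.
type route struct {
	ch chan string
}

var errTimeout = errors.New("timeout expired")

// EnqueueWithTimeout gives up if the channel stays full for longer than timeout.
func (r *route) EnqueueWithTimeout(msg string, timeout time.Duration) error {
	select {
	case r.ch <- msg:
		return nil
	case <-time.After(timeout):
		return errTimeout
	}
}

// DequeueWithTimeout gives up if no message arrives within timeout.
func (r *route) DequeueWithTimeout(timeout time.Duration) (string, error) {
	select {
	case msg := <-r.ch:
		return msg, nil
	case <-time.After(timeout):
		return "", errTimeout
	}
}

func main() {
	r := &route{ch: make(chan string, 1)}
	_ = r.EnqueueWithTimeout("block", time.Second)
	msg, _ := r.DequeueWithTimeout(time.Second)
	fmt.Println("dequeued:", msg)

	_, err := r.DequeueWithTimeout(10 * time.Millisecond)
	fmt.Println("empty route:", err)
}
```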
stasatdaglabs
6076309b3e [NOD-1142] Implement ping flow (#793)
* [NOD-1124] Move Router to the router package.

* [NOD-1124] Implement SetOnRouteCapacityReachedHandler.

* [NOD-1124] Use Routes instead of bare channels.

* [NOD-1124] Fix merge errors.

* [NOD-1124] Connect the Router to the Connection.

* [NOD-1124] Fix merge errors.

* [NOD-1124] Move some variables around.

* [NOD-1124] Fix unreachable code.

* [NOD-1124] Fix a variable name.

* [NOD-1142] Implement ping flows.

* [NOD-1142] Add ping flows to startFlows.

* [NOD-1142] Fix merge errors.

* [NOD-1142] Fix a typo.

* [NOD-1142] Add comments to exported functions.

* [NOD-1142] Fix bad flow name.

* [NOD-1142] Remove a redundant empty line.

* [NOD-1142] Fix a typo.

* [NOD-1142] Simplify for loop.

* [NOD-1142] Rename HandlePing to HandleIncomingPings and StartPingLoop to StartSendingPings.

* [NOD-1142] Fix no-longer-infinite loop.

* [NOD-1142] Represent ping duration as time.Duration instead of an int64.

* [NOD-1142] Rename HandleIncomingPings to ReceivePings and StartSendingPings to SendPings.

* [NOD-1142] Move pingInterval to within SendPings.

* [NOD-1142] Rephrase a comment.
2020-07-14 12:41:37 +03:00
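A rough sketch of the two ping flows named above, SendPings ticking on an interval and ReceivePings answering with pongs, with the round trip measured as a time.Duration. It is written against plain channels rather than the real Route API:

```go
package main

import (
	"fmt"
	"time"
)

// SendPings sends a ping every interval and waits for the matching pong,
// measuring the round trip as a time.Duration.
func SendPings(outgoing chan<- string, incoming <-chan string, interval time.Duration, rounds int) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for i := 0; i < rounds; i++ {
		<-ticker.C
		start := time.Now()
		outgoing <- "ping"
		<-incoming // wait for the pong
		fmt.Println("round trip:", time.Since(start))
	}
}

// ReceivePings answers every incoming ping with a pong.
func ReceivePings(incoming <-chan string, outgoing chan<- string) {
	for msg := range incoming {
		if msg == "ping" {
			outgoing <- "pong"
		}
	}
}

func main() {
	pings := make(chan string)
	pongs := make(chan string)
	go ReceivePings(pings, pongs)
	SendPings(pings, pongs, 50*time.Millisecond, 3)
}
```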
stasatdaglabs
05db135d23 [NOD-1124] Implement the Flow thread model and architecture (#791)
* [NOD-1124] Move Router to the router package.

* [NOD-1124] Implement SetOnRouteCapacityReachedHandler.

* [NOD-1124] Use Routes instead of bare channels.

* [NOD-1124] Fix merge errors.

* [NOD-1124] Connect the Router to the Connection.

* [NOD-1124] Fix merge errors.

* [NOD-1124] Move some variables around.

* [NOD-1124] Fix unreachable code.

* [NOD-1124] Fix a variable name.

* [NOD-1124] Rename AddRoute to AddIncomingRoute.

* [NOD-1124] Rename SetRouter to Start.

* [NOD-1124] Make AddIncomingRoute create a Route by itself.

* [NOD-1124] Replace IncomingRoute with EnqueueIncomingMessage.

* [NOD-1124] Make Enqueue and Dequeue return isOpen instead of err.

* [NOD-1124] Remove writeDuringDisconnectLock.

* [NOD-1124] In sendLoop, move outgoingRoute to outside the loop.

* [NOD-1124] Start the connection loops only when Start is called.

* [NOD-1124] Replace OnIDReceivedHandler with AssociateRouterID.

* [NOD-1124] Add isOpen to Enqueue and Dequeue.

* [NOD-1124] Protect errChan from writing during disconnect.
2020-07-13 16:51:13 +03:00
Ori Newman
433cdb6006 [NOD-1126] implement block relay flow (#786)
* [NOD-1126] Implement block relay flow

* [NOD-1126] Add StartGetRelayBlocksListener

* [NOD-1126] Implement block relay flow

* [NOD-1126] Integrate with new interface

* [NOD-1126] Fix comments

* [NOD-1126] Refactor protocol.go

* [NOD-1126] Split long lines

* [NOD-1126] Fix comment

* [NOD-1126] move sharedRequestedBlocks to a separate file

* [NOD-1126] Fix error message

* [NOD-1126] Move handleInv to StartBlockRelay

* [NOD-1126] Create hashesQueueSet type

* [NOD-1126] Make deleteFromRequestedBlocks a method

* [NOD-1126] Fix comment

* [NOD-1126] Add block logger

* [NOD-1126] Rename advertisedProtoVer->advertisedProtocolVer

* [NOD-1126] Fix comment and an error message

* [NOD-1126] Remove redundant loop

* [NOD-1126] Move requestBlocks up

* [NOD-1126] Remove existing blocks in requestedBlocks from hashesToRequest

* [NOD-1126] Change comment

* [NOD-1126] Rename stallResponseTimeout->timeout

* [NOD-1126] Use switch inside readMsgBlock

* [NOD-1126] Fix error message and remove redundant log

* [NOD-1126] Rename package names

* [NOD-1126] Fix comment

* [NOD-1126] Change file names

* [NOD-1126] Convert block to partial if needed

* [NOD-1126] Remove function redeclaration

* [NOD-1126] continue instead of return

* [NOD-1126] Rename LogBlockBlueScore->LogBlock

* [NOD-1126] Add minimum functions to utils

* [NOD-1126] Flip condition on readInv

* [NOD-1126] Rename utilMath->mathUtil

* [NOD-1126] Fix comment
2020-07-12 16:11:42 +03:00
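The block-relay entry above introduces a hashesQueueSet type, conceptually a FIFO queue that also rejects duplicates. A hedged sketch, using plain strings in place of block hashes and illustrative method names:

```go
package main

import "fmt"

// hashesQueueSet keeps insertion order like a queue while rejecting
// duplicates like a set. Real block hashes are replaced with strings here.
type hashesQueueSet struct {
	queue []string
	set   map[string]struct{}
}

func newHashesQueueSet() *hashesQueueSet {
	return &hashesQueueSet{set: make(map[string]struct{})}
}

// enqueueIfNotExists adds the hash only if it was never seen before.
func (q *hashesQueueSet) enqueueIfNotExists(hash string) bool {
	if _, ok := q.set[hash]; ok {
		return false
	}
	q.queue = append(q.queue, hash)
	q.set[hash] = struct{}{}
	return true
}

// dequeue pops the oldest hash; the bool is false when the queue is empty.
func (q *hashesQueueSet) dequeue() (string, bool) {
	if len(q.queue) == 0 {
		return "", false
	}
	hash := q.queue[0]
	q.queue = q.queue[1:]
	delete(q.set, hash)
	return hash, true
}

func main() {
	q := newHashesQueueSet()
	fmt.Println(q.enqueueIfNotExists("aa")) // true
	fmt.Println(q.enqueueIfNotExists("aa")) // false, duplicate
	hash, ok := q.dequeue()
	fmt.Println(hash, ok) // aa true
}
```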
Svarog
4a4dca1926 [NOD-1118] Implement gRPC basic connectivity (#790)
* [NOD-1118] Added protobufs for the MessageStream

* [NOD-1118] Implement some of the basic grpc methods

* [NOD-1118] Implemented gRPCConnection send and receive

* [NOD-1118] Implemented basic connection loops

* [NOD-1118] gRPC server implementation ready

* [NOD-1118] Add connection management

* [NOD-1118] Sort out the connection loops

* [NOD-1118] Add temporary testConnection

* [NOD-1118] Send to c.errChan whether error was received or not

* [NOD-1118] Call OnConnectHandler in time

* [NOD-1118] Handle closing connections properly

* [NOD-1118] Add comments to exported functions

* [NOD-1118] Call server.addConnection on newConnection

* [NOD-1118] Add a TODO comment

* [NOD-1118] Add a TODO comment

* [NOD-1118] Make connection a Stringer

* [NOD-1118] Made the connection loops 100% synchronous

* [NOD-1118] Make connection.isConnected uint32

* [NOD-1118] Move the Add/Remove connection from grpcConnection to register/unregister connection

* [NOD-1118] Convert error messages to lower case

* [NOD-1118] Remove protoc inline dependency

* [NOD-1118] Fix comment

* [NOD-1118] Exit if there was an error starting the protocol manager

* [NOD-1118] Fix error message

* [NOD-1118] Fixed a few comments

* [NOD-1118] Extract listenOn to a method

* [NOD-1118] Use !=0 for isConnected

* [NOD-1118] Refactor listenOn

* [NOD-1118] Add lock on channelWrites in gRPCConnection

* [NOD-1118] Rename channelWriteLock -> writeDuringDisconnectLock

* [NOD-1118] Reshuffle a comment

* [NOD-1118] Add a TODO comment
2020-07-12 15:22:49 +03:00
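The gRPC connectivity entry above makes connection.isConnected a uint32 checked with != 0; the usual motivation is lock-free atomic reads and writes of the flag. A small sketch of that pattern with illustrative names:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// gRPCConnection is a toy stand-in; only the connected flag is modeled.
type gRPCConnection struct {
	isConnected uint32 // 0 = disconnected, anything else = connected
}

func (c *gRPCConnection) connect()    { atomic.StoreUint32(&c.isConnected, 1) }
func (c *gRPCConnection) disconnect() { atomic.StoreUint32(&c.isConnected, 0) }

// IsConnected uses != 0 rather than == 1, matching the convention above.
func (c *gRPCConnection) IsConnected() bool {
	return atomic.LoadUint32(&c.isConnected) != 0
}

func main() {
	c := &gRPCConnection{}
	c.connect()
	fmt.Println("connected:", c.IsConnected())
	c.disconnect()
	fmt.Println("connected:", c.IsConnected())
}
```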
stasatdaglabs
6d591dde74 [NOD-1124] Implement the Flow thread model and architecture (#789)
* [NOD-1124] Rename Peer to Connection (because Peer is a business logic term)

* [NOD-1124] Implement Close for Router.

* [NOD-1124] Add SetPeerDisconnectedHandler.

* [NOD-1124] Remove mentions of "peer" from the netadapter package.

* [NOD-1124] Handle errors/stopping in netadapter.

* [NOD-1124] Remove netadapter.Connection.

* [NOD-1124] Add startSendLoop.

* [NOD-1124] Implement network IDs.

* [NOD-1124] Implement a map between IDs and routes.

* [NOD-1124] Implement Broadcast.

* [NOD-1124] Fix rename error.

* [NOD-1124] Fix copy+paste error.

* [NOD-1124] Change the type of NetAdapter.stop to uint32.

* [NOD-1124] If NetAdapter is stopped more than once, return an error.

* [NOD-1124] Add an error case to RouteInputMessage.

* [NOD-1124] Rename CreateID to NewID.

* [NOD-1124] Spawn from outside startReceiveLoop and startSendLoop.

* [NOD-1124] Fix a comment.

* [NOD-1124] Replace break with for condition.

* [NOD-1124] Don't disconnect from disconnected peers.

* [NOD-1124] Fix a for condition.

* [NOD-1124] Handle an error.
2020-07-09 09:34:28 +03:00
Svarog
8e624e057e [NOD-1134] Integrate Protocol into main (#788)
* [NOD-1134] Integrate Protocol into main

* [NOD-1134] Fix typo

* [NOD-1134] Added comments

* [NOD-1134] A series of renames to protocol

* [NOD-1134] Fix comment

* [NOD-1134] protocol.ProtocolManager -> Manager

* [NOD-1134] Update comment

* [NOD-1134] protocol.New() -> protocol.NewManager()
2020-07-08 11:52:53 +03:00
stasatdaglabs
eb2642ba90 [NOD-1124] Implement the Flow thread model and architecture (#787)
* [NOD-1124] Begin implementing netadapter.

* [NOD-1124] Implementing a stub gRPC server.

* [NOD-1124] Construct the server inside the netadapter.

* [NOD-1124] Rewrite protocol.go to fit with the new netAdapter model.

* [NOD-1124] Wrap a connection in Peer.

* [NOD-1124] Add a peerstate object.

* [NOD-1124] Remove the peerstate object.

* [NOD-1124] Remove router out of Peer.

* [NOD-1124] Tag a TODO.

* [NOD-1124] Return an error out of AddRoute if a route already exists for some message type.

* [NOD-1124] Rename the package grpc to grpcserver.

* [NOD-1124] Extracted newConnectionHandler into a type.

* [NOD-1124] Extract routerInitializer into a type.

* [NOD-1124] Panic/Add TODOs everywhere that isn't implemented.

* [NOD-1124] Improve the NetAdapter comment.

* [NOD-1124] Rename NewConnectionHandler to PeerConnectedHandler.

* [NOD-1124] Rename buildRouterInitializer to newRouterInitializer.

* [NOD-1124] Remove unreachable code.

* [NOD-1124] Make go vet happy.
2020-07-08 10:14:52 +03:00
Svarog
1a43cabfb9 [NOD-1119] Refactor main, and remove p2p layer from it (#785)
* [NOD-1119] Removed the p2p server from all server initialization

* [NOD-1119] Removed all calls to the p2p server in main

* [NOD-1119] Simplified some functions to not take both dag and dagParams

* [NOD-1119] Simplify creation of mempool and rpc server

* [NOD-1119] Setup indexes in separate function

* [NOD-1119] Some cleanup in NewServer

* [NOD-1119] Fix mempool test

* [NOD-1119] Fix go format

* [NOD-1119] Unexport dag.timeSource

* [NOD-1119] Removed server package + renamed the Server object to Kaspad, and made it minimal

* [NOD-1119] Delete redundant functions

* Unexported kaspad and related methods

* [NOD-1119] Unexported newKaspad

* [NOD-1119] Revise comments and remove redundant function

* [NOD-1119] Make comments of unexported methods lower-case

* [NOD-1119] Some more refactoring in newKaspad
2020-07-06 18:00:28 +03:00
Ori Newman
580e37943b [NOD-1117] Write interfaces for P2P layer (#784)
* [NOD-1117] Write interfaces for P2P layer

* [NOD-1117] Add logs
2020-07-05 12:10:01 +03:00
Ori Newman
749775c7ea [NOD-1098] Change timestamp precision to one millisecond (#778)
* [NOD-1098] Change timestamps to be millisecond precision

* [NOD-1098] Change lock times to use milliseconds

* [NOD-1098] Use milliseconds precision everywhere

* [NOD-1098] Implement type mstime.Time

* [NOD-1098] Fix block 100000 timestamp

* [NOD-1098] Change orphan child to be delayed one millisecond after its parent

* [NOD-1098] Remove test that checks if header timestamps have the right precision, and instead add tests for mstime, and fix genesis for testnet and devnet

* [NOD-1098] Fix comment

* [NOD-1098] Fix comment

* [NOD-1098] Fix testnet genesis

* [NOD-1098] Rename UnixMilli->UnixMilliseconds
2020-07-01 16:09:04 +03:00
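The millisecond-precision change above introduces an mstime.Time type and a UnixMilli->UnixMilliseconds rename. A standalone sketch of converting between time.Time and a millisecond Unix timestamp; this is an illustration of the idea, not the kaspad mstime package itself:

```go
package main

import (
	"fmt"
	"time"
)

// unixMilliseconds truncates a time.Time to millisecond precision and
// returns it as milliseconds since the Unix epoch.
func unixMilliseconds(t time.Time) int64 {
	return t.UnixNano() / int64(time.Millisecond)
}

// fromUnixMilliseconds converts a millisecond timestamp back to time.Time.
func fromUnixMilliseconds(ms int64) time.Time {
	return time.Unix(ms/1000, (ms%1000)*int64(time.Millisecond))
}

func main() {
	now := time.Now()
	ms := unixMilliseconds(now)
	fmt.Println("milliseconds since epoch:", ms)
	fmt.Println("round trip:", fromUnixMilliseconds(ms).UTC())
}
```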
Mike Zak
8ff8c30fb4 Merge remote-tracking branch 'origin/v0.5.0-dev' into v0.6.0-dev 2020-07-01 15:05:17 +03:00
stasatdaglabs
9893b7396c [NOD-1105] When recovering acceptance index, use a database transaction per block instead of for the entire recovery (#781)
* [NOD-1105] Don't use a database transaction when recovering acceptance index.

* Revert "[NOD-1105] Don't use a database transaction when recovering acceptance index."

This reverts commit da550f8e

* [NOD-1105] When recovering acceptance index, use a database transaction per block instead of for the entire recovery.
2020-07-01 13:43:51 +03:00
Svarog
8c90344f28 [NOD-1103] Fix testnetGenesisTxPayload with 8-byte blue-score (#780)
* [NOD-1103] Fix testnetGenesisTxPayload with 8-byte blue-score

* [NOD-1103] Fix genesis block bytes
2020-07-01 09:21:42 +03:00
Mike Zak
e4955729d2 Merge remote-tracking branch 'origin/v0.5.0-dev' into v0.6.0-dev 2020-06-30 08:48:31 +03:00
Mike Zak
8a7b0314e5 Merge branch 'v0.5.0-dev' of github.com:kaspanet/kaspad into v0.5.0-dev 2020-06-29 12:17:41 +03:00
Mike Zak
e87d00c9cf [NOD-1063] Fix a bug in which a block is pointing directly to a block in the selected parent chain below the reindex root
commit e303efef42
Author: stasatdaglabs <stas@daglabs.com>
Date:   Mon Jun 29 11:59:36 2020 +0300

    [NOD-1063] Rename a test.

commit bfecd57470
Author: stasatdaglabs <stas@daglabs.com>
Date:   Mon Jun 29 11:57:36 2020 +0300

    [NOD-1063] Fix a comment.

commit b969e5922d
Author: stasatdaglabs <stas@daglabs.com>
Date:   Sun Jun 28 18:14:44 2020 +0300

    [NOD-1063] Convert modifiedTreeNode to an out param.

commit 170f9872f4
Author: stasatdaglabs <stas@daglabs.com>
Date:   Sun Jun 28 17:05:01 2020 +0300

    [NOD-1063] Fix a bug in which a block is added to the selected parent chain below the reindex root.
2020-06-29 12:16:47 +03:00
stasatdaglabs
336347b3c5 [NOD-1063] Fix a bug in which a block is pointing directly to a block in the selected parent chain below the reindex root (#777)
* [NOD-1063] Fix a bug in which a block is added to the selected parent chain below the reindex root.

* [NOD-1063] Convert modifiedTreeNode to an out param.

* [NOD-1063] Fix a comment.

* [NOD-1063] Rename a test.
2020-06-29 12:13:51 +03:00
Mike Zak
15d0899406 Update to version v0.6.0 2020-06-29 09:17:45 +03:00
Mike Zak
ad096f9781 Update to version 0.5.0 2020-06-29 08:59:56 +03:00
Elichai Turkel
d3c6a3dffc [NOD-1093] Add hashMerkleRoot to GetBlockTemplateResult (#776)
* Add hashMerkleRoot field to GetBlockTemplateResult

* Use hashMerkleRoot from template instead of recalculating

* Move ParseBlock from kaspaminer into rpcclient

* Rename ParseBlock to ConvertGetBlockTemplateResultToBlock and wrap errors
2020-06-28 16:53:09 +03:00
stasatdaglabs
57b1653383 [NOD-1063] Optimize deep reachability tree insertions (#773)
* [NOD-1055] Give higher priority for requesting missing ancestors when sending a getdata message (#767)

* [NOD-1063] Remove the remainingInterval field.

* [NOD-1063] Add helper functions to reachabilityTreeNode.

* [NOD-1063] Add reachabilityReindexRoot.

* [NOD-1063] Start implementing findNextReachabilityReindexRoot.

* [NOD-1063] Implement findCommonAncestor.

* [NOD-1063] Implement findReachabilityTreeAncestorInChildren.

* [NOD-1063] Add reachabilityReindexWindow.

* [NOD-1063] Fix findReachabilityTreeAncestorInChildren.

* [NOD-1063] Remove BlockDAG reference in findReachabilityTreeAncestorInChildren.

* [NOD-1063] Extract updateReachabilityReindexRoot to a separate function.

* [NOD-1063] Add reachabilityReindexSlack.

* [NOD-1063] Implement splitReindexRootChildrenAroundChosen.

* [NOD-1063] Implement calcReachabilityTreeNodeSizes.

* [NOD-1063] Implement propagateChildIntervals.

* [NOD-1063] Extract tightenReachabilityTreeIntervalsBeforeChosenReindexRootChild and tightenReachabilityTreeIntervalsAfterChosenReindexRootChild to separate functions.

* [NOD-1063] Implement expandReachabilityTreeIntervalInChosenReindexRootChild.

* [NOD-1063] Finished implementing concentrateReachabilityTreeIntervalAroundReindexRootChild.

* [NOD-1063] Begin implementing reindexIntervalsBeforeReindexRoot.

* [NOD-1063] Implement top-level logic of reindexIntervalsBeforeReindexRoot.

* [NOD-1063] Implement reclaimIntervalBeforeChosenChild.

* [NOD-1063] Add a debug log for reindexIntervalsBeforeReindexRoot.

* [NOD-1063] Rename reindexIntervalsBeforeReindexRoot to reindexIntervalsEarlierThanReindexRoot.

* [NOD-1063] Implement reclaimIntervalAfterChosenChild.

* [NOD-1063] Add a debug log for updateReachabilityReindexRoot.

* [NOD-1063] Convert modifiedTreeNodes from slices to sets.

* [NOD-1063] Fix findCommonAncestor.

* [NOD-1063] Fix reindexIntervalsEarlierThanReindexRoot.

* [NOD-1063] Remove redundant nil conditions.

* [NOD-1063] Make map[*reachabilityTreeNode]struct{} into a type alias with a copyAllFrom method.

* [NOD-1063] Remove setInterval.

* [NOD-1063] Create a new struct to hold reachability stuff called reachabilityTree.

* [NOD-1063] Rename functions under reachabilityTree.

* [NOD-1063] Move reachabilityStore into reachabilityTree.

* [NOD-1063] Move the rest of the functions in reachability.go into the reachabilityTree struct.

* [NOD-1063] Update newReachabilityTree to take an instance of reachabilityStore.

* [NOD-1063] Fix merge errors.

* [NOD-1063] Fix merge errors.

* [NOD-1063] Pass a reference to the dag into reachabilityTree.

* [NOD-1063] Use Wrapf instead of Errorf.

* [NOD-1063] Merge assignments.

* [NOD-1063] Disambiguate a variable name.

* [NOD-1063] Add a test case for intervalBefore.

* [NOD-1063] Simplify splitChildrenAroundChosenChild.

* [NOD-1063] Fold temporary variables into newReachabilityInterval.

* [NOD-1063] Fold more temporary variables into newReachabilityInterval.

* [NOD-1063] Fix a bug in expandIntervalInReindexRootChosenChild.

* [NOD-1063] Remove blockNode from futureCoveringBlock.

* [NOD-1063] Get rid of futureCoveringBlock.

* [NOD-1063] Use findIndex directly in findAncestorAmongChildren.

* [NOD-1063] Make findIndex a bit nicer to use. Also rename it to findAncestorIndexOfNode.

* [NOD-1063] Rename childIntervalAllocationRange to intervalRangeForChildAllocation.

* [NOD-1063] Optimize findCommonAncestor.

* [NOD-1063] In reindexIntervalsBeforeChosenChild, use chosenChild.interval.start - 1 instead of childrenBeforeChosen[len(childrenBeforeChosen)-1].interval.end + 1.

* [NOD-1063] Rename reindexIntervalsBeforeChosenChild to reindexIntervalsBeforeNode.

* [NOD-1063] Add a comment explain what "the chosen child" is.

* [NOD-1063] In concentrateIntervalAroundReindexRootChosenChild, rename modifiedTreeNodes to allModifiedTreeNodes.

* [NOD-1063] Extract propagateIntervals to a function.

* [NOD-1063] Extract interval "contains" logic to a separate function.

* [NOD-1063] Simplify "looping up" logic in reclaimIntervalXXXChosenChild.

* [NOD-1063] Add comments to reclaimIntervalXXXChosenChild.

* [NOD-1063] Rename copyAllFrom to addAll.

* [NOD-1063] Rename reachabilityStore (the variable) to just store.

* [NOD-1063] Fix an error message.

* [NOD-1063] Reword a comment.

* [NOD-1063] Don't return -1 from findAncestorIndexOfNode.

* [NOD-1063] Extract slackReachabilityIntervalForReclaiming to a constant.

* [NOD-1063] Add a missing condition.

* [NOD-1063] Call isAncestorOf directly in insertNode.

* [NOD-1063] Rename chosenReindexRootChild to reindexRootChosenChild.

* [NOD-1063] Rename treeNodeSet to orderedTreeNodeSet.

* [NOD-1063] Add a disclaimer to orderedTreeNodeSet.

* [NOD-1063] Implement StoreReachabilityReindexRoot and FetchReachabilityReindexRoot.

* [NOD-1063] Move storing the reindex root to within reachabilityTree.

* [NOD-1063] Remove isAncestorOf from reachabilityInterval.

* [NOD-1063] Add a comment about graph theory conventions.

* [NOD-1063] Fix tests.

* [NOD-1063] Change inclusion in isAncestorOf functions.

* [NOD-1063] Rename a test.

* [NOD-1063] Implement TestIsInFuture.

* [NOD-1063] Fix error messages in TestIsInFuture.

* [NOD-1063] Fix error messages in TestIsInFuture.

* [NOD-1063] Rename isInSelectedParentChain to isInSelectedParentChainOf.

* [NOD-1063] Rename isInFuture to isInPast.

* [NOD-1063] Expand on a comment.

* [NOD-1063] Rename modifiedTreeNodes.

* [NOD-1063] Implement test: TestReindexIntervalsEarlierThanReindexRoot.

* [NOD-1063] Implement test: TestUpdateReindexRoot.

* [NOD-1063] Explain a check.

* [NOD-1063] Use a method instead of calling reachabilityStore.loaded directly.

* [NOD-1063] Lowercased an error message.

* [NOD-1063] Fix failing test.

Co-authored-by: Ori Newman <orinewman1@gmail.com>
2020-06-28 14:27:01 +03:00
Ori Newman
a86255ba51 [NOD-1088] Rename RejectReasion to RejectReason (#775) 2020-06-25 18:08:58 +03:00
Ori Newman
0a7a4ce7d6 [NOD-1085] Use all nonce space in kaspaminer (#774) 2020-06-24 11:12:57 +03:00
Ori Newman
4c3735a897 [NOD-906] Move transaction mass limit from CheckTransactionSanity to mempool (#772)
* [NOD-906] Move transaction mass limit from CheckTransactionSanity to mempool

* [NOD-906] Fix tests

* [NOD-906] Add spaces to comments
2020-06-22 17:20:17 +03:00
Ori Newman
22fd38c053 [NOD-1060] Don't sync from misbehaving peer (#768)
* [NOD-1038] Give higher priority for requesting missing ancestors when sending a getdata message (#767)

* [NOD-1060] Don't sync from peers that break the netsync protocol
2020-06-22 17:15:03 +03:00
Ori Newman
895f67a8d4 [NOD-1035] Use reachability to check finality (#763)
* [NOD-1034] Use reachability to check finality

* [NOD-1034] Add comments and rename variables

* [NOD-1034] Fix comments

* [NOD-1034] Rename checkFinalityRules->checkFinalityViolation

* [NOD-1034] Change isAncestorOf to be exclusive

* [NOD-1034] Make isAncestorOf exclusive and also more explicit, and add TestReachabilityTreeNodeIsAncestorOf
2020-06-22 17:14:59 +03:00
Svarog
56e807b663 [NOD-999] Set TargetOutbound=0 to prevent rogue connectionRequests except the one requested (#771) 2020-06-22 14:20:12 +03:00
Mike Zak
af64c7dc2d Merge remote-tracking branch 'origin/v0.4.1-dev' into v0.5.0-dev 2020-06-21 16:15:05 +03:00
Svarog
1e6458973b [NOD-1064] Don't send GetBlockInvsMsg with lowHash = nil (#769) 2020-06-21 09:09:07 +03:00
Ori Newman
7bf8bb5436 [NOD-1017] Move peers.json to db (#733)
* [NOD-1017] Move peers.json to db

* [NOD-1017] Fix tests

* [NOD-1017] Change comments and rename variables

* [NOD-1017] Separate to smaller functions

* [NOD-1017] Renames

* [NOD-1017] Name newAddrManagerForTest return params

* [NOD-1017] Fix handling of non existing peersState

* [NOD-1017] Add getPeersState rpc command

* [NOD-1017] Fix comment

* [NOD-1017] Split long line

* [NOD-1017] Rename getPeersState->getPeerAddresses

* [NOD-1017] Rename getPeerInfo->getConnectedPeerInfo
2020-06-18 12:12:49 +03:00
Svarog
1358911d95 [NOD-1064] Don't send GetBlockInvsMsg with lowHash = nil (#769) 2020-06-17 14:18:00 +03:00
Ori Newman
1271d2f113 [NOD-1038] Give higher priority for requesting missing ancestors when sending a getdata message (#767) 2020-06-17 11:52:10 +03:00
Ori Newman
bc0227b49b [NOD-1059] Always call sm.restartSyncIfNeeded() when getting selectedTip message (#766) 2020-06-16 16:51:38 +03:00
Ori Newman
dc643c2d76 [NOD-833] Remove getBlockTemplate capabilites and move mining address to getBlockTemplate (#762)
* [NOD-833] Remove getBlockTemplate capabilites and move mining address to getBlockTemplate

* [NOD-833] Fix tests

* [NOD-833] Break long lines
2020-06-16 11:01:06 +03:00
Ori Newman
0744e8ebc0 [NOD-1042] Ignore very high orphans (#761)
* [NOD-530] Remove coinbase inputs and add blue score to payload

* [NOD-1042] Ignore very high orphans

* [NOD-1042] Add ban score to an orphan with malformed blue score

* [NOD-1042] Fix log
2020-06-15 16:08:25 +03:00
Ori Newman
d4c9fdf6ac [NOD-614] Add ban score (#760)
* [NOD-614] Copy bitcoin-core ban score policy

* [NOD-614] Add ban score to disconnects

* [NOD-614] Fix wrong branch of AddBanScore

* [NOD-614] Add ban score on sending too many addresses

* [NOD-614] Add comments

* [NOD-614] Remove redundant reject messages

* [NOD-614] Fix log message

* [NOD-614] Ban every node that sends invalid invs

* [NOD-614] Make constants for ban scores
2020-06-15 12:12:38 +03:00
stasatdaglabs
829979b6c7 [NOD-1007] Split checkBlockSanity subroutines (#743)
* [NOD-1007] Split checkBlockSanity subroutines.

* [NOD-1007] Put back the comments about performance.

* [NOD-1007] Make all the functions in checkBlockSanity take a *util.Block.

* [NOD-1007] Rename checkBlockTransactionsOrderedBySubnetwork to checkBlockTransactionOrder.

* [NOD-1007] Move a comment up a scope level.
2020-06-15 11:07:52 +03:00
Mike Zak
32cd29bf70 Merge remote-tracking branch 'origin/v0.4.1-dev' into v0.5.0-dev 2020-06-14 12:51:59 +03:00
stasatdaglabs
03cb6cbd4d [NOD-1048] Use a smaller writeBuffer and use disableSeeksCompaction directly. (#759) 2020-06-11 16:11:22 +03:00
Ori Newman
ba4a89488e [NOD-530] Remove coinbase inputs and add blue score to payload (#752)
* [NOD-530] Remove coinbase inputs and add blue score to payload

* [NOD-530] Fix comment

* [NOD-530] Change util.Block private fields comments
2020-06-11 15:54:11 +03:00
Ori Newman
b0d4a92e47 [NOD-1046] Delete redundant conversion from rule error (#755) 2020-06-11 12:19:49 +03:00
stasatdaglabs
3e5a840c5a [NOD-1052] Add a lock around clearOldEntries to protect against concurrent access of utxoDiffStore.loaded. (#758) 2020-06-11 11:56:25 +03:00
Ori Newman
d6d34238d2 [NOD-1049] Allow empty addr messages (#753) 2020-06-10 16:13:13 +03:00
Ori Newman
8bbced5925 [NOD-1051] Don't disconnect from sync peer if it sends an orphan (#757) 2020-06-10 16:05:48 +03:00
stasatdaglabs
20da1b9c9a [NOD-1048] Make leveldb compaction much less frequent (#756)
* [NOD-1048] Make leveldb compaction much less frequent. Also, allocate an entire gigabyte for leveldb's blockCache and writeBuffer.

* [NOD-1048] Implement changing the options for testing purposes.

* [NOD-1048] Rename originalOptions to originalLDBOptions.

* [NOD-1048] Add a comment.
2020-06-10 16:05:02 +03:00
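The leveldb tuning entries above adjust the write buffer, block cache, and seeks-compaction behavior. A hedged example of setting the equivalent goleveldb options; the sizes below are placeholders rather than the values kaspad actually chose:

```go
package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	// Placeholder sizes: a larger write buffer and block cache make
	// compaction less frequent, and disabling seeks-compaction stops
	// reads from triggering compactions.
	options := &opt.Options{
		WriteBuffer:            64 * opt.MiB,
		BlockCacheCapacity:     256 * opt.MiB,
		DisableSeeksCompaction: true,
	}

	db, err := leveldb.OpenFile("/tmp/example-ldb", options)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Put([]byte("key"), []byte("value"), nil); err != nil {
		log.Fatal(err)
	}
}
```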
Ori Newman
b6a6e577c4 [NOD-1013] Don't block handleBlockDAGNotification when calling peerNotifier (#749)
* [NOD-1013] Don't block handleBlockDAGNotification when calling peerNotifier

* [NOD-1013] Add comment
2020-06-09 12:12:18 +03:00
Mike Zak
84888221ae Merge remote-tracking branch 'origin/v0.4.1-dev' into v0.5.0-dev 2020-06-08 12:23:33 +03:00
stasatdaglabs
222477b33e [NOD-1040] Don't remove DAG tips from the diffStore's loaded set (#750)
* [NOD-1040] Don't remove DAG tips from the diffStore's loaded set

* [NOD-1040] Remove a debug log.
2020-06-08 12:14:58 +03:00
Mike Zak
4a50d94633 Update to v0.4.1 2020-06-07 17:54:30 +03:00
stasatdaglabs
b4dba782fb [NOD-1040] Increase maxBlueScoreDifferenceToKeepLoaded to 1500 (#746)
* [NOD-1040] Don't remove DAG tips from the diffStore's loaded set

* [NOD-1040] Fix TestClearOldEntries.

* Revert "[NOD-1040] Fix TestClearOldEntries."

This reverts commit e0705814

* Revert "[NOD-1040] Don't remove DAG tips from the diffStore's loaded set"

This reverts commit d3eba1c1

* [NOD-1040] Increase maxBlueScoreDifferenceToKeepLoaded to 1500.
2020-06-07 17:50:57 +03:00
stasatdaglabs
9c78a797e4 [NOD-1041] Call outboundPeerConnected and outboundPeerConnectionFailed directly instead of routing them through peerHandler (#748)
* [NOD-1041] Fix a deadlock between connHandler and peerHandler.

* [NOD-1041] Simplified the fix.
2020-06-07 16:35:48 +03:00
Ori Newman
35c733a4c1 [NOD-970] Add isSyncing flag (#747)
* [NOD-970] Add isSyncing flag

* [NOD-970] Rename shouldSendSelectedTip->peerShouldSendSelectedTip
2020-06-07 16:31:17 +03:00
Mike Zak
e5810d023e Merge remote-tracking branch 'origin/v0.4.1-dev' into v0.5.0-dev 2020-06-07 14:21:49 +03:00
stasatdaglabs
96930bd6ea [NOD-1039] Remove the call to SetGCPercent. (#745) 2020-06-07 09:19:28 +03:00
Mike Zak
e09ce32146 Merge remote-tracking branch 'origin/v0.4.1-dev' into v0.5.0-dev 2020-06-04 15:11:40 +03:00
stasatdaglabs
d15c009b3c [NOD-1030] Disconnect from syncPeers that send orphan blocks (#744)
* [NOD-1030] Disconnect from syncPeers that send orphan blocks.

* [NOD-1030] Remove debug log.

* [NOD-1030] Remove unnecessary call to stopSyncFromPeer.
2020-06-04 15:11:05 +03:00
stasatdaglabs
95c8b8e9d8 [NOD-1023] Rename isCurrent/current to isSynced/synced (#742)
* [NOD-1023] Rename BlockDAG.isCurrent to isSynced.

* [NOD-1023] Rename SyncManager.current to synced.

* [NOD-1023] Fix comments.
2020-06-03 16:04:14 +03:00
stasatdaglabs
2d798a5611 [NOD-1020] Do send addr response to getaddr messages even if there aren't any addresses to send. (#740) 2020-06-01 14:09:18 +03:00
stasatdaglabs
3a22249be9 [NOD-1012] Fix erroneous partial node check (#739)
* [NOD-1012] Fix bad partial node check.

* [NOD-1012] Fix unit tests.
2020-05-31 14:13:30 +03:00
stasatdaglabs
a4c1898624 [NOD-1012] Disable subnetworks (#731)
* [NOD-1012] Disallow non-native/coinbase transactions.

* [NOD-1012] Fix logic error.

* [NOD-1012] Fix/skip tests and remove --subnetwork.

* [NOD-1012] Disconnect from non-native peers.

* [NOD-1012] Don't skip subnetwork tests.

* [NOD-1012] Use EnableNonNativeSubnetworks in peer.go.

* [NOD-1012] Set EnableNonNativeSubnetworks = true in the tests that need them rather than by default in Simnet.
2020-05-31 10:50:46 +03:00
Ori Newman
672f02490a [NOD-763] Change genesis version (#737) 2020-05-28 09:55:59 +03:00
stasatdaglabs
fc00275d9c [NOD-553] Get rid of base58 (#735)
* [NOD-553] Get rid of wif.

* [NOD-553] Get rid of base58.
2020-05-27 17:37:03 +03:00
Ori Newman
6219b93430 [NOD-1018] Exit after 2 minutes if graceful shutdown fails (#732)
* [NOD-1018] Exit after 2 minutes if graceful shutdown fails

* [NOD-1018] Change time.Tick to time.After
2020-05-25 14:30:43 +03:00
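The graceful-shutdown entry above swaps time.Tick for time.After so the process force-exits if shutdown outlives a grace period. A minimal sketch of that select pattern, with a shortened grace period for demonstration:

```go
package main

import (
	"fmt"
	"os"
	"time"
)

// waitForShutdown returns normally when done is closed, but exits the
// process if shutdown takes longer than the grace period.
func waitForShutdown(done <-chan struct{}, gracePeriod time.Duration) {
	select {
	case <-done:
		fmt.Println("shutdown completed cleanly")
	case <-time.After(gracePeriod):
		fmt.Println("graceful shutdown timed out, exiting")
		os.Exit(1)
	}
}

func main() {
	done := make(chan struct{})
	go func() {
		time.Sleep(50 * time.Millisecond) // simulated cleanup work
		close(done)
	}()
	// The entry above describes a 2-minute grace period; shortened here.
	waitForShutdown(done, time.Second)
}
```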
Ori Newman
3a4571d671 [NOD-965] Make LookupNode return boolean (#729)
* [NOD-965] Make dag.index.LookupNode return false if node is not found

* [NOD-965] Rename blockDAG->dag

* [NOD-965] Remove irrelevant test

* [NOD-965] Use bi.index's ok in LookupNode
2020-05-25 12:51:30 +03:00
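The LookupNode change above replaces "return nil when missing" with a comma-ok result. A toy sketch of an index with that signature; the blockNode type and hash keys here are placeholders:

```go
package main

import "fmt"

// blockNode is a placeholder for the real DAG block node.
type blockNode struct {
	hash string
}

// blockIndex maps a hash to its node.
type blockIndex struct {
	index map[string]*blockNode
}

// LookupNode returns the node and an ok bool, so callers must check
// existence explicitly instead of comparing against nil.
func (bi *blockIndex) LookupNode(hash string) (*blockNode, bool) {
	node, ok := bi.index[hash]
	return node, ok
}

func main() {
	bi := &blockIndex{index: map[string]*blockNode{
		"genesis": {hash: "genesis"},
	}}
	if node, ok := bi.LookupNode("genesis"); ok {
		fmt.Println("found", node.hash)
	}
	if _, ok := bi.LookupNode("unknown"); !ok {
		fmt.Println("node not found")
	}
}
```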
Ori Newman
96052ac69a [NOD-809] Change fee rate to fee per megagram (#730) 2020-05-24 16:59:37 +03:00
Ori Newman
6463a4b5d0 [NOD-1011] Don't cache isSynced on getBlockTemplate (#728) 2020-05-20 14:38:24 +03:00
Svarog
0ca127853d [NOD-974] UTXO-Commitments shouldn't include the new block's transactions (#727)
* [NOD-975] Don't include block transactions inside its UTXO commitment (#711)

* [NOD-975] Don't include block transactions inside its UTXO commitment.

* Revert "[NOD-975] Don't include block transactions inside its UTXO commitment."

This reverts commit b1a2ae66

* [NOD-975] Implement a (currently failing) TestUTXOCommitment.

* [NOD-975] Remove the block's own transactions from calcMultiset.

* [NOD-975] Simplify calcMultiset.

* [NOD-975] Add a comment on top of selectedParentMultiset.

* [NOD-975] Use pastUTXO instead of selectedParentUTXO in calcMultiset.

* [NOD-975] Use selected parent's pastUTXO instead of this block's pastUTXO in calcMultiset.

* [NOD-975] Extract selectedParentPastUTXO to a separate function.

* [NOD-975] Remove selectedParentUTXO from pastUTXO's return values.

* [NOD-975] Add txs to TestUTXOCommitment.

* [NOD-975] Remove debug code.

* [NOD-975] In pastUTXOMultiSet, copy the multiset to avoid modifying the original.

* [NOD-975] Add a test: TestPastUTXOMultiSet.

* [NOD-975] Improve TestPastUTXOMultiSet.

* [NOD-976] Implement tests for UTXO commitments (#715)

* [NOD-975] Don't include block transactions inside its UTXO commitment.

* Revert "[NOD-975] Don't include block transactions inside its UTXO commitment."

This reverts commit b1a2ae66

* [NOD-975] Implement a (currently failing) TestUTXOCommitment.

* [NOD-975] Remove the block's own transactions from calcMultiset.

* [NOD-975] Simplify calcMultiset.

* [NOD-975] Add a comment on top of selectedParentMultiset.

* [NOD-975] Use pastUTXO instead of selectedParentUTXO in calcMultiset.

* [NOD-975] Use selected parent's pastUTXO instead of this block's pastUTXO in calcMultiset.

* [NOD-975] Extract selectedParentPastUTXO to a separate function.

* [NOD-975] Remove selectedParentUTXO from pastUTXO's return values.

* [NOD-975] Add txs to TestUTXOCommitment.

* [NOD-976] Generate new blockDB blocks for tests.

* [NOD-976] Fix TestBlueBlockWindow.

* [NOD-976] Fix TestIsKnownBlock.

* [NOD-976] Fix TestGHOSTDAG.

* [NOD-976] Fix TestUTXOCommitment.

* [NOD-976] Remove kaka.

* [NOD-990] Save utxo diffs of past UTXO (#724)

* [NOD-990] Save UTXO diffs of past UTXO

* [NOD-990] Check for block double spends with its past instead of building its UTXO

* [NOD-990] Call resetExtraNonceForTest in TestUTXOCommitment

* [NOD-990] Remove redundant functions diffFromTx and diffFromAcceptedTx

* [NOD-990] Rename i->j to avoid confusion

* [NOD-990] Break long lines

* [NOD-990] Rename ErrDoubleSpendsWithBlockTransaction -> ErrDoubleSpendInSameBlock

* [NOD-990] Make ErrDoubleSpendInSameBlock more detailed

* [NOD-990] Add testProcessBlockRuleError

* [NOD-990] Fix comment

* [NOD-990] Add test for duplicate transactions on the same block

* [NOD-990] Use pkg/errors on panic

* [NOD-990] Make cloneWithoutBase method

* [NOD-990] Break long lines

* [NOD-990] Fix comment

* [NOD-990] Fix wrong variable names

* [NOD-990] Fix comment

* [NOD-974] Generate new test blocks.

* [NOD-974] Fix TestIsKnownBlock and TestGHOSTDAG.

* [NOD-974] Fix TestUTXOCommitment.

* [NOD-974] Fix comments

Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
Co-authored-by: stasatdaglabs <stas@daglabs.com>
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2020-05-20 12:43:52 +03:00
stasatdaglabs
b884ba128e [NOD-1008] In utxoDiffStore, keep diffData in memory for blocks whose blueScore is at least virtualBlueScore - X (#726)
* [NOD-1008] Use *blockNode as keys in utxoDiffStore.loaded and .dirty.

* [NOD-1008] Implement clearOldEntries.

* [NOD-1008] Increase maxBlueScoreDifferenceToKeepLoaded to 100.

* [NOD-1008] Fix a typo.

* [NOD-1008] Add clearOldEntries to saveChangesFromBlock.

* [NOD-1008] Begin implementing TestClearOldEntries.

* [NOD-1008] Finish implementing TestClearOldEntries.

* [NOD-1008] Fix a comment.

* [NOD-1008] Rename diffDataByHash to diffDataByBlockNode.

* [NOD-1008] Use dag.TipHashes instead of tracking tips manually.
2020-05-20 10:47:01 +03:00
Svarog
fe25ea3d8c [NOD-1001] Make an error in Peer.start() stop the connection process from continuing. (#723)
* [NOD-1001] Move side-effects of connection out of OnVersion

* [NOD-1001] Make AssociateConnection synchronous

* [NOD-1001] Wait for 2 veracks in TestPeerListeners

* [NOD-1001] Made AssociateConnection return error

* [NOD-1001] Remove temporary logs

* [NOD-1001] Fix typos and find-and-replace errors

* [NOD-1001] Move example_test back out of peer package + fix some typos

* [NOD-1001] Use correct remote address in setupPeersWithConns and return to address string literals

* [NOD-1001] Use separate verack channels for inPeer and outPeer

* [NOD-1001] Make verack channels buffered

* [NOD-1001] Removed temporary sleep of 1 second

* [NOD-1001] Removed redundant //
2020-05-20 10:36:44 +03:00
stasatdaglabs
e0f587f599 [NOD-877] Separate UTXO header code to two fields in serialization: blue score and packed flags (#725)
* [NOD-877] In UTXOEntry serialization, extract packedFlags out to a separate Uint8.

* [NOD-877] Generate new test blocks.

* [NOD-877] Fix TestIsKnownBlock.

* [NOD-877] Fix TestBlueBlockWindow.

* [NOD-877] Fix TestUTXOSerialization and TestGHOSTDAG.

* [NOD-877] Fix TestVirtualBlock.
2020-05-19 17:56:07 +03:00
stasatdaglabs
e9e1ef4772 [NOD-1006] Make use of a pool to avoid excessive allocation of big.Ints (#722)
* [NOD-1006] Make CompactToBig take an out param so that we can reuse the same big.Int in averageTarget.

* [NOD-1006] Fix merge errors.

* [NOD-1006] Use CompactToBigWithDestination only in averageTarget.

* [NOD-1006] Fix refactor errors.

* [NOD-1006] Fix refactor errors.

* [NOD-1006] Optimize averageTarget with a big.Int pool.

* [NOD-1006] Defer releasing bigInts.

* [NOD-1006] Use a pool for requiredDifficulty as well.

* [NOD-1006] Move the big int pool to utils.

* [NOD-1006] Remove unnecessary line.
2020-05-19 16:29:21 +03:00
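The big.Int pooling entry above avoids repeated allocations in hot difficulty code such as averageTarget. A hedged sketch of the technique using sync.Pool; the acquire/release helper names are illustrative, not the kaspad util API:

```go
package main

import (
	"fmt"
	"math/big"
	"sync"
)

// bigIntPool hands out reusable *big.Int values to cut down on allocations
// in hot loops such as average-target calculations.
var bigIntPool = sync.Pool{
	New: func() interface{} { return new(big.Int) },
}

func acquireBigInt() *big.Int  { return bigIntPool.Get().(*big.Int) }
func releaseBigInt(n *big.Int) { n.SetInt64(0); bigIntPool.Put(n) }

// averageTarget sums the targets into a pooled accumulator and divides by the count.
func averageTarget(targets []*big.Int) *big.Int {
	sum := acquireBigInt()
	defer releaseBigInt(sum)
	for _, target := range targets {
		sum.Add(sum, target)
	}
	// Return a fresh value; the pooled accumulator goes back to the pool.
	return new(big.Int).Div(sum, big.NewInt(int64(len(targets))))
}

func main() {
	targets := []*big.Int{big.NewInt(100), big.NewInt(200), big.NewInt(300)}
	fmt.Println("average target:", averageTarget(targets))
}
```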
Svarog
eb8b841850 [NOD-1005] Use sm.isSynced to check whether should request blocks from invs (#721)
* [NOD-1005] Moved isSyncedForMining to netsync manager, and renamed to isSynced + removed isCurrent

* [NOD-1005] Use sm.isSynced to check whether should request blocks from invs

* [NOD-1005] Use private version of isSynced to avoid infinite loop

* [NOD-1005] Fix a few typos
2020-05-18 10:42:58 +03:00
Svarog
28681affda [NOD-994] Greatly increase the amount of logs kaspad keeps before rotating them away (#720)
* [NOD-994] Greatly increased the amount of logs kaspad keeps before rotating them away

* [NOD-994] Actually increase the log file

* [NOD-994] Update comments

* [NOD-994] Fix typo
2020-05-14 10:58:46 +03:00
Svarog
378f0b659a [NOD-993] Get rid of redundant error types + Use %+v when printing startup errors (#719)
* [NOD-993] Use %+v when printing errors

* [NOD-993] Get rid of AssertError

* [NOD-993] Made ruleError use github.com/pkg/errors

* [NOD-993] remove redundant TODO

* [NOD-993] remove redundant Comment

* [NOD-993] Removed DeploymentError
2020-05-13 17:27:53 +03:00
stasatdaglabs
35b943e04f [NOD-996] Disable kaspad logs in TestScripts (#718)
* [NOD-996] Disable kaspad logs in TestScripts.

* [NOD-996] Return the log level to its original state after TestScripts is done.
2020-05-13 15:57:30 +03:00
stasatdaglabs
65f75c17fc [NOD-982] Log message with level WARN when getting MsgReject (#717)
* [NOD-982] Log message with level WARN when getting MsgReject.

* [NOD-982] Fix wrong logLevel in Write and Writef.

* [NOD-982] Use Write and Writef inside Trace, Tracef, Debug, Debugf, etc...

* [NOD-982] Move peer message logging to a separate file.
2020-05-13 10:03:37 +03:00
stasatdaglabs
806eab817c [NOD-820] When the node isn't synced, make getBlockTemplate return a boolean isSynced instead of an error (#716)
* [NOD-820] Add IsSynced to GetBlockTemplateResult.

* [NOD-820] Add isSynced to the help file.

* [NOD-820] Add MineWhenNotSynced to the kaspaminer config.

* [NOD-820] Implement miner MineWhenNotSynced logic.

* [NOD-820] Fixed capitalization in an error message.
2020-05-12 15:08:24 +03:00
Ori Newman
585510d76c [NOD-847] Fix CIDR protection and prevent connecting to the same address twice (#714)
* [NOD-847] Fix CIDR protection and prevent connecting to the same address twice

* [NOD-847] Fix Tests

* [NOD-847] Add TestDuplicateOutboundConnections and TestSameOutboundGroupConnections

* [NOD-847] Fix TestRetryPermanent, TestNetworkFailure and wait 10 ms before restoring the previous active config

* [NOD-847] Add "is" before boolean methods

* [NOD-847] Fix Connect's lock

* [NOD-847] Make numAddressesInAddressManager an argument

* [NOD-847] Add teardown function for address manager

* [NOD-847] Add stack trace to ConnManager errors

* [NOD-847] Change emptyAddressManagerForTest->createEmptyAddressManagerForTest and fix typos

* [NOD-847] Fix wrong test name for addressManagerForTest

* [NOD-847] Change error message if New fails

* [NOD-847] Add new line on releaseAddress

* [NOD-847] Always try to reconnect on disconnect
2020-05-12 13:47:15 +03:00
Svarog
c8a381d5bb [NOD-981] Fixed error message when both --notls and --rpccert are omitted (#713) 2020-05-06 13:05:48 +03:00
Ori Newman
3d04e6bded [NOD-943] Add acceptedBlockHashes to GetBlockVerboseResult (#708)
* [NOD-943] Add acceptedBlockHashes to GetBlockVerboseResult

* [NOD-943] Remove intermediate variables

* [NOD-943] Add block hash to error message

* [NOD-943] Change comment
2020-05-05 17:26:54 +03:00
Svarog
f8e851a6ed [NOD-968] Wrap all ldb errors with pkg/errors (#712) 2020-05-04 16:33:23 +03:00
stasatdaglabs
e70a615135 [NOD-872] Defer all currently undeferred unlocks in the database package (#706)
* [NOD-872] Defer unlocks in write.go.

* [NOD-872] Defer unlocks in rollback.go.

* [NOD-872] Defer unlocks in read.go.

* [NOD-872] Fix duplicate RUnlock.

* [NOD-872] Remove a redundant empty line.

* [NOD-872] Extract closeCurrentWriteCursorFile to a separate method.
2020-05-04 13:07:40 +03:00
Ori Newman
73ad0adf72 [NOD-913] Use sync rate in getBlockTemplate (#705)
* [NOD-913] Use sync rate in getBlockTemplate

* [NOD-913] Rename addBlockProcessTime->addBlockProcessTimestamp, maxDiff->maxTipAge

* [NOD-913] Pass maxDeviation as an argument

* [NOD-913] Change maxDeviation to +5%

* [NOD-913] Rename variables

* [NOD-913] Rename variables and functions and change comments

* [NOD-913] Split addBlockProcessingTimestamp
2020-05-04 09:09:23 +03:00
stasatdaglabs
5b74e51db1 [NOD-956] Increase K to 15. (#710) 2020-05-03 14:56:47 +03:00
stasatdaglabs
2e2492cc5d [NOD-849] Database tests (#695)
* [NOD-849] Cover ffldb/transaction with tests.

* [NOD-849] Cover cursor.go with tests.

* [NOD-849] Cover ldb/transaction with tests.

* [NOD-849] Cover location.go with tests.

* [NOD-849] Write TestFlatFileMultiFileRollback.

* [NOD-849] Fix merge errors.

* [NOD-849] Fix a comment.

* [NOD-849] Fix a comment.

* [NOD-849] Add a test that makes sure that files get deleted on rollback.

* [NOD-849] Add a test that makes sure that serializeLocation serialized to an expected value.

* [NOD-849] Improve TestFlatFileLocationDeserializationErrors.

* [NOD-849] Fix a copy+paste error.

* [NOD-849] Explain maxFileSize = 16.

* [NOD-849] Remove redundant RollbackUnlessClosed call.

* [NOD-849] Extract bucket to a variable in TestCursorSanity.

* [NOD-849] Rename TestKeyValueTransactionCommit to TestTransactionCommitForLevelDBMethods.

* [NOD-849] Extract prepareXXX into separate functions.

* [NOD-849] Simplify function calls in TestTransactionCloseErrors.

* [NOD-849] Extract validateCurrentCursorKeyAndValue to a separate function.

* [NOD-849] Add a comment over TestCursorSanity.

* [NOD-849] Add a comment over function in TestCursorCloseErrors.

* [NOD-849] Add a comment over function in TestTransactionCloseErrors.

* [NOD-849] Separate TestTransactionCloseErrors to TestTransactionCommitErrors and TestTransactionRollbackErrors.

* [NOD-849] Separate TestTransactionCloseErrors to TestTransactionCommitErrors and TestTransactionRollbackErrors.

* [NOD-849] Fix copy+paste error in comments.

* [NOD-849] Fix merge errors.

* [NOD-849] Merge TestTransactionCommitErrors and TestTransactionRollbackErrors into TestTransactionCloseErrors.

* [NOD-849] Move prepareDatabaseForTest into ffldb_test.go.

* [NOD-849] Add cursorKey to Value error messages in validateCurrentCursorKeyAndValue.
2020-05-03 12:19:09 +03:00
Ori Newman
2ef5c2cbac [NOD-915] Check if lockableFile underlying file is nil before closing it (#709) 2020-04-30 14:43:38 +03:00
Ori Newman
3c89e1f7b3 [NOD-952] Fix nil dereference bug on outboundPeerConnectionFailed (#704) 2020-04-27 13:50:09 +03:00
stasatdaglabs
2910724b49 [NOD-934] Fix addresses not getting their retry attempt counter incremented if they fail to connect (#702)
* [NOD-934] Fix addresses not getting their retry attempt counter incremented if they fail to connect.

* [NOD-922] Inline parseNetAddress.

* [NOD-922] Fix debug logs.
2020-04-23 17:01:09 +03:00
stasatdaglabs
3af945692e [NOD-922] Panic from cursor Next and First (#703)
* [NOD-922] Panic in Cursor First and Next if the cursor is closed.

* [NOD-922] Fix broken tests.

* [NOD-922] Fix a comment.
2020-04-23 16:55:25 +03:00
stasatdaglabs
5fe9dae557 [NOD-863] Write interface tests for the new database (#697)
* [NOD-863] Write TestCursorNext.

* [NOD-863] Write TestCursorFirst.

* [NOD-863] Fix merge errors.

* [NOD-863] Add TestCursorSeek.

* [NOD-863] Add TestCursorCloseErrors.

* [NOD-863] Add TestCursorCloseFirstAndNext.

* [NOD-863] Add TestDataAccessorPut.

* [NOD-863] Add TestDataAccessorGet.

* [NOD-863] Add TestDataAccessorHas.

* [NOD-863] Add TestDatabaseDelete.

* [NOD-863] Add TestDatabaseAppendToStoreAndRetrieveFromStore.

* [NOD-863] Add TestTransactionAppendToStoreAndRetrieveFromStore.

* [NOD-863] Add TestTransactionDelete.

* [NOD-863] Add TestTransactionHas.

* [NOD-863] Add TestTransactionGet.

* [NOD-863] Add TestTransactionPut.

* [NOD-863] Move cursor tests to the bottom of interface_test.go.

* [NOD-863] Move interface_test.go to a database_test package.

* [NOD-863] Make each test in interface_test.go run for every database driver. Currently, only ffldb.

* [NOD-863] Make each cursor test in interface_test.go run for every database driver. Currently, only ffldb.

* [NOD-863] Split interface_test.go into separate files.

* [NOD-863] Rename interface_test.go to common_test.go.

* [NOD-863] Extract testForAllDatabaseTypes to a separate function.

* [NOD-863] Reorganize how test data gets added to the database.

* [NOD-863] Add explanations about testForAllDatabaseTypes.

* [NOD-863] Add tests that make sure that database changes don't affect previously opened transactions.

* [NOD-863] Extract databasePrepareFunc to a type alias.

* [NOD-863] Fix comments.

* [NOD-863] Add cursor exhaustion test to testCursorFirst.

* [NOD-863] Add cursor Next clause to testCursorSeek.

* [NOD-863] Add additional verification to testDatabasePut.

* [NOD-863] Add an additional verification to testTransactionGet.

* [NOD-863] Add TestTransactionCommit.

* [NOD-863] Add TestTransactionRollback.

* [NOD-863] Add TestTransactionRollbackUnlessClosed.

* [NOD-863] Remove equals sign from databasePrepareFunc declaration.
2020-04-20 12:14:55 +03:00
Svarog
42c53ec3e2 [NOD-869] Add a print after os.Exit(1) to see if it is ever called (#701) 2020-04-16 16:08:32 +03:00
Ori Newman
291df8bfef [NOD-858] Don't switch sync peer if the syncing process hasn't yet started with the current sync peer (#700)
* [NOD-858] Don't switch sync peer if the syncing process hasn't yet started with the current sync peer

* [NOD-858] SetShouldSendBlockLocator(false) on OnBlockLocator

* [NOD-858] Rename shouldSendBlockLocator->wasBlockLocatorRequested

* [NOD-858] Move panic to shouldReplaceSyncPeer
2020-04-13 15:50:55 +03:00
Ori Newman
d015286f65 [NOD-909] Add tests for double spends (#694)
* [NOD-909] Add tests for double spends

* [NOD-909] Add prepareAndProcessBlock that gets parent hashes and transactions as argument

* [NOD-909] Use PrepareAndProcessBlockForTest where possible

* [NOD-909] Use more meaningful names

* [NOD-909] Change a comment

* [NOD-909] Fix comment

* [NOD-909] Fix comment
2020-04-13 12:28:59 +03:00
Ori Newman
fe91b4c878 [NOD-914] Make LevelDB.Cursor receive bucket instead of prefix (#696) 2020-04-12 09:25:40 +03:00
Ori Newman
7609c50641 [NOD-885] Use database.Key and database.Bucket instead of byte slices (#692)
* [NOD-885] Create database.Key type

* [NOD-885] Rename FullKey()->FullKeyBytes() and Key()->KeyBytes()

* [NOD-885] Make Key.String return a hex string

* [NOD-885] Rename key parts

* [NOD-885] Rename separator->bucketSeparator

* [NOD-885] Rename SuffixBytes->Suffix and PrefixBytes->Prefix

* [NOD-885] Change comments

* [NOD-885] Change key prefix to bucket

* [NOD-885] Don't use database.NewKey inside dbaccess

* [NOD-885] Fix nil bug in Bucket.Path()

* [NOD-885] Rename helpers.go -> keys.go

* [NOD-885] Unexport database.NewKey

* [NOD-885] Remove redundant code in Bucket.Path()
2020-04-08 12:12:21 +03:00
Ori Newman
df934990d7 [NOD-822] Don't return rule errors from utxoset code (#693)
* [NOD-822] Remove rule errors from the UTXO diff code

* [NOD-822] Rename applyTransactions -> applyAndVerifyBlockTransactionsToPastUTXO

* [NOD-822] Fix comment
2020-04-07 12:45:12 +03:00
stasatdaglabs
3c4a80f16d [NOD-899] Inside the database, in case we're out of disk space, panic without printing the stack trace (#691)
* [NOD-899] Inside the database, in case we're out of disk space, panic without printing the stack trace.

* [NOD-899] Fix bad variable name.

* [NOD-899] Reduce code duplication.
2020-04-06 16:00:48 +03:00
stasatdaglabs
a31139d4a5 [NOD-895] Break down initDAGState to sub-routines (#690) 2020-04-06 11:08:57 +03:00
Mike Zak
6da3606721 Update to version 0.4.0 2020-04-05 16:23:01 +03:00
Ori Newman
bfbc72724d [NOD-873] Reuse allocated space when updating the UTXO set in database (#688) 2020-04-05 11:46:16 +03:00
stasatdaglabs
956b6f7d95 [NOD-900] Fix bad key in Seek (#687)
* [NOD-900] Fix Seek not working as expected.

* [NOD-900] Wrap error messages.

* [NOD-900] Use ldbIterator.Key instead of LevelDBCursor.Key.

* [NOD-900] Add a comment.
2020-04-02 17:47:51 +03:00
stasatdaglabs
c1a039de3f [NOD-900] Fix Seek not working as expected (#686)
* [NOD-900] Fix Seek not working as expected.

* [NOD-900] Wrap error messages.
2020-04-02 17:05:58 +03:00
stasatdaglabs
f8b18e09d6 [NOD-805] Redesign the database (#685)
* [NOD-828] Reimplement FFLDB (#663)

* [NOD-828] Create the database2 package that will some day replace the database package.

* [NOD-828] Implement a "bucket" key mechanism.

* [NOD-828] Move bucket.go into the ffldb2 package.

* [NOD-828] Delete the un-interfaced ffldb package from database2, since we aren't going to be using it anyway.

* [NOD-828] Copy over + fixup flat file structs from the old ffldb.

* [NOD-828] Implement flatFilePath.

* [NOD-828] Implement flatFileStore.write().

* [NOD-828] Implement flatFileStore.read().

* [NOD-828] Implement flatFileStore.rollback().

* [NOD-828] Sync the file to disk at the end of write().

* [NOD-828] Extract crc32ByteOrder to a separate variable.

* [NOD-828] Add a sanity test.

* [NOD-828] Remove context-unrelated methods from the Database interface.

* [NOD-828] Create an ffldb object. Simply work against a context.

* [NOD-828] Open the new database on start.

* [NOD-828] Create the leveldb package.

* [NOD-828] Implement opening/closing leveldb.

* [NOD-828] Implement get/put out of/into leveldb.

* [NOD-828] Implement transactions and make them implement a generic database interface.

* [NOD-828] Write sanity tests for leveldb with and without transactions.

* [NOD-828] Add another case to the transaction sanity test.

* [NOD-828] Implement AppendBlock/RetrieveBlock.

* [NOD-828] Refactor so that concepts such as "block" and "metadata" don't leak into the database package.

* [NOD-828] Add RollbackFlatData to the database interface.

* [NOD-828] Remove anything from dbaccess that I'm not planning to implement as part of this ticket.

* [NOD-828] Implement StoreBlock.

* [NOD-828] Implement FetchBlock.

* [NOD-828] Implement HasBlock.

* [NOD-828] Write a sanity test for block insertion.

* [NOD-828] Implement CurrentFlatDataLocation.

* [NOD-828] Implement storing the current block location.

* [NOD-828] Implement initializing/syncing the flat file block store and the "metadata".

* [NOD-828] Add InitBlockStore to TestBlockStoreSanity.

* [NOD-828] Fix rename errors.

* [NOD-828] Fix lint errors in the root database package.

* [NOD-828] Fix lint errors in the ffldb.go.

* [NOD-828] Fix lint errors in the flatfile/db.go.

* [NOD-828] Rename packages in such a way to make the linter happy.

* [NOD-828] Finish satisfying the linter.

* [NOD-828] Fix doc.go.

* [NOD-828] Fix comments in block.go.

* [NOD-828] Move dbaccess out of the database package.

* [NOD-828] Move opening/closing the database to dbaccess.

* [NOD-828] Move the Database interface to the root database package, since it's meant to be generic.

* [NOD-828] Make ffldb generic to later support additional databases.

* [NOD-828] Make ffldb.Open return DatabaseHandle, since ffldb is no longer exported.

* [NOD-828] Fix comments.

* [NOD-828] Rename AppendFlatData to AppendToStore and RetrieveFlatData to RetrieveFromStore.

* [NOD-828] Make buckets nicer to use.

* [NOD-828] Implement cursors that iterate over some bucket.

* [NOD-828] Generalize flat-file repairing and move block database repairing into ffldb.

* [NOD-828] Write a test making sure that flat file repair works.

* [NOD-828] Properly close the database in TestRepairFlatFiles.

* [NOD-828] Add a comment warning against putting and getting the same data within the same transaction.

* [NOD-828] Fix the flatFilesBucket description.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Fix the ffldb description.

* [NOD-828] Rename Database to DataAccessor and rename Handle to Database.

* [NOD-828] Make Get return nil if the value doesn't exist.

* [NOD-828] Attempt to close leveldb even if closing ffdb failed.

* [NOD-828] Fix a bug where the wrong location would be written to the current store location bucket.

* [NOD-828] Fix not updating the store location in ffldb transactions.

* [NOD-828] Make scanFlatFiles return an error if os.Stat fails for any reason other than file-not-found.

* [NOD-828] Update the README and doc.go.

* [NOD-828] Simplify Bucket.Path().

* [NOD-828] Since LevelDBCursor satisfied the database2.Cursor interface, use it directly.

* [NOD-828] Combine two lines into one.

* [NOD-828] Combine another two lines into one.

* [NOD-828] Move a misplaced comment.

* [NOD-828] Use Wrapf instead of Errorf where appropriate.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Fix Wrapf calls.

* [NOD-828] Fix comments.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Add a comment explaining the use of batches and snapshots.

* [NOD-828] Implement RollbackUnlessClosed().

* [NOD-828] Return both errors in StoreBlock rollback.

* [NOD-828] Move rollback-on-error logic into ffldb. Remove CurrentStoreLocation and RollbackStore from DataAccessor.

* [NOD-828] Make bucket a type alias instead of a struct.

* [NOD-828] Fix a typo.

* [NOD-828] Use copy instead of append in Bucket.

* [NOD-828] Extract flatFileLocationSerializedSize to a const.

* [NOD-828] Debugf -> Warnf in rollback.go.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Remove data length from flat file data format.

* [NOD-828] Rearrange TestLevelDBTransactionSanity a bit.

* [NOD-828] Add stack traces to all errors that come out of library functions.

* [NOD-828] Return errors from rollback().

* [NOD-828] Remove an irrelevant comment.

* [NOD-828] Remove redundant whitespace.

* [NOD-828] Handle nil in FetchBlock.

* [NOD-828] Move the explanation about batches and snapshots to the LevelDBTransaction struct.

* Revert "[NOD-828] Make bucket a type alias instead of a struct."

This reverts commit 1fd39652

* [NOD-828] Fix revert errors.

* Revert "[NOD-828] Remove data length from flat file data format."

This reverts commit ef408e32

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-828] Add Delete to DataAccessor.

* [NOD-828] Fix a couple of places that erroneously referenced blocks.

* [NOD-828] Add a comment on top of flatFileLocationSerializedSize.

* [NOD-828] Add Seek to Cursor.

* [NOD-828] Add First to Cursor.

* [NOD-828] Rename db to accessor in Context.

* [NOD-828] Make Get/Fetch calls return a boolean to indicate whether the requested item was found.

* [NOD-828] Name the output parameters of all Get functions.

* [NOD-828] Make RetrieveFromStore return whether the data was found.

* [NOD-887] Add a couple of QoL features to Cursor (#674)

* [NOD-887] Changed First to not return an error.

* [NOD-887] Fix merge error.

* [NOD-887] Make Cursor.Key not return the entire key path.

* [NOD-888] Add RollbackUnlessClosed to Context (#676)

* [NOD-888] Add RollbackUnlessClosed to Context.

* [NOD-888] Fix copy+paste error.

* [NOD-889] Instead of returning a boolean for not-found, return an error (#677)

* [NOD-889] Instead of returning a boolean for not-found, return an error.

* [NOD-889] Wrapped ErrNotFound for Get calls with nicer error messages.

* [NOD-889] Fix format.

* [NOD-889] Fix double space in a comment.

* [NOD-889] Add IsNotFoundError to dbaccess.

* [NOD-862] Replace calls to Tx.StoreBlock, Tx.HasBlock, Tx.FetchBlock with appropriate calls in dbaccess (#672)

* [NOD-828] Create the database2 package that will some day replace the database package.

* [NOD-828] Implement a "bucket" key mechanism.

* [NOD-828] Move bucket.go into the ffldb2 package.

* [NOD-828] Delete the un-interfaced ffldb package from database2, since we aren't going to be using it anyway.

* [NOD-828] Copy over + fixup flat file structs from the old ffldb.

* [NOD-828] Implement flatFilePath.

* [NOD-828] Implement flatFileStore.write().

* [NOD-828] Implement flatFileStore.read().

* [NOD-828] Implement flatFileStore.rollback().

* [NOD-828] Sync the file to disk at the end of write().

* [NOD-828] Extract crc32ByteOrder to a separate variable.

* [NOD-828] Add a sanity test.

* [NOD-828] Remove context-unrelated methods from the Database interface.

* [NOD-828] Create an ffldb object. Simply work against a context.

* [NOD-828] Open the new database on start.

* [NOD-828] Create the leveldb package.

* [NOD-828] Implement opening/closing leveldb.

* [NOD-828] Implement get/put out of/into leveldb.

* [NOD-828] Implement transactions and make them implement a generic database interface.

* [NOD-828] Write sanity tests for leveldb with and without transactions.

* [NOD-828] Add another case to the transaction sanity test.

* [NOD-828] Implement AppendBlock/RetrieveBlock.

* [NOD-828] Refactor so that concepts such as "block" and "metadata" don't leak into the database package.

* [NOD-828] Add RollbackFlatData to the database interface.

* [NOD-828] Remove anything from dbaccess that I'm not planning to implement as part of this ticket.

* [NOD-828] Implement StoreBlock.

* [NOD-828] Implement FetchBlock.

* [NOD-828] Implement HasBlock.

* [NOD-828] Write a sanity test for block insertion.

* [NOD-828] Implement CurrentFlatDataLocation.

* [NOD-828] Implement storing the current block location.

* [NOD-828] Implement initializing/syncing the flat file block store and the "metadata".

* [NOD-828] Add InitBlockStore to TestBlockStoreSanity.

* [NOD-828] Fix rename errors.

* [NOD-828] Fix lint errors in the root database package.

* [NOD-828] Fix lint errors in the ffldb.go.

* [NOD-828] Fix lint errors in the flatfile/db.go.

* [NOD-828] Rename packages in such a way to make the linter happy.

* [NOD-828] Finish satisfying the linter.

* [NOD-828] Fix doc.go.

* [NOD-828] Fix comments in block.go.

* [NOD-828] Move dbaccess out of the database package.

* [NOD-828] Move opening/closing the database to dbaccess.

* [NOD-828] Move the Database interface to the root database package, since it's meant to be generic.

* [NOD-828] Make ffldb generic to later support additional databases.

* [NOD-828] Make ffldb.Open return DatabaseHandle, since ffldb is no longer exported.

* [NOD-828] Fix comments.

* [NOD-828] Rename AppendFlatData to AppendToStore and RetrieveFlatData to RetrieveFromStore.

* [NOD-828] Make buckets nicer to use.

* [NOD-828] Implement cursors that iterate over some bucket.

* [NOD-828] Generalize flat-file repairing and move block database repairing into ffldb.

* [NOD-828] Write a test making sure that flat file repair works.

* [NOD-828] Properly close the database in TestRepairFlatFiles.

* [NOD-828] Add a comment warning against putting and getting the same data within the same transaction.

* [NOD-862] Use dbaccess.HasBlock instead of Tx.HasBlock in initDAGState.

* [NOD-862] Use dbaccess.StoreBlock instead of dbStoreBlock.

* [NOD-862] Use dbaccess.FetchBlock instead of various block fetching mechanisms.

* [NOD-828] Fix the flatFilesBucket description.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Fix the ffldb description.

* [NOD-828] Rename Database to DataAccessor and rename Handle to Database.

* [NOD-828] Make Get return nil if the value doesn't exist.

* [NOD-828] Attempt to close leveldb even if closing ffdb failed.

* [NOD-828] Fix a bug where the wrong location would be written to the current store location bucket.

* [NOD-828] Fix not updating the store location in ffldb transactions.

* [NOD-828] Make scanFlatFiles return an error if os.Stat fails for any reason other than file-not-found.

* [NOD-828] Update the README and doc.go.

* [NOD-828] Simplify Bucket.Path().

* [NOD-828] Since LevelDBCursor satisfied the database2.Cursor interface, use it directly.

* [NOD-828] Combine two lines into one.

* [NOD-828] Combine another two lines into one.

* [NOD-828] Move a misplaced comment.

* [NOD-828] Use Wrapf instead of Errorf where appropriate.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Fix Wrapf calls.

* [NOD-828] Fix comments.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Add a comment explaining the use of batches and snapshots.

* [NOD-828] Implement RollbackUnlessClosed().

* [NOD-828] Return both errors in StoreBlock rollback.

* [NOD-828] Move rollback-on-error logic into ffldb. Remove CurrentStoreLocation and RollbackStore from DataAccessor.

* [NOD-828] Make bucket a type alias instead of a struct.

* [NOD-828] Fix a typo.

* [NOD-828] Use copy instead of append in Bucket.

* [NOD-828] Extract flatFileLocationSerializedSize to a const.

* [NOD-828] Debugf -> Warnf in rollback.go.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Remove data length from flat file data format.

* [NOD-828] Rearrange TestLevelDBTransactionSanity a bit.

* [NOD-828] Add stack traces to all errors that come out of library functions.

* [NOD-828] Return errors from rollback().

* [NOD-828] Remove an irrelevant comment.

* [NOD-828] Remove redundant whitespace.

* [NOD-828] Handle nil in FetchBlock.

* [NOD-828] Implement a dbaccess.BlockNode struct.

* [NOD-828] Move the explanation about batches and snapshots to the LevelDBTransaction struct.

* [NOD-828] Implement toDBBlockNode and fromDBBlockNode.

* Revert "[NOD-828] Make bucket a type alias instead of a struct."

This reverts commit 1fd39652

* [NOD-828] Fix revert errors.

* Revert "[NOD-828] Remove data length from flat file data format."

This reverts commit ef408e32

* [NOD-862] Implement storing index blocks.

* [NOD-862] Use database transactions where appropriate.

* [NOD-862] Fix tests failing on DAGSetup.

* [NOD-862] Fix bad make call.

* [NOD-862] Fix remaining database opening problems in tests.

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-862] Iterate over the new block index in dagio.

* [NOD-862] Fix block index key.

* [NOD-828] Add Delete to DataAccessor.

* [NOD-862] Pass byte slices to dbaccess instead of objects.

* [NOD-862] Fix errors.

* [NOD-862] Fix maybeAcceptBlock not checking block existence.

* [NOD-862] Fix TestAcceptanceIndexRecover.

* [NOD-862] Add comments to StoreBlockIndex and BlockIndexCursor.

* [NOD-828] Fix a couple of places that erroneously referenced blocks.

* [NOD-828] Add a comment on top of flatFileLocationSerializedSize.

* [NOD-828] Add Seek to Cursor.

* [NOD-828] Add First to Cursor.

* [NOD-828] Rename db to accessor in Context.

* [NOD-828] Make Get/Fetch calls return a boolean to indicate whether the requested item was found.

* [NOD-828] Name the output parameters of all Get functions.

* [NOD-828] Make RetrieveFromStore return whether the data was found.

* [NOD-862] Fix merge errors.

* [NOD-862] Fix DAGSetup using bad temp directories.

* [NOD-862] Fix TestProcessDelayedBlocks not closing the database properly.

* [NOD-862] Fix merge errors.

* [NOD-862] Merge flushToDBWithContext and flushToDB.

* [NOD-862] Remove TODO.

* [NOD-862] Add prefix to the temp dir in DAGSetup.

* [NOD-862] Bring back dbFetchBlockByHash.

* [NOD-862] Use BlockDAG.BlockByHash in p2p and rpc.

* [NOD-862] Use daghash.Hash in dbaccess.

* [NOD-862] Add defer to RollbackUnlessClosed after NewTx().

* [NOD-862] Extract dbStoreBlock to a separate function.

* [NOD-862] Fix grammar in comment.

* [NOD-862] Fix merge errors.

* [NOD-867] Migrate database logic in blockdag/dagio.go to dbaccess (#675)

* [NOD-867] Remove blockIndexBucket from dagio.

* [NOD-867] Fix wrong key in StoreIndexBucket.

* [NOD-867] Migrate DAG state to dbaccess.

* [NOD-867] Remove utxoSetVersionKeyName.

* [NOD-862] Fix merge errors.

* [NOD-867] Move localSubnetworkID into dagState.

* [NOD-867] Fix a comment.

* [NOD-867] Remove an unused function.

* [NOD-867] Migrate the database's UTXO set to dbaccess.

* [NOD-867] Add missing error check.

* [NOD-867] Changed First to not return an error.

* [NOD-867] Make Cursor.Key not return the entire key path.

* [NOD-887] Fix the comment above BlockIndexCursorFrom.

* [NOD-867] Remove TODOs.

* [NOD-867] Fix merge errors.

* [NOD-867] Fix comments and errors.

* [NOD-867] Unexport blockIndexKey.

* [NOD-867] Fix merge errors.

* [NOD-867] Move a misplaced comment.

* [NOD-867] Fix an error message.

* [NOD-867] Remove preallocation in initDAGState.

* [NOD-866] Migrate database logic in blockdag/indexers package to dbaccess (#682)

* [NOD-865] Delete blockidhash.go.

* [NOD-865] Remove a lot of no-longer relevant logic from indexers.

* [NOD-865] Pass TxContext to ConnectBlock.

* [NOD-865] Migrate the acceptance index to dbaccess.

* [NOD-865] Fix a block not being sent to ConnectBlock.

* [NOD-865] Pass the block's hash instead of the whole block.

* [NOD-865] Add forgotten Commit call.

* [NOD-865] Add comments.

* [NOD-866] Fix a comment.

* [NOD-866] Fix a comment.

* [NOD-866] Remove pointless indirection in acceptanceindex.

* [NOD-866] Fix comment over ForEachHash.

* [NOD-866] Rename ClearAcceptanceIndex to DropAcceptanceIndex.

* [NOD-866] Explain collecting keys before deleting them.

* [NOD-865] Move misc db logic to db access (#681)

* [NOD-865] Move fee data db operations to dbaccess

* [NOD-865] Move reachability data db operations to dbaccess

* [NOD-865] Move UTXO diff data db operations to dbaccess

* [NOD-865] Move subnetwork data db operations to dbaccess

* [NOD-865] Fix createDAGState

* [NOD-865] Remove old Get signature with "exists"

* [NOD-865] Move multiset db operations to dbaccess

* [NOD-865] Use dbaccess transactions where possible

* [NOD-865] Remove old Get signature with "exists"

* [NOD-881] Recover TestGHOSTDAGErrors

* [NOD-865] Create function for db keys

* [NOD-865] Change Exists to Has, and use accessor.Has where possible

* [NOD-865] Make ClearReachabilityData transactive

* [NOD-865] Don't iterate cursors while changing db data

* [NOD-865] Rename RegisterSubnetwork -> StoreSubnetwork

* [NOD-865] Change bucket from utxodiffs to utxo-diffs

* [NOD-865] Rename SubnetworkExists->HasSubnetwork

* [NOD-865] Change a comment

* [NOD-865] Fix tests

* [NOD-865] Fix comment

* [NOD-865] Remove the prefix "db" from some functions

* [NOD-865] Remove redundant comments

* [NOD-865] Make clearBucket function

* [NOD-865] Make clear functions get a dbTx as an arg

* [NOD-865] Remove erroneous tx commit

Co-authored-by: stasatdaglabs <stas@daglabs.com>

* [NOD-868] Delete the old database package (#683)

* [NOD-868] Remove all tests from old database.

* [NOD-868] Remove all unused methods from the old database's interfaces.

* [NOD-865] Fix tests

* [NOD-868] Remove references to DB.

* [NOD-865] Fix comment

* [NOD-868] Remove the old ffldb besides the interface and errors.go.

* [NOD-868] Remove errors.go.

* [NOD-868] Remove the old database package.

* [NOD-868] Add openDB to DAGSetup to emulate the old dbpath in dag.config.

* [NOD-868] Rename database2 to database.

* [NOD-868] Use NewTx instead of NoTx where required.

* [NOD-868] Fix merge errors.

* [NOD-868] Rename dbXXX functions to just xxx.

* [NOD-868] Rename putDAGState to saveDAGState.

* [NOD-868] Replace comments in initDAGState with logs.

* [NOD-868] Explain the openDB parameter in DAGSetup.

* [NOD-868] Fixup doc.go and README.md.

* [NOD-868] Remove pointless transactions.

Co-authored-by: Ori Newman <orinewman1@gmail.com>

* [NOD-805] Fix merge errors.

* [NOD-805] Fix a comment.

* [NOD-805] Don't return virtualTxsAcceptanceData from applyDAGChanges.

* [NOD-805] Add missing error handling in TestAcceptanceDataIndexRecover.

* [NOD-805] Rename blockDAG to dag in indexers/manager.go.

* [NOD-805] Defer cursor.Close() everywhere.

* [NOD-805] Rename scanFlatFiles to findCurrentLocation.

* [NOD-805] Extract crc32ChecksumLength and dataLengthLength to constants.

* [NOD-805] Handle open files properly in rollback.go.

* [NOD-805] Remove unnecessary func wrapper.

* [NOD-805] Remove unnecessary trimming in initialize.

* [NOD-805] Made StoreBlock accept only TxContext.

* [NOD-805] Changed the log level of an error message to Error.

* [NOD-805] Add a note about holding mutexes over deleteFile.

* [NOD-805] Remove a false comment.

* [NOD-805] Fix a comment.

* [NOD-805] Rename blk to block.

* [NOD-805] Extract utxoKey to a separate function.

* [NOD-805] Move dbaccess.xxxKey functions to the tops of their respective files.

* [NOD-805] Fix grammar in dbaccess/db.go.

* [NOD-805] Wrap a failed database corruption recovery error.

* [NOD-805] Split lines with WithStack in them.

* [NOD-805] Fix the comment over initialize.

* [NOD-805] Rename ffdb to flatFileDB and ldb to levelDB.

* [NOD-805] Fix a comment.

* [NOD-805] Fix a comment.

* [NOD-805] Use s.writeCursor instead of cursor.

* [NOD-805] Embed file in lockableFile.

* [NOD-805] the the -> the

* [NOD-805] openDB -> db

* [NOD-805] Use TxContext in all flushToDB functions.

* [NOD-805] Rename context -> dbContext.

* [NOD-805] Reword the comment at the beginning on initDAGState.

* [NOD-805] Explain cursor key trimming.

* [NOD-805] Remove Error from Cursor.

* [NOD-805] Return ErrNotFound from done Cursor Key and Value.

* [NOD-805] Add missing error handling.

* [NOD-805] Fix a comment.

* [NOD-805] Fix a variable name.

* [NOD-805] Remove pointless underscore.

* [NOD-805] Fix a comment.

* [NOD-805] Fix a variable name.

Co-authored-by: Mike Zak <feanorr@gmail.com>
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2020-04-02 13:56:32 +03:00
Ori Newman
b20a7a679b [NOD-874] If the node is not current, call sm.restartSyncIfNeeded() on handleInvMsg (#684)
* [NOD-874] If the node is not current, call sm.restartSyncIfNeeded() on handleInvMsg

* [NOD-874] Check haveUnknownInvBlock before restartSyncIfNeeded

* [NOD-874] Fix comment

* [NOD-874] Fix comment

* [NOD-874] Fix comment
2020-04-01 12:56:10 +03:00
Ori Newman
36d866375e [NOD-881] Don't recalculate subtreesize for children (#678)
* [NOD-881] Don't recalculate subtreesize for children

* [NOD-881] Make BenchmarkReindexInterval clearer

* [NOD-881] Use b.ResetTimer

* [NOD-881] Fix BenchmarkReindexInterval to use b.N
2020-03-31 12:43:02 +03:00
Svarog
024edc30a3 [NOD-857] Add generalized profiler package and use it everywhere (#679)
* [NOD-857] Add generalized profiler package and use it everywhere

* [NOD-857] Dependency-inject log into profiling.Start()
2020-03-31 12:41:21 +03:00
Ori Newman
6aa5e0b5a8 [NOD-882] Remove ecc and hdkeychain (#680)
* [NOD-882] Remove ecc and hdkeychain

* [NOD-882] Remove HDCoinType from dagParams
2020-03-31 10:58:11 +03:00
Mike Zak
1a38550fdd Update to version 0.3.0 2020-03-29 14:15:17 +03:00
stasatdaglabs
3e7ebb5a84 [NOD-861] Get rid of dbtool/fetchblockregion.go. (#667) 2020-03-29 12:47:13 +03:00
Svarog
4bca7342d3 [NOD-883] Fix dockerfile in kaspaminer + set real version for go-libsecp256k1 (#673) 2020-03-26 17:50:09 +02:00
Elichai Turkel
f80908fb4e [NOD-876] Replace ecc with go-secp256k1 for public keys (#670)
* Replace ecc with go-secp256k1 in txscript

* Replace ecc with go-secp256k1 in util and cmd

* Replace ecc.Multiset with secp256k1.MultiSet
2020-03-26 17:03:39 +02:00
stasatdaglabs
e000e10738 [NOD-880] Remove CGO_ENABLED=0 from Dockerfile. (#671) 2020-03-26 14:02:57 +02:00
Ori Newman
d83862f36c [NOD-855] Save ECMH for block utxo and not diff utxo (#669)
* [NOD-855] Save ECMH for each block UTXO

* [NOD-855] Remove UpdateExtraNonce method

* [NOD-855] Remove multiset data from UTXO diffs

* [NOD-855] Fix to fetch multiset of selected parent

* [NOD-855] Don't remove coinbase inputs from multiset

* [NOD-855] Create multisetBucketName on startup

* [NOD-855] Remove multiset from UTXO diff tests

* [NOD-855] clear new entries from multisetstore on saveChangesFromBlock

* [NOD-855] Fix tests

* [NOD-855] Use UnacceptedBlueScore when adding current block transactions to multiset

* [NOD-855] Hash utxo before adding it to multiset

* [NOD-855] Pass isCoinbase to NewUTXOEntry

* [NOD-855] Do not use hash when adding entries to multiset

* [NOD-855] When calculating multiset, replace the unaccepted blue score of selected parent transaction with the block blue score

* [NOD-855] Manually add a chained transaction to a block in TestChainedTransactions

* [NOD-855] Change name and comments

* [NOD-855] Use FindAcceptanceData to find a specific block acceptance data

* [NOD-855] Remove redundant copy of txIn.PreviousOutpoint

* [NOD-855] Use fmt.Sprintf when creating internalRPCError
2020-03-26 13:06:12 +02:00
Svarog
1020402b34 [NOD-869] Close panicHandlerDone instead of sending an empty struct + use time.After instead of time.Tick (#668) 2020-03-25 16:14:08 +02:00
Mike Zak
bc6ce6ed53 Update version to v0.2.0 2020-03-25 11:51:14 +02:00
Ori Newman
d3b1953deb [NOD-848] optimize utxo diffs serialize allocations (#666)
* [NOD-848] Optimize allocations when serializing UTXO diffs

* [NOD-848] Use same UTXO serialization everywhere, and use compression as well

* [NOD-848] Fix usage of wrong buffer

* [NOD-848] Fix tests

* [NOD-848] Fix wire tests

* [NOD-848] Fix tests

* [NOD-848] Remove VLQ

* [NOD-848] Fix comments

* [NOD-848] Add varint for big endian encoding

* [NOD-848] In TestVarIntWire, assume the expected decoded value is the same as the serialization input

* [NOD-848] Serialize outpoint index with big endian varint

* [NOD-848] Remove p2pk from compression support

* [NOD-848] Fix comments

* [NOD-848] Remove p2pk from decompression support

* [NOD-848] Make entry compression optional

* [NOD-848] Fix tests

* [NOD-848] Fix comments and var names

* [NOD-848] Remove UTXO compression

* [NOD-848] Fix tests

* [NOD-848] Remove big endian varint

* [NOD-848] Fix comments

* [NOD-848] Rename ReadVarIntLittleEndian->ReadVarInt and fix WriteVarInt comment

* [NOD-848] Add outpointIndexByteOrder variable

* [NOD-848] Remove redundant comment

* [NOD-848] Fix outpointMaxSerializeSize to the correct value

* [NOD-848] Move subBuffer to utils
2020-03-24 16:44:41 +02:00
Svarog
3c67215e76 [NOD-796] Upgrade to go 1.14 (#665) 2020-03-22 14:50:13 +02:00
Svarog
586624c836 [NOD-853] Add profiler server to kaspaminer (#664) 2020-03-19 17:19:31 +02:00
Svarog
49855e6333 [NOD-823] Use WithDiffInPlace for the implementation of WithDiff (#657)
* [NOD-823] Use WithDiffInPlace for the implementation of WithDiff

* [NOD-823] Unexport withDiffInPlace
2020-03-17 11:19:02 +02:00
Ori Newman
624249c0f3 [NOD-842] Use flushToDB with the same transaction as everything else in saveChangesFromBlock and never ignore flushToDB errors (#662) 2020-03-16 11:05:17 +02:00
Ori Newman
1cf443a63b [NOD-841] Fix tests to not be dependent on block rate (#661)
* [NOD-841] Fix TestDifficulty

* [NOD-841] Fix TestProcessDelayedBlocks

* [NOD-841] Fix TestCheckBlockSanity

* [NOD-841] Fix TestProcessDelayedBlocks

* [NOD-841] Shorten long lines
2020-03-15 18:08:03 +02:00
Ori Newman
8909679f44 [NOD-818] Remove time adjustment (#658)
* [NOD-818] Remove time adjustment

* [NOD-818] Remove interface ensuring and copyright message

* [NOD-818] Update comment
2020-03-15 17:37:01 +02:00
Ori Newman
e58efbf0ea [NOD-839] Panic from non-rule error from ProcessBlock (#660) 2020-03-15 17:26:53 +02:00
Ori Newman
34fb066590 [NOD-518] Implement getmempoolentry (#656) 2020-03-12 16:00:18 +02:00
stasatdaglabs
299826f392 [NOD-827] Get rid of dbtools insecureimport.go and loadheaders.go (#655)
* [NOD-827] Get rid of dbtools insecureimport.go and loadheaders.go

* [NOD-827] Remove commands from realMain().
2020-03-10 16:31:13 +02:00
stasatdaglabs
3d8dd8724d [NOD-816] Remove TxIndex and AddrIndex (#653)
* [NOD-816] Remove TxIndex.

* [NOD-816] Remove AddrIndex.

* [NOD-816] Remove mentions of TxIndex and AddrIndex.

* [NOD-816] Remove mentions of getrawtransaction.

* [NOD-816] Remove mentions of searchrawtransaction.

* [NOD-816] Remove cmd/addsubnetwork.

* [NOD-816] Fix a comment.

* [NOD-816] Fix a comment.

* [NOD-816] Implement BlockDAG.TxConfirmations.

* [NOD-816] Return confirmations in getTxOut.

* [NOD-816] Rename TxConfirmations to UTXOConfirmations.

* [NOD-816] Rename txConfirmations to utxoConfirmations.

* [NOD-816] Fix capitalization in variable names.

* [NOD-816] Add acceptance index to addblock.

* [NOD-816] Get rid of txrawresult-confirmations.

* [NOD-816] Fix config flag.
2020-03-10 16:09:31 +02:00
Svarog
b8a00f7519 [NOD-778] Optimize RestoreUTXO (#652)
* [NOD-778] Add WithDiffInPlace

* [NOD-778] Fix bug in WithDiffInPlace

* [NOD-778] Add comment to WithDiffInPlace

* [NOD-778] Add double dag.restoreUTXO to benchmark, to remove time for hard-disk loading

* [NOD-778] Also test WithDiffInPlace in TestUTXODiffRules

* [NOD-778] Add tests for all cases possible in TestUTXODiffRules

* [NOD-778] Fix test-case 'first in toAdd in this, second in toRemove in this and toAdd in other'

* [NOD-778] Fixed in WithDiffInPlace

* [NOD-778] Update error messages when diffFrom(withDiffResult) fails in TestUTXODiffRules

* [NOD-778] diffFrom: disallow utxos both in d.toAdd, other.toAdd, and only one of d.toRemove and other.toRemove

* [NOD-778] Fix expected value in 'first in toRemove in this, second in toRemove in other'

* [NOD-778] diffFrom: Disallow situations where utxo both in d.toRemove and other.toRemove with different blue scores and no corresponding utxo in d.toAdd

* [NOD-778] WithDiff: Fix faulty logic that allows updates to blue scores

* [NOD-778] Fix WithDiffInPlace to pass all tests

* [NOD-778] Deleted temporary prints

* [NOD-778] Sorted TestUTXODiffRules tests according to spreadsheet

* [NOD-778] Delete deeputxo_test.go

* [NOD-778] Updated comments

* [NOD-778] Re-order

* [NOD-778] Re-order test-cases to be according to spreadsheet

* [NOD-778] Simplified case when both d.toRemove and other.toRemove have the same outpoint in diffFrom

* [NOD-778] Change a few error messages that say 'transaction' instead of 'outpoint'

* [NOD-778] Rename: utxoToAdd/Remove -> entryToAdd/Remove

* [NOD-788] Remove redundant else

* [NOD-778] Rename: existingUTXO -> existingEntry + remove redundant else

* [NOD-778] Correct test name
2020-03-10 15:32:19 +02:00
stasatdaglabs
4dfc8cf5b0 [NOD-816] Remove addsubnetwork. (#654) 2020-03-10 11:09:33 +02:00
Ori Newman
5a99e4d2f3 [NOD-806] Exit early after panic (#650)
* [NOD-806] After panic, gracefully stop logs, and then exit immediately

* [NOD-806] Convert non-kaspad applications to use the new spawn

* [NOD-806] Fix disabled log at rpcclient

* [NOD-806] Refactor HandlePanic

* [NOD-806] Cancel Logger interface

* [NOD-806] Remove redundant spawn checks from waitgroup_test.go

* [NOD-806] Use caller subsystem when logging panics

* [NOD-806] Fix go vet errors
2020-03-08 11:24:37 +02:00
Svarog
606cd668ff [NOD-810] Fix error text in lookupParentNodes (#651) 2020-03-05 15:49:36 +02:00
Ori Newman
dd537f5143 [NOD-808] Use syndtr/goleveldb instead of btcsuite/goleveldb. (#649) 2020-03-05 12:26:48 +02:00
stasatdaglabs
a1c631be62 [NOD-798] Disconnect from a peer if a block received from it gets rejected (#648)
* [NOD-798] Disconnect from a peer if its block gets rejected.

* [NOD-798] Make a comment less ambiguous.
2020-03-03 09:47:22 +02:00
Ori Newman
707a728656 [NOD-552] Add NormalizeRPCServerAddress and use it where needed (#643)
* [NOD-552] Add NormalizeRPCServerAddress and use it where needed

* [NOD-552] Make NormalizeAddress return an error for an invalid address

* [NOD-552] Use longer lines for a comment
2020-03-01 16:37:26 +02:00
stasatdaglabs
80b5631a48 [NOD-726] Only print "no sync peer" message when not current (#646)
* [NOD-726] Only print "no sync peer" message when not current.

* [NOD-726] Shorten duration in which "no sync peer" messages would not print.
2020-02-27 17:38:39 +02:00
Ori Newman
2373965551 [NOD-576] Rename NextHashes to ChildHashes in GetBlock/GetBlockHeaders rpc call (#645)
* [NOD-576] Rename NextHashes to ChildHashes in GetBlock/GetBlockHeaders rpc call

* [NOD-576] Fix typo
2020-02-27 17:34:38 +02:00
Ori Newman
65cbb6655b [NOD-661] Change BCDB subsystem tag (for logs) to KSDB (#644) 2020-02-27 17:30:08 +02:00
Ori Newman
cdd96d0670 [NOD-664] Remove version from everything inside kaspad/cmd - use kaspad version instead (#642)
* [NOD-664] Remove version from everything inside kaspad/cmd - use kaspad version instead

* [NOD-664] Fix broken import
2020-02-27 13:26:22 +02:00
Dan Aharoni
ad04bbde83 [NOD-782] Make sure errors.As gets parameter that implements error interface (#641)
* [NOD-782] Make sure errors.As gets parameter that implements error interface.

* [NOD-782] Pass pointer to errors.As
2020-02-27 12:27:38 +02:00
Ori Newman
5374d95416 [NOD-656] Log hashrate in kaspaminer (#632)
* [NOD-656] Log hashrate in kaspaminer

* [NOD-656] Measure hash rate in kilohashes

* [NOD-656] Show hash rate once in 10 seconds

* [NOD-656] Put hash rate logic in a separate function

* [NOD-656] Create logHashRateInterval constant
2020-02-24 11:59:02 +02:00
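The NOD-656 entries above describe measuring the miner's hash rate in kilohashes and logging it once every 10 seconds. The following is a rough, self-contained sketch of that idea only; apart from the 10-second interval and the kilohash unit taken from the titles above, every name here is hypothetical and not kaspaminer's actual code.

```go
package main

import (
	"log"
	"time"
)

// logHashRateInterval mirrors the constant named in the commit list above;
// the 10-second value comes from "Show hash rate once in 10 seconds".
const logHashRateInterval = 10 * time.Second

func main() {
	hashCount := 0
	lastLog := time.Now()
	for {
		tryNextNonce() // stand-in for one proof-of-work hashing attempt
		hashCount++

		// Periodically report the rate in kilohashes per second.
		if elapsed := time.Since(lastLog); elapsed >= logHashRateInterval {
			kiloHashesPerSecond := float64(hashCount) / elapsed.Seconds() / 1000
			log.Printf("Current hash rate: %.2f kilohashes/s", kiloHashesPerSecond)
			hashCount = 0
			lastLog = time.Now()
		}
	}
}

// tryNextNonce is a placeholder for the actual hashing work.
func tryNextNonce() {}
```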
Ori Newman
de9aa39cc5 [NOD-721] Add defers (#638)
* [NOD-721] Defer unlocks

* [NOD-721] Add functions with locks to rpcmodel

* [NOD-721] Defer unlocks

* [NOD-721] Add filterDataWithLock function

* [NOD-721] Defer unlocks

* [NOD-721] Defer .Close()

* [NOD-721] Fix access to wsc.filterData without a lock

* [NOD-721] De-anonymize some anonymous functions

* [NOD-721] Remove redundant assignments

* [NOD-721] Remove redundant assignments

* [NOD-721] Remove redundant assignments

* [NOD-721] Get rid of submitOld, and break handleGetBlockTemplateLongPoll to smaller functions

* [NOD-721] Rename existsUnspentOutpoint->existsUnspentOutpointNoLock, existsUnspentOutpointWithLock->existsUnspentOutpoint

* [NOD-721] Rename filterDataWithLock->FilterData

* [NOD-721] Fixed comments
2020-02-24 09:19:44 +02:00
Ori Newman
98987f4a8f [NOD-603] Update validateParents to use reachability (#640)
* [NOD-603] Update validateParents to use reachability

* [NOD-603] Break a long line

* [NOD-721] Remove redundant check if block parent is a tip
2020-02-24 08:59:12 +02:00
Ori Newman
9745f31b69 [NOD-693] Update link to license (#639) 2020-02-20 17:12:53 +02:00
Ori Newman
ee08531a52 [NOD-610] Rename newSet->newBlockSet and setFromSlice->blockSetFromSlice (#635) 2020-02-20 16:19:28 +02:00
stasatdaglabs
61baf7b260 [NOD-769] Add a log for when a reachability reindex occurs (#637)
*  [NOD-719] Added defers to unlocks  (#618)

* [NOD-719] Added defers to unlocks

* [NOD-719] Added another defer to another Unlock

* [NOD-719] Added yet another defer to yet another Unlock

* [NOD-747] Change FinalityInterval to be 24 hours, isCurrent to be true if the DAG's time is less than 12 hours behind the present, and change MaxInvPerMsg to be 1 << 17 (#625)

* [NOD-769] Add a log for when a reachability reindex occurs.

Co-authored-by: Svarog <feanorr@gmail.com>
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2020-02-19 13:39:45 +02:00
Ori Newman
650e4f735e [NOD-757] Readd addrmanager tests (#628) 2020-02-18 18:12:19 +02:00
Dan Aharoni
550b12b041 [NOD-772] Fix a bug where we ignore the return value of forAllOutboundPeers. (#636) 2020-02-18 18:02:15 +02:00
Ori Newman
a4bb070722 [NOD-754] Fix staticcheck errors (#627)
* [NOD-754] Fix staticcheck errors

* [NOD-754] Remove some unused exported functions

* [NOD-754] Fix staticcheck errors

* [NOD-754] Don't panic if out/in close fails

* [NOD-754] Wrap outside errors with custom message
2020-02-18 16:56:38 +02:00
Ori Newman
30fe0c279b [NOD-738] Move rpcmodel helper functions to pointers package (#629)
* [NOD-738] Move rpcmodel helper functions to copytopointer package

* [NOD-738] Rename copytopointer->pointers
2020-02-18 14:06:34 +02:00
stasatdaglabs
e405dd5981 [NOD-694] Fix requesting blocks that will surely be orphaned during netsync. (#630) 2020-02-18 12:12:34 +02:00
stasatdaglabs
243b4b8021 [NOD-765] Fix database corruption after restart in reachabilitystore and utxodiffstore. (#634) 2020-02-18 12:04:50 +02:00
Ori Newman
dd4c93e1ef [NOD-759] Merge v0.1.1-dev into v0.1.2-dev (#633)
*  [NOD-719] Added defers to unlocks  (#618)

* [NOD-719] Added defers to unlocks

* [NOD-719] Added another defer to another Unlock

* [NOD-719] Added yet another defer to yet another Unlock

* [NOD-747] Change FinalityInterval to be 24 hours, isCurrent to be true if the DAG's time is less than 12 hours behind the present, and change MaxInvPerMsg to be 1 << 17 (#625)
2020-02-18 11:02:25 +02:00
Ori Newman
a07335d74d [NOD-737] Remove btc prefix from util file names (#631) 2020-02-17 13:11:24 +02:00
Dan Aharoni
7567cd4cb9 [NOD-744] Wrap go routines with spawn (#626)
* [NOD-744] Wrap go routines with spawn

* [NOD-747] Wrap some more go routines with spawn

* [NOD-744] Some more missing go routines

* [NOD-744] Break lines so make code more readable

* [NOD-744] Declare a local scope variable so the func would use it.

* [NOD-744] Fix type and update comment.

* [NOD-744] Declare local var so go routine would use it

* [NOD-744] Rename variable, use normal assignment;

* [NOD-744] Rename variable.
2020-02-13 13:10:07 +02:00
stasatdaglabs
51ff9e2562 [NOD-571] Cover ghostdag in tests where possible (#613)
* [NOD-571] Cover reachabilityInterval split methods.

* [NOD-571] Cover reindexInterval.

* [NOD-571] Cover reachability String() methods.

* [NOD-571] Cover blueAnticoneSize.

* [NOD-571] Remove unnecessary error from setTreeNode.

* [NOD-571] Add TestGHOSTDAGErrors.

* [NOD-571] Use PrepareBlockForTest in TestBlueAnticoneSizeErrors.

* [NOD-571] Use PrepareBlockForTest in TestGHOSTDAGErrors.

* [NOD-571] Add substring checks to TestSplitFractionErrors.

* [NOD-571] Add substring checks to TestSplitExactErrors and TestSplitWithExponentialBiasErrors.

* [NOD-571] Add comments to TestReindexIntervalErrors.

* [NOD-571] Add additional info in some error messages.

* [NOD-571] Fix error messages.
2020-02-09 11:27:10 +02:00
stasatdaglabs
5b8ab63890 [NOD-717] Fix nodes getting stuck in an infinite loop in addrManager.getAddress (#624)
* [NOD-717] Fix nodes getting stuck in an infinite loop in addrManager.getAddress.

* [NOD-717] Rename ResetFailedAttempts -> NotifyConnectionRequestComplete.
2020-02-06 18:17:10 +02:00
Dan Aharoni
3dd7dc4496 [NOD-727] Do not allow delayed blocks from RPC. (#623)
* [NOD-727] Do not allow delayed blocks from RPC.

* [NOD-727] Refactor sentFromRPC -> DisallowDelay

* [NOD-727] Clarify comment; Clarify error message.

* [NOD-727] Change error message.
2020-02-05 11:14:26 +02:00
Ori Newman
d90a08ecfa [NOD-722] Fix processBlockMsg case in blockHandler to send only one response to msg.reply, and rename blockHandler->messageHandler (#622) 2020-02-04 18:10:15 +02:00
Ori Newman
45dc1a3e7b [NOD-545] Remove headers first related logic (#621)
* [NOD-545] Remove headers first related logic

* [NOD-545] Fix tests

* [NOD-545] Change getTopHeadersMaxHeaders to be equal to getHeadersMaxHeaders
2020-02-04 14:54:42 +02:00
Ori Newman
4ffb5daa37 [NOD-622] Fix populateTemplateFromCandidates to sort txsForBlockTemplate.txMasses and txsForBlockTemplate.txFees (#617)
* [NOD-622] Fix populateTemplateFromCandidates to sort txsForBlockTemplate.txMasses and txsForBlockTemplate.txFees

* [NOD-622] Sort transactions in PrepareBlockForTest

* [NOD-622] Remove duplicate append of selected transactions
2020-02-03 13:42:40 +02:00
Ori Newman
b9138b720d [NOD-597] Make BlockIndex clear its dirty entries only after it successfully written them to disk (#620) 2020-02-03 13:39:25 +02:00
Ori Newman
d8954f1339 [NOD-615] Make bluesAnticoneSizes a map with *blockNode as a key (#619) 2020-02-03 12:40:39 +02:00
Ori Newman
eb953286ec [NOD-641] Upgrade to github.com/pkg/errors v0.9.1 and use errors.As where needed (#614)
* [NOD-641] Upgrade to github.com/pkg/errors v0.9.1 and use errors.As where needed

* [NOD-641] Fix find and replace error

* [NOD-641] Use errors.As for error type checking

* [NOD-641] Fix errors.As for pointer types

* [NOD-641] Use errors.As where needed

* [NOD-641] Rename rErr->ruleErr

* [NOD-641] Rename derr->dbErr

* [NOD-641] e->flagsErr where necessary

* [NOD-641] change jerr to more appropriate name

* [NOD-641] Rename cerr->bdRuleErr

* [NOD-641] Rename serr->scriptErr

* [NOD-641] Use errors.Is instead of testutil.AreErrorsEqual in TestNewHashFromStr

* [NOD-641] Rename bdRuleErr->dagRuleErr

* [NOD-641] Rename mErr->msgErr

* [NOD-641] Rename dErr->deserializeErr
2020-02-03 12:38:33 +02:00
Ori Newman
41c8178ad3 [NOD-648] Add TestProcessDelayedBlocks (#612)
* [NOD-648] Add TestProcessDelayedBlocks

* [NOD-648] Add one second to secondsUntilDelayedBlockIsValid to make sure the delayedBlock timestamp will be valid, and add comments

* [NOD-648] Remove redundant import

* [NOD-648] Use fakeTimeSource instead of time.Sleep

* [NOD-648] Rename dag.HaveBlock->dag.IsKnownBlock,  dag.BlockExists->dag.IsInDAG

* [NOD-648] Add comment

* [NOD-641] Rename HaveBlock->IsKnownBlock, BlockExists->IsInDAG
2020-02-03 11:30:03 +02:00
Ori Newman
aa74b51e6f [NOD-687] Remove -gcflags='-l' from all tests (#616) 2020-02-02 15:26:26 +02:00
Mike Zak
f7800eb5c4 Merge remote-tracking branch 'origin/v0.1.1-dev' into v0.1.2-dev 2020-02-02 15:17:25 +02:00
stasatdaglabs
193add502f [NOD-716] Fix a crash in GetTopHeaders. (#615) 2020-02-02 13:51:53 +02:00
Ori Newman
44c55900f8 [NOD-715] Replace testDbRoot with os.TempDir() (#611) 2020-01-30 12:54:15 +02:00
Ori Newman
4c0ea78026 [NOD-586] Remove subTreeSize from reachabilityTreeNode (#610)
* [NOD-586] Remove subTreeSize from reachabilityTreeNode

* [NOD-586] Convert else { if { ... } } to else if { ... }
2020-01-30 10:39:53 +02:00
stasatdaglabs
03a93fe51e [NOD-647] Create a default config file even if the sample default config file is missing (#609)
* [NOD-647] Create a default config file even if the sample default config file is missing.

* [NOD-647] Unfancify WriteString().
2020-01-29 17:40:59 +02:00
Mike Zak
eca0514465 Merge remote-tracking branch 'origin/v0.1.1-dev' into v0.1.2-dev 2020-01-28 14:39:22 +02:00
stasatdaglabs
aadbebb720 [NOD-691] Remove addTrying from AddrManager. (#608) 2020-01-28 13:57:02 +02:00
Mike Zak
5daab45947 Merge remote-tracking branch 'origin/v0.1.1-dev' into v0.1.2-dev 2020-01-28 12:25:52 +02:00
stasatdaglabs
607b838ded [NOD-702] Fix netsync slowing down significantly due to excessive allocs in serializeUTXO (#605)
* [NOD-702] Fix netsync slowing down significantly due to excessive allocs in serializeUTXO.

* [NOD-702] Fix bad make statement.

* [NOD-702] Move writeBuffer to flushToDB.
2020-01-28 12:24:09 +02:00
Mike Zak
25bdaeed31 Merge remote-tracking branch 'origin/v0.1.1-dev' into v0.1.2-dev 2020-01-28 11:35:21 +02:00
Dan Aharoni
8b2d3f07ce [NOD-636] Prevent db corruption on crash. (#607)
* [NOD-636] Scope err so the anonymous defer function will get it.

* [NOD-636] Add comment to explain why this line is needed.

* [NOD-636] Edit comment.
2020-01-28 11:04:41 +02:00
Mike Zak
a3dc2f7da7 Update version to v0.1.2 2020-01-28 10:53:10 +02:00
Svarog
bf36f9ceb6 [NOD-704] Call dag.IsInSelectedParentChain in every iteration of dag.SelectedParentChain (#606) 2020-01-27 17:09:41 +02:00
stasatdaglabs
11de12304e [NOD-700] Convert blockSet to map[*blockNode]struct{} (#604)
* [NOD-700] Convert blockSet to map[*blockNode]struct{}.

* [NOD-700] Rename bluestNode to bluestBlock in bluest().

* [NOD-700] Make IsInSelectedParentChain not use the now-slower containsHash.

* [NOD-700] Rename block to node in blockset.go.

* [NOD-700] Remove containsHash and hashesEqual.

* [NOD-700] Add a comment to IsInSelectedParentChain about how it'll fail if the given blockHash is not within the block index.
2020-01-27 11:48:58 +02:00
Ori Newman
a10320ad7b [NOD-696] Handle panics on time.AfterFunc (#600)
* [NOD-696] Handle panics on time.AfterFunc

* [NOD-696] Fix comment

* [NOD-696] Rename afterFunc->spawnAfter
2020-01-27 11:12:23 +02:00
Ori Newman
fd2bbf3557 [NOD-698] Change confirmations to be selectedTip.blueScore-acceptingBlock.blueScore+1 (#602) 2020-01-27 11:10:27 +02:00
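The entry above states the confirmation count directly in terms of blue scores. As a minimal illustration of that arithmetic only, here is a sketch with hypothetical function and parameter names (these are not kaspad identifiers):

```go
package example

// utxoConfirmations sketches the formula from the commit title above:
// confirmations = selectedTip.blueScore - acceptingBlock.blueScore + 1.
func utxoConfirmations(selectedTipBlueScore, acceptingBlockBlueScore uint64) uint64 {
	// A UTXO accepted by the selected tip itself has exactly 1 confirmation.
	return selectedTipBlueScore - acceptingBlockBlueScore + 1
}
```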
stasatdaglabs
7f9cf17274 [NOD-697] In blockLocator, return an error if lowHash blueScore >= highHash blueScore. (#601) 2020-01-26 15:44:58 +02:00
Ori Newman
ba0e239557 [NOD-692] Fix thresholdState to use blueBlockWindow instead of selected chain height (#599) 2020-01-23 17:17:56 +02:00
Dan Aharoni
ed606bfda3 [NOD-575] Change devent address prefix to kaspadev. (#598) 2020-01-23 15:36:40 +02:00
Dan Aharoni
c0463a8a68 [NOD-675] Replace start-hash/stop-hash with meaningful names (#597)
* [NOD-675] Rename startHash/stopHash to lowHigh/stopHash

* [NOD-675] Fix typo

* [NOD-675] Undo go.mod go.sum conflicts

* [NOD-675] revert back to startHash for getChainFromBlock.

* [NOD-675] Revert back to startHash in getChainFromBlock leftovers.

* [NOD-675] Fix test name.
2020-01-22 18:14:42 +02:00
Dan Aharoni
52e0a0967d [NOD-629] change GHOSTDAG k to uint8 (#594)
* [NOD-629] Change GHOSTDAG K to a single-byte type

* [NOD-629] Rename variable

* [NOD-629] Rename K to KSize

* [NOD-629] Remove redundant casting

* [NOD-629] Add test for KSize

* [NOD-629] Separate block serialization and db store

* [NOD-629] Make sure K is serialized as uint8

* [NOD-629] Rename KSize to KType

* [NOD-629] Comment for test

* [NOD-629] Change fail message

* [NOD-629] Remove newlines

* [NOD-629] Fix test

* [NOD-629] Do not use maxuint8, but !0 instead

* [NOD-629] Fix test

* [NOD-629] Merge conflict

* [NOD-629] Fix test; Update comment
2020-01-22 16:58:53 +02:00
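
The NOD-629 commit above narrows GHOSTDAG's K parameter to a single byte behind its own type and serializes it as exactly one byte. A hedged sketch of that idea, including the "!0" maximum-value trick mentioned in the bullets (names illustrative, not kaspad's actual definitions):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// KType follows the idea in the commit above: GHOSTDAG's K parameter fits in
// a single byte, so it gets its own uint8-based type and is always serialized
// as exactly one byte.
type KType uint8

// maxK uses the "!0"-style trick mentioned in the commit instead of math.MaxUint8.
const maxK = ^KType(0)

// serializeK writes K as a single byte.
func serializeK(w io.Writer, k KType) error {
	_, err := w.Write([]byte{byte(k)})
	return err
}

// deserializeK reads K back as a single byte.
func deserializeK(r io.Reader) (KType, error) {
	var buf [1]byte
	if _, err := io.ReadFull(r, buf[:]); err != nil {
		return 0, err
	}
	return KType(buf[0]), nil
}

func main() {
	var buf bytes.Buffer
	if err := serializeK(&buf, 18); err != nil {
		panic(err)
	}
	k, err := deserializeK(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(k, maxK) // 18 255
}
```
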
Ori Newman
29bcc271b5 [NOD-652] Add selected tip and get selected tip messages (#595)
* [NOD-652] Add selectedTip and getSelectedTip messages

* [NOD-652] Remove peerSyncState.isSelectedTipKnown

* [NOD-652] Do nothing on OnSelectedTip if the peer selected tip hasn't changed

* [NOD-652] Handle selected tip message with block handler

* [NOD-652] Add comments

* [NOD-652] go mod tidy

* [NOD-652] Fix TestVersion

* [NOD-652] Use dag.AdjustedTime instead of dag.timeSource.AdjustedTime

* [NOD-652] Create shouldQueryPeerSelectedTips and queueMsgGetSelectedTip functions

* [NOD-652] Change selectedTip to selectedTipHash where needed

* [NOD-652] add minDAGTimeDelay constant

* [NOD-652] add comments

* [NOD-652] Fix names and comments

* [NOD-652] Put msg.reply push in the right place

* [NOD-652] Fix comments and names
2020-01-22 16:34:21 +02:00
stasatdaglabs
94ec159147 [NOD-676] Remove blockLocator defaults. (#596) 2020-01-22 14:05:01 +02:00
stasatdaglabs
9d434de4a5 [NOD-640] Revamp blueBlocksBetween to return up to maxEntries from lowNode's antiPast to highNode's antiFuture (#593)
* [NOD-640] Revamp blueBlocksBetween to return up to maxEntries from highNode's antiFuture.

* [NOD-640] Fix bad traversal.

* [NOD-640] Use more accurate len.

* [NOD-640] Use more appropriate len in another place.

* [NOD-640] Remove the whole business with highNode's anticone.

* [NOD-640] Rename highNodeAntiFuture to candidateNodes.

* [NOD-640] Explain the highNode.blueScore-lowNode.blueScore+1 approximation.

* [NOD-640] UpHeap -> upHeap.

* [NOD-640] Fix off-by-one error.

* [NOD-640] Rename blueBlocksBetween to antiPastBetween.

* [NOD-640] upHeap -> up-heap.

* [NOD-640] Use a classic for to populate nodes.

* [NOD-640] Reworded a comment.

* [NOD-640] Clarify a comment.

* [NOD-640] Fix nodes declaration.
2020-01-22 12:13:55 +02:00
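
The NOD-640 commit above reworks blueBlocksBetween into antiPastBetween: collect up to maxEntries blocks that are in highNode's past (plus highNode itself) but outside lowNode's past, using an up-heap ordered by blue score and the highNode.blueScore-lowNode.blueScore+1 approximation as a size estimate. The following is only a rough, self-contained sketch of that traversal shape; a plain queue stands in for the heap and isInPastOf is supplied by the caller:

```go
package main

import "fmt"

// blockNode is a pared-down stand-in for kaspad's block node: just a blue
// score and parent links, enough to sketch the traversal described above.
type blockNode struct {
	name      string
	blueScore uint64
	parents   []*blockNode
}

// antiPastBetween walks back from highNode through parents, keeps every block
// that is not in lowNode's past, and stops once maxEntries results were
// collected.
func antiPastBetween(lowNode, highNode *blockNode, maxEntries int,
	isInPastOf func(node, other *blockNode) bool) []*blockNode {

	var result []*blockNode
	visited := map[*blockNode]bool{}
	queue := []*blockNode{highNode}
	for len(queue) > 0 && len(result) < maxEntries {
		current := queue[0]
		queue = queue[1:]
		if visited[current] {
			continue
		}
		visited[current] = true
		// Anything in lowNode's past (or lowNode itself) is pruned together
		// with its ancestors, since those ancestors are in the past too.
		if current == lowNode || isInPastOf(current, lowNode) {
			continue
		}
		result = append(result, current)
		queue = append(queue, current.parents...)
	}
	return result
}

func main() {
	// A tiny chain a <- b <- c; in a chain, "in the past" is just a lower blue score.
	a := &blockNode{name: "a", blueScore: 1}
	b := &blockNode{name: "b", blueScore: 2, parents: []*blockNode{a}}
	c := &blockNode{name: "c", blueScore: 3, parents: []*blockNode{b}}
	inPast := func(node, other *blockNode) bool { return node.blueScore < other.blueScore }
	for _, n := range antiPastBetween(a, c, 10, inPast) {
		fmt.Println(n.name) // c, then b
	}
}
```
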
stasatdaglabs
49418f4222 [NOD-669] Rename start/endHash -> low/highHash (#591)
* [NOD-669] Remove the "get" from getBlueBlocksBetween.

* [NOD-669] Remove the "Get" from GetBlueBlocksHeadersBetween.

* [NOD-669] In blueBlocksBetween, rename startHash to lowHash and stopHash to highHash.

* [NOD-669] Rename startHash to lowHash and stopHash to highHash in blockLocator logic.

* [NOD-669] Remove zeroHash logic in blockLocator.

* [NOD-669] Finish renaming startHash and stopHash in blockdag.

* [NOD-669] Rename startHash and stopHash in blockdag where I previously missed it.

* [NOD-669] Rename startHash and stopHash in blockdag where I previously missed it some more.

* [NOD-669] Rename startHash and stopHash in blockdag where I previously missed it some more some more.

* [NOD-669] Fix bad grammar in method names.

* [NOD-669] Rename lowHash to blockHash in SelectedParentChain.

* [NOD-669] Fix a comment.
2020-01-20 12:47:16 +02:00
stasatdaglabs
38b4749f20 [NOD-669] Fix startSync sending a blockLocatorMsg with a zeroHash instead of the peer's selectedTip (#592)
* [NOD-669] Fix startSync sending a blockLocatorMsg with a zeroHash instead of the peer's selectedTip.

* [NOD-669] Rename bestPeer to syncPeer.

* [NOD-669] Fix comments.
2020-01-20 12:29:17 +02:00
stasatdaglabs
045984e6b9 [NOD-665] Initialize blockNode blueScore to be MaxUint64 by default (#590)
* [NOD-665] Initialize blockNode blueScore to be MaxUint64 by default.

* [NOD-665] Remove redundant err declaration.
2020-01-19 15:21:43 +02:00
Ori Newman
38883d1a98 [NOD-650] Remove CPU miner from the node and add kaspaminer in ./cmd (#587)
* [NOD-650] Add kaspaminer

* [NOD-650] Remove CPU miner

* [NOD-650] Fix comments and error messages

* [NOD-650] Remove redundant check for closing foundBlock

* [NOD-650] Submit block synchronously

* [NOD-650] Use ParseUint instead of ParseInt

* [NOD-650] Rearrange functions order in mineloop.go

* [NOD-650] Add block delay CLI argument to kaspaminer

* [NOD-650] Remove redundant spawn

* [NOD-650] Add Dockerfile for kaspaminer

* [NOD-650] Remove redundant comments

* [NOD-650] Remove tests from kaspaminer Dockerfile

* [NOD-650] Remove redundant argument on OnFilteredBlockAdded
2020-01-19 15:18:26 +02:00
stasatdaglabs
b5f365d282 [NOD-668] Rename height to blueScore in OnFilteredBlockAdded. (#589) 2020-01-16 14:10:26 +02:00
stasatdaglabs
a7d3a40465 [NOD-646] Fix initDAGState treating invalid blocks as genesis blocks. (#588) 2020-01-16 13:21:20 +02:00
stasatdaglabs
359b16fca9 [NOD-616] Remove blockNode.chainHeight (#586)
* [NOD-616] Remove unused methods from BlockDAG.

* [NOD-616] Remove Height from GetRawMempoolVerboseResult and TxDesc.

* [NOD-616] Replaced BlockDAG.ChainHeight with SelectedTipBlueScore.

* [NOD-616] Remove the unused BlockChainHeightByHash.

* [NOD-616] Remove the unused blockChainHeight from checkBlockHeaderContext.

* [NOD-616] Remove chainHeight from util.Block.

* [NOD-616] Remove TestChainHeight.

* [NOD-616] Update unknown rule activation warning to use blueScore.

* [NOD-616] Update thresholdState to use blueScore instead of chainHeight.

* [NOD-616] Update blockLocator to use blueScore instead of chainHeight.

* [NOD-616] Remove blockNode.chainHeight.

* [NOD-616] Fix comments and variable names.

* [NOD-616] Replace a weird for loop with a while loop.

* [NOD-616] Fix a comment.

* [NOD-616] Remove pre-allocation in blockLocator.

* [NOD-616] Coalesce checks that startHash and stopHash are not the same into the same condition.

* [NOD-616] Fix a comment.

* [NOD-616] Remove weird blueScore logic around childHashStrings.

* [NOD-616] Fix hash pointer comparison.

* [NOD-616] Fix a comment.

* [NOD-616] Add ban score to peers misusing GetBlockLocator.

* [NOD-616] Replace adding ban score with disconnecting.

* [NOD-616] Add blueScore to FilteredBlockAddedNtfn.
2020-01-16 13:09:16 +02:00
Mike Zak
8b8e73feb5 Merge branch 'v0.1.1-dev' of github.com:kaspanet/kaspad into v0.1.1-dev 2020-01-13 16:59:40 +02:00
Svarog
6044b6ac1a [NOD-643] Remove monkey patch (#585)
* [NOD-643] Removed any mentions of monkey.Patch

* [NOD-643] Removed monkey from go.mod
2020-01-13 16:59:16 +02:00
stasatdaglabs
a177ea4f15 [NOD-555] Remove build scripts. (#581) 2020-01-13 13:37:21 +02:00
Mike Zak
3a15aa4bae Update version to v0.1.1 2020-01-13 12:24:36 +02:00
Ori Newman
427185b6a8 [NOD-635] Change testnet maximum difficulty (#582) 2020-01-09 18:10:23 +02:00
Svarog
b282734a3f [NOD-626] Delete CHANGES file (#580) 2020-01-08 18:43:50 +02:00
Ori Newman
6d765f58ba [NOD-570] Separate genesis variables for different networks (#578)
* [NOD-570] Separate genesis variables for different networks

* [NOD-570] Make Testnet genesis

* [NOD-570] Make simnet and regtest genesis

* [NOD-570] Remake devnet genesis

* [NOD-570] Rename regNet -> regTest, testnet -> testNet

* [NOD-570] Change network names to one word instead of camel case

* [NOD-570] Change network names to one word instead of camel case

* [NOD-570] Fix test names

* [NOD-570] Fix TestGHOSTDAG

Co-authored-by: Dan Aharoni <dereeno@protonmail.com>
2020-01-08 18:42:47 +02:00
Svarog
20819ca4cd [NOD-505] Try reading the response in case of upnp error (#576) 2020-01-08 17:51:31 +02:00
Ori Newman
2174a0a7f2 [NOD-497] Implement GHOSTDAG (#575)
* [NOD-540] Implement reachability (#545)

* [NOD-540] Begin implementing reachability.

* [NOD-540] Finish implementing reachability.

* [NOD-540] Implement TestIsFutureBlock.

* [NOD-540] Implement TestInsertFutureBlock.

* [NOD-540] Add comments.

* [NOD-540] Add comment for interval in blockNode.

* [NOD-540] Updated comments over insertFutureBlock and isFutureBlock.

* [NOD-540] Implement interval splitting methods.

* [NOD-540] Begin implementing tree manipulation in blockNode.

* [NOD-540] Implement countSubtreesUp.

* [NOD-540] Add a comment explaining an impossible condition.

* [NOD-540] Implement applyIntervalDown.

* [NOD-540] Moved the reachability tree stuff into reachability.go.

* [NOD-540] Add some comments.

* [NOD-540] Add more comments, implement isInPast.

* [NOD-540] Fix comments.

* [NOD-540] Implement TestSplitFraction.

* [NOD-540] Implement TestSplitExact.

* [NOD-540] Implement TestSplit.

* [NOD-540] Add comments to structs.

* [NOD-540] Implement TestAddTreeChild.

* [NOD-540] Fix a comment.

* [NOD-540] Rename isInPast to isAncestorOf.

* [NOD-540] Rename futureBlocks to futureCoveringSet.

* [NOD-540] Rename isFutureBlock to isInFuture.

* [NOD-540] move reachabilityInterval to the top of reachability.go.

* [NOD-540] Change "s.t." to "such that" in a comment.

* [NOD-540] Fix indentation.

* [NOD-540] Fix a potential bug involving float inaccuracy.

* [NOD-540] Wrote a more descriptive error message.

* [NOD-540] Fix error message.

* [NOD-540] Fix the recursive countSubtreesUp.

* [NOD-540] Rename countSubtreesUp to countSubtrees and applyIntervalDown to propagateInterval.

* [NOD-540] Implement updating reachability for a valid new block.

* [NOD-540] Implement a disk storage for reachability data.

* [NOD-540] Fix not all tree nodes being written to the database.

* [NOD-540] Implement serialization for reachabilityData.

* [NOD-540] Implement some deserialization for reachabilityData.

* [NOD-540] Implement restoring the reachabilityStore on node restart.

* [NOD-540] Made interval and remainingInterval pointers.

* [NOD-540] Rename setTreeInterval to setInterval.

* [NOD-540] Rename reindexTreeIntervals to reindexIntervals and fixed the comment above it.

* [NOD-540] Expand the comment above reindexIntervals.

* [NOD-540] Fix comment above countSubtrees.

* [NOD-540] Fix comment above countSubtrees some more.

* [NOD-540] Fix comment above split.

* [NOD-540] Fix comment above isAncestorOf.

* [NOD-540] Fix comment above reachabilityTreeNode.

* [NOD-540] Fix weird condition in addTreeChild.

* [NOD-540] Rename addTreeChild to addChild.

* [NOD-540] Fix weird condition in splitFraction.

* [NOD-540] Reverse the lines in reachabilityTreeNode.String().

* [NOD-540] Renamed f to fraction and x to size.

* [NOD-540] Fix comment above bisect.

* [NOD-540] Implement rtn.isAncestorOf().

* [NOD-540] Use treeNode isAncestorOf instead of treeInterval isAncestorOf.

* [NOD-540] Use newReachabilityInterval instead of struct initialization.

* [NOD-540] Make reachabilityTreeNode.String() use strings.Join.

* [NOD-540] Use sync.RWMutex instead of locks.PriorityMutex.

* [NOD-540] Rename thisTreeNode to newTreeNode.

* [NOD-540] Rename setTreeNode to addTreeNode.

* [NOD-540] Extracted selectedParentAnticone to a separate function.

* [NOD-540] Rename node to this.

* [NOD-540] Move updateReachability and isAncestorOf from dag.go to reachability.go.

* [NOD-540] Add whitespace after multiline function signatures in reachability.go.

* [NOD-540] Make splitFraction return an error on empty interval.

* [NOD-540] Add a comment about rounding to splitFraction.

* [NOD-540] Replace sneaky tabs with spaces.

* [NOD-540] Rename split to splitExponential.

* [NOD-540] Extract exponentialFractions to a separate function.

* [NOD-540] Rename bisect to findIndex.

* [NOD-540] Add call to reachabilityStore.clearDirtyEntries at the end of saveChangesFromBlock.

* [NOD-540] Explain the dirty hack in reachabilityStore.init().

* [NOD-540] Split the function signature for deserializeReachabilityData to two lines.

* [NOD-540] Add a comment about float precision loss to exponentialFractions.

* [NOD-540] Corrected a comment about float precision loss to exponentialFractions.

* [NOD-540] Fixed a comment about float precision loss to exponentialFractions some more.

* [NOD-540] Added further comments above futureCoveringBlockSet.

* [NOD-540] Rename addTreeNode to setTreeNode.

* [NOD-540] Rename splitExponential to splitWithExponentialBias.

* [NOD-540] Fix object references in reachabilityData deserialization (#563)

* [NOD-540] Fix broken references in deserialization.

* [NOD-540] Fix broken references in futureCoveringSet deserialization. Also add comments.

* [NOD-540] Don't deserialize on the first pass in reachabilityStore.init().

* [NOD-540] Remove redundant assignment to loaded[hash].

* [NOD-540] Use NewHash instead of SetBytes. Rename data to destination.

* [NOD-540] Preallocate futureCoveringSet.

* [NOD-541] Implement GHOSTDAG (#560)

* [NOD-541] Implement GHOSTDAG

* [NOD-541] Replace the old PHANTOM variant with GHOSTDAG

* [NOD-541] Move dag.updateReachability to the top of dag.applyDAGChanges to update reachability before the virtual block is updated

* [NOD-541] Fix blueAnticoneSize

* [NOD-541] Initialize node.bluesAnticoneSizes

* [NOD-541] Fix pastUTXO and applyBlueBlocks blues order

* [NOD-541] Add serialization logic to node.bluesAnticoneSizes

* [NOD-541] Fix GHOSTDAG to not count the new block and the blue candidates anticone, add selected parent to blues, and save to node.bluesAnticoneSizes properly

* [NOD-541] Fix test names in inner strings

* [NOD-541] Writing TestGHOSTDAG

* [NOD-541] In blueAnticoneSize change node->current

* [NOD-541] name ghostdag return values

* [NOD-541] fix ghostdag to return slice

* [NOD-541] Split k-cluster violation rules

* [NOD-541] Add missing space

* [NOD-541] Add comment to ghostdag

* [NOD-541] In selectedParentAnticone rename past->selectedParentPast

* [NOD-541] Fix misreferences to TestChainUpdates

* [NOD-541] Fix ghostdag comment

* [NOD-541] Make PrepareBlockForTest in blockdag package

* [NOD-541] Make PrepareBlockForTest in blockdag package

* [NOD-541] Assign to selectedParentAnticone[i] instead of appending

* [NOD-541] Remove redundant forceTransactions arguments from PrepareBlockForTest

* [NOD-541] Add non-selected parents to anticoneHeap

* [NOD-541] add test for ghostdag

* [NOD-541] Add comments

* [NOD-541] Use adjusted time for initializing blockNode

* [NOD-541] Rename isAncestorOf -> isAncestorOfBlueCandidate

* [NOD-541] Remove params from PrepareBlockForTest

* [NOD-541] Fix TestChainHeight

* [NOD-541] Remove recursive lock

* [NOD-541] Fix TestTxIndexConnectBlock

* [NOD-541] Fix TestBlueBlockWindow

* [NOD-541] Put prepareAndProcessBlock in common_test.go

* [NOD-541] Fix TestConfirmations

* [NOD-541] Fix TestAcceptingBlock

* [NOD-541] Fix TestDifficulty

* [NOD-541] Fix TestVirtualBlock

* [NOD-541] Fix TestSelectedPath

* [NOD-541] Fix TestChainUpdates

* [NOD-541] Shorten TestDifficulty test time

* [NOD-541] Make PrepareBlockForTest use minimal valid block time

* [NOD-541] Remove TODO comment

* [NOD-541] Move blockdag related mining functions to mining.go

* [NOD-541] Use NextBlockCoinbaseTransaction instead of NextBlockCoinbaseTransactionNoLock in NextCoinbaseFromAddress

* [NOD-541] Remove useMinimalTime from BlockForMining

* [NOD-541] Make MedianAdjustedTime a *BlockDAG method

* [NOD-541] Fix ghostdag to use anticone slice instead of heap

* [NOD-541] Fix NewBlockTemplate locks

* [NOD-541] Fix ghostdag comments

* [NOD-541] Convert MedianAdjustedTime to NextBlockTime

* [NOD-541] Fix ghostdag comment

* [NOD-541] Fix TestGHOSTDAG comment

* [NOD-541] Add comment before sanity check

* [NOD-541] Explicitly initialize .blues in ghostdag

* [NOD-541] Rename *blockNode.lessThan to *blockNode.less

* [NOD-541] Remove redundant check if block != chainBlock

* [NOD-541] Fix comment

* [NOD-541] Fix comment

* [NOD-497] Add comment; General refactoring

* [NOD-497] General refactoring.

* [NOD-497] Use isAncestor of the tree rather than the node

* [NOD-497] Remove reachability mutex lock as it is redundant (dag lock is held so no need); General refactoring.

* [NOD-497] Update comment

* [NOD-497] Undo test blocktimestamp

* [NOD-497] Update comments; Use BlockNode.less for blockset;

* [NOD-497] Change processBlock to return boolean and not the delay duration (merge conflict)

* [NOD-497] Undo change for bluest to use less; Change blocknode less to use daghash.Less

Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
Co-authored-by: Dan Aharoni <dereeno@protonmail.com>
2020-01-08 17:41:22 +02:00
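
Much of the large NOD-497/NOD-540 commit above builds interval-based reachability: every reachability tree node is assigned an interval, children receive disjoint sub-intervals, and ancestry can then be answered by containment alone. A minimal sketch of that containment check (simplified inclusive intervals, not the actual kaspad structures):

```go
package main

import "fmt"

// reachabilityInterval is a simplified stand-in for the interval that the
// commit above assigns to every reachability tree node; start and end are
// treated as inclusive here.
type reachabilityInterval struct {
	start, end uint64
}

// isAncestorOf reports whether ri fully contains other. In interval-based
// reachability, a tree node is an ancestor of another exactly when its
// interval contains the other node's interval, which makes the ancestry
// query O(1) instead of a graph traversal.
func (ri *reachabilityInterval) isAncestorOf(other *reachabilityInterval) bool {
	return ri.start <= other.start && other.end <= ri.end
}

func main() {
	root := &reachabilityInterval{start: 0, end: 100}
	child := &reachabilityInterval{start: 10, end: 20}
	fmt.Println(root.isAncestorOf(child)) // true
	fmt.Println(child.isAncestorOf(root)) // false
}
```
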
Dan Aharoni
ea6f7a28c2 [NOD-420] Process delayed blocks (#529)
* [NOD-420] Delay blocks with valid timestamp (non-delayed) that point to a delayed block.

* [NOD-420] Mark block as requested when setting as delayed.

* [NOD-420] Merge master; Use dag.timeSource.AdjustedTime() instead of time.Now;

* [NOD-420] Return nil when not expecting an error

* [NOD-420] Initialise delayed blocks mapping

* [NOD-420] Trigger delayed blocks processing every time we process a block.

* [NOD-420] Hold the read lock in processDelayedBlocks

* [NOD-420] Add delayed blocks heap sorted by their process time so we could process them in order.

* [NOD-420] Update debug log

* [NOD-420] Fix process blocks loop

* [NOD-420] Add comment

* [NOD-420] Log error message

* [NOD-420] Implement peek method for delayed block heap. Extract delayed block processing to another function.

* [NOD-420] Trigger process delayed blocks only in process block

* [NOD-420] Move delayed block addition to process block

* [NOD-420] Use process block to make sure we fully process the delayed block and deal with orphans.

* [NOD-420] Unexport functions when not needed; Return isDelayed boolean from ProcessBlock instead of the delay duration

* [NOD-420] Remove redundant delayedBlocksLock

* [NOD-420] Resolve merge conflict; Return delay 0 instead of boolean

* [NOD-420] Do not treat delayed block as orphan

* [NOD-420] Make sure block is not processed if it was already set as delayed.

* [NOD-420] Process delayed block if parent is delayed to make sure it would not be treated as orphan.

* [NOD-420] Rename variable

* [NOD-420] Rename function. Move maxDelayOfParents to process.go

* [NOD-420] Fix typo

* [NOD-420] Handle errors from processDelayedBlocks properly

* [NOD-420] Return default values if err != nil from dag.addDelayedBlock

* [NOD-420] Return default values if err != nil from dag.addDelayedBlock in another place

Co-authored-by: Svarog <feanorr@gmail.com>
2020-01-08 15:28:52 +02:00
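
The NOD-420 commit above keeps delayed blocks in a heap ordered by their process time and adds a peek method, so blocks can be processed in order once their time arrives. A small container/heap sketch under those assumptions (the delayedBlock type here is a placeholder that omits the block itself):

```go
package main

import (
	"container/heap"
	"fmt"
	"time"
)

// delayedBlock pairs a block (elided here) with the time at which it
// should be processed.
type delayedBlock struct {
	processTime time.Time
}

// delayedBlockHeap is a min-heap ordered by processTime.
type delayedBlockHeap []*delayedBlock

func (h delayedBlockHeap) Len() int            { return len(h) }
func (h delayedBlockHeap) Less(i, j int) bool  { return h[i].processTime.Before(h[j].processTime) }
func (h delayedBlockHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *delayedBlockHeap) Push(x interface{}) { *h = append(*h, x.(*delayedBlock)) }
func (h *delayedBlockHeap) Pop() interface{} {
	old := *h
	n := len(old)
	item := old[n-1]
	*h = old[:n-1]
	return item
}

// peek returns the earliest delayed block without removing it, or nil if empty.
func (h delayedBlockHeap) peek() *delayedBlock {
	if len(h) == 0 {
		return nil
	}
	return h[0]
}

func main() {
	h := &delayedBlockHeap{}
	heap.Init(h)
	heap.Push(h, &delayedBlock{processTime: time.Now().Add(time.Minute)})
	heap.Push(h, &delayedBlock{processTime: time.Now()})
	fmt.Println(h.peek().processTime) // the earlier of the two times
}
```
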
Svarog
ac9aa74a75 [NOD-549] Use version.Version anywhere relevant + update related tests (#577)
* [NOD-549] Update version to 0.1.0 and allow injection of appBuild

* [NOD-549] Fixed peer tests

* [NOD-549] Fixed wire tests

* [NOD-549] Remove any mention of semVer.

* [NOD-549] Don't include appBuild at all if it includes invalid characters

* [NOD-549] Panic if appBuild contains invalid characters

* [NOD-549] Move checkAppBuild into

* [NOD-549] Update comment
2020-01-08 12:14:59 +02:00
Ori Newman
d46857677f [NOD-503] Remove Tor functionality (#573) 2020-01-08 10:45:27 +02:00
stasatdaglabs
cd719b1d5b [NOD-546] Report build failures to Discord instead of Telegram (#572)
* [NOD-546] Report build failures to Discord instead of Telegram.

* [NOD-546] Make a temporary compilation error.

* [NOD-546] Make a couple of temporary print outs.

* [NOD-546] Remove temporary debug stuff.

* [NOD-546] Make notify_discord() return early if Discord variables are not set.
2020-01-06 16:36:21 +02:00
Svarog
7cf15ac93b [NOD-549] Update version to 0.1.0 and allow injection of appBuild (#568)
* [NOD-549] Update version to 0.1.0 and allow injection of appBuild

* [NOD-549] Fixed peer tests

* [NOD-549] Fixed wire tests

* [NOD-549] Remove any mention of semVer.

* [NOD-549] Don't include appBuild at all if it includes invalid characters

* [NOD-549] Panic if appBuild contains invalid characters

* [NOD-549] Move checkAppBuild into
2020-01-06 15:30:00 +02:00
Svarog
d8e3191469 [NOD-619] Disable tor related cli flags (#571)
* [NOD-619] Disable tor related cli flags

* [NOD-619] Disabled --proxy and related flags

* [NOD-619] Added missing space
2020-01-06 14:50:32 +02:00
Svarog
784d3de4ca [NOD-618] Removed outdated docs folder (#570) 2020-01-06 10:48:41 +02:00
Ori Newman
733d06af5a [NOD-617] Remove test coverage from test.sh (#569)
* [NOD-617] Remove test coverage from test.sh

* [NOD-617] Log coverage
2020-01-06 10:31:09 +02:00
stasatdaglabs
df91643976 [NOD-608] Make the user agent read the version from the version package. (#566) 2020-01-05 16:47:58 +02:00
stasatdaglabs
ebf635e6ff [NOD-559] Remove cmd/genaddr. (#567) 2020-01-05 11:41:14 +02:00
stasatdaglabs
e41d9866c3 [NOD-613] Fix concurrent access to ecmh cache (#565)
* [NOD-613] Fix concurrent access to ecmh cache.

* [NOD-613] Localized dag.Lock().
2020-01-02 18:03:25 +02:00
stasatdaglabs
d984151549 [NOD-517] Update doc.go files (#559)
* [NOD-517] Remove copyright notices from all doc.go.

* [NOD-517] Updated the root doc.go.

* [NOD-517] Remove all cov_report.sh and test_coverage.txt.

* [NOD-517] Make all doc.go use the same style of comment.

* [NOD-517] Update dagconfig doc.go.

* [NOD-517] Update blockdag doc.go.

* [NOD-517] Update doc.go in connmgr.

* [NOD-517] Update doc.go in fullblocktests.

* [NOD-517] Update doc.go in database.

* [NOD-517] Update doc.go in ecc.

* [NOD-517] Update doc.go in rpctest.

* [NOD-517] Removed superfluous license in logs.

* [NOD-517] Update doc.go in mempool.

* [NOD-517] Updated doc.go in peer.

* [NOD-517] Update doc.go in rpcclient.

* [NOD-517] Update doc.go in txscript.

* [NOD-517] Update doc.go in util.

* [NOD-517] Update doc.go in base58.

* [NOD-517] Update doc.go in bech32.

* [NOD-517] Update doc.go in txsort.

* [NOD-517] Update doc.go in wire.

* [NOD-517] Fix indentation.

* [NOD-517] Add a copyright notice to the main doc.go.

* [NOD-517] Add Conformal to the license notices.

* [NOD-517] Remove superfluous language from a doc.

* [NOD-517] Fix bad example.
2020-01-02 16:57:43 +02:00
stasatdaglabs
6099ce56bd [NOD-5] Remove TestFullBlocks and package fullblocktests (#564) 2019-12-31 16:16:10 +02:00
Svarog
e0b5c145f7 [NOD-543] Added dnsseeds to testnet and mainnet (#562) 2019-12-31 10:57:43 +02:00
Ori Newman
cf37f733ef [NOD-601] Omit nil selected parent in GetBlockVerboseResult (#561) 2019-12-30 18:44:17 +02:00
Ori Newman
66a92a243c [NOD-591] Add selected parent to GetBlockVerboseResult (#558)
* [NOD-591] Add selected parent to GetBlockVerboseResult

* [NOD-591] Add selected parent to GetBlockHeaderResult
2019-12-29 12:46:35 +02:00
Ori Newman
4a88eea57e [NOD-590] Export newLogClosure (#557) 2019-12-26 18:26:22 +02:00
aspect
fbaf360a42 Fix missing pkg/errors reference in windows-only file (#547)
* Fixing missing pkg/errors reference

* go fmt pass
2019-12-25 15:50:43 +02:00
Svarog
1346810af8 [NOD-583] Move InterruptListener() to beginning of main (#555) 2019-12-25 13:14:28 +02:00
Svarog
9cbab94264 [NOD-579] Remove erroneous dependency on github.com/prometheus/common/log (#554) 2019-12-25 11:54:36 +02:00
Svarog
48f29cc11f [NOD-401] Allow to pass PrefixUnknown to ParseAddress + add .Prefix() to addresses (#553)
* [NOD-401] Created CLI-Wallet base structure and new command

* [NOD-401] Switched to go-flags sub-command parsing

* [NOD-401] Added config for all sub-commands

* [NOD-401] Work in progress for send command in cli-wallet

* [NOD-401] Allow to pass PrefixUnknown to ParseAddress + add .Prefix() to addresses

* [NOD-401] Finished implementing all wallet commands

* [NOD-401] some refactorings to sendTx

* [NOD-401] Moved wallet to kasparov repo + updated tests with new prefixes
2019-12-24 15:38:47 +02:00
Ori Newman
e2b57e6231 [NOD-566] Do not accept coinbase transaction from the selected parent anticone (#552) 2019-12-19 18:05:55 +02:00
Ori Newman
f72afc8bbb [NOD-499] Change ports and network magics (#550)
* [NOD-499] Change network magics

* [NOD-499] Change default rpc ports

* [NOD-499] Change default p2p ports

* [NOD-499] Change port 18333 to 10433 everywhere

* [NOD-499] Change port 8333 to 10333 everywhere

* [NOD-499] Fix TestElementWire

* [NOD-499] Fix tests

* [NOD-499] Change port 10333->16111 and 10332->16110

* [NOD-499] Change port 10433->16211 and 10432->16210

* [NOD-499] Change port 10633->16511 and 10632->16510

* [NOD-499] Change port 10533->16611 and 10532->16610
2019-12-18 16:18:37 +02:00
stasatdaglabs
0d1f447cb7 [NOD-510] Fix comments so that they don't mention bitcoin. (#551) 2019-12-18 13:46:32 +02:00
Svarog
818f8c93eb [NOD-551] Move httpserverutils to kasparov (#549)
* [NOD-511] Move httpserverutils to kasparov repository
2019-12-18 12:51:05 +02:00
Svarog
264ffaae93 [NOD-495] Move out non-kaspad apps (#548)
* [NOD-495] Remove txgen to separate repository

* [NOD-495] Remove DNSSeeder to separate repository

* [NOD-495] Remove kasparov to separate repository

* [NOD-495] Remove miningsimulator to separate repository

* [NOD-495] httpserverutils should use kaspad logger package

* [NOD-495] Remove faucet to separate repository

* [NOD-495] httpserverutils should use kasparov logger
2019-12-18 12:14:07 +02:00
stasatdaglabs
03b7af9a13 [NOD-532] Replace "chain" with "DAG" where appropriate (#537)
* [NOD-532] Change chain to DAG in the root package.

* [NOD-532] Change chain to DAG in checkpoints.go.

* [NOD-532] Change chain to DAG in blockdag.

* [NOD-532] Change chain to DAG in cmd.

* [NOD-532] Change chain to DAG in dagconfig.

* [NOD-532] Change chain to DAG in database.

* [NOD-532] Change chain to DAG in mempool.

* [NOD-532] Change chain to DAG in mempool.

* [NOD-532] Change chain to DAG in netsync.

* [NOD-532] Change chain to DAG in rpcclient.

* [NOD-532] Change chain to DAG in server.

* [NOD-532] Change chain to DAG in txscript.

* [NOD-532] Change chain to DAG in util.

* [NOD-532] Change chain to DAG in wire.

* [NOD-532] Remove block heights in dagio.go examples.

* [NOD-532] Rename fakeChain to fakeDAG.

* [NOD-532] Fix comments, remove unused EnableBCInfoHacks flag.

* [NOD-532] Fix comments and variable names.

* [NOD-532] Fix comments.

* [NOD-532] Fix merge errors.

* [NOD-532] Formatted project.
2019-12-17 13:40:03 +02:00
Ori Newman
e3d7e83d44 [NOD-548] Remove default dns seed from devnet (#546)
* [NOD-390] Add faucet Dockerfile

* [NOD-390] Allow running migration without -api-server-url and --private-key arguments

* [NOD-390] Change kasparov-server to kasparovd in its Dockerfile

* [NOD-548] Remove default DNS seed from devnet
2019-12-17 12:26:27 +02:00
Ori Newman
07651e51c8 [NOD-390] Add faucet dockerfile (#544)
* [NOD-390] Add faucet Dockerfile

* [NOD-390] Allow running migration without -api-server-url and --private-key arguments

* [NOD-390] Change kasparov-server to kasparovd in its Dockerfile

* [NOD-390] Change API server and Kasparov server to kasparovd
2019-12-17 11:10:59 +02:00
Svarog
1cd2eb9308 [NOD-494] Update readmes (#543)
* [NOD-494] Updated main README.md

* [NOD-494] Updated blockdag/README.md

* [NOD-494] Aligned text length in main README.md

* [NOD-494] Updated most remaining packages READMEs + deleted util/coinset

* [NOD-494] Update integration README

* [NOD-494] Did a final pass over all readmes

* [NOD-494] Updated README for DNSSeeder with more info on how to create a functioning setup

* [NOD-494] Remove all double spaces from readmes

* [NOD-494] Minor fixes in READMEs + update license to kaspanet developers

* [NOD-494] Add backtick around ecc and util in hdkeychain README
2019-12-16 17:37:17 +02:00
stasatdaglabs
a140327dd2 [NOD-500] Remove checkpoints (#541)
* [NOD-502] Remove checkpoints.

* [NOD-502] Remove remaining references to checkpoints.

* [NOD-500] Split RejectFinality to RejectDifficulty.

* [NOD-500] Remove support for headers-first in p2p.

* [NOD-500] Panic in newHashFromStr in case of an error.
2019-12-16 17:22:10 +02:00
stasatdaglabs
c1f7ae72e0 [NOD-514] Change dagcoin to kaspa, dagtest to kaspatest, dagreg to kaspareg, and dagsim to kaspasim (#538)
* [NOD-514] Change dagcoin to kaspa, dagtest to kaspatest, etc.

* [NOD-514] Remove no-longer-relevant link in doc.
2019-12-15 18:24:15 +02:00
stasatdaglabs
3a12fe9b1d [NOD-516] Remove cmd/genesis. (#542) 2019-12-15 14:50:04 +02:00
stasatdaglabs
c25c9b25bd [NOD-502] Remove RPCQuirks. (#540) 2019-12-15 14:49:22 +02:00
stasatdaglabs
f46dec449d [NOD-510] Change all references to Bitcoin to Kaspa (#531)
* [NOD-510] Change coinbase flags to kaspad.

* [NOD-510] Removed superfluous spaces after periods in comments.

* [NOD-510] Rename btcd -> kaspad in the root folder.

* [NOD-510] Rename BtcEncode -> KaspaEncode and BtcDecode -> KaspaDecode.

* [NOD-510] Rename BtcEncode -> KaspaEncode and BtcDecode -> KaspaDecode.

* [NOD-510] Continue renaming btcd -> kaspad.

* [NOD-510] Rename btcjson -> kaspajson.

* [NOD-510] Rename file names inside kaspajson.

* [NOD-510] Rename kaspajson -> jsonrpc.

* [NOD-510] Finish renaming in addrmgr.

* [NOD-510] Rename package btcec to ecc.

* [NOD-510] Finish renaming stuff in blockdag.

* [NOD-510] Rename stuff in cmd.

* [NOD-510] Rename stuff in config.

* [NOD-510] Rename stuff in connmgr.

* [NOD-510] Rename stuff in dagconfig.

* [NOD-510] Rename stuff in database.

* [NOD-510] Rename stuff in docker.

* [NOD-510] Rename stuff in integration.

* [NOD-510] Rename jsonrpc to rpcmodel.

* [NOD-510] Rename stuff in limits.

* [NOD-510] Rename stuff in logger.

* [NOD-510] Rename stuff in mempool.

* [NOD-510] Rename stuff in mining.

* [NOD-510] Rename stuff in netsync.

* [NOD-510] Rename stuff in peer.

* [NOD-510] Rename stuff in release.

* [NOD-510] Rename stuff in rpcclient.

* [NOD-510] Rename stuff in server.

* [NOD-510] Rename stuff in signal.

* [NOD-510] Rename stuff in txscript.

* [NOD-510] Rename stuff in util.

* [NOD-510] Rename stuff in wire.

* [NOD-510] Fix failing tests.

* [NOD-510] Fix merge errors.

* [NOD-510] Fix go vet errors.

* [NOD-510] Remove merged file that's no longer relevant.

* [NOD-510] Add a comment above Op0.

* [NOD-510] Fix some comments referencing Bitcoin Core.

* [NOD-510] Fix some more comments referencing Bitcoin Core.

* [NOD-510] Fix bitcoin -> kaspa.

* [NOD-510] Fix more bitcoin -> kaspa.

* [NOD-510] Fix comments, remove DisconnectBlock in addrindex.

* [NOD-510] Rename KSPD to KASD.

* [NOD-510] Fix comments and user agent.
2019-12-12 15:21:41 +02:00
Ori Newman
60ab6330ff [NOD-521] Add blocks to Kasparov DB immediately after getblocks request (#532) 2019-12-12 10:14:37 +02:00
Dan Aharoni
89dee3e005 [NOD-533] Kaspad renaming in docker (#536)
* [NOD-533] Rename kasparov folder leftovers

* [NOD-533] Rename btcd to kaspad

* [NOD-533] Fix folder name

* [NOD-533] Add file name
2019-12-12 10:09:57 +02:00
Dan Aharoni
70d7009985 [NOD-533] Rename kasparov folder leftovers (#535) 2019-12-11 16:43:39 +02:00
Dan Aharoni
3322a892e9 [NOD-531] Add migrations to Kasparov docker files (#534)
* [NOD-531] Add migrations to kasparov docker files

* [NOD-531] Remove newline
2019-12-11 13:04:46 +02:00
stasatdaglabs
61d066e958 [NOD-525] Rename kasparovsync to kasparovsyncd. (#533) 2019-12-11 12:35:04 +02:00
Svarog
7b9ffc6c25 [NOD-525] renamed folders server and syncd to kasparovd and kasparovsync respectively (#530)
* [NOD-525] renamed folders server and syncd to kasparovserver and kasparovsync respectively

* [NOD-525] Fixed references to kasparov sub-apps

* [NOD-525] Renamed kasparovserver -> kasparovd
2019-12-11 10:28:21 +02:00
Ori Newman
7a163d4dd7 [NOD-471] Make AddTx return false for duplicate coinbase, and make pastUTXO return accepted transaction with the accepting block blue score (#523)
* [NOD-471] Make AddTx return false for duplicate coinbase, and make pastUTXO return accepted transaction with the accepting block blue score

* [NOD-471] Remove diffFromAcceptanceData

* [NOD-471] Make fetchBlueBlocks return also selected parent

* [NOD-471] Skip adding coinbase transactions on applyBlueBlocks

* [NOD-471] Use tx.IsCoinbase() instead of i == util.CoinbaseTransactionIndex
2019-12-10 14:02:10 +02:00
Svarog
189a3380a2 [NOD-340] Remove unimplemented RPC commands: getNetworkHashPS, estimatePriority, getChainTips, invalidateBlock, preciousBlock, reconsiderBlock (#528)
* [NOD-340] Remove GetNetworkHashPS cmd

* [NOD-340] Removed unimplemented commands: estimatePriority, getChainTips, invalidateBlock, preciousBlock, reconsiderBlock

* [NOD-340] Apply gofmt
2019-12-10 12:58:26 +02:00
Dan Aharoni
8680231e5a [NOD-339] Remove cfilters and cfindex (#527)
* [NOD-339] Remove cfilters and cfindex

* [NOD-339] Remove some leftovers
2019-12-10 10:13:49 +02:00
stasatdaglabs
30f0e95969 [NOD-506] Remove blockNode.height and any references to it (#522)
* [NOD-506] Remove blockNode.height.

* [NOD-506] Use blueScore instead of chainHeight in validateParents.
2019-12-09 14:27:53 +02:00
Dan Aharoni
c94becf144 [NOD-498] Disable the option to choose mainnet and activeNet. (#525) 2019-12-08 18:33:04 +02:00
Svarog
369ec449a8 [NOD-509] Change organization name to kaspanet (#524)
* [NOD-509] Change organization name to kaspanet

* [NOD-509] Reorganize imports
2019-12-08 17:33:42 +02:00
Svarog
f4c6859e51 [NOD-509] Updated repository and imports to github.com/daglabs/kaspad (#521) 2019-12-08 16:28:53 +02:00
stasatdaglabs
683dd52fcf [NOD-492] Split API-Server to syncer and frontend applications (#519)
* [NOD-492] Split ApiServer to server and syncd.

* [NOD-492] Add missing file.

* [NOD-492] Remove references to --migrate from common config.

* [NOD-492] Move MQTT to the sync daemon.

* [NOD-492] Fix server Dockerfile and create one for syncd.

* [NOD-492] Rename ApiServer to Kasparov.

* [NOD-492] Fix packages.

* [NOD-492] Fix more packages.

* [NOD-492] Fix comments and package names.

* [NOD-492] Move blank import packages to main.

* [NOD-492] Move common logging logic out of individual config.go files.

* [NOD-492] Move database models to a package called dbmodels.

* [NOD-492] Rename models package to apimodels.
2019-12-08 14:38:47 +02:00
stasatdaglabs
11e936d109 [NOD-493] Make GetBlock return an appropriate error if the hash is known to be of an invalid block. (#520) 2019-12-08 14:05:56 +02:00
stasatdaglabs
9adb105e37 [NOD-487] Implement a mechanism to gracefully shut down after a panic (#512)
* [NOD-487] Implement a mechanism to gracefully shut down after a panic.

* [NOD-487] Fixed bad log.

* [NOD-487] Removed unused import.

* [NOD-487] Convert panic handlers from anonymous functions to methods.
2019-12-05 12:29:39 +02:00
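
The NOD-487 commit above introduces a mechanism to shut down gracefully after a panic instead of letting the process die mid-operation (the later NOD-416 change adds a goroutine stack trace to the same handler). A hedged sketch of the pattern, with illustrative names and a channel-based shutdown signal assumed for the example:

```go
package main

import (
	"log"
	"os"
	"runtime/debug"
)

// handlePanic recovers from a panic, logs it together with a stack trace,
// and signals an orderly shutdown instead of letting the process die
// mid-write. Names here are illustrative, not kaspad's actual handler.
func handlePanic(shutdown chan<- struct{}) {
	if err := recover(); err != nil {
		log.Printf("fatal panic: %v\n%s", err, debug.Stack())
		select {
		case shutdown <- struct{}{}:
		default: // a shutdown was already requested
		}
	}
}

// spawn starts a goroutine whose panics are routed through handlePanic.
func spawn(shutdown chan<- struct{}, f func()) {
	go func() {
		defer handlePanic(shutdown)
		f()
	}()
}

func main() {
	shutdown := make(chan struct{}, 1)
	spawn(shutdown, func() { panic("worker failed") })
	<-shutdown
	log.Println("shutting down gracefully")
	os.Exit(1)
}
```
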
stasatdaglabs
7b6ed9a778 [NOD-412] Remove run-dev.sh-related stuff. (#518) 2019-12-04 18:35:50 +02:00
Ori Newman
3218fc5a04 [NOD-488] Make getBlueBlocksBetween return error if startHash is not in selected parent chain of stopHash (#514)
* [NOD-488] Make getBlueBlocksBetween return error if start hash is not in the selected parent chain of stop hash

* [NOD-488] Convert for to while style
2019-12-04 17:52:28 +02:00
Ori Newman
3f94f8ca4c [NOD-491] Withhold blocks in mining simulator (#517) 2019-12-04 17:36:27 +02:00
Ori Newman
0842778c2c [NOD-485] Remove redundant argument from saveChangesFromBlock (#516) 2019-12-04 17:24:20 +02:00
Ori Newman
1332e1aa68 [NOD-479] Fix unorphaning in API server initial sync (#513) 2019-12-04 17:06:20 +02:00
Ori Newman
e872ebc7b3 [NOD-417] Remove mempool and alert messages (#515) 2019-12-04 10:43:50 +02:00
Svarog
e68b242243 [NOD-489] Don't skip notification about transactions for orphan/non-current blocks (#511) 2019-12-03 14:18:32 +02:00
Ori Newman
9cc2a7260b [NOD-479] Separate max outbound connections and max inbound connections (#509)
* [NOD-479] Separate max outbound connections and max inbound connections

* [NOD-479] Fix merge

* [NOD-479] Renames and add function countinboundPeers

* [NOD-479] Remove redundant check on maximum outbound peers

* [NOD-479] Rename countinboundPeers -> countInboundPeers
2019-12-03 12:27:49 +02:00
Ori Newman
bcd73012de [NOD-428] Require RPC user and password, and do not create a default config file for btcctl if rpc login details were provided (#510)
* [NOD-428] Require RPC user and password, and do not create a default config file for btcctl if rpc login details were provided

* [NOD-428] Don't check rpc user and password if rpc is disabled

* [NOD-428] Fix error message
2019-12-03 11:18:28 +02:00
Dan Aharoni
1fea2a9421 [NOD-486] API Server TX posting: Forward error when RPC Error is received (#507)
* [NOD-486] Forward error when RPC Error is received

* [NOD-486] Rename variable

* [NOD-486] Rename variable

* [NOD-486] Rename Variable (again)
2019-12-02 18:44:39 +02:00
stasatdaglabs
bb7d68deda [NOD-484] Fix deadlock between p2p server and sync manager during shutdown (#508)
* [NOD-484] Fix deadlock between p2p server and sync manager during shutdown.

* [NOD-484] Fix quitWaitGroup.Wait() potentially not waiting in some scenarios.

* [NOD-484] Add a comment explaining quitWaitGroup.

* [NOD-484] Fix typo.

* [NOD-484] Add etc to comment.
2019-12-02 18:08:32 +02:00
Ori Newman
3ab861227d [NOD-443] Fix API server unorphaning (#501)
* [NOD-443] Immediately request missing parents in API server sync

* [NOD-443] Add rpc client log to api server

* [NOD-443] Fix wrong ordering of pendingHeaders queue

* [NOD-443] Fix error comparision at TestNewHashFromStr

* [NOD-443] Make a separate handleMissingParentHeaders function

* [NOD-443] Change log level

* [NOD-443] Put handleMissingParentHeaders next to handleBlockHeader

* [NOD-443] Make handleBlockAddedMsg function

* [NOD-443] Make reusable missingParentsHashesStr string

* [NOD-443] Remove redundant 's'

* [NOD-443] Refactor to first get all blocks and then add them to database

* [NOD-443] Rename variables and functions, and remove redundant logs

* [NOD-443] Make fetchBlockAndMissingAncestors use block hash as an argument

* [NOD-443] Add log only for first orphan block

* [NOD-443] Fix wrong order of adding blocks to pendingBlocks

* [NOD-443] Write logs for all orphans

* [NOD-443] Log only missing parents that are not already fetched

* [NOD-443] Rename rawVerboseBlockTuple -> rawVerboseBlock

* [NOD-443] Make fetchBlock return *rawVerboseBlock

* [NOD-443] Rename rawVerboseBlock -> rawAndVerboseBlock
2019-12-02 13:29:28 +02:00
stasatdaglabs
8f0d98ef9b [NOD-464] Fix error messages in GetBlocks. (#506) 2019-12-02 13:05:56 +02:00
Svarog
dbd8bf3d2c [NOD-478] Add buffer to newOutboundConnection channel (#502) 2019-12-01 17:58:05 +02:00
Dan Aharoni
1b6b02e0d2 [NOD-475] Return error when requesting limit=0 (#505) 2019-12-01 17:25:27 +02:00
Ori Newman
2402bae1ff [NOD-410] Add log level CLI argument to API server (#503)
* [NOD-410] Add log level CLI argument to API server

* [NOD-410] Add comments

* [NOD-410] Remove pre-allocation of one item
2019-12-01 17:24:12 +02:00
Dan Aharoni
3dcf8d88b8 [NOD-481] Fix /blocks limit error message for api server (#504) 2019-12-01 16:19:14 +02:00
stasatdaglabs
dbf9c09a2e [NOD-461] Fix error code and message in GetTransactionsByAddressHandler. (#499) 2019-11-28 17:32:34 +02:00
stasatdaglabs
5e9fc2defc [NOD-464] Fix error messages in GetBlocks. (#500) 2019-11-28 17:31:19 +02:00
Ori Newman
bdc3cbceaa [NOD-472] Don't fetch accepting block and tx confirmations for getBlocks (#498)
* [NOD-472] Don't fetch accepting block and tx confirmations for getBlocks

* [NOD-472] Don't fetch accepting block and tx confirmations in any block verbose result

* [NOD-472] Add stringPointerToString function
2019-11-28 13:04:03 +02:00
stasatdaglabs
a71528fefb [NOD-450] Fix netsync clogging its own request queue with orphans that it had just now processed (#497)
* [NOD-450] Fix netsync clogging its own request queue with orphans that it had just now processed.

* [NOD-450] Rename hash to orphanHash.
2019-11-28 11:30:50 +02:00
Dan Aharoni
6725742d2c [NOD-470] Pass string instead of hash to controller (#496) 2019-11-27 17:08:23 +02:00
Dan Aharoni
9a510e2e23 [NOD-463] Change default order to descending (as appears in the spec) (#495) 2019-11-26 18:00:44 +02:00
Dan Aharoni
08a4b0dbf6 [NOD-426] Publish unaccepted transaction notifications (#490)
* [NOD-382] Add notification for accepted transactions

* [NOD-382] Remove print statement

* [NOD-426] Publish notifications for unaccepted transactions

* [NOD-426] Load DB in controller

* [NOD-426] Remove function name from error message

* [NOD-426] Add input addresses for transactions notifications

* [NOD-426] Remove function name from error message

* [NOD-426] Change method name to accepted transactions

* [NOD-426] Remove newlines

* [NOD-426] Use join instead of separate query

* [NOD-426] Remove new line
2019-11-26 16:59:16 +02:00
Ori Newman
0c9e55a358 [NOD-427] Selected tip notification (#494)
* [NOD-427] Send notifications to `dag/selected-tip`

* [NOD-442] Add selected tip notification

* [NOD-427] Add comment to PublishSelectedTipNotification

* [NOD-427] Remove redundant argument from errors.Wrapf

* [NOD-427] Add handleBlockAddedMsg function

* [NOD-427] Return errors instead of panicking

* [NOD-427] Fix findHashOfBluestBlock to use []string instead of dbmodels.Block

* [NOD-427] Add constants

* [NOD-427] use path.Join instead of topic+address

* [NOD-427] Remove redundant select

* [NOD-427] Change break to return

* [NOD-427] Fix findHashOfBluestBlock to handle empty blocks table

* [NOD-427] Return httpserverutils.HasDBError(dbErrors)
2019-11-26 16:46:12 +02:00
Ori Newman
532e57b61c [NOD-427] Add selected tip mqtt notification for the API server (#489)
* [NOD-427] Send notifications to `dag/selected-tip`

* [NOD-442] Add selected tip notification

* [NOD-427] Add comment to PublishSelectedTipNotification

* [NOD-427] Remove redundant argument from errors.Wrapf

* [NOD-427] Add handleBlockAddedMsg function

* [NOD-427] Return errors instead of panicking

* [NOD-427] Fix findHashOfBluestBlock to use []string instead of dbmodels.Block

* [NOD-427] Add constants

* [NOD-427] use path.Join instead of topic+address

* [NOD-427] Remove redundant select

* [NOD-427] Change break to return
2019-11-26 14:44:27 +02:00
Dan Aharoni
b1f59914d2 [NOD-466] Fix error where limit overrides skip (#493) 2019-11-26 14:11:49 +02:00
Dan Aharoni
9a54b286c9 [NOD-462] Add error message when address is invalid for UTXO API request (#492)
* [NOD-462] Add error message when address is invalid

* [NOD-462] Fix error message

* [NOD-462] Remove function name from error message
2019-11-26 13:50:36 +02:00
Ori Newman
6e4b18a498 [NOD-442] Make dbFetchTxAcceptingBlock return nil if accepting bucket doesn't exist for transaction (#487) 2019-11-26 10:54:15 +02:00
Dan Aharoni
b5f8a0452e [NOD-460] Fix error where we set skip instead of limit (#491) 2019-11-26 10:36:26 +02:00
Dan Aharoni
fab043ef14 [NOD-382] Add notification for accepted transactions (#488)
* [NOD-382] Add notification for accepted transactions

* [NOD-382] Remove print statement
2019-11-25 10:09:27 +02:00
Ori Newman
8e0e62f21a [NOD-447] fix deadlocks and hanging goroutines (#481)
* [NOD-447] Fix deadlocks and hanging goroutines

* [NOD-447] Add tests

* [NOD-447] Add unpatch to spawnPatch

* [NOD-447] Don't send to releaseWait if waitingCounter is zero

* [NOD-447] Change waitingCounter to boolean and rename to isReleaseWaitWaiting, change checkIfRunningSpawnsAreLeft to return only one function, and lock critical code related to wg.isReleaseWaitWaiting

* [NOD-447] Rename txConfirmations -> txConfirmationsNoLock, txConfirmationsWithLock -> txConfirmations

* [NOD-447] Add documentation and delete redundant spawn

* [NOD-447] Fix comments

* [NOD-447] Fix comments
2019-11-24 15:59:45 +02:00
Dan Aharoni
9a1c2e2641 [NOD-457] Fix error message (#486) 2019-11-24 15:50:15 +02:00
Svarog
8cbc6670cc [NOD-445] Enable using EC2 AutoScalingGroup to get list of btcds for mining simulator (#484)
* [NOD-445] Added option to mining simulator to get address list from AWS

* [NOD-445] Add support to get miningsimulator addresslist from AWS

* [NOD-445] Added mechanism to update when new servers come online

* [NOD-445] Set config in connectionManager

* [NOD-445] Invert DisableTLS condition in readCert
2019-11-24 13:03:26 +02:00
Dan Aharoni
28ee6a8026 [NOD-381] Send transaction notifications to MQTT (#483)
* [NOD-381] Publish transaction messages to MQTT

* [NOD-381] Remove redundant variable

* [NOD-381] Send payload as string

* [NOD-381] Add Error handling

* [NOD-381] Respond with TransactionResponse

* [NOD-381] Use transactionResponse for notifications

* [NOD-381] Move code to appropriate places

* [NOD-381] Pass raw block instead of txId

* [NOD-381] Add comments to public functions

* [NOD-381] Remove print statement

* [NOD-381] Pass transaction instead of block; Use pointers so default will be nil;

* [NOD-381] Use pointers so value could be nil

* [NOD-381] Change variable name

* [NOD-381] Set QoS to 2

* [NOD-381] Move isConnected to MQTT, so client won't have to worry about it; General code refactors;
2019-11-24 10:53:09 +02:00
Svarog
af39e96e3e [NOD-455] Make GetFeeEstimateHandler return err, not HandlerError (#485) 2019-11-24 10:43:51 +02:00
stasatdaglabs
db6e9c773f [NOD-448] Fix initial sync in API Server crashing due to misaligned getBlocks calls (#482)
* [NOD-448] Change GetBlocksCmd to be able to include both raw and verbose block data.

* [NOD-448] Update sync logic to only make one getBlocks call per page.

* [NOD-448] Make GetBlocks get each block only once.
2019-11-21 12:21:19 +02:00
Dan Aharoni
47214121a7 [NOD-423] Implement get selected tip RPC command (#469)
* [NOD-423] Rename BestBlock to SelectedTip

* [NOD-423] Implement GetSelectedTip RPC command

* [NOD-423] Add help to getSelectedTip command

* [NOD-423] Fix getSelectedTip test

* [NOD-423] Fix tests so they would compile. These tests will need to be rewritten at some point.

* [NOD-423] Make integration test compile. Test needs to be revisited

* [NOD-423] Rename variables

* [NOD-423] Change comments about best block to selected tip.

* [NOD-423] Update comment

* [NOD-423] Change height to bluescore
2019-11-20 12:04:22 +02:00
stasatdaglabs
7b07609fd8 [NOD-437] Fix bad logger import in API Server. (#480) 2019-11-19 11:23:44 +02:00
stasatdaglabs
acb4b3f260 [NOD-434] Re-request missing parents when adding a block (#476)
* [NOD-434] Add the same enqueue/process mechanism as chainChangedMsgs for blockAddedMsgs.

* [NOD-434] Clean up after merge.

* [NOD-434] Implement mechanism for re-requesting missing parent blocks.

* [NOD-434] Fixed bad error message.

* [NOD-434] Split processBlockAddedMsgs.

* [NOD-434] Name return values in canHandleBlockAddedMsg.

* [NOD-434] Rename canHandleBlockAddedMsg to missingParentHashes and fix bad loop break.

* [NOD-434] Rename the variable missingParentHashes to missingHashes.

* [NOD-434] Rename a couple of variables.

* [NOD-434] Rename outerloop to outerLoop.

* [NOD-434] Fix typo and remove superfluous continue.

* [NOD-412] Change Warnf to Errorf where appropriate.
2019-11-19 11:22:17 +02:00
Ori Newman
e0221aa8ab [NOD-438] In api server, change block_data to MEDIUMBLOB (#474) 2019-11-19 10:42:57 +02:00
Ori Newman
cba346d753 [NOD-422] Separate request queue and request queue set by inv type (#471)
* [NOD-422] Separate request queue and request queue set by inv type

* [NOD-422] Make one-liner pop and remove redundant nil assignment
2019-11-18 15:34:01 +02:00
Ori Newman
0f34cfb1a2 [NOD-433] Make buildGetBlockVerboseResult use BlockConfirmationsByHashNoLock (#479) 2019-11-18 15:17:51 +02:00
stasatdaglabs
ea846a3284 [NOD-436] Remove unnecessary check before sending a chainChanged notification. (#478) 2019-11-18 14:49:27 +02:00
stasatdaglabs
63bfac9740 [NOD-436] Fix sending empty chainChanged messages. (#477) 2019-11-18 14:33:07 +02:00
Svarog
7284815c21 [NOD-435] Roll back transactions if they were not committed (#475) 2019-11-18 11:14:29 +02:00
Svarog
80307d108b [NOD-430] Print hashes of missing parents in case can't insert block into DB of API-Server (#473)
* [NOD-430] Print hashes of missing parents in case can't insert block into DB of API-Server

* [NOD-430] Use continue OUTER_LOOP instead of break

* [NOD-430] Use lowerCamelCase for label
2019-11-18 10:03:17 +02:00
stasatdaglabs
722437afe9 [NOD-424] Fix API Server not syncing new blocks (#470)
* [NOD-424] Fix typo in SQL query.

* [NOD-424] Rewrite handling chainChangedMsgs.

* [NOD-424] Separated enqueueChainChangedMsg and processChainChangedMsgs.

* [NOD-424] Fix a typo.
2019-11-17 16:52:45 +02:00
Ori Newman
684cf4b5fa [NOD-406] Don't do ECMH operations on mempool (#467)
* [NOD-406] Don't do ECMH operations on mempool

* [NOD-406] Change NewUTXODiff(false) to NewUTXODiffWithoutMultiset

* [NOD-406] Rename dClone -> clone

* [NOD-406] Remove redundant assignment

* [NOD-406] Remove dag.UTXOToECMHCacheLock and make NewBlockTemplate use dag's write lock

* [NOD-406] Add tests to UTXO diffs without multiset
2019-11-14 17:40:05 +02:00
Ori Newman
c95a7b13a6 [NOD-379] Make a separate limit for block invs in getdata message (#465) 2019-11-14 13:45:24 +02:00
stasatdaglabs
1ce7f21026 [NOD-380] Implement MQTT client in api-server (#468)
* [NOD-380] Add MQTT to the project.

* [NOD-380] Add MQTT params to config.

* [NOD-380] Implement connecting to an mqtt broker.

* [NOD-380] Fix a comment.

* [NOD-380] Removed unnecessary option.

* [NOD-380] Added comments to MQTT functions.

* [NOD-380] Fix copy+paste error.

* [NOD-380] Make it so that all the mqtt flags must be passed together.

* [NOD-380] Use activeConfig instead of passing it everywhere.
2019-11-14 10:44:45 +02:00
Svarog
7d7df10493 [NOD-418] Added IsSpendable field to /utxos/ queries in API server (#466) 2019-11-13 16:25:10 +02:00
Dan Aharoni
8179862e0b [NOD-421] Fix DNSSeeder parsing error caused by NOD-386 (#464) 2019-11-13 12:49:58 +02:00
stasatdaglabs
6828f623b4 [NOD-395] Fix a crash in diffFromAcceptanceData caused by wrong order of iteration over blocks (#463)
* [NOD-395] Write a test for the diffFromAcceptanceData crash.

* [NOD-395] Converted MultiBlockTxsAcceptanceData into a slice.

* [NOD-395] Fix failing test.

* [NOD-395] Populate multiBlockTxsAcceptanceData bottom-to-top.

* [NOD-395] Add comment to FindAcceptanceData.

* [NOD-395] Remove no-longer relevant note about probability in TestOrderInDiffFromAcceptanceData.
2019-11-13 12:28:52 +02:00
stasatdaglabs
2c88a5b2fe [NOD-413] Make "Max failed connection attempts reached" logs less frequent (#458)
* [NOD-413] Make "Max failed connection attempts reached" less frequent

* [NOD-413] Throttle only certain types of logs.

* [NOD-413] Add a comment for shouldWriteConnFailedLog.

* [NOD-413] Fix lint error.

* [NOD-413] Make ErrNoAddress a special type to support error wrapping.

* [NOD-413] Make throttledConnFailedLogInterval 10 minutes.

* [NOD-413] Move p2p errors into variables.

* [NOD-413] Reorganize throttled stuff to be next to each other.
2019-11-13 11:25:39 +02:00
Ori Newman
a7f08598f3 [NOD-416] Use errors.Is and add goroutine stack trace to HandlePanic (#459)
* [NOD-416] Use errors.Is and add goroutine stack trace to HandlePanic

* [NOD-416] Don't print goroutineStackTrace if it's nil
2019-11-13 11:20:20 +02:00
Dan Aharoni
83bad65d3a [NOD-419] Btcctl parsing error (introduced by NOD-386) (#462) 2019-11-12 14:25:28 +02:00
Dan Aharoni
1f35378a4d [NOD-386] Extract net parsing functionality to a shared place. (#453)
* [NOD-386] Extract net parsing functionality to a shared place.

* [NOD-386] Add extract ActiveNetParams to cmdconfig

* [NOD-386] Adding comments so go-vet won't shout at me

* [NOD-386] Rename package name to config

* [NOD-386] Rename commandConfig to configFlags

* [NOD-386] Rename function to ResolveNetwork

* [NOD-386] Fix renaming errors

* [NOD-386] Refactor network config to btcd level so APIserver and btcd could use it

* [NOD-386] Refactor network config to config package

* [NOD-386] Move ActiveNetParams to network section

* [NOD-386] Explicitly return nil

* [NOD-386] Reuse activeNetParams from network config

* [NOD-386] Set ActiveNetworkFlags instance to be global

* [NOD-386] Remove redundant newline

* [NOD-386] Init ActiveNetParams in address manager test

* [NOD-386] Add dnsseeder network config

* [NOD-386] Use ActiveConfig() method to access configuration
2019-11-12 10:51:36 +02:00
Dan Aharoni
39eab7a6d5 [NOD-373] Schnorr signature scheme (#451)
* [NOD-373] Implement Schnorr digital signatures and remove ECDSA (based on code from gcash/bchd)

* [NOD-374] Add new error to list; Update comments.

* [NOD-373] Remove leftovers of verifyMessage RPC command (which was deleted)

* [NOD-373] Remove redundant test, add Schnorr tests, and fix tests where needed

* [NOD-373] Fix tests and remove redundant ones

* [NOD-373] Refactor functions names

* [NOD-373] Remove empty line

* [NOD-373] Fix comments, rename functions to more meaningful names

* [NOD-373] Additional data in nonceRFC6979 should not be nil

* [NOD-373] Refactor function name

* [NOD-373] Add permalinks for links to bchd code
2019-11-12 10:09:38 +02:00
Dan Aharoni
9dd025d4da [NOD-408] Remove unimplemented redundant RPC command GetTxOutProof (#461) 2019-11-11 12:53:56 +02:00
Dan Aharoni
bb75ea5020 [NOD-414] Remove AES encryption/decryption from btcd (#460) 2019-11-11 11:01:02 +02:00
stasatdaglabs
8dbd4a2bed [NOD-411] Fix underflowing check for resending transactions. (#457) 2019-11-07 16:08:40 +02:00
Ori Newman
24305cda68 [NOD-385] Change confirmation calculation to be relative to the selected tip (#455)
* [NOD-385] Make confirmations be calculated as dag.selectedTip().blueScore - acceptingBlock.blueScore + 2

* [NOD-385] Fix comments

* [NOD-385] Make more explicit check in accepting block for selected tip

* [NOD-385] Put only non accepted transactions in areTxsInBlock

* [NOD-385] fetchSelectedTip only if needed
2019-11-07 13:42:25 +02:00
Ori Newman
770dfd147d [NOD-404] Calculate mass in API server (#452)
* [NOD-404] Calculate mass in API server

* [NOD-404] Fix uninitialized maps

* [NOD-404] Use txID instead of prevDBTransactionsOutput.Transaction.TransactionID
2019-11-07 10:27:12 +02:00
Ori Newman
a9ff9b0e70 [NOD-398] Change API server type HandlerError to work with errors instead of error strings (#454)
* [NOD-398] Change API server type HandlerError to work with errors instead of error strings

* [NOD-398] Rename OriginalError -> Cause and isHandleError -> ok
2019-11-06 16:58:58 +02:00
Ori Newman
3cc6f2d648 [NOD-384] Remove mass from rpc results (#449)
* [NOD-384] Remove mass from rpc results

* [NOD-384] Fix tests
2019-11-04 18:06:01 +02:00
stasatdaglabs
a8f0d7b05b [NOD-400] Fix ECHM cache crashing on concurrent access. (#450) 2019-11-04 17:38:01 +02:00
stasatdaglabs
13f06ca293 [NOD-399] Fix TxGen resending coinbase transactions. (#448) 2019-11-04 13:04:06 +02:00
Ori Newman
c88fa1492e [NOD-375] Move to pkg/errors (#447)
* [NOD-375] Move to pkg/errors

* [NOD-375] Fix tests

* [NOD-375] Make AreErrorsEqual a shared function
2019-11-04 11:24:12 +02:00
Ori Newman
40657a83f5 [NOD-344] Cache ECMH (#445)
* [NOD-134] Change newConnMtx to newConnReqMtx

* [NOD-344] Change ECMH cache size to 4e6

* [NOD-344] Refactor

* [NOD-344] Fix go.mod
2019-11-03 12:29:55 +02:00
Dan Aharoni
44dd58b461 [NOD-396] Fix updateAddedChainBlocks query to select all transactions and not the first one (#444) 2019-11-03 11:29:51 +02:00
Svarog
47891b17ab [NOD-392] If transaction is in mempool - don't try to get number of confirmations (#443) 2019-11-03 11:27:46 +02:00
Ori Newman
f7fbfbf5c4 [NOD-383] Fix updateAddedChainBlocks and updateRemovedChainHashes to update IsChainBlock and AcceptingBlockID appropriately 2019-10-31 15:47:30 +02:00
Ori Newman
0e278ca22b [NOD-350] Implement testnet faucet (#438)
* [NOD-350] Implement testnet faucet

* [NOD-350] Add JSON annotations to api server response types

* [NOD-350] Fix IP check query, update IP usage with upsert, and make IP a primary key

* [NOD-377] Remove redundant float conversion

* [NOD-377] Change not current database error message

* [NOD-377] change API route from /money_request to /request_money

* [NOD-377] Add a constant for 24 hours

* [NOD-377] Remove redundant call for getWalletUTXOSet()

* [NOD-377] Condition refactoring

* [NOD-377] Fix POST request to API server content type

* [NOD-350] Rename day -> timeBetweenRequests

* [NOD-377] Rename timeBetweenRequests -> minRequestInterval, timeBefore24Hours -> minRequestInterval

* [NOD-350] Rename file responsetypes -> response_types

* [NOD-350] Rename convertTxModelToTxResponse -> convertTxDBModelToTxResponse

* [NOD-350] Explicitly select blue_score in fetchSelectedTipBlueScore

* [NOD-350] Refactor and add comments

* [NOD-350] Make calcFee use MassPerTxByte

* [NOD-350] Convert IP column to varchar(39) to allow ipv6 addresses

* [NOD-350] Add comments to isFundedAndIsChangeOutputRequired

* [NOD-350] Remove approximateConfirmationsForCoinbaseMaturity

* [NOD-350] Fix comments
2019-10-31 11:59:56 +02:00
Ori Newman
c66fb294c8 [NOD-377] Don't disconnect from peers with finalized rendezvous point. Instead remove them from sync candidates. (#439) 2019-10-30 17:18:46 +02:00
stasatdaglabs
88b7e7ca03 [NOD-394] Add --cleanup to ./run-dev.sh (#441)
* [NOD-394] Rename --only-build to --no-run.

* [NOD-394] Allow --rm and --no-build to be run together with no-run.

* [NOD-394] Make --cleanup alias for --rm --no-run --no-build.

* [NOD-394] Fix typo in usage string.

* [NOD-394] Set docker/docker-compose.yaml to use devnet instead of testnet.
2019-10-30 17:12:48 +02:00
stasatdaglabs
a9b659a36f [NOD-393] Force docker-compose to actually cleanup before it runs when running ./run-dev.sh --rm. (#440) 2019-10-30 14:43:23 +02:00
stasatdaglabs
90fc6ba3e7 [NOD-376] Fix invalid orphan blocks causing their valid parents to be rejected (#436)
* [NOD-376] Made bad unorphaned blocks not reject the original block.

* [NOD-376] Fix wording in a comment.

* [NOD-376] Add a test to make sure that bad child blocks don't invalidate valid parent blocks.

* [NOD-376] Clarify comments and don't check PoW for child block (it's irrelevant for this test case).
2019-10-29 15:54:59 +02:00
stasatdaglabs
8ea97aa3fd [NOD-356] Add indication in getBlock that a block is an orphan. (#437) 2019-10-29 15:53:56 +02:00
Dan Aharoni
7c9f5a65d8 [NOD-374] Create transaction signer tool (#435)
* [NOD-374] Create transaction signer tool

* [NOD-374] Rename variables

* [NOD-374] Add network selection; Move error handling to control-flow function

* [NOD-374] Rename variables

* [NOD-374] Add new line after if blocks

* [NOD-374] Fix formatting error (gofmt)
2019-10-29 15:00:03 +02:00
stasatdaglabs
e2d3c4c821 [NOD-378] Add --no-build and --only-build to run-dev.sh. (#434) 2019-10-28 14:57:14 +02:00
Dan Aharoni
92578e2853 [NOD-368] correct text output of address generator (#433)
* [NOD-368] correct text output of address generator (address instead of public key)

* [NOD-368] add hash160 to output of genaddr

* [NOD-368] change encoding to hexadecimal

* [NOD-368] fix formatting
2019-10-24 18:00:06 +03:00
Dan Aharoni
3018c18616 [NOD-362] Calculate txgen fees by mass instead of size (#431)
* [NOD-362] Calculate txgen fee by mass

* formatting

* [NOD-362] add varint size to calculation

* [NOD-362] rename variables

* [NOD-362] formatting
2019-10-22 18:08:19 +03:00
Ori Newman
3ac9fa83c1 [NOD-367] Propagate transactions every 100 ms (#432) 2019-10-22 15:17:18 +03:00
Dan Aharoni
c5b0398dac [NOD-357] change finality interval (#430)
* change finality interval to 1000 (~16.6-minute interval)

* [NOD-357] define finality interval in dagParams instead of using a constant.

* use dagParams for FinalityInterval instead of constant

* override parameter so test would pass on CI (Jenkins machine runs out of memory if we use 1000)

* formatting the code
2019-10-16 15:32:10 +03:00
Ori Newman
76f23d8a9b [NOD-359] Calculate Transaction mass from previous scriptPubKeys (#429)
* [NOD-359] Calculate Transaction mass from previous scriptPubKeys

* [NOD-359] Add missing block errors
2019-10-15 13:03:16 +03:00
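The change above makes a transaction's mass depend on the scriptPubKeys of the outputs it spends, which is why those previous outputs have to be fetched before mass can be computed. A rough sketch of that dependency; the weight constants here are made up for illustration and are not kaspad's real values:

```go
package main

import "fmt"

// Illustrative mass weights; the real constants live in kaspad's blockdag
// package and differ from these.
const (
	massPerTxByte           = 1
	massPerScriptPubKeyByte = 10
)

// txMass sketches the NOD-359 idea: mass depends both on the transaction's
// own serialized size and on the scriptPubKeys of the outputs it spends.
func txMass(serializedSize int, prevScriptPubKeys [][]byte) int {
	mass := serializedSize * massPerTxByte
	for _, spk := range prevScriptPubKeys {
		mass += len(spk) * massPerScriptPubKeyByte
	}
	return mass
}

func main() {
	prevScripts := [][]byte{make([]byte, 25), make([]byte, 25)}
	fmt.Println(txMass(250, prevScripts)) // 250 + 2*25*10 = 750
}
```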
stasatdaglabs
089cee0e1d [NOD-352] Create a Dockerfile for APIServer (#428)
* [NOD-352] Created a Dockerfile for APIServer.

* [NOD-352] Removed unnecessary testing stuff from the APIServer and DNSSeeder Dockerfiles.
2019-10-13 14:07:44 +03:00
stasatdaglabs
982340456d [NOD-361] Fixed Nil dereference in connmgr/seed.go. (#427) 2019-10-08 13:04:07 +03:00
stasatdaglabs
13cf1f7715 [NOD-360] Renamed TestNet3 to TestNet. (#426) 2019-10-08 12:59:54 +03:00
stasatdaglabs
d99af7424c [NOD-353] Fixed passing wrong argument in handleGetRawTransaction. (#424)
* [NOD-353] Fixed passing wrong argument in handleGetRawTransaction.

* [NOD-353] Renamed mtx to msgTx.
2019-10-07 16:14:53 +03:00
Dan Aharoni
40ad9c5d2b [NOD-355] remove 'flushDbCache' and 'getBlockHash' rpc commands (#425)
* [NOD-355] remove 'flushDbCache' and 'getBlockHash' rpc commands

* [NOD-355] remove 'flushDbCache' and 'getBlockHash' from rpc server help
2019-10-07 14:07:20 +03:00
Ori Newman
9dfc3091b4 [NOD-134] Don't connect to an address from the same /16 CIDR (#423)
* [NOD-134] Don't connect to an address from the same /16 CIDR

* [NOD-134] Rename outboundPeerConnected variables

* [NOD-134] Change newConnMtx to newConnReqMtx
2019-10-06 15:53:58 +03:00
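The /16 rule in the entry above compares only the first two octets of the peers' IPv4 addresses. A minimal sketch of that comparison, independent of kaspad's address manager types and covering IPv4 only:

```go
package main

import (
	"fmt"
	"net"
)

// sameIPv4Slash16 reports whether two IPv4 addresses fall into the same /16
// network. This only sketches the NOD-134 rule applied when picking outbound
// connection candidates.
func sameIPv4Slash16(a, b net.IP) bool {
	a4, b4 := a.To4(), b.To4()
	if a4 == nil || b4 == nil {
		return false // this sketch covers IPv4 only
	}
	return a4[0] == b4[0] && a4[1] == b4[1]
}

func main() {
	fmt.Println(sameIPv4Slash16(net.ParseIP("203.0.113.5"), net.ParseIP("203.0.200.9")))  // true
	fmt.Println(sameIPv4Slash16(net.ParseIP("203.0.113.5"), net.ParseIP("198.51.100.1"))) // false
}
```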
Ori Newman
e6a4ed04f3 [NOD-338] Recover indexer if it didn't work for a while (#422)
* [NOD-338] Recover indexer if it didn't work for a while

* [NOD-338] Recover indexer if it didn't work for a while

* [NOD-338] Recover indexer if it didn't work for a while

* [NOD-338] Add tests and move blockidhash.go to blockdag package

* [NOD-338] Delete index current block id when dropping index, and do some refactoring

* [NOD-338] Change comments

* [NOD-338] Change recover error messages

* [NOD-338] Fix comments

* [NOD-338] Fix comments and fix test name
2019-09-26 18:19:58 +03:00
stasatdaglabs
e3aa8d65dc [NOD-337] In CheckTransactionSanity make max mass of transaction to be half of block max mass (#421)
* [NOD-337] In CheckTransactionSanity, made max mass of transaction to be half of block max mass.

* [NOD-337] Added a comment for MaxMassPerTx.

* [NOD-337] Fixed a couple of comments.
2019-09-24 14:27:04 +03:00
stasatdaglabs
ece0fb83e8 [NOD-342] Renamed CHAN to BDAG. (#420) 2019-09-24 11:42:52 +03:00
stasatdaglabs
683830d574 [NOD-341] Made ChainChanged not fire if the acceptance index is off. (#419) 2019-09-22 17:59:20 +03:00
stasatdaglabs
c5108a4abd [NOD-276] Extracted p2p onXXX methods to separate files. (#418) 2019-09-22 17:40:10 +03:00
stasatdaglabs
40342eb45a [NOD-275] Split rpcserver.go to separate files (#417)
* [NOD-275] Moved getBlockTemplate and related functionality to a separate file.

* [NOD-275] Started moving handlers to separate files.

* [NOD-275] Fixed merge errors.

* [NOD-275] Moved all handlers out of rpcserver.go.

* [NOD-275] Moved non-shared functions out of rpcserver.go.

* [NOD-275] Moved handleGetAllManualNodesInfo to a separate file.

* [NOD-275] Moved handlers out of rpcwebsocket.go to separate files.

* [NOD-275] Fixed import error.

* [NOD-275] Renamed all handler files to include underscores.

* [NOD-275] Moved common rpc helper functions to common.go.
2019-09-22 16:41:37 +03:00
stasatdaglabs
adf4b4380e [NOD-289] Implement API-Server bootstrapping and booting after downtime (#408)
* [NOD-289] Implemented database isCurrent checking and connection.

* [NOD-289] Added GetChainFromBlock to RPCClient.

* [NOD-289] Limited the amount of blocks in GetChainFromBlockResponse.

* [NOD-289] Fixed various issues that were keeping GetChainFromBlocks from working properly.

* [NOD-289] Created blockloop.go.

* [NOD-289] Updated go.mod after merge.

* [NOD-289] Implemented collection of current selected parent chain.

* [NOD-289] Fixed test. Reverted not deleting utxoDiffData from the DB.

* [NOD-289] Implemented GetBlocks.

* [NOD-289] Added comment to BlockHashesFrom.

* [NOD-289] Added GetBlocks to rpcclient.

* [NOD-289] Added verboseBlocks to GetBlocks.

* [NOD-289] Implemented block insertion.

* [NOD-289] Added AUTO_INCREMENT to tables that were missing it.

* [NOD-289] Made gasLimit in subnetwork nullable.

* [NOD-289] Renamed transactions_outputs to transaction_outputs.

* [NOD-289] Fixed weird coinbase behavior in vin.

* [NOD-289] Made collectCurrentBlocks start from the most recent startHash.

* [NOD-289] Added IsChainBlock to GetBlockVerboseResult.

* [NOD-289] Implemented adding a block from onBlockAdded.

* [NOD-289] Added removedParentChainHashes to getChainFromBlock.

* [NOD-289] Implemented updating the selected parent chain from onChainChanged.

* [NOD-289] Implemented some initial logic for updating the UTXO.

* [NOD-289] Fixed merge errors.

* [NOD-326] Fixed some more merge errors.

* [NOD-289] Added error handling for missing required records.

* [NOD-289] Implemented handling removedChainHashes.

* [NOD-289] Implemented handling addedChainBlocks.

* [NOD-289] Fixed incorrect coinbase check.

* [NOD-289] Implemented inserting the transaction output address.

* [NOD-289] Added updating block.IsChainBlock.

* [NOD-289] Split insertBlock into many small functions.

* [NOD-289] Split updateSelectedParentChain into smaller functions.

* [NOD-289] Fixed pointer errors.

* [NOD-289] Fixed a bad exists check.

* [NOD-289] Fixed a couple of small bugs.

* [NOD-289] Fixed a TxID/Hash mixup.

* [NOD-289] Added block/tx mass to getBlockVerboseResponse.

* [NOD-289] Renamed blockLoop.go to sync.go. Added comments.

* [NOD-289] Deleted apiserver README.

* [NOD-289] Fixed golint errors.

* [NOD-289] Renamed findMostRecentBlockHash to findHashOfBluestBlock.

* [NOD-289] Fixed style in syncBlocks and fixed a comment.

* [NOD-289] Copied NewErrorFromDBErrors over from NOD-324.

* [NOD-289] Created a couple of utils to make error handling with gorm slightly less painful.

* [NOD-289] Added error handling for database calls.

* [NOD-289] Fixed some more style/comments.

* [NOD-289] Fixed comments.

* [NOD-289] Renamed TransactionInput.TransactionOutput to TransactionInput.PreviousTransactionOutput.

* [NOD-289] Added comments about pagination in getBlocks and getChainFromBlock.

* [NOD-289] Removed the coinbase field from Vin.

* [NOD-289] Deferred handling chainChangedMsgs until we have the appropriate data.

* [NOD-289] Optimized queries in updateRemovedChainHashes and updateAddedChainBlocks.

* [NOD-289] Optimized queries in insertBlockParents.

* [NOD-289] Optimized queries in insertTransactionInput.

* [NOD-289] Split Where calls to separate lines.

* [NOD-289] Fixed merge errors.

* [NOD-289] Exited early from insertBlockParents if we're the genesis block.

* [NOD-289] Improved nextChainChangedChan mechanism.

* [NOD-289] Fixed the above sync mechanism a bit.

* [NOD-289] Renamed IsDBRecordNotFoundError to HasDBRecordNotFoundError and IsDBError to HasDBError.

* [NOD-289] Replaced old error handling for db errors with the lovely new stuff.

* [NOD-289] Exited early if we already inserted a block. This saves us checking if a record already exists for some record types.

* [NOD-289] Decoupled syncBlocks from syncSelectedParentChain.

* [NOD-289] Made a comment more explicit.

* [NOD-289] Extracted net resolution to a separate function.

* [NOD-289] Extracted syncing to a separate function.

* [NOD-289] Fixed a comment.

* [NOD-289] Fixed merge errors.

* [NOD-289] Fixed a couple of bugs.

* [NOD-289] Fixed another bug.

* [NOD-289] Extracted ChainChangedMsg conversion to a separate function.

* [NOD-289] Optimized queries in canHandleChainChangedMsg.

* [NOD-289] Moved the sync function closer to its call site.

* [NOD-289] Renamed HasDBRecordNotFoundError to IsDBRecordNotFoundError.

* [NOD-289] Used count instead of first.

* [NOD-289] Renamed address to hexAddress.
2019-09-22 13:14:51 +03:00
Ori Newman
7371120481 [NOD-333] Make ExtractScriptPubKeyAddrs return single address (#415)
* [NOD-333] Make ExtractScriptPubKeyAddrs return single address

* [NOD-333] Remove reference to required signatures from ExtractScriptPubKeyAddrs
2019-09-19 11:19:51 +03:00
stasatdaglabs
1064b5009d [NOD-315] Implement acceptance index (#413)
* [NOD-315] Created acceptanceindex.go including boilerplate.

* [NOD-315] Disallowed calls to notifyChainChanges and getChainFromBlock if the acceptance index is not on.

* [NOD-315] Implemented the acceptance index.

* [NOD-315] Fixed serialization/deserialization. Added test.

* [NOD-315] Fixed/added comments.

* [NOD-315] Fixed copy/paste errors.

* [NOD-315] Added an empty line for readability.
2019-09-19 10:38:33 +03:00
stasatdaglabs
850876e6a7 [NOD-335] Don't print stack-trace when cli flags are invalid (#416)
* [NOD-335] Made it not write a stack trace if the command line flags are wrong.

* [NOD-335] Fixed panic not printing the right error.

* [NOD-335] Removed code duplication.
2019-09-18 17:22:32 +03:00
Svarog
d4083cbdbe [NOD-309] post transaction (#403)
* [NOD-319] Add query params to api server route handler

* Temp commit

* [NOD-322] Make database.DB a function

* [NOD-322] Move context to be the first parameter in all functions

* [NOD-322] Set db to nil on database.Close()

* [NOD-322] Tidy go.mod/go.sum

* [NOD-323] Move rpc-client to separate package

* [NOD-309] Add controller for POST /transaction

* [NOD-309] Added route for POST /transaction

* [NOD-309] in POST /transaction: Forward reject errors to client

* [NOD-309] Added custom client messages to errors in POST /transaction

* [NOD-309] Use utils.NewInternalServerHandlerError where appropriate
2019-09-18 16:09:48 +03:00
Ori Newman
47c5eddf38 [NOD-329] Separate connect timeout and request timeout to JSON-RPC server (#411) 2019-09-18 15:01:31 +03:00
Ori Newman
f6a6508eff [NOD-328] Make API server mainHandler return an object (#412) 2019-09-18 14:47:59 +03:00
Ori Newman
a036618b44 [NOD-324] Properly handle GORM errors in API server (#409)
* [NOD-324] Properly handle GORM errors in API server

* [NOD-324] Handle RecordNotFound error in GetBlockByHashHandler

* [NOD-324] Make a separate function for NewErrorFromDBErrors
2019-09-18 14:09:07 +03:00
Ori Newman
2429b623fc [NOD-327] Add --migrate cli flag to API server (#407)
* [NOD-327] Add --migrate cli flag to API server

* [NOD-327] Change log messages

* [NOD-327] Remove `required` flag from API server RPC CLI arguments

* [NOD-327] Add database version in migrations logs
2019-09-18 13:51:20 +03:00
Ori Newman
f4850b9e7a [NOD-330] Use BTCD logs for gorm (#410) 2019-09-18 11:47:54 +03:00
Ori Newman
e81ac5f19e [NOD-307] Implement get blocks for api server (#405)
* [NOD-307] Implement API-Server GET /blocks

* [NOD-307] Implement API-Server GET /blocks

* [NOD-307] Add comments to exported constants

* [NOD-307] Flatten GET query values and check that 'order' value is valid

* [NOD-307] Validate order values in GetBlocksHandler

* [NOD-307] Add convertQueryParamToInt function
2019-09-16 16:53:57 +03:00
Ori Newman
31ccedf136 [NOD-325] Enable separate error messages for logging and client (#406)
* [NOD-325] Enable separate error messages for logging and client

* [NOD-325] Add json annotation to clientError
2019-09-16 13:26:05 +03:00
Svarog
502b510ccd [NOD-322] Minor api-server refactoring. (#401)
* [NOD-322] Make database.DB a function

* [NOD-322] Move context to be the first parameter in all functions

* [NOD-322] Set db to nil on database.Close()

* [NOD-322] Tidy go.mod/go.sum

* [NOD-322] Use http package const + message for StatusInternalServerError
2019-09-15 12:32:12 +03:00
stasatdaglabs
369031f963 [NOD-326] Replaced the UTXOs table with TransactionOutputs.isSpent (#404)
* [NOD-326] Replaced UTXO table with TransactionOutput.IsSpent.

* [NOD-326] Fixed merge errors.
2019-09-15 12:04:51 +03:00
Ori Newman
a789680db1 [NOD-314] change pkscript to scriptpubkey (#400)
* [NOD-314] Change everywhere PkScript to ScriptPubKey

* [NOD-314] Change everywhere PkScript to ScriptPubKey

* [NOD-314] Rename pkPops -> scriptPubKeyPops
2019-09-15 11:09:36 +03:00
Svarog
90bda69931 [NOD-323] Move rpc-client to separate package (#402) 2019-09-15 10:21:42 +03:00
Svarog
9647cb3e08 [NOD-318] Upgrade everything to Go1.13 (#393) 2019-09-09 15:57:31 +03:00
Ori Newman
79c9060909 [NOD-308] Implement API-Server GET /fee-estimates (#399) 2019-09-09 11:21:56 +03:00
Svarog
20206789e0 [NOD-299] Add waitgroup to wait for all spawns to complete before calling teardown (#385)
* [NOD-299] Add waitgroup to wait for all `spawn`s to complete before calling teardown

* [NOD-299] Restore spawn on teardown + mark spawn done in the correct thread
2019-09-09 11:02:31 +03:00
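The pattern described above routes every goroutine through a `spawn` helper that registers with a wait group, so teardown can block until all of them have returned. A sketch with a hypothetical `spawner` type; the project's actual helper differs in detail:

```go
package main

import (
	"fmt"
	"sync"
)

// spawner sketches the NOD-299 idea: every goroutine is started through
// spawn, so teardown can wait for all of them to finish.
type spawner struct {
	wg sync.WaitGroup
}

func (s *spawner) spawn(f func()) {
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		f()
	}()
}

// teardown blocks until every spawned goroutine has returned.
func (s *spawner) teardown() {
	s.wg.Wait()
	fmt.Println("all goroutines finished, tearing down")
}

func main() {
	var s spawner
	for i := 0; i < 3; i++ {
		i := i
		s.spawn(func() { fmt.Println("worker", i, "done") })
	}
	s.teardown()
}
```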
Ori Newman
1ddae35277 [NOD-305] Implement API-Server GET /utxos/{address} (#398)
* [NOD-305] Implement API-Server GET /utxos/{address}

* [NOD-305] Add accepting block blue score to the resulted utxo
2019-09-08 16:58:28 +03:00
Ori Newman
75a8c6459a [NOD-306] Implement API-Server GET /block/{hash} (#397)
* [NOD-306] Implement API-Server GET /block/{hash}

* [NOD-306] Validate that hash string is a valid hex

* [NOD-306] Unite invalid hash errors
2019-09-08 16:09:09 +03:00
Ori Newman
7fc2430ab1 [NOD-304] Implement get transactions by address for api server (#395)
* [NOD-303] Implement get transactions by address for API server

* [NOD-304] Implement get transactions by address

* [NOD-304] Implement get transactions by address

* [NOD-304] Use structs for where if possible

* [NOD-304] Auto increment IDs

* [NOD-304] Make defaultGetTransactionsLimit constant

* [NOD-304] Delete db directory

* [NOD-304] change db var to query

* [NOD-304] Extract route handle function from addRoutes

* [NOD-304] Order transactions by ID

* [NOD-304] Add error for passing arrays to GET
2019-09-08 10:52:03 +03:00
Ori Newman
cf9af0fb5d [NOD-320] Make txgen explicitly skip mempool transactions (#396) 2019-09-05 16:14:55 +03:00
Ori Newman
db6d6293c7 [NOD-319] Add query params to api server route handler (#394) 2019-09-04 17:34:36 +03:00
Ori Newman
ae25ec2e6b [NOD-303] Implement get transaction by id for api server (#391)
* [NOD-303] Implement get transaction by id for api server

* [NOD-303] Make routeParamTxID a constant

* [NOD-303] Change database is not current error.

* [NOD-303] Add ID to TransactionInput and TransactionOutput models

* [NOD-303] Change transactions_outputs table name to transaction_outputs and transactions_inputs to transaction_inputs

* [NOD-303] Add json annotations to transaction response types

* [NOD-303] Split server package

* [NOD-303] Add GetTransactionByHashHandler

* [NOD-303] Add comments to exported functions and variables

* [NOD-303] Put response types in a separate file

* [NOD-303] Rename functions
2019-09-03 15:54:59 +03:00
Svarog
7521545682 [NOD-311] Removed JSONRPCfyer (#392) 2019-09-03 11:24:25 +03:00
Ori Newman
169e96e851 [NOD-310] Implement REST server in API server (#389)
* [NOD-310] Implement REST server in API server

* [NOD-310] MetaData -> Metadata

* [NOD-310] Make custom context methods instead of custom request functions

* [NOD-310] change "Request ID" prefix to "RID" and convert to apiServerContext with newAPIServerContext everywhere
2019-09-01 17:03:43 +03:00
Ori Newman
893b8a88c8 [NOD-312] Change defaultMinRelayTxFee to 1 satoshi per byte (#390) 2019-09-01 15:21:24 +03:00
Svarog
c60711ab15 [NOD-302] Accept ConnectionEstablished when Pending is expected in TestRetryPermanent (#388) 2019-08-29 15:27:03 +03:00
Ori Newman
1b00e01030 [NOD-301] Don't sync with peer if the rendezvous point is below finality (#387)
* [NOD-301] Don't sync with peer if the rendezvous point is below finality

* [NOD-301] Add block hash and peer address for the warn message

* [NOD-301] Fix perrLog.Warnf arguments order
2019-08-29 10:47:05 +03:00
Ori Newman
f0c80905eb [NOD-300] If node has invalid ancestor set the according status in blockindex (#386)
* [NOD-300] If node has invalid ancestor set the according status in blockindex

* [NOD-300] Test that status is also updated for grand child of an invalid block

* [NOD-300] change make(blockSet) to newSet()
2019-08-28 17:44:33 +03:00
stasatdaglabs
b07a118431 [NOD-292] When writing block to database - also create record in block index (#376)
* [NOD-292] In accept.go, made dbStoreBlock and flushToDB occur within the same transaction.

* [NOD-292] Implemented processing blocks that were not validated on BTCD start.

* [NOD-292] Fixed processing logic on init. Added a test for it.

* [NOD-292] Fixed some comments.

* [NOD-292] Made unlocks deferred in a couple of places.

* [NOD-292] Made unprocessed block reprocess via ProcessBlock rather than maybeAcceptBlock.

* [NOD-292] Fixed grammar in comment. Added an explanation to TestAcceptingInInit.

* [NOD-292] Split flushToDB into two versions.

* [NOD-292] Fixed a bad assignment.

* [NOD-292] Fixed bad spacing.
2019-08-28 14:52:57 +03:00
Ori Newman
0ae06cd277 [NOD-297] Fix onChainChanged on rpcclient (#384)
* [NOD-297] Fix onChainChanged on rpcclient

* [NOD-285] create gorm models for db (#378)

* [NOD-285] Map API-Server database using GORM

* [NOD-285] Add accepting block to transactions and blocks models, and remove accepting block model

* [NOD-285] Define model relations

* [NOD-285] Fix many to many for Transaction and Block models

* [NOD-285] Remove redundant main file

* [NOD-296] Send SyncMgr.SubmitBlock errors as rpc errors (#381)

* [NOD-296] Send SyncMgr.SubmitBlock errors as rpc errors

* [NOD-296] Add error message prefix

* [NOD-298] Add comments to gorm models (#382)

* [NOD-294] Fix golint in deploy.sh and fix all lint warnings (#380)

* [NOD-294] Fix golint in deploy.sh and fixed all lint errors

* [NOD-294] Fix typos in comments

* [NOD-294] Convert VirtualForTest into alias of *virtualBlock

* [NOD-294] Fixed some more typos in comments

* [NOD-295] Limit the length of GetData to 50 (#383)

* [NOD-295] Fixed bad break condition in addInvsToGetDataMessageFromQueue.

* [NOD-295] Fixed the fix for bad break condition in addInvsToGetDataMessageFromQueue.

* [NOD-295] Made the check for max invs refer to invsNum instead of MaxInvPerGetDataMsg.

* [NOD-297] Fix onChainChanged on rpcclient

* [NOD-286] Implement API-Server base structure (#379)

* [NOD-286] Implement API-Server base structure

* [NOD-286] Add rpc user and password as command line arguments

* [NOD-286] Make log directory a CLI argument

* [NOD-286] Add db login details as CLI arguments

* [NOD-297] Fix onChainChanged on rpcclient and server

* [NOD-297] Fix variables and functions names

* [NOD-297] Fix AcceptedTxIds -> AcceptedTxIDs
2019-08-28 12:52:07 +03:00
Ori Newman
ed9165f533 [NOD-286] Implement API-Server base structure (#379)
* [NOD-286] Implement API-Server base structure

* [NOD-286] Add rpc user and password as command line arguments

* [NOD-286] Make log directory a CLI argument

* [NOD-286] Add db login details as CLI arguments
2019-08-27 16:19:01 +03:00
stasatdaglabs
c73113a12e [NOD-295] Limit the length of GetData to 50 (#383)
* [NOD-295] Fixed bad break condition in addInvsToGetDataMessageFromQueue.

* [NOD-295] Fixed the fix for bad break condition in addInvsToGetDataMessageFromQueue.

* [NOD-295] Made the check for max invs refer to invsNum instead of MaxInvPerGetDataMsg.
2019-08-27 13:09:36 +03:00
Svarog
480b2ca07c [NOD-294] Fix golint in deploy.sh and fix all lint warnings (#380)
* [NOD-294] Fix golint in deploy.sh and fixed all lint errors

* [NOD-294] Fix typos in comments

* [NOD-294] Convert VirtualForTest into alias of *virtualBlock

* [NOD-294] Fixed some more typos in comments
2019-08-27 12:00:23 +03:00
Ori Newman
c72b914050 [NOD-298] Add comments to gorm models (#382) 2019-08-27 11:48:43 +03:00
Ori Newman
5cf7f01d3f [NOD-296] Send SyncMgr.SubmitBlock errors as rpc errors (#381)
* [NOD-296] Send SyncMgr.SubmitBlock errors as rpc errors

* [NOD-296] Add error message prefix
2019-08-27 11:25:45 +03:00
Ori Newman
552a5917c2 [NOD-285] create gorm models for db (#378)
* [NOD-285] Map API-Server database using GORM

* [NOD-285] Add accepting block to transactions and blocks models, and remove accepting block model

* [NOD-285] Define model relations

* [NOD-285] Fix many to many for Transaction and Block models

* [NOD-285] Remove redundant main file
2019-08-27 11:25:07 +03:00
stasatdaglabs
5c14719f14 [NOD-295] Capped amount of invs in a getData message. (#377) 2019-08-26 15:12:55 +03:00
Svarog
d2353a189a [NOD-291] Remove database check from dag.BlockExists (#375) 2019-08-25 15:00:16 +03:00
Ori Newman
4fcd705ae3 [NOD-290] Remove block_id from transactions table (#374)
* [NOD-290] Remove block_id from transactions table

* [NOD-290] Remove block_id foreign key from transactions table
2019-08-25 10:23:03 +03:00
Ori Newman
744c17b4c8 [NOD-280] Create database for API server (#373)
* [NOD-280] Create database for API server

* [NOD-280] Rename public_key_script to pk_script

* [NOD-280] Change indexes names

* [NOD-280] Add accepting block to blocks and transactions table and remove accepting_blocks table

* [NOD-280] Add readme

* [NOD-280] Change VARCHAR(32) to CHAR(64)

* [NOD-280] Rename location_in_block to index
2019-08-22 17:21:09 +03:00
stasatdaglabs
e2eca24b33 [NOD-282] Fixed Telegram messages not being sent. (#371) 2019-08-22 13:41:48 +03:00
stasatdaglabs
36d5ac189f [NOD-283] Fixed a crash in notifyChainChanged. (#372) 2019-08-22 13:23:30 +03:00
stasatdaglabs
1a569c7bd7 [NOD-270] Implement NotifyChainUpdates api call (#368)
* [NOD-270] Added notifyChainChanges and related commands.

* [NOD-270] Added NTChainChanged to blockdag.

* [NOD-270] Implemented collection and sending of ChainChanged notifications.

* [NOD-270] Fixed an improperly named test.

* [NOD-270] Added a test: TestChainChangedNotification.

* [NOD-270] Fixed a couple copy+paste errors.

* [NOD-270] Added a couple of comments for TestChainChangedNotification.

* [NOD-270] Fixed formatting error.

* [NOD-270] Fixed a comment.

* [NOD-270] Uncoupled chain updates inside blockdag from the concept of a notification.

* [NOD-270] Removed intermediary ChainUpdates object from ChainChangedNotificationData.
2019-08-21 12:58:32 +03:00
Ori Newman
6bb53eaae3 [NOD-256] add error log (#369)
* [NOD-256] Add error log

* [NOD-256] Add error log

* [NOD-256] Fix typo and comment

* [NOD-256] Remove btclog dir

* [NOD-256] Format project

* [NOD-256] Add error log files

* [NOD-256] Add an option to attach an additional log file to an existing backend logger

* [NOD-256] Get rid of redundant logs initialization

* [NOD-256] rename initLogRotators to initLog

* [NOD-256] Get rid of ExampleSignTxOutput and convert ExampleBlockDAG_ProcessBlock to a regular test

* [NOD-256] Show error message if os.Exiting from initLog
2019-08-21 11:26:21 +03:00
Svarog
747a9bb944 [NOD-278] Added default for MinRelayTxFee (#367) 2019-08-19 17:52:58 +03:00
Ori Newman
d2daf334a5 [NOD-241] Implement lower resolution peer rendezvous point discovery (#353)
* [NOD-241] Implement lower resolution peer rendezvous point discovery

* [NOD-241] Implement lower resolution peer rendezvous point discovery

* [NOD-241] Find exact rendezvous point

* [NOD-241] Find exact rendezvous point

* [NOD-241] Fix tests

* [NOD-241] Remove hash stop from MsgBlockLocator and add tests to MsgBlockLocator and MsgGetBlockLocator

* [NOD-241] Change everywhere startHash to hashStart and change comments

* [NOD-241] Fix locateBlockNodes to stop at hashStop

* [NOD-241] Formatted locatorSummary.

* [NOD-241] Fix node reversal

* [NOD-241] Fix hash start and hash stop order, and don't include startNode in dag.blockLocator

* [NOD-241] rename locateBlockNodes -> getBlueBlocksBetween and add a comment to it

* [NOD-241] change hash start to start hash and hash stop to stop hash

* [NOD-241] Move block locator stuff to a different file

* [NOD-241] Rename msggetblocks.go to msggetblockinvs.go

* [NOD-241] Format project

* [NOD-241] Rename rpcserverSyncManager.LocateHeaders to GetBlueBlocksHeadersBetween

* [NOD-241] Move the logic of finding the highest shared block to OnBlockLocator

* [NOD-241] Rename chainHeight -> nextChainHeight

* [NOD-241] Fix typo in comment
2019-08-19 15:35:13 +03:00
stasatdaglabs
70737e4e94 [NOD-264] Implement tx-selection algorithm (#358)
* [NOD-264] Implemented calcTxSelectionValue.

* [NOD-264] Fixed bad subnetworkID in calcTxSelectionValue.

* [NOD-264] Implemented sorting the txDescs by value.

* [NOD-264] Got rid of txPrioItem.

* [NOD-264] Moved transaction selection to a separate file.

* [NOD-264] Renamed the result object to txsForBlockTemplate.

* [NOD-264] Implemented tx selection.

* [NOD-264] Fixed trying to get the gas limit for built-in subnetworks.

* [NOD-264] Wrote comments where appropriate.

* [NOD-264] Moved calcTxSelectionValue to the mining package. (Non-mining nodes shouldn't be forced to calc selection value for every transaction)

* [NOD-264] Wrote a test for selectTxs.

* [NOD-264] Fixed a comment.

* [NOD-264] Fixed misunderstood test.

* [NOD-264] Added zero fee check. Added a couple more tests.

* [NOD-264] Added probabilistic tests. Fixed a couple of bugs in tx selection.

* [NOD-264] Fixed tests with missing fees.

* [NOD-264] Added a test over a range of txs with different gas/mass.

* [NOD-264] Added expected probability to the rest of the test cases.

* [NOD-264] Tightened bounds in probability test.

* [NOD-264] Fixed values in probability test.

* [NOD-264] Added a comments for alpha and rebalanceThreshold.

* [NOD-264] Fixed a couple of comments, renamed result to txsForBlockTemplate.

* [NOD-264] Removed an irrelevant comment. Changed Tracef to Warnf in some logs.

* [NOD-264] Renamed selectionValue -> txValue.

* [NOD-264] Moved rebalancing to the start of the tx selection loop.

* [NOD-264] Added overflow check for gasUsage.

* [NOD-264] Renamed blockSigOps and blockMass to totalSigOps and totalMass.

* [NOD-264] Removed the need to pass usedCount to reblanaceCandidates. Also relaxed bounds in a test.

* [NOD-264] Split selectTxs into smaller functions. Also relaxed bounds in a test some more.

* [NOD-264] Added a comment for findTx.

* [NOD-264] Ordered candidateTxs by subnetwork instead of txValue.

* [NOD-264] Disallowed zero tx fees in mempool and config. Renamed iterateCandidateTxs to populateTemplateFromCandidates.

* [NOD-264] Changed isFinalizedTransaction log level from Warn to Debug.

* [NOD-264] Removed references to SigOps in txSelection.

* [NOD-264] Removed SigOps validation. Validating mass should suffice.

* [NOD-264] Renamed wasUsed to isMarkedForDeletion.

* [NOD-264] Renamed markCandidateTxUsed to markCandidateTxForDeletion.

* [NOD-264] Made some probabilistic tests less likely to fail when they shouldn't.

* [NOD-264] Added a message warning people about probabilistic tests.

* [NOD-264] Rephrased a comment about rebalanceThreshold.

* [NOD-264] Removed IsCoinBase, CheckTransactionInputsAndCalulateFee, and ValidateTransactionScripts from txSelection.

* [NOD-264] Removed a condition that is no longer relevant.

* [NOD-264] "which's" -> "whose"

* [NOD-264] Removed wasteful preallocations.

* [NOD-264] Fixed a comment referring to "used" transactions.
2019-08-19 12:08:48 +03:00
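The selection loop described above assigns each candidate a precomputed value (roughly fee relative to mass) and then picks candidates probabilistically in proportion to that value, which is why the tests themselves are probabilistic. The toy below shows only the weighted-pick step; `candidateTx` and `pickWeighted` are illustrative names, and the gas/mass bookkeeping and rebalancing are omitted:

```go
package main

import (
	"fmt"
	"math/rand"
)

// candidateTx is a hypothetical candidate with a precomputed selection value,
// roughly fee divided by mass in the scheme described above.
type candidateTx struct {
	id    string
	value float64
}

// pickWeighted selects one candidate with probability proportional to its
// value, the probabilistic flavour of the NOD-264 selection loop.
func pickWeighted(r *rand.Rand, candidates []candidateTx) candidateTx {
	total := 0.0
	for _, c := range candidates {
		total += c.value
	}
	target := r.Float64() * total
	for _, c := range candidates {
		if target < c.value {
			return c
		}
		target -= c.value
	}
	return candidates[len(candidates)-1]
}

func main() {
	r := rand.New(rand.NewSource(1))
	candidates := []candidateTx{{"a", 5}, {"b", 1}, {"c", 0.5}}
	counts := map[string]int{}
	for i := 0; i < 1000; i++ {
		counts[pickWeighted(r, candidates).id]++
	}
	fmt.Println(counts) // "a" dominates, mirroring how higher-value txs fill the template first
}
```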
stasatdaglabs
5f49115cac [NOD-269] Implement GetChainFromBlock api-call (#364)
* [NOD-269] Added a skeleton for getChainFromBlock.

* [NOD-269] Made startHash and includeBlocks optional.

* [NOD-269] Implemented chainBlock collection.

* [NOD-269] Extracted GetBlockVerboseResult building to its own method.

* [NOD-269] Implemented the IncludeBlocks part of GetChainFromBlock.

* [NOD-269] Added a comment for NewGetChainFromBlockCmd.

* [NOD-269] Made IsInSelectedPathChain return an error.

* [NOD-269] Fixed a very wrong comment.

* [NOD-269] Made SelectedPathChain allocate only the required amount of space.

* [NOD-269] Renamed pathChain to parentChain.

* [NOD-269] Split handleGetChainFromBlock to separate functions.

* [NOD-269] Fixed some grammar.
2019-08-18 13:31:54 +03:00
stasatdaglabs
534cb2bf5b [NOD-272] In CheckTransactionSanity, multiply mass limit check by massPerTxByte. (#365) 2019-08-15 15:05:26 +03:00
stasatdaglabs
187c525667 [NOD-234] In getBlockTemplate check if node is current (#362)
* [NOD-234] Added an IsCurrent check to handleGetBlockTemplate.

* [NOD-234] Removed IsCurrent check from handleGetBlockTemplateRequest. Added an explanation for why we're checking the chainHeight.

* [NOD-234] Added ShouldMineOnGenesis to the IsCurrent check.

* [NOD-234] Flipped && operands to fail fast.
2019-08-14 15:09:35 +03:00
stasatdaglabs
6032727965 [NOD-268] Implement selectedParentChain-related structures in btcjson. (#363) 2019-08-13 11:15:51 +03:00
stasatdaglabs
bb3f23b6dc [NOD-240] Get rid of all references for wallet related RPC commands (#361)
* [NOD-240] Removed references to the wallet in rpcserver.go.

* [NOD-240] Began removing btcwalletxxx.go.

* [NOD-240] Got rid of rpcclient/wallet.go and walletsvrcmds.go.

* [NOD-240] Moved GetBestBlockResult to dagsvrresults.go.

* [NOD-240] Finished removing walletsvrXXX.go.

* [NOD-240] Removed wallet stuff from btcctl.

* [NOD-240] Removed a few last things that I've missed.
2019-08-12 09:54:07 +03:00
stasatdaglabs
e5485ac5e6 [NOD-262] Renamed all instances of GetBlocks to GetBlockInvs. (#359) 2019-08-11 12:25:29 +03:00
stasatdaglabs
594a209f83 [NOD-263] Rename all instances of hashStop to stopHash. (#360) 2019-08-11 11:20:21 +03:00
Svarog
9981ce7adb [NOD-255] When adding orphans during netsync process - report only to debug log, unless number of orphans > k*2 (#357)
* [NOD-255] When orphan blocks arrive from netsync - don't write log unless we are in Debug

* [NOD-255] If there are more than K*2 orphans in pool - report as a potential problem anyway

* [NOD-255] Update comment to explain the K*2 figure
2019-08-07 11:30:32 +03:00
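The policy above keeps orphan arrivals at debug level and only escalates once the orphan pool grows past 2*K. A sketch of that threshold check, with `k` standing in for the network parameter:

```go
package main

import "log"

// k is a stand-in for the DAG's K parameter; the real value comes from the
// network params.
const k = 10

// logOrphan sketches the NOD-255 policy: adding an orphan during netsync is
// normally only debug-level noise, but an orphan pool larger than 2*K is
// surfaced as a warning because it may indicate a real problem.
func logOrphan(orphanPoolSize int) {
	if orphanPoolSize > 2*k {
		log.Printf("WARN: orphan pool holds %d blocks (more than 2*K=%d)", orphanPoolSize, 2*k)
		return
	}
	log.Printf("DEBUG: added orphan block, pool size is now %d", orphanPoolSize)
}

func main() {
	logOrphan(5)
	logOrphan(25)
}
```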
Svarog
49ac97c7db [NOD-265] Return an empty array for searchRawTransactions when no txs were found (#356) 2019-08-06 17:54:56 +03:00
stasatdaglabs
bfdf7a2cf2 [NOD-237] Implement transaction mass (#355)
* [NOD-237] Implemented transaction mass.

* [NOD-237] Added transaction mass validation to the mempool.

* [NOD-237] Made blockMaxMassMax not rely on MaxBlockPayload.

* [NOD-237] Added comments describing the new constants in validate.go.

* [NOD-237] Changed the default blockmaxmass to 10,000,000.

* [NOD-237] Fixed a comment that erroneously didn't refer to mass.

* [NOD-237] Added comments to ValidateTxMass and CalcTxMass.

* [NOD-237] Renamed "size" to "byte". Made validateBlockMass exit early if validation fails. Fixed unit names in comments. In CalcTxMass, moved summing of mass to the bottom of the function.

* [NOD-237] Instead of ErrMassTooHigh, renamed ErrBlockTooBig and ErrTxTooBig. Replaced wire.MaxBlockPayload with MaxMassPerBlock.

* [NOD-237] Fixed sanity checks related to block size in commands.

* [NOD-237] To use up less memory during testing, made the mass in the "too big" test come from pkScripts rather than input bytes.

* [NOD-237] Added an overflow check to validateBlockMass.
2019-08-05 16:04:24 +03:00
Ori Newman
54b681460d [NOD-244] Don't validate subnetwork registry transactions when they are being registered (#354) 2019-08-01 12:40:01 +03:00
stasatdaglabs
2147d16c1f [NOD-220] When handling an INV message, made it skip tx invs that are currently being requested. (#352) 2019-08-01 12:31:40 +03:00
Ori Newman
7c1cb47bd0 [NOD-249] Change WaitGroup to use channels (#350)
* [NOD-248] Implement waitgroup to enable waiting while adding

* [NOD-248] fix waitGroup.done() error message

* [NOD-248] atomically read wg.counter

* [NOD-248] return lowPriorityMutex

* [NOD-249] Add tests to waitgroup

* [NOD-249] Change waitgroup to use channels

* [NOD-249] Format project

* [NOD-249] Add comments and logs to waitGroup, and remove timeouts from
prioritymutex_test.go

* [NOD-249] Fix comments
2019-07-28 18:23:26 +03:00
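A channel-backed wait group, unlike `sync.WaitGroup`, can safely be waited on while other goroutines keep adding. The sketch below shows one way a counter can signal waiters through a channel; it is deliberately tiny and not the project's actual waitGroup:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// waitGroup is a sketch in the spirit of NOD-249: Wait is unblocked by a
// channel that closes whenever the counter reaches zero.
type waitGroup struct {
	mtx     sync.Mutex
	counter int64
	done    chan struct{}
}

func newWaitGroup() *waitGroup {
	return &waitGroup{done: make(chan struct{})}
}

func (wg *waitGroup) add(delta int64) {
	wg.mtx.Lock()
	defer wg.mtx.Unlock()
	wg.counter += delta
	if wg.counter == 0 {
		close(wg.done) // release current waiters
		wg.done = make(chan struct{})
	}
}

func (wg *waitGroup) doneOne() { wg.add(-1) }

// wait blocks until the counter next reaches zero.
func (wg *waitGroup) wait() {
	wg.mtx.Lock()
	if wg.counter == 0 {
		wg.mtx.Unlock()
		return
	}
	ch := wg.done
	wg.mtx.Unlock()
	<-ch
}

func main() {
	wg := newWaitGroup()
	wg.add(2)
	for i := 0; i < 2; i++ {
		go func(i int) {
			time.Sleep(10 * time.Millisecond)
			fmt.Println("task", i, "finished")
			wg.doneOne()
		}(i)
	}
	wg.wait()
	fmt.Println("all tasks done")
}
```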
stasatdaglabs
6acfa18d7c [NOD-220] Fixed handleSearchRawTransactions trying to get confirmations of mempool transactions, which made txgen crash on start. (#351) 2019-07-28 13:02:16 +03:00
Svarog
f0a675162c [NOD-253] Use spawn instead of go (#349) 2019-07-22 12:06:13 +03:00
Svarog
7a4deb6f18 [NOD-251] Use bluest parent anywhere validating difficulty, instead of selectedTip (#348) 2019-07-17 14:46:58 +03:00
Svarog
96842353de [NOD-250] Calculate UTXOCommitment when loading DAG + add UTXOCommitment to getBlockDagInfo (#347) 2019-07-16 16:53:28 +03:00
Svarog
5ce8875ce0 [NOD-243] Optimize validateGasLimit (#346)
* [NOD-243] Optimize validateGasLimit to only remember the current subnetwork gasUsage

* [NOD-243] Fix a typo

* [NOD-243] Rephrased comment
2019-07-16 11:07:58 +03:00
Ori Newman
812819e92f [NOD-248] Implement waitgroup to enable waiting while adding (#345)
* [NOD-248] Implement waitgroup to enable waiting while adding

* [NOD-248] fix waitGroup.done() error message

* [NOD-248] atomically read wg.counter

* [NOD-248] return lowPriorityMutex
2019-07-14 18:50:09 +03:00
Ori Newman
5cb536643e [NOD-236] Add secondary address to txgen (#341)
* [NOD-236] Add secondary address to txgen

* [NOD-236] Add description to secondary address cli argument

* [NOD-236] Remove unnecessary empty line
2019-07-14 11:26:32 +03:00
Ori Newman
4c6b8969d3 [NOD-245] Increase MaxInvPerMsg and MaxBlocksPerMsg to 65536 (#343)
* [NOD-245] Increase MaxInvPerMsg and MaxBlocksPerMsg to 65536

* [NOD-245] Fix MaxInvPerMsg to 1 << 16
2019-07-14 10:47:41 +03:00
Svarog
8ccc63752c [NOD-246] Increase RequestTimeout in mining simulator (#344) 2019-07-14 10:36:55 +03:00
Ori Newman
1088b69616 [NOD-239] Use custom priority mutex for utxo diff store (#340)
* [NOD-239] Use custom priority mutex for utxo diff store

* [NOD-239] Add shared slice to TestMutex

* [NOD-239] Add TestHighPriorityReadLock

* [NOD-239] Change comments

* [NOD-239] Rename LowPriorityLock -> LowPriorityWriteLock

* [NOD-239] Rename lock functions to write lock

* [NOD-239] Make TestHighPriorityReadLock use channels
2019-07-14 10:17:26 +03:00
Ori Newman
541119dda2 [NOD-238] Check if the incoming block is the newest orphan (#339) 2019-07-08 10:31:00 +03:00
stasatdaglabs
7400eabc6d [NOD-233] Fixed iteration order when iterating over the blockIndex bucket. (#338) 2019-07-04 18:29:56 +03:00
stasatdaglabs
c3c429494f [NOD-228] Added JSONRPCifyer to the project and created a Dockerfile for it. (#337) 2019-07-04 11:16:05 +03:00
stasatdaglabs
6d20202354 [NOD-222] Use accepting block blue score instead of containing block blue score for sequence lock and block maturity (#333)
* [NOD-222] Added constant: UnacceptedBlueScore.

* [NOD-222] Made it so that block transactions always have UnacceptedBlueScore.

* [NOD-222] Implemented updating unaccepted UTXO entries with accepted ones in the virtual.

* [NOD-222] Fixed an unclear comment.

* [NOD-222] Fixed diffFromAcceptanceData not receiving the right blue score.

* [NOD-222] Fixed various issues with the implementation. It appears to work now.

* [NOD-222] Removed debug logs.

* [NOD-222] Fixed tests that relied on utxoCollection.String().

* [NOD-222] Fixed TestChainedTransactions.

* [NOD-222] Fixed tests that relied on GetVirtualFromParentsForTest.

* [NOD-222] Fixed having identical entries in toAdd and toRemove.

* [NOD-222] Fixed logic in diffFrom that I previously broke.

* [NOD-222] Fixed a wrong check.

* [NOD-222] Figured out the magical invocation to make everything work.

* [NOD-222] Fixed blockDB tests.

* [NOD-222] Removed debug method.

* [NOD-222] Fixed comments related to setting coinbase maturity to 0.

* [NOD-222] Fixed a typo in a comment.

* [NOD-222] Added a comment that explains the new addition in GetVirtualFromParentsForTest.

* [NOD-222] Added a comment to DiffUTXOSet.Get().

* [NOD-222] Fixed a nuance in DiffUTXOSet.containsInputs.

* [NOD-222] Replaced nonsense in GetVirtualFromParentsForTest with diffFromAcceptanceData.

* [NOD-222] Renamed newVirtualUTXO -> newVirtualPastUTXO.

* [NOD-222] Fixed a comment.

* [NOD-222] Extracted checking utxoCollection with blueScore to a method.

* [NOD-222] Added tests where the same entry is in both toAdd and toRemove.

* [NOD-222] Used Add/RemoveEntry inside diffFromAcceptedTx.

* [NOD-222] Removed superfluous test for UnacceptedBlueScore.

* [NOD-222] Added/Updated comments.

* [NOD-222] Added tests to TestUTXODiffRules.

* [NOD-222] Added appropriate protection against impossible "from"s in diffFrom.

* [NOD-222] Added a comment explaining why we diffFrom acceptanceData in verifyAndBuildUTXO.

* [NOD-222] Fixed comments and equal() in utxoset.
2019-07-02 16:28:54 +03:00
Ori Newman
d6297a3192 [NOD-225] Finalize nodes below finality point (#335)
* [NOD-225] Finalize nodes below finality point

* [NOD-225] finalizeNodesBelowFinalityPoint only if dag.lastFinalityPoint is changed

* [NOD-225] change comment in validateParents

* [NOD-225] add string to ErrInvalidParentsRelation error

* [NOD-225] Change comment in validateParents

* [NOD-225] Change comment in validateParents

* [NOD-225] change comment in validateParents

* [NOD-225] Delete diff data from db directly from finalizeNodesBelowFinalityPoint

* [NOD-225] Refactor updateFinalityPoint
2019-07-02 16:10:33 +03:00
Ori Newman
e2f8d4e0aa [NOD-232] Remove diff and diffChild from blockNode (#336) 2019-07-02 11:01:41 +03:00
stasatdaglabs
589763e8ec [NOD-226] Fix comments around BlockLocator (#334)
* [NOD-226] Corrected blockLocator-related comments.

* [NOD-226] Fixed "current tips" -> "selected tip".
2019-06-30 12:34:53 +03:00
Ori Newman
c14c64d534 [NOD-224] Make P2PK and raw Multisig non-standard (#332) 2019-06-27 12:44:22 +03:00
Ori Newman
f7f44995d6 [NOD-215] implement difficulty adjustment algorithm (#331)
* [NOD-215] Implement difficulty adjustment algorithm

* [NOD-215] Handle blocks with genesis parent, and fix adjustment factor calculation

* [NOD-215] Fix tests

* [NOD-215] fix calcNextRequiredDifficulty

* [NOD-215] Add TestDifficulty

* [NOD-215] Fix delay to be positive, and add tests for delayed blocks

* [NOD-215] Split calcBlockWindowMinMaxAndMedianTimestamps to two functions

* [NOD-215] Make explicit loop for padding blue block window with genesis

* [NOD-215] Name return values

* [NOD-215] Fix delay != 0 error messages

* [NOD-215] Fix comments

* [NOD-215] Fix blueBlockWindow

* [NOD-215] Add TestBlueBlockWindow

* [NOD-215] Rename PowLimit -> PowMax

* [NOD-215] Fix delay != 0 error messages

* [NOD-215] Move PowMaxBits to BlockDAG

* [NOD-215] Make blockWindow type

* [NOD-215] Make blueBlockWindow always pad with genesis

* [NOD-215] Remove redundant line in checkWindowIDs

* [NOD-215] Make medianTimestamp return error for empty window
2019-06-26 15:47:39 +03:00
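The algorithm described above collects a window of blue blocks (padding with genesis while the DAG is short), looks at the window's timestamps, and adjusts the target according to how fast the window was actually produced. The sketch below is an illustrative window-based retarget in that spirit; the averaging, rounding, and constants are assumptions rather than kaspad's exact rule:

```go
package main

import (
	"fmt"
	"math/big"
)

// windowBlock is a hypothetical view of a block in the difficulty window; the
// real algorithm works on blue blocks selected from the DAG.
type windowBlock struct {
	timestamp int64    // unix seconds
	target    *big.Int // PoW target the block was mined against
}

// nextTarget averages the window's targets and scales by how fast the window
// was actually produced versus the expected pace. Assumes a non-empty window.
func nextTarget(window []windowBlock, targetTimePerBlock int64) *big.Int {
	minTime, maxTime := window[0].timestamp, window[0].timestamp
	sum := new(big.Int)
	for _, b := range window {
		if b.timestamp < minTime {
			minTime = b.timestamp
		}
		if b.timestamp > maxTime {
			maxTime = b.timestamp
		}
		sum.Add(sum, b.target)
	}
	avgTarget := new(big.Int).Div(sum, big.NewInt(int64(len(window))))

	actualSpan := maxTime - minTime
	expectedSpan := targetTimePerBlock * int64(len(window)-1)
	// Higher target == easier. Blocks arriving too fast shrink the target.
	next := new(big.Int).Mul(avgTarget, big.NewInt(actualSpan))
	return next.Div(next, big.NewInt(expectedSpan))
}

func main() {
	base := big.NewInt(1_000_000)
	window := []windowBlock{
		{timestamp: 0, target: base},
		{timestamp: 5, target: base},
		{timestamp: 10, target: base}, // 5s per block instead of the expected 10s
	}
	fmt.Println(nextTarget(window, 10)) // 500000: target halves, difficulty doubles
}
```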
Ori Newman
263737b3fb [NOD-196] Move coinbase scriptPubKey to payload (#330)
* [NOD-196] Move coinbase scriptPubKey to payload (no tests) (#311)

* [NOD-196] Move coinbase scriptPubKey to payload

* [NOD-196] Rename SubnetworkID.IsFull to SubnetworkID.IsBuiltIn

* [NOD-196] Fix comments

* [NOD-196] Add block subsidy to fee transaction

* [NOD-196] Fix comments

* [NOD-217] Merge coinbase and fee transaction (#328)

* [NOD-196] Fix tests

* [NOD-196] Fix tests

* [NOD-217] Add error to getBluesFeeData

* [NOD-217] Merge Coinbase and fee transaction

* [NOD-217] Format project

* [NOD-217] Remove OpTrue default for mining.NewBlockTemplate

* [NOD-196] Format project

* [NOD-217] Add missing space before comment

* [NOD-196] Change MaxCoinbasePayloadLen to 150
2019-06-17 17:43:13 +03:00
Svarog
0c5f3d72bd [NOD-223] Removed Nulldata as standard tx type (#329)
* [NOD-223] Removed Nulldata as standard tx type

* [NOD-223] Removed redundant space
2019-06-16 16:31:38 +03:00
stasatdaglabs
ffd886498a [NOD-208] Make block reward maturity use the same mechanism as confirmations (#327)
* [NOD-208] Added blockBlueScore to UTXOEntry.

* [NOD-208] Added blueBlockScore to NewUTXOEntry.

* [NOD-208] Fixed compilation errors in policy, utxoset, and dag tests.

* [NOD-208] Changed validateBlockRewardMaturity and CheckTransactionInputsAndCalulateFee to use blueScore.

* [NOD-208] Changed CalcBlockSubsidy to use blueScore.

* [NOD-208] Changed SequenceLockActive to use blueScore.

* [NOD-208] Removed ExtractCoinbaseHeight.

* [NOD-208] Removed reference to block height in ensureNoDuplicateTx.

* [NOD-208] Changed IsFinalizedTransaction to use blueScore.

* [NOD-208] Fixed merge errors.

* [NOD-208] Made UTXOEntry serialization use blueScore.

* [NOD-208] Changed CalcPriority and calcInputValueAge to use blueScore.

* [NOD-208] Changed calcSequenceLock to use blueScore.

* [NOD-208] Removed blockChainHeight from UTXOEntry.

* [NOD-208] Fixed compilation errors in feeEstimator. Fixed a bug in the test pool harness.

* [NOD-208] Fixed oldestChainBlockWithBlueScoreGreaterThan not handling an extreme case.

* [NOD-208] Fixed TestDiffFromTx.

* [NOD-208] Got rid of priority and support of free transactions.

* [NOD-208] Fixed TestProcessTransaction.

* [NOD-208] Fixed TestTxFeePrioHeap.

* [NOD-208] Fixed TestAddrIndex and TestFeeEstimatorCfg.

* [NOD-208] Removed unused rateLimit parameter from ProcessTransaction.

* [NOD-208] Fixed tests that rely on CreateTxChain.

* [NOD-208] Fixed tests that rely on CreateSignedTxForSubnetwork.

* [NOD-208] Fixed TestFetchTransaction.

* [NOD-208] Fixed TestHandleNewBlock. Fixed HandleNewBlock erroneously processing fee transactions.

* [NOD-208] Fixed TestTxIndexConnectBlock.

* [NOD-208] Removed the use of Height() from the fee estimator.

* [NOD-208] Removed unused methods from rpcwebsocket.go.

* [NOD-208] Removed Height from util.Block.

* [NOD-208] Removed ErrForkTooOld. It doesn't make sense in a DAG.

* [NOD-208] Made blockHeap use blueScore instead of height.

* [NOD-208] Removed fee estimator.

* [NOD-208] Removed DAG.Height.

* [NOD-208] Made TestAncestorErrors test chainHeight instead of height.

* [NOD-208] Fixed a couple of comments that were still speaking about block height.

* [NOD-208] Replaced all uses of HighestTipHash with SelectedTipHash.

* [NOD-208] Remove blockNode highest and some remaining erroneous uses of height.

* [NOD-208] Fixed a couple of comments. Fixed outPoint -> outpoint merge error.

* [NOD-208] Fixed a couple more comments.

* [NOD-208] Used calcMinRequiredTxRelayFee instead of DefaultMinRelayTxFee for mempool tests.

* [NOD-208] Renamed mempool Config BestHeight to DAGChainHeight.

* [NOD-208] Fixed a bug in oldestChainBlockWithBlueScoreGreaterThan. Made calcSequenceLock use the node's selected parent chain rather than the virtual block's.

* [NOD-208] Removed chainHeight from blockNode String().
Renamed checkpointsByHeight to checkpointsByChainHeight and prevCheckpointHeight to prevCheckpointChainHeight.
Removed reference to chainHeight in blockIndexKey.
Fixed comments in dagio.go.

* [NOD-208] Removed indexers/blocklogger.go, as no one was using it.

* [NOD-208] Made blocklogger.go log blueScore instead of height.

* [NOD-208] Fixed typo.

* [NOD-208] Fixed comments, did minor renaming.

* [NOD-208] Made a "common sense" wrapper around sort.Search.

* [NOD-208] Fixed comment in SearchSlice.
2019-06-16 14:12:02 +03:00
Svarog
76f5619de7 [NOD-211] Add concurrent-safe version of BlockConfirmationsByHash (#326) 2019-06-10 13:30:16 +03:00
Svarog
35703e7956 [NOD-218] Fix the order of txgen log message arguments (#325) 2019-06-06 16:10:21 +03:00
Ori Newman
29231d8d14 [NOD-213] add customization to txgen (#324)
* [NOD-213] Add customization to txgen

* [NOD-213] Add fee rate as an argument

* [NOD-213] Don't delay transaction emission if there's no need

* [NOD-213] enqueueTransactions -> queueTransactions

* [NOD-213] reuse delay variable

* [NOD-213] Add ExtractGasLimit function

* [NOD-213] Use time.Ticker in sendTransactionLoop
2019-06-06 14:01:28 +03:00
Svarog
396842ae40 [NOD-207] Rename any place that says 'OutPoint' to 'Outpoint' (#323)
* [NOD-207] Rename any place that says 'OutPoint' to 'Outpoint'

* [NOD-207] Fix any place that says output point
2019-06-05 16:23:57 +03:00
Svarog
072c753323 [NOD-216] Revert implicit fee transaction (#322)
* Revert "[NOD-214] Remove Fee transaction from addrindex (#321)"

This reverts commit e4b2d869d4.

* Revert "[NOD-195] Make fee tx implicit (#315)"

This reverts commit ccca580a4b.
2019-06-05 12:54:51 +03:00
Ori Newman
6250342b86 [NOD-205] Reimplement txgen (#320)
* [NOD-205] Reimplement txgen

* [NOD-205] remove prev outpoints of all initial transactions

* [NOD-205] break txloop into smaller functions

* [NOD-205] Limit collectTransactions iterations

* [NOD-205] Use requiredConfirmations constant instead of inline number

* [NOD-205] Rename wTx -> walletTx

* [NOD-205] Remove handleNewBlock

* [NOD-205] Fix search and replace error
2019-06-04 18:06:35 +03:00
Ori Newman
e4b2d869d4 [NOD-214] Remove Fee transaction from addrindex (#321) 2019-06-04 16:12:00 +03:00
Svarog
ccca580a4b [NOD-195] Make fee tx implicit (#315)
* [NOD-195] Made fee tx implicit

* [NOD-195] Removed redundant checks for fee transactions

* [NOD-195] Add fee tx data into acceptance data and fee data

* [NOD-195] Fix some tests

* [NOD-195] Update Block100000 with new data

* [NOD-195] Fixed remaining tests

* [NOD-195] Save and load feeTx to/from database

* [NOD-195] Remove DisconnectBlock methods from indexers, since they are not used anywhere

* [NOD-195] Add fee tx to addrindex

* [NOD-195] Don't populate inputs for fee transactions

* [NOD-195] Delete feeTxBucket in removeDAGState

* [NOD-195] Got rid of util.FeeTRansactionIndex
2019-06-03 17:30:57 +03:00
stasatdaglabs
84970a8378 [NOD-201] Create AddSubnetwork cli tool (#319)
* [NOD-201] Implemented the AddSubnetwork CLI tool.

* [NOD-201] Fixed various bugs in AddSubnetwork.

* [NOD-201] Fixed mempool maybeAcceptTransaction verifying gasLimit for a subnetwork registry transaction.

* [NOD-201] Fixed serialization/deserialization bugs in addrIndex.

* [NOD-201] Fixed BlockConfirmationsByHash not handling the zeroHash.

* [NOD-201] Used btclog instead of go log.

* [NOD-201] Made gasLimit a command-line flag. Made waitForSubnetworkToBecomeAccepted only return an error.

* [NOD-201] Filtered out mempool transactions.

* [NOD-201] Fixed embarrassing typos.

* [NOD-201] Added subnetwork registry tx fee + appropriate cli flag.

* [NOD-201] Skipped TXOs that can't pay for registration.
2019-06-03 15:44:43 +03:00
Ori Newman
901bde1fd4 [NOD-202] undo createDAGState if blockdag new fails (#318)
* [NOD-204] Add UTXOCommitment to GetBlockTemplateResult

* [NOD-204] Add UTXOCommitment to GetBlockTemplateResult

* [NOD-206] Avoid leaking blocks from previous miner when switching miners

* [NOD-202] Undo createDAGState if blockdag.New fails

* [NOD-202] Fix gofmt errors
2019-05-30 18:14:27 +03:00
Ori Newman
33a4183bfa [NOD-206] Avoid leaking blocks from previous miner when switching miners (#317)
* [NOD-204] Add UTXOCommitment to GetBlockTemplateResult

* [NOD-204] Add UTXOCommitment to GetBlockTemplateResult

* [NOD-206] Avoid leaking blocks from previous miner when switching miners
2019-05-30 17:25:53 +03:00
Ori Newman
0bc6e5bc92 [NOD-204] Add utxo commitment to get block template result (#316)
* [NOD-204] Add UTXOCommitment to GetBlockTemplateResult

* [NOD-204] Add UTXOCommitment to GetBlockTemplateResult
2019-05-30 16:59:26 +03:00
stasatdaglabs
8323e468da [NOD-200] Add GetSubNetwork command to JSON-RPC (#314)
* [NOD-200] Implemented the GetSubnetwork JSON-RPC command.

* [NOD-200] Fixed a copy+paste error in a comment.
2019-05-29 17:59:18 +03:00
Ori Newman
7912fe4c35 [NOD-203] Add UTXO commitment to devnet genesis (#313) 2019-05-29 15:06:10 +03:00
stasatdaglabs
266e471941 [NOD-190] Implement Confirmations counting algorithm (#312)
* [NOD-192] Add method to compute confirmations of a single transaction (#306)

* [NOD-192] Implemented txConfirmations.

* [NOD-192] Renamed acceptedBy -> acceptingBlock and ConfirmationsByHash -> BlockConfirmationsByHash.

* [NOD-194 + NOD-199] Update all JSON-RPC methods to use new methods for computing confirmations + Remove the x1.5 factor when counting confirmations in txgen (#309)

* [NOD-194] Connected JSON-RPC commands with new confirmations logic.

* [NOD-194] Fixed failing tests.

* [NOD-194] Removed x1.5 from isTxMatured.

* [NOD-194] Made isTxMatured panic if it receives nil confirmations.

* [NOD-194] Added isInMempool to RPC methods that require it.

* [NOD-194] Fixed a typo.

* [NOD-194] Made the declaration of isInMempool more clear.

* [NOD-194] Removed some unnecessary complexity from isTxMatured.

* [NOD-193] Update Tx-Index to accomodate correct Confirmations structure (#308)

* [NOD-193] Updated BlockID to be uint64 in txIndex and addrIndex.

* [NOD-193] Removed the inclusion of current block transactions to txsAcceptanceData.

* [NOD-193] Implemented writing to the tx index txs with the virtual as the accepting block.

* [NOD-193] Added test for txs accepted by the virtual block.

* [NOD-193] Removed the requirement for subnetwork registry transactions to be accepted.

* [NOD-194] Made the txsAcceptedByVirtual part of txIndex in-memory.

* [NOD-193] Optimized txsAcceptedByVirtual initialization.

* [NOD-193] Fixed weird loop in txsAcceptedByVirtual initialization.

* [NOD-190] Fixed merge errors.
2019-05-29 13:09:16 +03:00
stasatdaglabs
4e6edd4ffd [NOD-189] Optimize UTXOCollection operations (#307)
* [NOD-189] Made UTXODiff WithDiff and DiffFrom allocate collections with appropriate sizes.
In mempool HandleNewBlock, Replaced removeTransaction loop with removeTransactions.

* [NOD-189] Removed code duplication between removeTransaction and removeTransactions.

* [NOD-189] Fixed a merge error.

* [NOD-189] Fixed another merge error.

* [NOD-189] Renamed removeRedeemers to removeDependants.

* [NOD-189] Removed superfluous check inside removeTransactionWithDiff.

* [NOD-189] Added a comment to removeTransactions detailing what it optimizes.

* [NOD-189] Added documentation to removeTransactionWithDiff and split it into smaller methods.
2019-05-29 11:46:55 +03:00
Ori Newman
7069d173c6 [NOD-180] Add validation of utxo commitments (#310)
* [NOD-172] Port ECMH from bchd

* [NOD-172] Fix hdkeychain.TestErrors and add btcec.TestRecoverCompact

* [NOD-172] Make ECMH immutable

* [NOD-172] Fix gofmt errors

* [NOD-172] Add TestMultiset_NewMultisetFromDataSlice and fix Point to be immutable

* [NOD-172] Fix gofmt errors

* [NOD-172] Add test for checking that the Union of a multiset and its inverse is zero

* [NOD-179] Add ECMH Point to all UTXO-structs

* [NOD-179] Fix utxo set tests

* [NOD-179] Fix mempool tests

* [NOD-179] Remove RemoveTxOuts

* [NOD-179] Move serializeBlockUTXODiffData to the top of the file

* [NOD-179] Fix serializeBlockUTXODiffData comment format

* [NOD-179] Fix AddTx comment and name return values

* [NOD-180] Validate utxo commitments

* [NOD-179] Fix TestAcceptingBlock and TestConfirmations to not use the block hash as phantom break even

* [NOD-180] Fix typo

* [NOD-180] move most of the logic in calcUTXOCommitment to UTXOSet.WithTransactions

* [NOD-180] Optionally return error when a transaction in WithTransactions is double spent

* [NOD-180] Rename allowDoubleSpends to ignoreDoubleSpends
2019-05-28 11:33:11 +03:00
Ori Newman
aa51b5f071 [NOD-179] Added ECMH-Multiset to all UTXO structs (#304)
* [NOD-172] Port ECMH from bchd

* [NOD-172] Fix hdkeychain.TestErrors and add btcec.TestRecoverCompact

* [NOD-172] Make ECMH immutable

* [NOD-172] Fix gofmt errors

* [NOD-172] Add TestMultiset_NewMultisetFromDataSlice and fix Point to be immutable

* [NOD-172] Fix gofmt errors

* [NOD-172] Add test for checking that the Union of a multiset and its inverse is zero

* [NOD-179] Add ECMH Point to all UTXO-structs

* [NOD-179] Fix utxo set tests

* [NOD-179] Fix mempool tests

* [NOD-179] Remove RemoveTxOuts

* [NOD-179] Move serializeBlockUTXODiffData to the top of the file

* [NOD-179] Fix serializeBlockUTXODiffData comment format

* [NOD-179] Fix AddTx comment and name return values
2019-05-23 15:11:42 +03:00
stasatdaglabs
da7c9c7dfb [NOD-191] Added .acceptingBlock and .confirmations methods to blockNode (#305)
* [NOD-191] Added selectedPathChainSlice to virtualBlock.

* [NOD-191] Implemented acceptingBlock().

* [NOD-191] Implemented confirmations().

* [NOD-191] Added selectedPathChainSlice tests to TestSelectedPath.

* [NOD-191] Fixed a bug in acceptingBlock(). Wrote tests for confirmations().

* [NOD-191] Wrote tests for acceptingBlock().

* [NOD-191] Added test to make sure that acceptingBlock(tip) returns the virtual block.

* [NOD-191] Added a panic if we somehow feed a childless block that isn't the virtual to acceptingBlock.

* [NOD-191] Fixed comments.

* [NOD-191] Fixed a bug in acceptingBlock. Added red block tests for acceptingBlock.

* [NOD-191] Added red block tests for confirmations.

* [NOD-191] Fixed misleading comment and error message.
2019-05-23 10:57:03 +03:00
Svarog
ec10346e79 [NOD-184] Use lock-less NextAcceptedIDMerkleRoot in NewBlockTemplate (#302) 2019-05-16 17:33:04 +03:00
stasatdaglabs
2481871c10 [NOD-175] When resolving orphans - don't send inv (#300)
* [NOD-175] Added BlockAddedNotificationData and sent it instead of just a block on BlockAdded.

* [NOD-175] Added BFWasUnorphaned and raised it when an unorphaned block was to be accepted.

* [NOD-175] Fixed a typo.

* [NOD-175] Made it so that only the mempool gets updated if we're not current or the block was just now unorphaned.
2019-05-16 13:05:30 +03:00
Svarog
ac1fd11a42 [NOD-182] Added AcceptedIDMerkleRoot to GetBlockTemplateResult (#301) 2019-05-16 12:44:25 +03:00
stasatdaglabs
b1d3ca0206 [NOD-177] Remove idMerkleRoot (#299)
* [NOD-177] Removed references to idMerkleRoot.

* [NOD-177] Generated new genesis hashes.

* [NOD-177] Generated new blk_ blocks.

* [NOD-177] Fixed TestHaveBlock.

* [NOD-177] Fixed The rest of the tests.

* [NOD-177] Fixed a couple of comments and a duplicate test.

* [NOD-177] Fixed blocks1-256.bz2.
2019-05-15 16:16:57 +03:00
Ori Newman
5c5491e1e4 [NOD-172] Port ECMH from bchd and fix Remove to preserve commutativity (#292)
* [NOD-172] Port ECMH from bchd

* [NOD-172] Fix hdkeychain.TestErrors and add btcec.TestRecoverCompact

* [NOD-172] Make ECMH immutable

* [NOD-172] Fix gofmt errors

* [NOD-172] Add TestMultiset_NewMultisetFromDataSlice and fix Point to be immutable

* [NOD-172] Fix gofmt errors

* [NOD-172] Add test for checking that the Union of a multiset and its inverse is zero
2019-05-15 16:07:37 +03:00
Evgeny Khirin
8dedca693e [NOD-164 + NOD-167] AcceptedIDMerkleRoot validation and newBlockTemplate (#295)
* [NOD-164] Added validation routine

* [NOD-167] Extracted acceptedIDMerkleRoot calculation to its own method and implemented NextAcceptedIDMerkleRoot.

* [NOD-164] Fixed TestValidateFeeTransaction.

* [NOD-164] Fixed TestFinality.

* [NOD-164] Fixed blk_ tests.

* [NOD-164] Fixed if -> iff in a comment.

* [NOD-164] Minor style changes in comments.

* [NOD-164] Moved validateAcceptedIDMerkleRoot to before its population with the block's own transactions.
Replaced heavy call to verifyAndBuildUTXO in NextBlockFeeTransaction and NextAcceptedIDMerkleRoot with a call to pastUTXO on the virtual.

* [NOD-164] Fixed erroneous comment.

* [NOD-164] Inserted the logic from buildAndSortAcceptedTxs into calculateAcceptedIDMerkleRoot, since the former was meaningless on its own.

* [NOD-164] Changed to loop over txsAcceptanceData instead of over node.blues.
2019-05-14 15:31:23 +03:00
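
The commit above derives acceptedIDMerkleRoot from the IDs of the transactions a block accepts. Here is a toy sketch of that calculation, assuming the accepted IDs are sorted and hashed pairwise into a merkle root; the hashing scheme (plain SHA-256, duplicating the last node on odd levels) is illustrative only and does not match kaspad's actual serialization.

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"sort"
)

// calcAcceptedIDMerkleRoot sketches the idea: collect the IDs of all
// transactions accepted by a block, sort them, and build a merkle root over
// the sorted list.
func calcAcceptedIDMerkleRoot(acceptedTxIDs []string) string {
	sort.Strings(acceptedTxIDs)

	// Hash every transaction ID to form the leaf level.
	level := make([][]byte, 0, len(acceptedTxIDs))
	for _, id := range acceptedTxIDs {
		h := sha256.Sum256([]byte(id))
		level = append(level, h[:])
	}

	// Combine pairs until a single root remains.
	for len(level) > 1 {
		if len(level)%2 != 0 {
			level = append(level, level[len(level)-1]) // duplicate last node
		}
		next := make([][]byte, 0, len(level)/2)
		for i := 0; i < len(level); i += 2 {
			h := sha256.Sum256(append(level[i], level[i+1]...))
			next = append(next, h[:])
		}
		level = next
	}
	if len(level) == 0 {
		return ""
	}
	return hex.EncodeToString(level[0])
}

func main() {
	fmt.Println(calcAcceptedIDMerkleRoot([]string{"txid-b", "txid-a", "txid-c"}))
}
```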
Svarog
ca0619bbcf [NOD-176] Moved daghash from dagconfig to util (#298)
* [NOD-176] Moved daghash from dagconfig to util

* [NOD-176] Fixed order of includes with gofmt
2019-05-14 14:05:36 +03:00
Ori Newman
d7a2ab52a1 [NOD-173] Add UTXO commitment to block header (#297) 2019-05-13 16:23:28 +03:00
Ori Newman
3b72aafbc6 [NOD-174] Measure difficulty window with chain height instead of height (#296) 2019-05-12 18:14:35 +03:00
stasatdaglabs
dfd12cdaac [NOD-165] In getBlockVerboseResult and getBlockHeaderVerboseResult, renamed merkleRoot to HashMerkleRoot and added idMerkleRoot and acceptedIdMerkleRoot. (#294) 2019-05-12 15:05:33 +03:00
Evgeny Khirin
08d94c7a47 [NOD-163] Added BlockHeader.AcceptedIDMerkleRoot (#293) 2019-05-12 11:53:47 +03:00
Ori Newman
b7b41f1a94 [NOD-159] Wrap all goroutines to handle panics (#290)
* [NOD-159] Wrap all goroutines to handle panics

* [NOD-159] Fix gofmt errors

* [NOD-159] Add comment to HandlePanic

* [NOD-159] Merge panics and gowrapper packages

* [NOD-159] Added missing initialization
2019-05-07 16:13:06 +03:00
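
The goroutine-wrapping commit above routes every `go` statement through a helper that recovers from panics. A minimal sketch of that pattern is below; the real handler would log the panic and trigger an orderly shutdown rather than just print it, and the function name is only illustrative.

```go
package main

import (
	"fmt"
	"time"
)

// spawn launches f on its own goroutine and recovers from any panic it
// raises, so a single misbehaving goroutine cannot crash the whole process
// silently.
func spawn(f func()) {
	go func() {
		defer func() {
			if err := recover(); err != nil {
				fmt.Println("goroutine panicked:", err)
			}
		}()
		f()
	}()
}

func main() {
	spawn(func() { panic("something went wrong") })
	time.Sleep(100 * time.Millisecond) // give the goroutine time to run
}
```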
stasatdaglabs
42109ec4d5 [NOD-162] Fixed calculateNodeHeight not handling the genesis block. (#291) 2019-05-07 14:36:27 +03:00
Ori Newman
39ccc4b225 [NOD-166] Remove dag.RLock() from addTransaction (#289) 2019-05-06 17:42:39 +03:00
Ori Newman
8acc738b27 [NOD-146] Remove unnecessary dag notifications (#288) 2019-05-06 11:19:19 +03:00
stasatdaglabs
945b3f8fbf [NOD-13] Notify failing builds in Telegram (#287)
* [NOD-13] Added notify_telegram to deploy.sh.

* [NOD-13] Made a test fail for testing.

* [NOD-13] Added some temporary logging.

* [NOD-13] Wrote a nice message for the bot to send.

* [NOD-13] Made the message nicer.

* [NOD-13] Made the message nicer still.

* [NOD-13] Added the build log as an attachment.

* [NOD-13] Actually added the build log as an attachment.

* [NOD-13] Added a delay to allow the build log to properly flush.

* [NOD-13] Disowned notify_telegram.

* [NOD-13] Disowning doesn't work. Using the "at" command instead.

* [NOD-13] Properly using the at command.

* [NOD-13] Actually properly using the at command.

* [NOD-13] Added a couple of prints to see whether the script is even being called.

* [NOD-13] More printouts...

* [NOD-13] Added a command to start atd if it stopped for some reason.

* [NOD-13] Added slashes in multiline echo command.

* [NOD-13] Added quotes where required and removed debug comments.

* [NOD-13] Revert "[NOD-13] Made a test fail for testing."

This reverts commit 9701e30e

* [NOD-13] Added some comments.
2019-05-05 17:12:55 +03:00
Evgeny Khirin
a73f218402 [NOD-150] Removed blockNode.workSum (#286)
* [NOD-150] Removed blockNode.workSum

* [NOD-150] Fixed comment
2019-05-05 16:02:07 +03:00
Ori Newman
eded4c2285 [NOD-161] Add flushDbCache rpc command (#285)
* [NOD-161] Add flushDbCache rpc command

* [NOD-161] Fix tests
2019-05-02 17:08:00 +03:00
Svarog
33036278ac [NOD-144] Use chainHeight in SelectedAncestor, and update all logic that uses it (#281)
* [NOD-144] Use chainHeight in SelectedAncestor, and update all logic that uses it

* [NOD-144] Moved UnminedHeight to blockdag, and updated all references
2019-05-02 16:50:54 +03:00
Ori Newman
6163d3b4ec [NOD-87] Rename hash to txID when necessary (#283)
* [NOD-87] Rename hash to txID when necessary

* [NOD-87] Fix NewTxIDFromStr error messages in msgtx_test.go
2019-05-02 14:59:59 +03:00
Evgeny Khirin
22046bebc5 [NOD-93] Removed "make go vet happy" comments (#282) 2019-05-02 13:45:40 +03:00
Evgeny Khirin
c67d4507b6 [NOD-132] Fixed getaddr command summary (#278) 2019-05-02 12:06:23 +03:00
Ori Newman
ea5e18ea11 [NOD-96] Convert txid to pointer where possible (#279)
* [NOD-96] Convert txid to pointer where possible

* [NOD-96] Make msgTx.TxID return a pointer

* [NOD-96] observedTransaction.id -> observedTransaction.txID
2019-05-02 10:54:18 +03:00
stasatdaglabs
1cc479dbf8 [NOD-152] Netsync correctly syncs only 500 first blocks (#276)
* [NOD-152] Stopped pushBlockMsg from sending tip inv to syncing nodes.

* [NOD-152] Fixed restartSyncIfNeeded not restarting if sync is needed.

* [NOD-152] Removed continueHash, as it is no longer required.
2019-05-01 18:01:58 +03:00
Ori Newman
b4e7b59e7b [NOD-155] Increase timeout in test.sh (#280) 2019-05-01 17:36:19 +03:00
Evgeny Khirin
8592ae9641 [NOD-126] Removed BlockHeader.SelectedParentHash() method (#274)
* [NOD-126] Removed BlockHeader.SelectedParentHash() method

* [NOD-126] Added TODO comments
2019-05-01 14:51:16 +03:00
Ori Newman
1362fc45e0 [NOD-148] Delete block index (#275) 2019-05-01 13:10:32 +03:00
stasatdaglabs
b34894e4da [NOD-140] Convert all logging to use btclog (#273)
* [NOD-140] Converted DNSSeeder to use btclog.

* [NOD-140] Converted MiningSimulator to use btclog.

* [NOD-140] Converted TxGen to use btclog.

* [NOD-140] Fixed log level in handlePanic in txgen.

* [NOD-140] Renamed logger to log everywhere. Removed superfluous flag-setting to go-log.
2019-05-01 11:15:45 +03:00
stasatdaglabs
30f5ebd6d1 [NOD-139] Fix the unorphaning mechanism (#271)
* [NOD-139] Made processOrphans not return an error if one of the parents is still missing.

* [NOD-139] Made addOrphanBlock and removeOrphanBlock process all parents instead of only the selected parent.

* [NOD-139] Made addOrphanBlock remove excess orphans by their timestamp rather than their discovery time. Fixed orphans being added more than once.

* [NOD-139] Simplified removal from slice in removeOrphanBlock.

* [NOD-139] Made check for no-orphans-left come before assignment to prevOrphans.

* [NOD-139] Added Timestamp() to util.Block.

* [NOD-139] Fixed merge errors.
2019-04-30 18:53:03 +03:00
Ori Newman
4292bcac72 [NOD-149] Store phantom values in the database (#270)
* [NOD-149] Store phantom values in the database

* [NOD-149] Explain when zero hash is used when serializing blockNode

* [NOD-149] make deserializeBlockNode return a blockNode

* [NOD-149] use blockNode initializer instead of lots of assignments
2019-04-30 17:42:26 +03:00
Evgeny Khirin
8683258e4a [NOD-151] Removed VerifyDAG RPC API (#272) 2019-04-30 13:22:29 +03:00
Svarog
e9ec8cd39c [NOD-142] Convert Height and ChainHeight to uint64 (#269)
* [NOD-142] Updated util.FastLog2Floor to work on uint64

* [NOD-142] Convert height and chainHeight to uint64

* [NOD-142] A couple fixes in comments of TestFastLog2Floor

* [NOD-142] Make spendableOutOffset uint64 too
2019-04-30 12:50:46 +03:00
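
With heights widened to uint64 in the commit above, util.FastLog2Floor also has to operate on 64-bit values. Below is a straightforward bit-shifting sketch of a floor(log2 n) helper for uint64; the real implementation may use a different technique, and the function name here is only a stand-in.

```go
package main

import "fmt"

// fastLog2Floor returns floor(log2(n)) for n > 0 by halving the shift width
// each step (32, 16, 8, 4, 2, 1) and accumulating the exponent.
func fastLog2Floor(n uint64) uint8 {
	var exp uint8
	for shift := uint8(32); shift > 0; shift >>= 1 {
		if n>>shift != 0 {
			n >>= shift
			exp += shift
		}
	}
	return exp
}

func main() {
	for _, n := range []uint64{1, 2, 3, 8, 1 << 40} {
		fmt.Printf("floor(log2(%d)) = %d\n", n, fastLog2Floor(n))
	}
}
```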
Evgeny Khirin
068a8d117d [NOD-145] Fixed deadlock in mempool (#268) 2019-04-29 11:57:50 +03:00
Ori Newman
83a012de12 [NOD-69] Delete BlockAddedNtfnMethod (#267) 2019-04-29 11:50:56 +03:00
Evgeny Khirin
f36ae25baf [NOD-121] Fix empty output script in fundTx (#266)
* [NOD-121] Fix empty output script in fundTx

* [NOD-121] Remove debugging stuff

* [NOD-121] Refactored minRelayTxFee
2019-04-29 11:19:45 +03:00
stasatdaglabs
298cda0617 Merge pull request #265 from daglabs/nod-143-get-block-template-bug
[NOD-143] Multiple fixes for GetBlockTemplate
2019-04-28 12:32:12 +03:00
Mike Zak
b9e3fff5d1 [NOD-143] Move check for sm.syncPeer==nil to syncManager.current() 2019-04-28 12:23:42 +03:00
Mike Zak
ed76e2c962 [NOD-143] SelectedAncestor now returns the first chain block under height if there's no chain block with exact height 2019-04-28 11:53:48 +03:00
Svarog
77fae7b522 [NOD-138] Request relayed blocks if not recent but no syncPeer available (which usually means everybody is on genesis) (#264) 2019-04-24 15:17:50 +03:00
stasatdaglabs
cd71e80eb3 [NOD-133] BTCD node connects multiple times to the same address (#263)
* [NOD-133] Added addrTrying.

* [NOD-133] Fixed infinite loop inside getAddress.

* [NOD-133] Reverted log level to trace.

* [NOD-133] Fixed failing test.

* [NOD-133] Added an explanation as to why devnet is exempt from same-CIDR checking.

* [NOD-133] Replaced config.DevNet with activeNetParams.AcceptUnroutable for same-CIDR checking.
2019-04-24 14:27:53 +03:00
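
Part of the duplicate-connection fix above is grouping candidate addresses by CIDR block so the node does not pick several outbound peers from the same network, with networks that accept unroutable addresses (such as devnet) exempt. A rough sketch of such a check follows; the /16 group width and the function name are assumptions for illustration only.

```go
package main

import (
	"fmt"
	"net"
)

// sameCIDRGroup reports whether two peer IPs fall inside the same /16 group,
// the kind of check an address manager can use to avoid selecting several
// peers from one network block. When unroutable addresses are accepted
// (e.g. devnet), the check is skipped entirely.
func sameCIDRGroup(a, b net.IP, acceptUnroutable bool) bool {
	if acceptUnroutable {
		return false // never reject on CIDR grounds
	}
	mask := net.CIDRMask(16, 32)
	return a.To4() != nil && b.To4() != nil &&
		a.Mask(mask).Equal(b.Mask(mask))
}

func main() {
	a := net.ParseIP("203.0.113.7")
	b := net.ParseIP("203.0.113.200")
	c := net.ParseIP("198.51.100.1")
	fmt.Println(sameCIDRGroup(a, b, false)) // true: same /16
	fmt.Println(sameCIDRGroup(a, c, false)) // false: different /16
	fmt.Println(sameCIDRGroup(a, b, true))  // false: check skipped
}
```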
Svarog
3f7c73f331 [NOD-135] Even if net params allow unroutable - don't allow local IPs (#262) 2019-04-24 11:52:19 +03:00
Svarog
4845a7f16c [NOD-131] Allow override of dnsseed by command line or config (#261)
* [NOD-131] Allow override of dnsseed by command line or config

* [NOD-131] Moved tor.go from connmgr to util/network, to prevent dependency loop

* [NOD-131] Typo fix

* [NOD-131] Clarify description for --dnsseed cli flag

* [NOD-131] Removed redundant line that somehow got into go.sum
2019-04-23 14:22:46 +03:00
stasatdaglabs
77fb901706 [NOD-129] Revert NOD-114 (#260) 2019-04-22 15:26:10 +03:00
Evgeny Khirin
d3e70810af [NOD-121] Do not handle transaction inputs for reward transactions in SearchRawTransactions RPC call (#258)
* [NOD-121] Do not handle transaction inputs for reward transactions in SearchRawTransactions RPC call

* [NOD-121] Do not get transaction inputs for fee transactions in SearchRawTransactions RPC call
2019-04-22 12:57:50 +03:00
Evgeny Khirin
daa4481282 [NOD-66] Transaction generator (#247)
* [NOD-66] Created TX generator

* [NOD-66] Created transaction generator

* [NOD-66] Improved TX generator against double spend. Created genaddr utility. Refactored

* [NOD-66] Save changes before branch switch

* [NOD-66] Use log package instead of fmt

* [NOD-66] Fixed/restored docker files

* [NOD-66] Changed according to new WithLock/NoLock convention
2019-04-21 15:05:03 +03:00
Ori Newman
a3735da12a [NOD-122] Fix timeout for get block template requests (#254)
* [NOD-122] Handle each message in rpcclient with a separate goroutine

* [NOD-122] Stop listening to new blocks when not mining

* [NOD-122] Made RPC logging in mining simulator more explicit + some styling enhancement
2019-04-21 10:11:36 +03:00
stasatdaglabs
311c96122e [NOD-119] Add a call to "go mod download" before copying all files in Dockerfiles. (#255) 2019-04-18 17:20:58 +03:00
Ori Newman
b612426ead [NOD-67] Unexport blockheap (#257) 2019-04-18 17:19:10 +03:00
Ori Newman
e99af346bf [NOD-120] Invert WithLock standard (#256) 2019-04-18 17:06:34 +03:00
Ori Newman
e22bc9af8f [NOD-115] add timeout to rpcclient requests (#252)
* [NOD-115] Add timeout to rpcclient requests

* [NOD-115] Add timeout of half a second to mining simulator requests

* [NOD-115] Remove redundant allocation of responseChan
2019-04-17 17:51:50 +03:00
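
The rpcclient timeout commit above bounds how long a request may wait for its response. The usual Go pattern for that is to select on the response channel and a timer at the same time; here is a minimal sketch, with illustrative names and the half-second value taken from the commit description rather than from the actual code.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// sendWithTimeout waits for a response on responseChan, but gives up and
// returns an error if the timer fires first.
func sendWithTimeout(responseChan <-chan string, timeout time.Duration) (string, error) {
	select {
	case resp := <-responseChan:
		return resp, nil
	case <-time.After(timeout):
		return "", errors.New("rpc request timed out")
	}
}

func main() {
	responseChan := make(chan string, 1)
	go func() {
		time.Sleep(2 * time.Second) // simulate a server that is too slow
		responseChan <- "block template"
	}()
	_, err := sendWithTimeout(responseChan, 500*time.Millisecond)
	fmt.Println(err) // rpc request timed out
}
```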
stasatdaglabs
89ca293dc1 [NOD-113] Added graceful shutdown to mining simulator. (#253) 2019-04-17 17:00:23 +03:00
stasatdaglabs
194ceace6f [NOD-71] Add support for Go modules (#251)
* [NOD-71] Replaced Gopkg.lock and Gopkg.toml with go.mod and go.sum.

* [NOD-71] Updated Dockerfiles to use go-modules instead of dep.
2019-04-17 13:45:29 +03:00
Ori Newman
a79c6cecdb [NOD-90] Update mining simulator to pull latest block template (#248)
* [NOD-90] Update mining simulator to pull latest block template

* [NOD-90] Refactor to reduce global state

* [NOD-90] Split onMinerSwitch func

* [NOD-90] Replace chooseClient with getRandomClient

* [NOD-90] Stop ranging over foundBlock to avoid code repetition
2019-04-17 12:19:14 +03:00
stasatdaglabs
c5827febf7 [NOD-114] On start, DNSSeeder should update its addresses more frequently (#250)
* [NOD-114] Added a minimum address amount to GetAddrs.

* [NOD-114] Added smallNetwork intervals for when the network is small.

* [NOD-114] Fixed bad minimum address calculation.
2019-04-15 15:57:43 +03:00
Ori Newman
7353a49469 [NOD-109] Reverse the order of .PastUTXO() and .RestoreUTXO() parameters (#249) 2019-04-15 10:37:06 +03:00
963 changed files with 68210 additions and 118921 deletions

.gitignore (vendored): 3 changed lines

@@ -2,7 +2,7 @@
 *~
 # Databases
-btcd.db
+kaspad.db
 *-shm
 *-wal
@@ -38,6 +38,7 @@ _testmain.go
 .vscode
 debug
 debug.test
+__debug_bin
 # CI
 version.txt

CHANGES: 955 lines removed (file deleted)

@@ -1,955 +0,0 @@
============================================================================
User visible changes for btcd
A full-node bitcoin implementation written in Go
============================================================================
Changes in 0.12.0 (Fri Nov 20 2015)
- Protocol and network related changes:
- Add a new checkpoint at block height 382320 (#555)
- Implement BIP0065 which includes support for version 4 blocks, a new
consensus opcode (OP_CHECKLOCKTIMEVERIFY) that enforces transaction
lock times, and a double-threshold switchover mechanism (#535, #459,
#455)
- Implement BIP0111 which provides a new bloom filter service flag and
hence provides support for protocol version 70011 (#499)
- Add a new parameter --nopeerbloomfilters to allow disabling bloom
filter support (#499)
- Reject non-canonically encoded variable length integers (#507)
- Add mainnet peer discovery DNS seed (seed.bitcoin.jonasschnelli.ch)
(#496)
- Correct reconnect handling for persistent peers (#463, #464)
- Ignore requests for block headers if not fully synced (#444)
- Add CLI support for specifying the zone id on IPv6 addresses (#538)
- Fix a couple of issues where the initial block sync could stall (#518,
#229, #486)
- Fix an issue which prevented the --onion option from working as
intended (#446)
- Transaction relay (memory pool) changes:
- Require transactions to only include signatures encoded with the
canonical 'low-s' encoding (#512)
- Add a new parameter --minrelaytxfee to allow the minimum transaction
fee in BTC/kB to be overridden (#520)
- Retain memory pool transactions when they redeem another one that is
removed when a block is accepted (#539)
- Do not send reject messages for a transaction if it is valid but
causes an orphan transaction which depends on it to be determined
as invalid (#546)
- Refrain from attempting to add orphans to the memory pool multiple
times when the transaction they redeem is added (#551)
- Modify minimum transaction fee calculations to scale based on bytes
instead of full kilobyte boundaries (#521, #537)
- Implement signature cache:
- Provides a limited memory cache of validated signatures which is a
huge optimization when verifying blocks for transactions that are
already in the memory pool (#506)
- Add a new parameter '--sigcachemaxsize' which allows the size of the
new cache to be manually changed if desired (#506)
- Mining support changes:
- Notify getblocktemplate long polling clients when a block is pushed
via submitblock (#488)
- Speed up getblocktemplate by making use of the new signature cache
(#506)
- RPC changes:
- Implement getmempoolinfo command (#453)
- Implement getblockheader command (#461)
- Modify createrawtransaction command to accept a new optional parameter
'locktime' (#529)
- Modify listunspent result to include the 'spendable' field (#440)
- Modify getinfo command to include 'errors' field (#511)
- Add timestamps to blockconnected and blockdisconnected notifications
(#450)
- Several modifications to searchrawtranscations command:
- Accept a new optional parameter 'vinextra' which causes the results
to include information about the outputs referenced by a transaction's
inputs (#485, #487)
- Skip entries in the mempool too (#495)
- Accept a new optional parameter 'reverse' to return the results in
reverse order (most recent to oldest) (#497)
- Accept a new optional parameter 'filteraddrs' which causes the
results to only include inputs and outputs which involve the
provided addresses (#516)
- Change the notification order to notify clients about mined
transactions (recvtx, redeemingtx) before the blockconnected
notification (#449)
- Update verifymessage RPC to use the standard algorithm so it is
compatible with other implementations (#515)
- Improve ping statistics by pinging on an interval (#517)
- Websocket changes:
- Implement session command which returns a per-session unique id (#500,
#503)
- btcctl utility changes:
- Add getmempoolinfo command (#453)
- Add getblockheader command (#461)
- Add getwalletinfo command (#471)
- Notable developer-related package changes:
- Introduce a new peer package which acts a common base for creating and
concurrently managing bitcoin network peers (#445)
- Various cleanup of the new peer package (#528, #531, #524, #534,
#549)
- Blocks heights now consistently use int32 everywhere (#481)
- The BlockHeader type in the wire package now provides the BtcDecode
and BtcEncode methods (#467)
- Update wire package to recognize BIP0064 (getutxo) service bit (#489)
- Export LockTimeThreshold constant from txscript package (#454)
- Export MaxDataCarrierSize constant from txscript package (#466)
- Provide new IsUnspendable function from the txscript package (#478)
- Export variable length string functions from the wire package (#514)
- Export DNS Seeds for each network from the chaincfg package (#544)
- Preliminary work towards separating the memory pool into a separate
package (#525, #548)
- Misc changes:
- Various documentation updates (#442, #462, #465, #460, #470, #473,
#505, #530, #545)
- Add installation instructions for gentoo (#542)
- Ensure an error is shown if OS limits can't be set at startup (#498)
- Tighten the standardness checks for multisig scripts (#526)
- Test coverage improvement (#468, #494, #527, #543, #550)
- Several optimizations (#457, #474, #475, #476, #508, #509)
- Minor code cleanup and refactoring (#472, #479, #482, #519, #540)
- Contributors (alphabetical order):
- Ben Echols
- Bruno Clermont
- danda
- Daniel Krawisz
- Dario Nieuwenhuis
- Dave Collins
- David Hill
- Javed Khan
- Jonathan Gillham
- Joseph Becher
- Josh Rickmar
- Justus Ranvier
- Mawuli Adzoe
- Olaoluwa Osuntokun
- Rune T. Aune
Changes in 0.11.1 (Wed May 27 2015)
- Protocol and network related changes:
- Use correct sub-command in reject message for rejected transactions
(#436, #437)
- Add a new parameter --torisolation which forces new circuits for each
connection when using tor (#430)
- Transaction relay (memory pool) changes:
- Reduce the default max number of allowed orphan transactions
to 1000 (#419)
- Add a new parameter --maxorphantx which allows the maximum number of
orphan transactions stored in the mempool to be specified (#419)
- RPC changes:
- Modify listtransactions result to include the 'involveswatchonly' and
'vout' fields (#427)
- Update getrawtransaction result to omit the 'confirmations' field
when it is 0 (#420, #422)
- Update signrawtransaction result to include errors (#423)
- btcctl utility changes:
- Add gettxoutproof command (#428)
- Add verifytxoutproof command (#428)
- Notable developer-related package changes:
- The btcec package now provides the ability to perform ECDH
encryption and decryption (#375)
- The block and header validation in the blockchain package has been
split to help pave the way toward concurrent downloads (#386)
- Misc changes:
- Minor peer optimization (#433)
- Contributors (alphabetical order):
- Dave Collins
- David Hill
- Federico Bond
- Ishbir Singh
- Josh Rickmar
Changes in 0.11.0 (Wed May 06 2015)
- Protocol and network related changes:
- **IMPORTANT: Update is required due to the following point**
- Correct a few corner cases in script handling which could result in
forking from the network on non-standard transactions (#425)
- Add a new checkpoint at block height 352940 (#418)
- Optimized script execution (#395, #400, #404, #409)
- Fix a case that could lead stalled syncs (#138, #296)
- Network address manager changes:
- Implement eclipse attack countermeasures as proposed in
http://cs-people.bu.edu/heilman/eclipse (#370, #373)
- Optional address indexing changes:
- Fix an issue where a reorg could cause an orderly shutdown when the
address index is active (#340, #357)
- Transaction relay (memory pool) changes:
- Increase maximum allowed space for nulldata transactions to 80 bytes
(#331)
- Implement support for the following rules specified by BIP0062:
- The S value in ECDSA signature must be at most half the curve order
(rule 5) (#349)
- Script execution must result in a single non-zero value on the stack
(rule 6) (#347)
- NOTE: All 7 rules of BIP0062 are now implemented
- Use network adjusted time in finalized transaction checks to improve
consistency across nodes (#332)
- Process orphan transactions on acceptance of new transactions (#345)
- RPC changes:
- Add support for a limited RPC user which is not allowed admin level
operations on the server (#363)
- Implement node command for more unified control over connected peers
(#79, #341)
- Implement generate command for regtest/simnet to support
deterministically mining a specified number of blocks (#362, #407)
- Update searchrawtransactions to return the matching transactions in
order (#354)
- Correct an issue with searchrawtransactions where it could return
duplicates (#346, #354)
- Increase precision of 'difficulty' field in getblock result to 8
(#414, #415)
- Omit 'nextblockhash' field from getblock result when it is empty
(#416, #417)
- Add 'id' and 'timeoffset' fields to getpeerinfo result (#335)
- Websocket changes:
- Implement new commands stopnotifyspent, stopnotifyreceived,
stopnotifyblocks, and stopnotifynewtransactions to allow clients to
cancel notification registrations (#122, #342)
- btcctl utility changes:
- A single dash can now be used as an argument to cause that argument to
be read from stdin (#348)
- Add generate command
- Notable developer-related package changes:
- The new version 2 btcjson package has now replaced the deprecated
version 1 package (#368)
- The btcec package now performs all signing using RFC6979 deterministic
signatures (#358, #360)
- The txscript package has been significantly cleaned up and had a few
API changes (#387, #388, #389, #390, #391, #392, #393, #395, #396,
#400, #403, #404, #405, #406, #408, #409, #410, #412)
- A new PkScriptLocs function has been added to the wire package MsgTx
type which provides callers that deal with scripts optimization
opportunities (#343)
- Misc changes:
- Minor wire hashing optimizations (#366, #367)
- Other minor internal optimizations
- Contributors (alphabetical order):
- Alex Akselrod
- Arne Brutschy
- Chris Jepson
- Daniel Krawisz
- Dave Collins
- David Hill
- Jimmy Song
- Jonas Nick
- Josh Rickmar
- Olaoluwa Osuntokun
- Oleg Andreev
Changes in 0.10.0 (Sun Mar 01 2015)
- Protocol and network related changes:
- Add a new checkpoint at block height 343185
- Implement BIP066 which includes support for version 3 blocks, a new
consensus rule which prevents non-DER encoded signatures, and a
double-threshold switchover mechanism
- Rather than announcing all known addresses on getaddr requests which
can possibly result in multiple messages, randomize the results and
limit them to the max allowed by a single message (1000 addresses)
- Add more reserved IP spaces to the address manager
- Transaction relay (memory pool) changes:
- Make transactions which contain reserved opcodes nonstandard
- No longer accept or relay free and low-fee transactions that have
insufficient priority to be mined in the next block
- Implement support for the following rules specified by BIP0062:
- ECDSA signature must use strict DER encoding (rule 1)
- The signature script must only contain push operations (rule 2)
- All push operations must use the smallest possible encoding (rule 3)
- All stack values interpreted as a number must be encoding using the
shortest possible form (rule 4)
- NOTE: Rule 1 was already enforced, however the entire script now
evaluates to false rather than only the signature verification as
required by BIP0062
- Allow transactions with nulldata transaction outputs to be treated as
standard
- Mining support changes:
- Modify the getblocktemplate RPC to generate and return block templates
for version 3 blocks which are compatible with BIP0066
- Allow getblocktemplate to serve blocks when the current time is
less than the minimum allowed time for a generated block template
(https://github.com/btcsuite/btcd/issues/209)
- Crypto changes:
- Optimize scalar multiplication by the base point by using a
pre-computed table which results in approximately a 35% speedup
(https://github.com/btcsuite/btcec/issues/2)
- Optimize general scalar multiplication by using the secp256k1
endomorphism which results in approximately a 17-20% speedup
(https://github.com/btcsuite/btcec/issues/1)
- Optimize general scalar multiplication by using non-adjacent form
which results in approximately an additional 8% speedup
(https://github.com/btcsuite/btcec/issues/3)
- Implement optional address indexing:
- Add a new parameter --addrindex which will enable the creation of an
address index which can be queried to determine all transactions which
involve a given address
(https://github.com/btcsuite/btcd/issues/190)
- Add a new logging subsystem for address index related operations
- Support new searchrawtransactions RPC
(https://github.com/btcsuite/btcd/issues/185)
- RPC changes:
- Require TLS version 1.2 as the minimum version for all TLS connections
- Provide support for disabling TLS when only listening on localhost
(https://github.com/btcsuite/btcd/pull/192)
- Modify help output for all commands to provide much more consistent
and detailed information
- Correct case in getrawtransaction which would refuse to serve certain
transactions with invalid scripts
(https://github.com/btcsuite/btcd/issues/210)
- Correct error handling in the getrawtransaction RPC which could lead
to a crash in rare cases
(https://github.com/btcsuite/btcd/issues/196)
- Update getinfo RPC to include the appropriate 'timeoffset' calculated
from the median network time
- Modify listreceivedbyaddress result type to include txids field so it
is compatible
- Add 'iswatchonly' field to validateaddress result
- Add 'startingpriority' and 'currentpriority' fields to getrawmempool
(https://github.com/btcsuite/btcd/issues/178)
- Don't omit the 'confirmations' field from getrawtransaction when it is
zero
- Websocket changes:
- Modify the behavior of the rescan command to automatically register
for notifications about transactions paying to rescanned addresses
or spending outputs from the final rescan utxo set when the rescan
is through the best block in the chain
- btcctl utility changes:
- Make the list of commands available via the -l option rather than
dumping the entire list on usage errors
- Alphabetize and categorize the list of commands by chain and wallet
- Make the help option only show the help options instead of also
dumping all of the commands
- Make the usage syntax much more consistent and correct a few cases of
misnamed fields
(https://github.com/btcsuite/btcd/issues/305)
- Improve usage errors to show the specific parameter number, reason,
and error code
- Only show the usage for specific command is shown when a valid command
is provided with invalid parameters
- Add support for a SOCK5 proxy
- Modify output for integer fields (such as timestamps) to display
normally instead in scientific notation
- Add invalidateblock command
- Add reconsiderblock command
- Add createnewaccount command
- Add renameaccount command
- Add searchrawtransactions command
- Add importaddress command
- Add importpubkey command
- showblock utility changes:
- Remove utility in favor of the RPC getblock method
- Notable developer-related package changes:
- Many of the core packages have been relocated into the btcd repository
(https://github.com/btcsuite/btcd/issues/214)
- A new version of the btcjson package that has been completely
redesigned from the ground up based upon how the project has
evolved and lessons learned while using it since it was first written
is now available in the btcjson/v2/btcjson directory
- This will ultimately replace the current version so anyone making
use of this package will need to update their code accordingly
- The btcec package now provides better facilities for working directly
with its public and private keys without having to mix elements from
the ecdsa package
- Update the script builder to ensure all rules specified by BIP0062 are
adhered to when creating scripts
- The blockchain package now provides a MedianTimeSource interface and
concrete implementation for providing time samples from remote peers
and using that data to calculate an offset against the local time
- Misc changes:
- Fix a slow memory leak due to tickers not being stopped
(https://github.com/btcsuite/btcd/issues/189)
- Fix an issue where a mix of orphans and SPV clients could trigger a
condition where peers would no longer be served
(https://github.com/btcsuite/btcd/issues/231)
- The RPC username and password can now contain symbols which previously
conflicted with special symbols used in URLs
- Improve handling of obtaining random nonces to prevent cases where it
could error when not enough entropy was available
- Improve handling of home directory creation errors such as in the case
of unmounted symlinks (https://github.com/btcsuite/btcd/issues/193)
- Improve the error reporting for rejected transactions to include the
inputs which are missing and/or being double spent
- Update sample config file with new options and correct a comment
regarding the fact the RPC server only listens on localhost by default
(https://github.com/btcsuite/btcd/issues/218)
- Update the continuous integration builds to run several tools which
help keep code quality high
- Significant amount of internal code cleanup and improvements
- Other minor internal optimizations
- Code Contributors (alphabetical order):
- Beldur
- Ben Holden-Crowther
- Dave Collins
- David Evans
- David Hill
- Guilherme Salgado
- Javed Khan
- Jimmy Song
- John C. Vernaleo
- Jonathan Gillham
- Josh Rickmar
- Michael Ford
- Michail Kargakis
- kac
- Olaoluwa Osuntokun
Changes in 0.9.0 (Sat Sep 20 2014)
- Protocol and network related changes:
- Add a new checkpoint at block height 319400
- Add support for BIP0037 bloom filters
(https://github.com/conformal/btcd/issues/132)
- Implement BIP0061 reject handling and hence support for protocol
version 70002 (https://github.com/conformal/btcd/issues/133)
- Add testnet DNS seeds for peer discovery (testnet-seed.alexykot.me
and testnet-seed.bitcoin.schildbach.de)
- Add mainnet DNS seed for peer discovery (seeds.bitcoin.open-nodes.org)
- Make multisig transactions with non-null dummy data nonstandard
(https://github.com/conformal/btcd/issues/131)
- Make transactions with an excessive number of signature operations
nonstandard
- Perform initial DNS lookups concurrently which allows connections
more quickly
- Improve the address manager to significantly reduce memory usage and
add tests
- Remove orphan transactions when they appear in a mined block
(https://github.com/conformal/btcd/issues/166)
- Apply incremental back off on connection retries for persistent peers
that give invalid replies to mirror the logic used for failed
connections (https://github.com/conformal/btcd/issues/103)
- Correct rate-limiting of free and low-fee transactions
- Mining support changes:
- Implement getblocktemplate RPC with the following support:
(https://github.com/conformal/btcd/issues/124)
- BIP0022 Non-Optional Sections
- BIP0022 Long Polling
- BIP0023 Basic Pool Extensions
- BIP0023 Mutation coinbase/append
- BIP0023 Mutations time, time/increment, and time/decrement
- BIP0023 Mutation transactions/add
- BIP0023 Mutations prevblock, coinbase, and generation
- BIP0023 Block Proposals
- Implement built-in concurrent CPU miner
(https://github.com/conformal/btcd/issues/137)
NOTE: CPU mining on mainnet is pointless. This has been provided
for testing purposes such as for the new simulation test network
- Add --generate flag to enable CPU mining
- Deprecate the --getworkkey flag in favor of --miningaddr which
specifies which addresses generated blocks will choose from to pay
the subsidy to
- RPC changes:
- Implement gettxout command
(https://github.com/conformal/btcd/issues/141)
- Implement validateaddress command
- Implement verifymessage command
- Mark getunconfirmedbalance RPC as wallet-only
- Mark getwalletinfo RPC as wallet-only
- Update getgenerate, setgenerate, gethashespersec, and getmininginfo
to return the appropriate information about new CPU mining status
- Modify getpeerinfo pingtime and pingwait field types to float64 so
they are compatible
- Improve disconnect handling for normal HTTP clients
- Make error code returns for invalid hex more consistent
- Websocket changes:
- Switch to a new more efficient websocket package
(https://github.com/conformal/btcd/issues/134)
- Add rescanfinished notification
- Modify the rescanprogress notification to include block hash as well
as height (https://github.com/conformal/btcd/issues/151)
- btcctl utility changes:
- Accept --simnet flag which automatically selects the appropriate port
and TLS certificates needed to communicate with btcd and btcwallet on
the simulation test network
- Fix createrawtransaction command to send amounts denominated in BTC
- Add estimatefee command
- Add estimatepriority command
- Add getmininginfo command
- Add getnetworkinfo command
- Add gettxout command
- Add lockunspent command
- Add signrawtransaction command
- addblock utility changes:
- Accept --simnet flag which automatically selects the appropriate port
and TLS certificates needed to communicate with btcd and btcwallet on
the simulation test network
- Notable developer-related package changes:
- Provide a new bloom package in btcutil which allows creating and
working with BIP0037 bloom filters
- Provide a new hdkeychain package in btcutil which allows working with
BIP0032 hierarchical deterministic key chains
- Introduce a new btcnet package which houses network parameters
- Provide new simnet network (--simnet) which is useful for private
simulation testing
- Enforce low S values in serialized signatures as detailed in BIP0062
- Return errors from all methods on the btcdb.Db interface
(https://github.com/conformal/btcdb/issues/5)
- Allow behavior flags to alter btcchain.ProcessBlock
(https://github.com/conformal/btcchain/issues/5)
- Provide a new SerializeSize API for blocks
(https://github.com/conformal/btcwire/issues/19)
- Several of the core packages now work with Google App Engine
- Misc changes:
- Correct an issue where the database could corrupt under certain
circumstances which would require a new chain download
- Slightly optimize deserialization
- Use the correct IP block for he.net
- Fix an issue where it was possible the block manager could hang on
shutdown
- Update sample config file so the comments are on a separate line
rather than the end of a line so they are not interpreted as settings
(https://github.com/conformal/btcd/issues/135)
- Correct an issue where getdata requests were not being properly
throttled which could lead to larger than necessary memory usage
- Always show help when given the help flag even when the config file
contains invalid entries
- General code cleanup and minor optimizations
Changes in 0.8.0-beta (Sun May 25 2014)
- Btcd is now Beta (https://github.com/conformal/btcd/issues/130)
- Add a new checkpoint at block height 300255
- Protocol and network related changes:
- Lower the minimum transaction relay fee to 1000 satoshi to match
recent reference client changes
(https://github.com/conformal/btcd/issues/100)
- Raise the maximum signature script size to support standard 15-of-15
multi-signature pay-to-script-hash transactions with compressed pubkeys
to remain compatible with the reference client
(https://github.com/conformal/btcd/issues/128)
- Reduce max bytes allowed for a standard nulldata transaction to 40 for
compatibility with the reference client
- Introduce a new btcnet package which houses all of the network params
for each network (mainnet, testnet3, regtest) to ultimately enable
easier addition and tweaking of networks without needing to change
several packages
- Fix several script discrepancies found by reference client test data
- Add new DNS seed for peer discovery (seed.bitnodes.io)
- Reduce the max known inventory cache from 20000 items to 1000 items
- Fix an issue where unknown inventory types could lead to a hung peer
- Implement inventory rebroadcast handler for sendrawtransaction
(https://github.com/conformal/btcd/issues/99)
- Update user agent to fully support BIP0014
(https://github.com/conformal/btcwire/issues/10)
- Implement initial mining support:
- Add a new logging subsystem for mining related operations
- Implement infrastructure for creating block templates
- Provide options to control block template creation settings
- Support the getwork RPC
- Allow address identifiers to apply to more than one network since both
testnet3 and the regression test network unfortunately use the same
identifier
- RPC changes:
- Set the content type for HTTP POST RPC connections to application/json
(https://github.com/conformal/btcd/issues/121)
- Modified the RPC server startup so it only requires at least one valid
listen interface
- Correct an error path where it was possible certain errors would not
be returned
- Implement getwork command
(https://github.com/conformal/btcd/issues/125)
- Update sendrawtransaction command to reject orphans
- Update sendrawtransaction command to include the reason a transaction
was rejected
- Update getinfo command to populate connection count field
- Update getinfo command to include relay fee field
(https://github.com/conformal/btcd/issues/107)
- Allow transactions submitted with sendrawtransaction to bypass the
rate limiter
- Allow the getcurrentnet and getbestblock extensions to be accessed via
HTTP POST in addition to Websockets
(https://github.com/conformal/btcd/issues/127)
- Websocket changes:
- Rework notifications to ensure they are delivered in the order they
occur
- Rename notifynewtxs command to notifyreceived (funds received)
- Rename notifyallnewtxs command to notifynewtransactions
- Rename alltx notification to txaccepted
- Rename allverbosetx notification to txacceptedverbose
(https://github.com/conformal/btcd/issues/98)
- Add rescan progress notification
- Add recvtx notification
- Add redeemingtx notification
- Modify notifyspent command to accept an array of outpoints
(https://github.com/conformal/btcd/issues/123)
- Significantly optimize the rescan command to yield up to a 60x speed
increase
- btcctl utility changes:
- Add createencryptedwallet command
- Add getblockchaininfo command
- Add importwallet command
- Add addmultisigaddress command
- Add setgenerate command
- Accept --testnet and --wallet flags which automatically select
the appropriate port and TLS certificates needed to communicate
with btcd and btcwallet (https://github.com/conformal/btcd/issues/112)
- Allow path expansion from config file entries
(https://github.com/conformal/btcd/issues/113)
- Minor refactor simplify handling of options
- addblock utility changes:
- Improve logging by making it consistent with the logging provided by
btcd (https://github.com/conformal/btcd/issues/90)
- Improve several package APIs for developers:
- Add new amount type for consistently handling monetary values
- Add new coin selector API
- Add new WIF (Wallet Import Format) API
- Add new crypto types for private keys and signatures
- Add new API to sign transactions including script merging and hash
types
- Expose function to extract all pushed data from a script
(https://github.com/conformal/btcscript/issues/8)
- Misc changes:
- Optimize address manager shuffling to do 67% less work on average
- Resolve a couple of benign data races found by the race detector
(https://github.com/conformal/btcd/issues/101)
- Add IP address to all peer related errors to clarify which peer is the
cause (https://github.com/conformal/btcd/issues/102)
- Fix a UPNP case issue that prevented the --upnp option from working
with some UPNP servers
- Update documentation in the sample config file regarding debug levels
- Adjust some logging levels to improve debug messages
- Improve the throughput of query messages to the block manager
- Several minor optimizations to reduce GC churn and enhance speed
- Other minor refactoring
- General code cleanup
Changes in 0.7.0 (Thu Feb 20 2014)
- Fix an issue when parsing scripts which contain a multi-signature script
which require zero signatures such as testnet block
000000001881dccfeda317393c261f76d09e399e15e27d280e5368420f442632
(https://github.com/conformal/btcscript/issues/7)
- Add check to ensure all transactions accepted to mempool only contain
canonical data pushes (https://github.com/conformal/btcscript/issues/6)
- Fix an issue causing excessive memory consumption
- Significantly rework and improve the websocket notification system:
- Each client is now independent so slow clients no longer limit the
speed of other connected clients
- Potentially long-running operations such as rescans are now run in
their own handler and rate-limited to one operation at a time without
preventing simultaneous requests from the same client for the faster
requests or notifications
- A couple of scenarios which could cause shutdown to hang have been
resolved
- Update notifynewtx notifications to support all address types instead
of only pay-to-pubkey-hash
- Provide a --rpcmaxwebsockets option to allow limiting the number of
concurrent websocket clients
- Add a new websocket command notifyallnewtxs to request notifications
(https://github.com/conformal/btcd/issues/86) (thanks @flammit)
- Improve btcctl utility in the following ways:
- Add getnetworkhashps command
- Add gettransaction command (wallet-specific)
- Add signmessage command (wallet-specific)
- Update getwork command to accept
- Continue cleanup and work on implementing the RPC API:
- Implement getnettotals command
(https://github.com/conformal/btcd/issues/84)
- Implement networkhashps command
(https://github.com/conformal/btcd/issues/87)
- Update getpeerinfo to always include syncnode field even when false
- Remove help addenda for getpeerinfo now that it supports all fields
- Close standard RPC connections on auth failure
- Provide a --rpcmaxclients option to allow limiting the number of
concurrent RPC clients (https://github.com/conformal/btcd/issues/68)
- Include IP address in RPC auth failure log messages
- Resolve rather harmless data races found by the race detector
(https://github.com/conformal/btcd/issues/94)
- Increase block priority size and max standard transaction size to 50k
and 100k, respectively (https://github.com/conformal/btcd/issues/71)
- Add rate limiting of free transactions to the memory pool to prevent
penny flooding (https://github.com/conformal/btcd/issues/40)
- Provide a --logdir option (https://github.com/conformal/btcd/issues/95)
- Change the default log file path to include the network
- Add a new ScriptBuilder interface to btcscript to support creation of
custom scripts (https://github.com/conformal/btcscript/issues/5)
- General code cleanup
Changes in 0.6.0 (Tue Feb 04 2014)
- Fix an issue when parsing scripts which contain invalid signatures that
caused a chain fork on block
0000000000000001e4241fd0b3469a713f41c5682605451c05d3033288fb2244
- Correct an issue which could lead to an error in removeBlockNode
(https://github.com/conformal/btcchain/issues/4)
- Improve addblock utility as follows:
- Check imported blocks against all chain rules and checkpoints
- Skip blocks which are already known so you can stop and restart the
import or start the import after you have already downloaded a portion
of the chain
- Correct an issue where the utility did not shutdown cleanly after
processing all blocks
- Add error on attempt to import orphan blocks
- Improve error handling and reporting
- Display statistics after input file has been fully processed
- Rework, optimize, and improve headers-first mode:
- Resuming the chain sync from any point before the final checkpoint
will now use headers-first mode
(https://github.com/conformal/btcd/issues/69)
- Verify all checkpoints as opposed to only the final one
- Reduce and bound memory usage
- Rollback to the last known good point when a header does not match a
checkpoint
- Log information about what is happening with headers
- Improve btcctl utility in the following ways:
- Add getaddednodeinfo command
- Add getnettotals command
- Add getblocktemplate command (wallet-specific)
- Add getwork command (wallet-specific)
- Add getnewaddress command (wallet-specific)
- Add walletpassphrasechange command (wallet-specific)
- Add walletlock command (wallet-specific)
- Add sendfrom command (wallet-specific)
- Add sendmany command (wallet-specific)
- Add settxfee command (wallet-specific)
- Add listsinceblock command (wallet-specific)
- Add listaccounts command (wallet-specific)
- Add keypoolrefill command (wallet-specific)
- Add getreceivedbyaccount command (wallet-specific)
- Add getrawchangeaddress command (wallet-specific)
- Add gettxoutsetinfo command (wallet-specific)
- Add listaddressgroupings command (wallet-specific)
- Add listlockunspent command (wallet-specific)
- Add listlock command (wallet-specific)
- Add listreceivedbyaccount command (wallet-specific)
- Add validateaddress command (wallet-specific)
- Add verifymessage command (wallet-specific)
- Add sendtoaddress command (wallet-specific)
- Continue cleanup and work on implementing the RPC API:
- Implement submitblock command
(https://github.com/conformal/btcd/issues/61)
- Implement help command
- Implement ping command
- Implement getaddednodeinfo command
(https://github.com/conformal/btcd/issues/78)
- Implement getinfo command
- Update getpeerinfo to support bytesrecv and bytessent
(https://github.com/conformal/btcd/issues/83)
- Improve and correct several RPC server and websocket areas:
- Change the connection endpoint for websockets from /wallet to /ws
(https://github.com/conformal/btcd/issues/80)
- Implement an alternative authentication for websockets so clients
such as javascript from browsers that don't support setting HTTP
headers can authenticate (https://github.com/conformal/btcd/issues/77)
- Add an authentication deadline for RPC connections
(https://github.com/conformal/btcd/issues/68)
- Use standard authentication failure responses for RPC connections
- Make automatically generated certificate more standard so it works
from client such as node.js and Firefox
- Correct some minor issues which could prevent the RPC server from
shutting down in an orderly fashion
- Make all websocket notifications require registration
- Change the data sent over websockets to text since it is JSON-RPC
- Allow connections that do not have an Origin header set
- Expose and track the number of bytes read and written per peer
(https://github.com/conformal/btcwire/issues/6)
- Correct an issue with sendrawtransaction when invoked via websockets
which prevented a minedtx notification from being added
- Rescan operations issued from remote wallets are now stopped when
the wallet disconnects mid-operation
(https://github.com/conformal/btcd/issues/66)
- Several optimizations related to fetching block information from the
database
- General code cleanup
Changes in 0.5.0 (Mon Jan 13 2014)
- Optimize initial block download by introducing a new mode which
downloads the block headers first (up to the final checkpoint)
- Improve peer handling to remove the potential for slow peers to cause
sluggishness amongst all peers
(https://github.com/conformal/btcd/issues/63)
- Fix an issue where the initial block sync could stall when the sync peer
disconnects (https://github.com/conformal/btcd/issues/62)
- Correct an issue where --externalip was doing a DNS lookup on the full
host:port instead of just the host portion
(https://github.com/conformal/btcd/issues/38)
- Fix an issue which could lead to a panic on chain switches
(https://github.com/conformal/btcd/issues/70)
- Improve btcctl utility in the following ways:
- Show getdifficulty output as floating point to 6 digits of precision
- Show all JSON object replies formatted as standard JSON
- Allow btcctl getblock to accept optional params
- Add getaccount command (wallet-specific)
- Add getaccountaddress command (wallet-specific)
- Add sendrawtransaction command
- Continue cleanup and work on implementing RPC API calls
- Update getrawmempool to support new optional verbose flag
- Update getrawtransaction to match the reference client
- Update getblock to support new optional verbose flag
- Update raw transactions to fully match the reference client including
support for all transaction types and address types
- Correct getrawmempool fee field to return BTC instead of Satoshi
- Correct getpeerinfo service flag to return 8 digit string so it
matches the reference client
- Correct verifychain to return a boolean
- Implement decoderawtransaction command
- Implement createrawtransaction command
- Implement decodescript command
- Implement gethashespersec command
- Allow RPC handler overrides when invoked via a websocket versus
legacy connection
- Add new DNS seed for peer discovery
- Display user agent on new valid peer log message
(https://github.com/conformal/btcd/issues/64)
- Notify wallet when new transactions that pay to registered addresses
show up in the mempool before being mined into a block
- Support a tor-specific proxy in addition to a normal proxy
(https://github.com/conformal/btcd/issues/47)
- Remove deprecated sqlite3 imports from utilities
- Remove leftover profile write from addblock utility
- Quite a bit of code cleanup and refactoring to improve maintainability
Changes in 0.4.0 (Thu Dec 12 2013)
- Allow listen interfaces to be specified via --listen instead of only the
port (https://github.com/conformal/btcd/issues/33)
- Allow listen interfaces for the RPC server to be specified via
--rpclisten instead of only the port
(https://github.com/conformal/btcd/issues/34)
- Only disable listening when --connect or --proxy are used when no
--listen interface are specified
(https://github.com/conformal/btcd/issues/10)
- Add several new standard transaction checks to transaction memory pool:
- Support nulldata scripts as standard
- Only allow a max of one nulldata output per transaction
- Enforce a maximum of 3 public keys in multi-signature transactions
- The number of signatures in multi-signature transactions must not
exceed the number of public keys
- The number of inputs to a signature script must match the expected
number of inputs for the script type
- The number of inputs pushed onto the stack by a redeeming signature
script must match the number of inputs consumed by the referenced
public key script
- When a block is connected, remove any transactions from the memory pool
which are now double spends as a result of the newly connected
transactions
- Don't relay transactions resurrected during a chain switch since
other peers will also be switching chains and therefore already know
about them
- Cleanup a few cases where rejected transactions showed as an error
rather than as a rejected transaction
- Ignore the default configuration file when --regtest (regression test
mode) is specified
- Implement TLS support for RPC including automatic certificate generation
- Support HTTP authentication headers for web sockets
- Update address manager to recognize and properly work with Tor
addresses (https://github.com/conformal/btcd/issues/36) and
(https://github.com/conformal/btcd/issues/37)
- Improve btcctl utility in the following ways:
- Add the ability to specify a configuration file
- Add a default entry for the RPC cert to point to the location
it will likely be in the btcd home directory
- Implement --version flag
- Provide a --notls option to support non-TLS configurations
- Fix a couple of minor races found by the Go race detector
- Improve logging
- Allow logging level to be specified on a per subsystem basis
(https://github.com/conformal/btcd/issues/48)
- Allow logging levels to be dynamically changed via RPC
(https://github.com/conformal/btcd/issues/15)
- Implement a rolling log file with a max of 10MB per file and a
rotation size of 3 which results in a max logging size of 30 MB
- Correct a minor issue with the rescanning websocket call
(https://github.com/conformal/btcd/issues/54)
- Fix a race with pushing address messages that could lead to a panic
(https://github.com/conformal/btcd/issues/58)
- Improve which external IP address is reported to peers based on which
interface they are connected through
(https://github.com/conformal/btcd/issues/35)
- Add --externalip option to allow an external IP address to be specified
for cases such as tor hidden services or advanced network configurations
(https://github.com/conformal/btcd/issues/38)
- Add --upnp option to support automatic port mapping via UPnP
(https://github.com/conformal/btcd/issues/51)
- Update Ctrl+C interrupt handler to properly sync address manager and
remove the UPnP port mapping (if needed)
- Continue cleanup and work on implementing RPC API calls
- Add importprivkey (import private key) command to btcctl
- Update getrawtransaction to provide addresses properly, support
new verbose param, and match the reference implementation with the
exception of MULTISIG (thanks @flammit)
- Update getblock with new verbose flag (thanks @flammit)
- Add listtransactions command to btcctl
- Add getbalance command to btcctl
- Add basic support for btcd to run as a native Windows service
(https://github.com/conformal/btcd/issues/42)
- Package addblock utility with Windows MSIs
- Add support for TravisCI (continuous build integration)
- Cleanup some documentation and usage
- Several other minor bug fixes and general code cleanup
Changes in 0.3.3 (Wed Nov 13 2013)
- Significantly improve initial block chain download speed
(https://github.com/conformal/btcd/issues/20)
- Add a new checkpoint at block height 267300
- Optimize most recently used inventory handling
(https://github.com/conformal/btcd/issues/21)
- Optimize duplicate transaction input check
(https://github.com/conformal/btcchain/issues/2)
- Optimize transaction hashing
(https://github.com/conformal/btcd/issues/25)
- Rework and optimize wallet listener notifications
(https://github.com/conformal/btcd/issues/22)
- Optimize serialization and deserialization
(https://github.com/conformal/btcd/issues/27)
- Add support for minimum transaction fee to memory pool acceptance
(https://github.com/conformal/btcd/issues/29)
- Improve leveldb database performance by removing explicit GC call
- Fix an issue where Ctrl+C was not always finishing orderly database
shutdown
- Fix an issue in the script handling for OP_CHECKSIG
- Impose max limits on all variable length protocol entries to prevent
abuse from malicious peers
- Enforce DER signatures for transactions allowed into the memory pool
- Separate the debug profile http server from the RPC server
- Rework of the RPC code to improve performance and make the code cleaner
- The getrawtransaction RPC call now properly checks the memory pool
before consulting the db (https://github.com/conformal/btcd/issues/26)
- Add support for the following RPC calls: getpeerinfo, getconnectedcount,
addnode, verifychain
(https://github.com/conformal/btcd/issues/13)
(https://github.com/conformal/btcd/issues/17)
- Implement rescan websocket extension to allow wallet rescans
- Use correct paths for application data storage for all supported
operating systems (https://github.com/conformal/btcd/issues/30)
- Add a default redirect to the http profiling page when accessing the
http profile server
- Add a new --cpuprofile option which can be used to generate CPU
profiling data on platforms that support it
- Several other minor performance optimizations
- Other minor bug fixes and general code cleanup
Changes in 0.3.2 (Tue Oct 22 2013)
- Fix an issue that could cause the download of the block chain to stall
(https://github.com/conformal/btcd/issues/12)
- Remove deprecated sqlite as an available database backend
- Close sqlite compile issue as sqlite has now been removed
(https://github.com/conformal/btcd/issues/11)
- Change default RPC ports to 8334 (mainnet) and 18334 (testnet)
- Continue cleanup and work on implementing RPC API calls
- Add support for the following RPC calls: getrawmempool,
getbestblockhash, decoderawtransaction, getdifficulty,
getconnectioncount, getpeerinfo, and addnode
- Improve the btcctl utility that is used to issue JSON-RPC commands
- Fix an issue preventing btcd from cleanly shutting down with the RPC
stop command
- Add a number of database interface tests to ensure backends implement
the expected interface
- Expose some additional information from btcscript to be used for
identifying "standard"" transactions
- Add support for plan9 - thanks @mischief
(https://github.com/conformal/btcd/pull/19)
- Other minor bug fixes and general code cleanup
Changes in 0.3.1-alpha (Tue Oct 15 2013)
- Change default database to leveldb
NOTE: This does mean you will have to redownload the block chain. Since we
are still in alpha, we didn't feel writing a converter was worth the time as
it would take away from more important issues at this stage
- Add a warning if there are multiple block chain databases of different types
- Fix issue with unexpected EOF in leveldb -- https://github.com/conformal/btcd/issues/18
- Fix issue preventing block 21066 on testnet -- https://github.com/conformal/btcchain/issues/1
- Fix issue preventing block 96464 on testnet -- https://github.com/conformal/btcscript/issues/1
- Optimize transaction lookups
- Correct a few cases of list removal that could result in improper cleanup
of no longer needed orphans
- Add functionality to increase ulimits on non-Windows platforms
- Add support for mempool command which allows remote peers to query the
transaction memory pool via the bitcoin protocol
- Clean up logging a bit
- Add a flag to disable checkpoints for developers
- Add a lot of useful debug logging such as message summaries
- Other minor bug fixes and general code cleanup
Initial Release 0.3.0-alpha (Sat Oct 05 2013):
- Initial release

Gopkg.lock generated

@@ -1,105 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
name = "bou.ke/monkey"
packages = ["."]
revision = "bdf6dea004c6fd1cdf4b25da8ad45a606c09409a"
version = "v1.0.1"
[[projects]]
name = "github.com/aead/siphash"
packages = ["."]
revision = "83563a290f60225eb120d724600b9690c3fb536f"
version = "v1.0.1"
[[projects]]
branch = "master"
name = "github.com/btcsuite/btclog"
packages = ["."]
revision = "84c8d2346e9fc8c7b947e243b9c24e6df9fd206a"
[[projects]]
branch = "master"
name = "github.com/btcsuite/go-socks"
packages = ["socks"]
revision = "4720035b7bfd2a9bb130b1c184f8bbe41b6f0d0f"
[[projects]]
name = "github.com/btcsuite/goleveldb"
packages = ["leveldb","leveldb/cache","leveldb/comparer","leveldb/errors","leveldb/filter","leveldb/iterator","leveldb/journal","leveldb/memdb","leveldb/opt","leveldb/storage","leveldb/table","leveldb/util"]
revision = "3fd0373267b6461dbefe91cef614278064d05465"
version = "v1.0.0"
[[projects]]
name = "github.com/btcsuite/snappy-go"
packages = ["."]
revision = "b3db38edf0a9a11a115eb6b022d8c946024a9ac0"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "github.com/btcsuite/websocket"
packages = ["."]
revision = "31079b6807923eb23992c421b114992b95131b55"
[[projects]]
name = "github.com/btcsuite/winsvc"
packages = ["eventlog","mgr","registry","svc","winapi"]
revision = "f8fb11f83f7e860e3769a08e6811d1b399a43722"
version = "v1.0.0"
[[projects]]
name = "github.com/davecgh/go-spew"
packages = ["spew"]
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
version = "v1.1.1"
[[projects]]
name = "github.com/jessevdk/go-flags"
packages = ["."]
revision = "c6ca198ec95c841fdb89fc0de7496fed11ab854e"
version = "v1.4.0"
[[projects]]
name = "github.com/jrick/logrotate"
packages = ["rotator"]
revision = "a93b200c26cbae3bb09dd0dc2c7c7fe1468a034a"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "github.com/kkdai/bstream"
packages = ["."]
revision = "b3251f7901ec4dd4ec66b3210e8f4bd5c0f1c5a3"
[[projects]]
name = "github.com/miekg/dns"
packages = ["."]
revision = "cc8cd02140663157ce797c6650488d6c8563f31f"
version = "v1.1.6"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = ["ed25519","ed25519/internal/edwards25519","ripemd160"]
revision = "c2843e01d9a2bc60bb26ad24e09734fdc2d9ec58"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = ["bpf","internal/iana","internal/socket","ipv4","ipv6"]
revision = "d8887717615a059821345a5c23649351b52a1c0b"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = ["unix"]
revision = "fead79001313d15903fb4605b4a1b781532cd93e"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "00392a00928f96fc94e2c8c65ce3a98cc6f5e2f93dda64d3c4502f2f38026e96"
solver-name = "gps-cdcl"
solver-version = 1


@@ -1,78 +0,0 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
[[constraint]]
name = "bou.ke/monkey"
version = "1.0.1"
[[constraint]]
name = "github.com/aead/siphash"
version = "1.0.1"
[[constraint]]
branch = "master"
name = "github.com/btcsuite/btclog"
[[constraint]]
branch = "master"
name = "github.com/btcsuite/go-socks"
[[constraint]]
name = "github.com/btcsuite/goleveldb"
version = "1.0.0"
[[constraint]]
branch = "master"
name = "github.com/btcsuite/websocket"
[[constraint]]
name = "github.com/btcsuite/winsvc"
version = "1.0.0"
[[constraint]]
name = "github.com/davecgh/go-spew"
version = "1.1.1"
[[constraint]]
name = "github.com/jessevdk/go-flags"
version = "1.4.0"
[[constraint]]
name = "github.com/jrick/logrotate"
version = "1.0.0"
[[constraint]]
branch = "master"
name = "github.com/kkdai/bstream"
[[constraint]]
name = "github.com/miekg/dns"
version = "1.1.6"
[[constraint]]
branch = "master"
name = "golang.org/x/crypto"
[prune]
go-tests = true
unused-packages = true

Jenkinsfile vendored

@@ -1,10 +0,0 @@
node {
stage 'Checkout'
checkout scm
stage 'Version'
sh './deploy.sh version'
stage 'Build'
sh "./deploy.sh build"
}


@@ -1,8 +1,9 @@
ISC License
Copyright (c) 2018-2019 DAGLabs
Copyright (c) 2018-2019 The kaspanet developers
Copyright (c) 2013-2018 The btcsuite developers
Copyright (c) 2015-2016 The Decred developers
Copyright (c) 2013-2014 Conformal Systems LLC.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above

README.md

@@ -1,49 +1,24 @@
btcd
Kaspad
====
Warning: This is pre-alpha software. There's no guarantee anything works.
====
[![Build Status](https://travis-ci.org/btcsuite/btcd.png?branch=master)](https://travis-ci.org/btcsuite/btcd)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/daglabs/btcd)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad)
btcd is an alternative full node bitcoin implementation written in Go (golang).
Kaspad is the reference full node Kaspa implementation written in Go (golang).
This project is currently under active development and is in a Beta state. It
is extremely stable and has been in production use since October 2013.
It properly downloads, validates, and serves the block chain using the exact
rules (including consensus bugs) for block acceptance as Bitcoin Core. We have
taken great care to avoid btcd causing a fork to the block chain. It includes a
full block validation testing framework which contains all of the 'official'
block acceptance tests (and some additional ones) that is run on every pull
request to help ensure it properly follows consensus. Also, it passes all of
the JSON test data in the Bitcoin Core code.
It also properly relays newly mined blocks, maintains a transaction pool, and
relays individual transactions that have not yet made it into a block. It
ensures all individual transactions admitted to the pool follow the rules
required by the block chain and also includes more strict checks which filter
transactions based on miner requirements ("standard" transactions).
One key difference between btcd and Bitcoin Core is that btcd does *NOT* include
wallet functionality and this was a very intentional design decision. See the
blog entry [here](https://blog.conformal.com/btcd-not-your-moms-bitcoin-daemon)
for more details. This means you can't actually make or receive payments
directly with btcd. That functionality is provided by the
[btcwallet](https://github.com/btcsuite/btcwallet) and
[Paymetheus](https://github.com/btcsuite/Paymetheus) (Windows-only) projects
which are both under active development.
This project is currently under active development and is in a pre-Alpha state.
Some things still don't work and APIs are far from finalized. The code is provided for reference only.
## Requirements
[Go](http://golang.org) 1.8 or newer.
Latest version of [Go](http://golang.org) (currently 1.13).
## Installation
#### Windows - MSI Available
https://github.com/daglabs/btcd/releases
#### Linux/BSD/MacOSX/POSIX - Build from Source
#### Build from Source
- Install Go according to the installation instructions here:
http://golang.org/doc/install
@@ -55,92 +30,48 @@ $ go version
$ go env GOROOT GOPATH
```
NOTE: The `GOROOT` and `GOPATH` above must not be the same path. It is
NOTE: The `GOROOT` and `GOPATH` above must not be the same path. It is
recommended that `GOPATH` is set to a directory in your home directory such as
`~/goprojects` to avoid write permission issues. It is also recommended to add
`~/dev/go` to avoid write permission issues. It is also recommended to add
`$GOPATH/bin` to your `PATH` at this point.
- Run the following commands to obtain btcd, all dependencies, and install it:
- Run the following commands to obtain and install kaspad including all dependencies:
```bash
$ # Install dep: https://golang.github.io/dep/docs/installation.html
$ git clone https://github.com/daglabs/btcd $GOPATH/src/github.com/daglabs/btcd
$ cd $GOPATH/src/github.com/daglabs/btcd
$ dep ensure
$ git clone https://github.com/kaspanet/kaspad $GOPATH/src/github.com/kaspanet/kaspad
$ cd $GOPATH/src/github.com/kaspanet/kaspad
$ go install . ./cmd/...
```
- btcd (and utilities) will now be installed in ```$GOPATH/bin```. If you did
- Kaspad (and utilities) should now be installed in `$GOPATH/bin`. If you did
not already add the bin directory to your system path during Go installation,
we recommend you do so now.
you are encouraged to do so now.
## Updating
#### Windows
Install a newer MSI
#### Linux/BSD/MacOSX/POSIX - Build from Source
- Run the following commands to update btcd, all dependencies, and install it:
```bash
$ cd $GOPATH/src/github.com/daglabs/btcd
$ git pull && dep ensure
$ go install . ./cmd/...
```
## Getting Started
btcd has several configuration options available to tweak how it runs, but all
of the basic operations described in the intro section work with zero
configuration.
#### Windows (Installed from MSI)
Launch btcd from your Start menu.
Kaspad has several configuration options available to tweak how it runs, but all
of the basic operations work with zero configuration.
#### Linux/BSD/POSIX/Source
```bash
$ ./btcd
$ ./kaspad
```
## IRC
- irc.freenode.net
- channel #btcd
- [webchat](https://webchat.freenode.net/?channels=btcd)
## Discord
Join our discord server using the following link: https://discord.gg/WmGhhzk
## Issue Tracker
The [integrated github issue tracker](https://github.com/daglabs/btcd/issues)
The [integrated github issue tracker](https://github.com/kaspanet/kaspad/issues)
is used for this project.
## Documentation
The documentation is a work-in-progress. It is located in the [docs](https://github.com/daglabs/btcd/tree/master/docs) folder.
## GPG Verification Key
All official release tags are signed by Conformal so users can ensure the code
has not been tampered with and is coming from the btcsuite developers. To
verify the signature perform the following:
- Download the Conformal public key:
https://raw.githubusercontent.com/btcsuite/btcd/master/release/GIT-GPG-KEY-conformal.txt
- Import the public key into your GPG keyring:
```bash
gpg --import GIT-GPG-KEY-conformal.txt
```
- Verify the release tag with the following command where `TAG_NAME` is a
placeholder for the specific tag:
```bash
git tag -v TAG_NAME
```
The documentation is a work-in-progress. It is located in the [docs](https://github.com/kaspanet/kaspad/tree/master/docs) folder.
## License
btcd is licensed under the [copyfree](http://copyfree.org) ISC License.
Kaspad is licensed under the copyfree [ISC License](https://choosealicense.com/licenses/isc/).

File diff suppressed because it is too large


@@ -1,17 +0,0 @@
#!/bin/sh
# This script uses gocov to generate a test coverage report.
# The gocov tool may be obtained with the following command:
# go get github.com/axw/gocov/gocov
#
# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH.
# Check for gocov.
type gocov >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo >&2 "This script requires the gocov tool."
echo >&2 "You may obtain it with the following command:"
echo >&2 "go get github.com/axw/gocov/gocov"
exit 1
fi
gocov test | gocov report


@@ -1,38 +0,0 @@
// Copyright (c) 2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
/*
Package addrmgr implements a concurrency safe Bitcoin address manager.
Address Manager Overview
In order to maintain the peer-to-peer Bitcoin network, there needs to be a source
of addresses to connect to as nodes come and go. The Bitcoin protocol provides
the getaddr and addr messages to allow peers to communicate known addresses with
each other. However, there needs to be a mechanism to store those results and
select peers from them. It is also important to note that remote peers can't
be trusted to send valid addresses, and they may even attempt to provide you
with only peers they control with malicious intent.
With that in mind, this package provides a concurrency safe address manager for
caching and selecting peers in a non-deterministic manner. The general idea is
the caller adds addresses to the address manager and notifies it when addresses
are connected, known good, and attempted. The caller also requests addresses as
it needs them.
The address manager internally segregates the addresses into groups and
non-deterministically selects groups in a cryptographically random manner. This
reduces the chances that multiple addresses from the same networks are selected, which
generally helps provide greater peer diversity, and perhaps more importantly,
drastically reduces the chances an attacker is able to coerce your peer into
only connecting to nodes they control.
The address manager also understands routability and Tor addresses and tries
hard to only return routable addresses. In addition, it uses the information
provided by the caller about connected, known good, and attempted addresses to
periodically purge peers which no longer appear to be good peers as well as
bias the selection toward known good peers. The general idea is to make a best
effort at only providing usable addresses.
*/
package addrmgr
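
A minimal caller-side sketch of the flow described above. The method names come from the addrmgr coverage listing further down in this diff, and the import paths from the package's own test files, but the exact signatures are illustrative assumptions, not taken from this change:
```Go
package example

import (
	"github.com/daglabs/btcd/addrmgr"
	"github.com/daglabs/btcd/wire"
)

// useAddressManager is an illustrative sketch only; exact signatures may
// differ between versions of the addrmgr package.
func useAddressManager(dataDir string, addrs []*wire.NetAddress, src *wire.NetAddress) {
	amgr := addrmgr.New(dataDir, nil) // nil lookup func: skip DNS resolution
	amgr.Start()
	defer amgr.Stop()

	// Feed in addresses learned from a peer's addr message.
	amgr.AddAddresses(addrs, src)

	// Ask for a candidate to dial, then report the outcome back so the
	// manager can bias future selection toward known good peers.
	if ka := amgr.GetAddress(); ka != nil {
		amgr.Attempt(ka.NetAddress())
		// ...after a successful connection and handshake:
		amgr.Connected(ka.NetAddress())
		amgr.Good(ka.NetAddress())
	}
}
```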


@@ -1,25 +0,0 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package addrmgr
import (
"time"
"github.com/daglabs/btcd/wire"
)
func TstKnownAddressIsBad(ka *KnownAddress) bool {
return ka.isBad()
}
func TstKnownAddressChance(ka *KnownAddress) float64 {
return ka.chance()
}
func TstNewKnownAddress(na *wire.NetAddress, attempts int,
lastattempt, lastsuccess time.Time, tried bool, refs int) *KnownAddress {
return &KnownAddress{na: na, attempts: attempts, lastattempt: lastattempt,
lastsuccess: lastsuccess, tried: tried, refs: refs}
}


@@ -1,114 +0,0 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package addrmgr_test
import (
"math"
"testing"
"time"
"github.com/daglabs/btcd/addrmgr"
"github.com/daglabs/btcd/wire"
)
func TestChance(t *testing.T) {
now := time.Unix(time.Now().Unix(), 0)
var tests = []struct {
addr *addrmgr.KnownAddress
expected float64
}{
{
//Test normal case
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
0, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
1.0,
}, {
//Test case in which lastseen < 0
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(20 * time.Second)},
0, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
1.0,
}, {
//Test case in which lastattempt < 0
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
0, time.Now().Add(30*time.Minute), time.Now(), false, 0),
1.0 * .01,
}, {
//Test case in which lastattempt < ten minutes
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
0, time.Now().Add(-5*time.Minute), time.Now(), false, 0),
1.0 * .01,
}, {
//Test case with several failed attempts.
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
2, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
1 / 1.5 / 1.5,
},
}
err := .0001
for i, test := range tests {
chance := addrmgr.TstKnownAddressChance(test.addr)
if math.Abs(test.expected-chance) >= err {
t.Errorf("case %d: got %f, expected %f", i, chance, test.expected)
}
}
}
func TestIsBad(t *testing.T) {
now := time.Unix(time.Now().Unix(), 0)
future := now.Add(35 * time.Minute)
monthOld := now.Add(-43 * time.Hour * 24)
secondsOld := now.Add(-2 * time.Second)
minutesOld := now.Add(-27 * time.Minute)
hoursOld := now.Add(-5 * time.Hour)
zeroTime := time.Time{}
futureNa := &wire.NetAddress{Timestamp: future}
minutesOldNa := &wire.NetAddress{Timestamp: minutesOld}
monthOldNa := &wire.NetAddress{Timestamp: monthOld}
currentNa := &wire.NetAddress{Timestamp: secondsOld}
//Test addresses that have been tried in the last minute.
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(futureNa, 3, secondsOld, zeroTime, false, 0)) {
t.Errorf("test case 1: addresses that have been tried in the last minute are not bad.")
}
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(monthOldNa, 3, secondsOld, zeroTime, false, 0)) {
t.Errorf("test case 2: addresses that have been tried in the last minute are not bad.")
}
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 3, secondsOld, zeroTime, false, 0)) {
t.Errorf("test case 3: addresses that have been tried in the last minute are not bad.")
}
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 3, secondsOld, monthOld, true, 0)) {
t.Errorf("test case 4: addresses that have been tried in the last minute are not bad.")
}
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 2, secondsOld, secondsOld, true, 0)) {
t.Errorf("test case 5: addresses that have been tried in the last minute are not bad.")
}
//Test address that claims to be from the future.
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(futureNa, 0, minutesOld, hoursOld, true, 0)) {
t.Errorf("test case 6: addresses that claim to be from the future are bad.")
}
//Test address that has not been seen in over a month.
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(monthOldNa, 0, minutesOld, hoursOld, true, 0)) {
t.Errorf("test case 7: addresses more than a month old are bad.")
}
//It has failed at least three times and never succeeded.
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 3, minutesOld, zeroTime, true, 0)) {
t.Errorf("test case 8: addresses that have never succeeded are bad.")
}
//It has failed ten times in the last week
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 10, minutesOld, monthOld, true, 0)) {
t.Errorf("test case 9: addresses that have not succeeded in too long are bad.")
}
//Test an address that should work.
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 2, minutesOld, hoursOld, true, 0)) {
t.Errorf("test case 10: This should be a valid address.")
}
}


@@ -1,19 +0,0 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package addrmgr
import (
"github.com/btcsuite/btclog"
"github.com/daglabs/btcd/logger"
)
// log is a logger that is initialized with no output filters. This
// means the package will not perform any logging by default until the caller
// requests it.
var log btclog.Logger
func init() {
log, _ = logger.Get(logger.SubsystemTags.ADXR)
}


@@ -1,62 +0,0 @@
github.com/conformal/btcd/addrmgr/network.go GroupKey 100.00% (23/23)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.reset 100.00% (6/6)
github.com/conformal/btcd/addrmgr/network.go IsRFC5737 100.00% (4/4)
github.com/conformal/btcd/addrmgr/network.go IsRFC1918 100.00% (4/4)
github.com/conformal/btcd/addrmgr/addrmanager.go New 100.00% (3/3)
github.com/conformal/btcd/addrmgr/addrmanager.go NetAddressKey 100.00% (2/2)
github.com/conformal/btcd/addrmgr/network.go IsRFC4862 100.00% (1/1)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.numAddresses 100.00% (1/1)
github.com/conformal/btcd/addrmgr/log.go init 100.00% (1/1)
github.com/conformal/btcd/addrmgr/log.go DisableLog 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go ipNet 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsIPv4 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsLocal 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsOnionCatTor 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC2544 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC3849 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC3927 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC3964 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC4193 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC4380 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC4843 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC6052 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC6145 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRFC6598 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsValid 100.00% (1/1)
github.com/conformal/btcd/addrmgr/network.go IsRoutable 100.00% (1/1)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.GetBestLocalAddress 94.74% (18/19)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddLocalAddress 90.91% (10/11)
github.com/conformal/btcd/addrmgr/addrmanager.go getReachabilityFrom 51.52% (17/33)
github.com/conformal/btcd/addrmgr/addrmanager.go ipString 50.00% (2/4)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.GetAddress 9.30% (4/43)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.deserializePeers 0.00% (0/50)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Good 0.00% (0/44)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.savePeers 0.00% (0/39)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.updateAddress 0.00% (0/30)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.expireNew 0.00% (0/22)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddressCache 0.00% (0/16)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.HostToNetAddress 0.00% (0/15)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.getNewBucket 0.00% (0/15)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddAddressByIP 0.00% (0/14)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.getTriedBucket 0.00% (0/14)
github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.chance 0.00% (0/13)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.loadPeers 0.00% (0/11)
github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.isBad 0.00% (0/11)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Connected 0.00% (0/10)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.addressHandler 0.00% (0/9)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.pickTried 0.00% (0/8)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.DeserializeNetAddress 0.00% (0/7)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Stop 0.00% (0/7)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Attempt 0.00% (0/7)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.Start 0.00% (0/6)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddAddresses 0.00% (0/4)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.NeedMoreAddresses 0.00% (0/3)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.NumAddresses 0.00% (0/3)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.AddAddress 0.00% (0/3)
github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.LastAttempt 0.00% (0/1)
github.com/conformal/btcd/addrmgr/knownaddress.go knownAddress.NetAddress 0.00% (0/1)
github.com/conformal/btcd/addrmgr/addrmanager.go AddrManager.find 0.00% (0/1)
github.com/conformal/btcd/addrmgr/log.go UseLogger 0.00% (0/1)
github.com/conformal/btcd/addrmgr --------------------------------- 21.04% (113/537)

app/app.go Normal file

@@ -0,0 +1,248 @@
package app
import (
"fmt"
"sync/atomic"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/domain/blockdag/indexers"
"github.com/kaspanet/kaspad/domain/mempool"
"github.com/kaspanet/kaspad/domain/mining"
"github.com/kaspanet/kaspad/domain/txscript"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/db/dbaccess"
"github.com/kaspanet/kaspad/infrastructure/network/connmanager"
"github.com/kaspanet/kaspad/infrastructure/network/dnsseed"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/kaspanet/kaspad/infrastructure/network/rpc"
"github.com/kaspanet/kaspad/infrastructure/os/signal"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/panics"
)
// App is a wrapper for all the kaspad services
type App struct {
cfg *config.Config
rpcServer *rpc.Server
addressManager *addressmanager.AddressManager
protocolManager *protocol.Manager
connectionManager *connmanager.ConnectionManager
netAdapter *netadapter.NetAdapter
started, shutdown int32
}
// Start launches all the kaspad services.
func (a *App) Start() {
// Already started?
if atomic.AddInt32(&a.started, 1) != 1 {
return
}
log.Trace("Starting kaspad")
err := a.protocolManager.Start()
if err != nil {
panics.Exit(log, fmt.Sprintf("Error starting the p2p protocol: %+v", err))
}
a.maybeSeedFromDNS()
a.connectionManager.Start()
if !a.cfg.DisableRPC {
a.rpcServer.Start()
}
}
// Stop gracefully shuts down all the kaspad services.
func (a *App) Stop() {
// Make sure this only happens once.
if atomic.AddInt32(&a.shutdown, 1) != 1 {
log.Infof("Kaspad is already in the process of shutting down")
return
}
log.Warnf("Kaspad shutting down")
a.connectionManager.Stop()
err := a.protocolManager.Stop()
if err != nil {
log.Errorf("Error stopping the p2p protocol: %+v", err)
}
// Shutdown the RPC server if it's not disabled.
if !a.cfg.DisableRPC {
err := a.rpcServer.Stop()
if err != nil {
log.Errorf("Error stopping rpcServer: %+v", err)
}
}
err = a.addressManager.Stop()
if err != nil {
log.Errorf("Error stopping address manager: %s", err)
}
return
}
// New returns a new App instance configured with the given config, database
// context and interrupt channel. Use Start to begin accepting connections
// from peers.
func New(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, interrupt <-chan struct{}) (*App, error) {
indexManager, acceptanceIndex := setupIndexes(cfg)
sigCache := txscript.NewSigCache(cfg.SigCacheMaxSize)
// Create a new block DAG instance with the appropriate configuration.
dag, err := setupDAG(cfg, databaseContext, interrupt, sigCache, indexManager)
if err != nil {
return nil, err
}
txMempool := setupMempool(cfg, dag, sigCache)
netAdapter, err := netadapter.NewNetAdapter(cfg)
if err != nil {
return nil, err
}
addressManager, err := addressmanager.New(cfg, databaseContext)
if err != nil {
return nil, err
}
connectionManager, err := connmanager.New(cfg, netAdapter, addressManager)
if err != nil {
return nil, err
}
protocolManager, err := protocol.NewManager(cfg, dag, netAdapter, addressManager, txMempool, connectionManager)
if err != nil {
return nil, err
}
rpcServer, err := setupRPC(
cfg, dag, txMempool, sigCache, acceptanceIndex, connectionManager, addressManager, protocolManager)
if err != nil {
return nil, err
}
return &App{
cfg: cfg,
rpcServer: rpcServer,
protocolManager: protocolManager,
connectionManager: connectionManager,
netAdapter: netAdapter,
addressManager: addressManager,
}, nil
}
func (a *App) maybeSeedFromDNS() {
if !a.cfg.DisableDNSSeed {
dnsseed.SeedFromDNS(a.cfg.NetParams(), a.cfg.DNSSeed, appmessage.SFNodeNetwork, false, nil,
a.cfg.Lookup, func(addresses []*appmessage.NetAddress) {
// Kaspad uses a lookup of the DNS seeder here. Since the seeder returns
// IPs of nodes and not its own IP, we cannot know the real IP of the
// source, so we take the first returned address as the source.
a.addressManager.AddAddresses(addresses, addresses[0], nil)
})
}
}
func setupDAG(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, interrupt <-chan struct{},
sigCache *txscript.SigCache, indexManager blockdag.IndexManager) (*blockdag.BlockDAG, error) {
dag, err := blockdag.New(&blockdag.Config{
Interrupt: interrupt,
DatabaseContext: databaseContext,
DAGParams: cfg.NetParams(),
TimeSource: blockdag.NewTimeSource(),
SigCache: sigCache,
IndexManager: indexManager,
SubnetworkID: cfg.SubnetworkID,
})
return dag, err
}
func setupIndexes(cfg *config.Config) (blockdag.IndexManager, *indexers.AcceptanceIndex) {
// Create indexes if needed.
var indexes []indexers.Indexer
var acceptanceIndex *indexers.AcceptanceIndex
if cfg.AcceptanceIndex {
log.Info("acceptance index is enabled")
acceptanceIndex = indexers.NewAcceptanceIndex()
indexes = append(indexes, acceptanceIndex)
}
// Create an index manager if any of the optional indexes are enabled.
if len(indexes) == 0 {
return nil, nil
}
indexManager := indexers.NewManager(indexes)
return indexManager, acceptanceIndex
}
func setupMempool(cfg *config.Config, dag *blockdag.BlockDAG, sigCache *txscript.SigCache) *mempool.TxPool {
mempoolConfig := mempool.Config{
Policy: mempool.Policy{
AcceptNonStd: cfg.RelayNonStd,
MaxOrphanTxs: cfg.MaxOrphanTxs,
MaxOrphanTxSize: config.DefaultMaxOrphanTxSize,
MinRelayTxFee: cfg.MinRelayTxFee,
MaxTxVersion: 1,
},
CalcSequenceLockNoLock: func(tx *util.Tx, utxoSet blockdag.UTXOSet) (*blockdag.SequenceLock, error) {
return dag.CalcSequenceLockNoLock(tx, utxoSet)
},
SigCache: sigCache,
DAG: dag,
}
return mempool.New(&mempoolConfig)
}
func setupRPC(cfg *config.Config,
dag *blockdag.BlockDAG,
txMempool *mempool.TxPool,
sigCache *txscript.SigCache,
acceptanceIndex *indexers.AcceptanceIndex,
connectionManager *connmanager.ConnectionManager,
addressManager *addressmanager.AddressManager,
protocolManager *protocol.Manager) (*rpc.Server, error) {
if !cfg.DisableRPC {
policy := mining.Policy{
BlockMaxMass: cfg.BlockMaxMass,
}
blockTemplateGenerator := mining.NewBlkTmplGenerator(&policy, txMempool, dag, sigCache)
rpcServer, err := rpc.NewRPCServer(cfg, dag, txMempool, acceptanceIndex, blockTemplateGenerator,
connectionManager, addressManager, protocolManager)
if err != nil {
return nil, err
}
// Signal process shutdown when the RPC server requests it.
spawn("setupRPC-handleShutdownRequest", func() {
<-rpcServer.RequestedProcessShutdown()
signal.ShutdownRequestChannel <- struct{}{}
})
return rpcServer, nil
}
return nil, nil
}
// P2PNodeID returns the network ID associated with this App
func (a *App) P2PNodeID() *id.ID {
return a.netAdapter.ID()
}
// AddressManager returns the AddressManager associated with this App
func (a *App) AddressManager() *addressmanager.AddressManager {
return a.addressManager
}
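
To give a sense of how this new App type is meant to be wired up, here is a hypothetical sketch based only on the API shown above; cfg, databaseContext and interrupt are assumed to be prepared by the caller, and the real kaspad entry point does more than this:
```Go
package main

import (
	"github.com/kaspanet/kaspad/app"
	"github.com/kaspanet/kaspad/infrastructure/config"
	"github.com/kaspanet/kaspad/infrastructure/db/dbaccess"
)

// runKaspad constructs the App, starts all services, and blocks until an
// interrupt is requested. Hypothetical wiring only.
func runKaspad(cfg *config.Config, databaseContext *dbaccess.DatabaseContext,
	interrupt <-chan struct{}) error {

	kaspadApp, err := app.New(cfg, databaseContext, interrupt)
	if err != nil {
		return err
	}

	kaspadApp.Start()
	defer kaspadApp.Stop()

	// Block until shutdown is requested (e.g. Ctrl+C).
	<-interrupt
	return nil
}
```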

app/appmessage/README.md Normal file

@@ -0,0 +1,72 @@
wire
====
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad/wire)
=======
Package wire implements the kaspa wire protocol.
## Kaspa Message Overview
The kaspa protocol consists of exchanging messages between peers. Each message
is preceded by a header which identifies information about it such as which
kaspa network it is a part of, its type, how big it is, and a checksum to
verify validity. All encoding and decoding of message headers is handled by this
package.
To accomplish this, there is a generic interface for kaspa messages named
`Message` which allows messages of any type to be read, written, or passed
around through channels, functions, etc. In addition, concrete implementations
of almost all kaspa messages are provided. All of the details of marshalling and
unmarshalling to and from the wire using kaspa encoding are handled so the
caller doesn't have to concern themselves with the specifics.
## Reading Messages Example
In order to unmarshal kaspa messages from the wire, use the `ReadMessage`
function. It accepts any `io.Reader`, but typically this will be a `net.Conn`
to a remote node running a kaspa peer. Example syntax is:
```Go
// Use the most recent protocol version supported by the package and the
// main kaspa network.
pver := wire.ProtocolVersion
kaspanet := wire.Mainnet
// Reads and validates the next kaspa message from conn using the
// protocol version pver and the kaspa network kaspanet. The returns
// are an appmessage.Message, a []byte which contains the unmarshalled
// raw payload, and a possible error.
msg, rawPayload, err := wire.ReadMessage(conn, pver, kaspanet)
if err != nil {
// Log and handle the error
}
```
See the package documentation for details on determining the message type.
## Writing Messages Example
In order to marshal kaspa messages to the wire, use the `WriteMessage`
function. It accepts any `io.Writer`, but typically this will be a `net.Conn`
to a remote node running a kaspa peer. Example syntax to request addresses
from a remote peer is:
```Go
// Use the most recent protocol version supported by the package and the
// main kaspa network.
pver := wire.ProtocolVersion
kaspanet := wire.Mainnet
// Create a new getaddr kaspa message.
msg := wire.NewMsgGetAddr()
// Writes a kaspa message msg to conn using the protocol version
// pver, and the kaspa network kaspanet. The return is a possible
// error.
err := wire.WriteMessage(conn, msg, pver, kaspanet)
if err != nil {
// Log and handle the error
}
```


@@ -0,0 +1,24 @@
package appmessage
import "time"
type baseMessage struct {
messageNumber uint64
receivedAt time.Time
}
func (b *baseMessage) MessageNumber() uint64 {
return b.messageNumber
}
func (b *baseMessage) SetMessageNumber(messageNumber uint64) {
b.messageNumber = messageNumber
}
func (b *baseMessage) ReceivedAt() time.Time {
return b.receivedAt
}
func (b *baseMessage) SetReceivedAt(receivedAt time.Time) {
b.receivedAt = receivedAt
}
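
For context, the intent is for concrete message types in the same appmessage package to embed baseMessage so they pick up this message-number and received-time bookkeeping for free. A hypothetical sketch (the Nonce field mirrors a typical ping message and is not taken from this change):
```Go
// MsgPing is a hypothetical example of a concrete message type embedding
// baseMessage to inherit the MessageNumber/ReceivedAt accessors above.
type MsgPing struct {
	baseMessage
	Nonce uint64
}
```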


@@ -2,26 +2,24 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
package appmessage
import (
"bytes"
"compress/bzip2"
"fmt"
"io/ioutil"
"math"
"net"
"os"
"testing"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/kaspanet/kaspad/util/daghash"
)
// genesisCoinbaseTx is the coinbase transaction for the genesis blocks for
// the main network, regression test network, and test network (version 3).
// the main network and test network.
var genesisCoinbaseTxIns = []*TxIn{
{
PreviousOutPoint: OutPoint{
PreviousOutpoint: Outpoint{
TxID: daghash.TxID{},
Index: 0xffffffff,
},
@@ -43,7 +41,7 @@ var genesisCoinbaseTxIns = []*TxIn{
var genesisCoinbaseTxOuts = []*TxOut{
{
Value: 0x12a05f200,
PkScript: []byte{
ScriptPubKey: []byte{
0x41, 0x04, 0x67, 0x8a, 0xfd, 0xb0, 0xfe, 0x55, /* |A.g....U| */
0x48, 0x27, 0x19, 0x67, 0xf1, 0xa6, 0x71, 0x30, /* |H'.g..q0| */
0xb7, 0x10, 0x5c, 0xd6, 0xa8, 0x28, 0xe0, 0x39, /* |..\..(.9| */
@@ -172,9 +170,9 @@ func BenchmarkWriteVarStr10(b *testing.B) {
}
}
// BenchmarkReadOutPoint performs a benchmark on how long it takes to read a
// transaction output point.
func BenchmarkReadOutPoint(b *testing.B) {
// BenchmarkReadOutpoint performs a benchmark on how long it takes to read a
// transaction outpoint.
func BenchmarkReadOutpoint(b *testing.B) {
buf := []byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -183,22 +181,22 @@ func BenchmarkReadOutPoint(b *testing.B) {
0xff, 0xff, 0xff, 0xff, // Previous output index
}
r := bytes.NewReader(buf)
var op OutPoint
var op Outpoint
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
readOutPoint(r, 0, 0, &op)
readOutpoint(r, 0, 0, &op)
}
}
// BenchmarkWriteOutPoint performs a benchmark on how long it takes to write a
// transaction output point.
func BenchmarkWriteOutPoint(b *testing.B) {
op := &OutPoint{
// BenchmarkWriteOutpoint performs a benchmark on how long it takes to write a
// transaction outpoint.
func BenchmarkWriteOutpoint(b *testing.B) {
op := &Outpoint{
TxID: daghash.TxID{},
Index: 0,
}
for i := 0; i < b.N; i++ {
writeOutPoint(ioutil.Discard, 0, 0, op)
writeOutpoint(ioutil.Discard, 0, 0, op)
}
}
@@ -207,7 +205,7 @@ func BenchmarkWriteOutPoint(b *testing.B) {
func BenchmarkReadTxOut(b *testing.B) {
buf := []byte{
0x00, 0xf2, 0x05, 0x2a, 0x01, 0x00, 0x00, 0x00, // Transaction amount
0x43, // Varint for length of pk script
0x43, // Varint for length of scriptPubKey
0x41, // OP_DATA_65
0x04, 0x96, 0xb5, 0x38, 0xe8, 0x53, 0x51, 0x9c,
0x72, 0x6a, 0x2c, 0x91, 0xe6, 0x1e, 0xc1, 0x16,
@@ -225,7 +223,7 @@ func BenchmarkReadTxOut(b *testing.B) {
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
readTxOut(r, 0, 0, &txOut)
scriptPool.Return(txOut.PkScript)
scriptPool.Return(txOut.ScriptPubKey)
}
}
@@ -285,7 +283,7 @@ func BenchmarkDeserializeTxSmall(b *testing.B) {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // Sequence
0x01, // Varint for number of output transactions
0x00, 0xf2, 0x05, 0x2a, 0x01, 0x00, 0x00, 0x00, // Transaction amount
0x43, // Varint for length of pk script
0x43, // Varint for length of scriptPubKey
0x41, // OP_DATA_65
0x04, 0x96, 0xb5, 0x38, 0xe8, 0x53, 0x51, 0x9c,
0x72, 0x6a, 0x2c, 0x91, 0xe6, 0x1e, 0xc1, 0x16,
@@ -311,8 +309,6 @@ func BenchmarkDeserializeTxSmall(b *testing.B) {
// BenchmarkDeserializeTxLarge performs a benchmark on how long it takes to
// deserialize a very large transaction.
func BenchmarkDeserializeTxLarge(b *testing.B) {
// tx bb41a757f405890fb0f5856228e23b715702d714d59bf2b1feb70d8b2b4e3e08
// from the main block chain.
fi, err := os.Open("testdata/megatx.bin.bz2")
if err != nil {
b.Fatalf("Failed to read transaction data: %v", err)
@@ -356,7 +352,7 @@ func BenchmarkReadBlockHeader(b *testing.B) {
0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, // MerkleRoot
0x29, 0xab, 0x5f, 0x49, 0x00, 0x00, 0x00, 0x00, // Timestamp
0xff, 0xff, 0x00, 0x1d, // Bits
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce. TODO: (Ori) Replace to a real nonce
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
0x00, // TxnCount Varint
}
r := bytes.NewReader(buf)
@@ -376,230 +372,6 @@ func BenchmarkWriteBlockHeader(b *testing.B) {
}
}
// BenchmarkDecodeGetHeaders performs a benchmark on how long it takes to
// decode a getheaders message with the maximum number of block locator hashes.
func BenchmarkDecodeGetHeaders(b *testing.B) {
// Create a message with the maximum number of block locators.
pver := ProtocolVersion
var m MsgGetHeaders
for i := 0; i < MaxBlockLocatorsPerMsg; i++ {
hash, err := daghash.NewHashFromStr(fmt.Sprintf("%x", i))
if err != nil {
b.Fatalf("NewHashFromStr: unexpected error: %v", err)
}
m.AddBlockLocatorHash(hash)
}
// Serialize it so the bytes are available to test the decode below.
var bb bytes.Buffer
if err := m.BtcEncode(&bb, pver); err != nil {
b.Fatalf("MsgGetHeaders.BtcEncode: unexpected error: %v", err)
}
buf := bb.Bytes()
r := bytes.NewReader(buf)
var msg MsgGetHeaders
b.ResetTimer()
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
msg.BtcDecode(r, pver)
}
}
// BenchmarkDecodeHeaders performs a benchmark on how long it takes to
// decode a headers message with the maximum number of headers and maximum number of
// parent hashes per header.
func BenchmarkDecodeHeaders(b *testing.B) {
// Create a message with the maximum number of headers.
pver := ProtocolVersion
var m MsgHeaders
for i := 0; i < MaxBlockHeadersPerMsg; i++ {
hash, err := daghash.NewHashFromStr(fmt.Sprintf("%x", i))
if err != nil {
b.Fatalf("NewHashFromStr: unexpected error: %v", err)
}
parentHashes := make([]*daghash.Hash, MaxNumParentBlocks)
for j := byte(0); j < MaxNumParentBlocks; j++ {
hash, err := daghash.NewHashFromStr(fmt.Sprintf("%x%x", i, j))
if err != nil {
b.Fatalf("NewHashFromStr: unexpected error: %v", err)
}
parentHashes[j] = hash
}
m.AddBlockHeader(NewBlockHeader(1, parentHashes, hash, hash, 0, uint64(i)))
}
// Serialize it so the bytes are available to test the decode below.
var bb bytes.Buffer
if err := m.BtcEncode(&bb, pver); err != nil {
b.Fatalf("MsgHeaders.BtcEncode: unexpected error: %v", err)
}
buf := bb.Bytes()
r := bytes.NewReader(buf)
var msg MsgHeaders
b.ResetTimer()
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
msg.BtcDecode(r, pver)
}
}
// BenchmarkDecodeGetBlocks performs a benchmark on how long it takes to
// decode a getblocks message with the maximum number of block locator hashes.
func BenchmarkDecodeGetBlocks(b *testing.B) {
// Create a message with the maximum number of block locators.
pver := ProtocolVersion
var m MsgGetBlocks
for i := 0; i < MaxBlockLocatorsPerMsg; i++ {
hash, err := daghash.NewHashFromStr(fmt.Sprintf("%x", i))
if err != nil {
b.Fatalf("NewHashFromStr: unexpected error: %v", err)
}
m.AddBlockLocatorHash(hash)
}
// Serialize it so the bytes are available to test the decode below.
var bb bytes.Buffer
if err := m.BtcEncode(&bb, pver); err != nil {
b.Fatalf("MsgGetBlocks.BtcEncode: unexpected error: %v", err)
}
buf := bb.Bytes()
r := bytes.NewReader(buf)
var msg MsgGetBlocks
b.ResetTimer()
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
msg.BtcDecode(r, pver)
}
}
// BenchmarkDecodeAddr performs a benchmark on how long it takes to decode an
// addr message with the maximum number of addresses.
func BenchmarkDecodeAddr(b *testing.B) {
// Create a message with the maximum number of addresses.
pver := ProtocolVersion
ip := net.ParseIP("127.0.0.1")
ma := NewMsgAddr(false, nil)
for port := uint16(0); port < MaxAddrPerMsg; port++ {
ma.AddAddress(NewNetAddressIPPort(ip, port, SFNodeNetwork))
}
// Serialize it so the bytes are available to test the decode below.
var bb bytes.Buffer
if err := ma.BtcEncode(&bb, pver); err != nil {
b.Fatalf("MsgAddr.BtcEncode: unexpected error: %v", err)
}
buf := bb.Bytes()
r := bytes.NewReader(buf)
var msg MsgAddr
b.ResetTimer()
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
msg.BtcDecode(r, pver)
}
}
// BenchmarkDecodeInv performs a benchmark on how long it takes to decode an inv
// message with the maximum number of entries.
func BenchmarkDecodeInv(b *testing.B) {
// Create a message with the maximum number of entries.
pver := ProtocolVersion
var m MsgInv
for i := 0; i < MaxInvPerMsg; i++ {
hash, err := daghash.NewHashFromStr(fmt.Sprintf("%x", i))
if err != nil {
b.Fatalf("NewHashFromStr: unexpected error: %v", err)
}
m.AddInvVect(NewInvVect(InvTypeBlock, hash))
}
// Serialize it so the bytes are available to test the decode below.
var bb bytes.Buffer
if err := m.BtcEncode(&bb, pver); err != nil {
b.Fatalf("MsgInv.BtcEncode: unexpected error: %v", err)
}
buf := bb.Bytes()
r := bytes.NewReader(buf)
var msg MsgInv
b.ResetTimer()
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
msg.BtcDecode(r, pver)
}
}
// BenchmarkDecodeNotFound performs a benchmark on how long it takes to decode
// a notfound message with the maximum number of entries.
func BenchmarkDecodeNotFound(b *testing.B) {
// Create a message with the maximum number of entries.
pver := ProtocolVersion
var m MsgNotFound
for i := 0; i < MaxInvPerMsg; i++ {
hash, err := daghash.NewHashFromStr(fmt.Sprintf("%x", i))
if err != nil {
b.Fatalf("NewHashFromStr: unexpected error: %v", err)
}
m.AddInvVect(NewInvVect(InvTypeBlock, hash))
}
// Serialize it so the bytes are available to test the decode below.
var bb bytes.Buffer
if err := m.BtcEncode(&bb, pver); err != nil {
b.Fatalf("MsgNotFound.BtcEncode: unexpected error: %v", err)
}
buf := bb.Bytes()
r := bytes.NewReader(buf)
var msg MsgNotFound
b.ResetTimer()
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
msg.BtcDecode(r, pver)
}
}
// BenchmarkDecodeMerkleBlock performs a benchmark on how long it takes to
// decode a reasonably sized merkleblock message.
func BenchmarkDecodeMerkleBlock(b *testing.B) {
// Create a message with random data.
pver := ProtocolVersion
var m MsgMerkleBlock
hash, err := daghash.NewHashFromStr(fmt.Sprintf("%x", 10000))
if err != nil {
b.Fatalf("NewHashFromStr: unexpected error: %v", err)
}
m.Header = *NewBlockHeader(1, []*daghash.Hash{hash}, hash, hash, 0, uint64(10000))
for i := 0; i < 105; i++ {
hash, err := daghash.NewHashFromStr(fmt.Sprintf("%x", i))
if err != nil {
b.Fatalf("NewHashFromStr: unexpected error: %v", err)
}
m.AddTxHash(hash)
if i%8 == 0 {
m.Flags = append(m.Flags, uint8(i))
}
}
// Serialize it so the bytes are available to test the decode below.
var bb bytes.Buffer
if err := m.BtcEncode(&bb, pver); err != nil {
b.Fatalf("MsgMerkleBlock.BtcEncode: unexpected error: %v", err)
}
buf := bb.Bytes()
r := bytes.NewReader(buf)
var msg MsgMerkleBlock
b.ResetTimer()
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
msg.BtcDecode(r, pver)
}
}
// BenchmarkTxHash performs a benchmark on how long it takes to hash a
// transaction.
func BenchmarkTxHash(b *testing.B) {
@@ -639,3 +411,21 @@ func BenchmarkDoubleHashH(b *testing.B) {
_ = daghash.DoubleHashH(txBytes)
}
}
// BenchmarkDoubleHashWriter performs a benchmark on how long it takes to perform
// a double hash via the writer returning a daghash.Hash.
func BenchmarkDoubleHashWriter(b *testing.B) {
var buf bytes.Buffer
err := genesisCoinbaseTx.Serialize(&buf)
if err != nil {
b.Fatalf("Serialize: unexpected error: %+v", err)
}
txBytes := buf.Bytes()
b.ResetTimer()
for i := 0; i < b.N; i++ {
writer := daghash.NewDoubleHashWriter()
_, _ = writer.Write(txBytes)
writer.Finalize()
}
}


@@ -2,23 +2,24 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
package appmessage
import (
"bytes"
"fmt"
"io"
"time"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/mstime"
)
// BaseBlockHeaderPayload is the base number of bytes a block header can be,
// not including the list of parent block headers.
// Version 4 bytes + Timestamp 8 bytes + Bits 4 bytes + Nonce 8 bytes +
// + NumParentBlocks 1 byte + HashMerkleRoot hash + IDMerkleRoot hash.
// + NumParentBlocks 1 byte + HashMerkleRoot hash +
// + AcceptedIDMerkleRoot hash + UTXOCommitment hash.
// To get total size of block header len(ParentHashes) * daghash.HashSize should be
// added to this value
const BaseBlockHeaderPayload = 25 + 2*(daghash.HashSize)
const BaseBlockHeaderPayload = 25 + 3*(daghash.HashSize)
// MaxNumParentBlocks is the maximum number of parent blocks a block can reference.
// Currently set to 255 as the maximum number NumParentBlocks can be due to it being a byte
@@ -28,10 +29,10 @@ const MaxNumParentBlocks = 255
// BaseBlockHeaderPayload + up to MaxNumParentBlocks hashes of parent blocks
const MaxBlockHeaderPayload = BaseBlockHeaderPayload + (MaxNumParentBlocks * daghash.HashSize)
// BlockHeader defines information about a block and is used in the bitcoin
// BlockHeader defines information about a block and is used in the kaspa
// block (MsgBlock) and headers (MsgHeader) messages.
type BlockHeader struct {
// Version of the block. This is not the same as the protocol version.
// Version of the block. This is not the same as the protocol version.
Version int32
// Hashes of the parent block headers in the blockDAG.
@@ -40,11 +41,15 @@ type BlockHeader struct {
// HashMerkleRoot is the merkle tree reference to hash of all transactions for the block.
HashMerkleRoot *daghash.Hash
// IDMerkleRoot is the merkle tree reference to hash of all transactions' IDs for the block.
IDMerkleRoot *daghash.Hash
// AcceptedIDMerkleRoot is the merkle tree reference to the hash of all
// transactions accepted from the block's blues.
AcceptedIDMerkleRoot *daghash.Hash
// UTXOCommitment is an ECMH UTXO commitment to the block UTXO.
UTXOCommitment *daghash.Hash
// Time the block was created.
Timestamp time.Time
Timestamp mstime.Time
// Difficulty target for the block.
Bits uint32
@@ -61,22 +66,18 @@ func (h *BlockHeader) NumParentBlocks() byte {
// BlockHash computes the block identifier hash for the given block header.
func (h *BlockHeader) BlockHash() *daghash.Hash {
// Encode the header and double sha256 everything prior to the number of
// transactions. Ignore the error returns since there is no way the
// encode could fail except being out of memory which would cause a
// run-time panic.
buf := bytes.NewBuffer(make([]byte, 0, BaseBlockHeaderPayload+h.NumParentBlocks()))
_ = writeBlockHeader(buf, 0, h)
return daghash.DoubleHashP(buf.Bytes())
}
// SelectedParentHash returns the hash of the selected block header.
func (h *BlockHeader) SelectedParentHash() *daghash.Hash {
if h.NumParentBlocks() == 0 {
return nil
// transactions.
writer := daghash.NewDoubleHashWriter()
err := writeBlockHeader(writer, 0, h)
if err != nil {
// This could only happen if the writer returned an error, and this
// writer should never return one (no allocations or possible failures);
// the only non-writer error path here is an unknown type in `WriteElement`.
panic(fmt.Sprintf("BlockHash() failed. this should never fail unless BlockHeader was changed. err: %+v", err))
}
return h.ParentHashes[0]
res := writer.Finalize()
return &res
}
// IsGenesis returns true iff this block is a genesis block
@@ -84,19 +85,19 @@ func (h *BlockHeader) IsGenesis() bool {
return h.NumParentBlocks() == 0
}
// BtcDecode decodes r using the bitcoin protocol encoding into the receiver.
// KaspaDecode decodes r using the kaspa protocol encoding into the receiver.
// This is part of the Message interface implementation.
// See Deserialize for decoding block headers stored to disk, such as in a
// database, as opposed to decoding block headers from the wire.
func (h *BlockHeader) BtcDecode(r io.Reader, pver uint32) error {
// database, as opposed to decoding block headers from the appmessage.
func (h *BlockHeader) KaspaDecode(r io.Reader, pver uint32) error {
return readBlockHeader(r, pver, h)
}
// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
// KaspaEncode encodes the receiver to w using the kaspa protocol encoding.
// This is part of the Message interface implementation.
// See Serialize for encoding block headers to be stored to disk, such as in a
// database, as opposed to encoding block headers for the wire.
func (h *BlockHeader) BtcEncode(w io.Writer, pver uint32) error {
// database, as opposed to encoding block headers for the appmessage.
func (h *BlockHeader) KaspaEncode(w io.Writer, pver uint32) error {
return writeBlockHeader(w, pver, h)
}
@@ -104,8 +105,8 @@ func (h *BlockHeader) BtcEncode(w io.Writer, pver uint32) error {
// that is suitable for long-term storage such as a database while respecting
// the Version field.
func (h *BlockHeader) Deserialize(r io.Reader) error {
// At the current time, there is no difference between the wire encoding
// at protocol version 0 and the stable long-term storage format. As
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of readBlockHeader.
return readBlockHeader(r, 0, h)
}
@@ -114,8 +115,8 @@ func (h *BlockHeader) Deserialize(r io.Reader) error {
// that is suitable for long-term storage such as a database while respecting
// the Version field.
func (h *BlockHeader) Serialize(w io.Writer) error {
// At the current time, there is no difference between the wire encoding
// at protocol version 0 and the stable long-term storage format. As
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of writeBlockHeader.
return writeBlockHeader(w, 0, h)
}
@@ -127,27 +128,28 @@ func (h *BlockHeader) SerializeSize() int {
}
// NewBlockHeader returns a new BlockHeader using the provided version, previous
// block hash, hash merkle root, ID merkle root difficulty bits, and nonce used to generate the
// block hash, hash merkle root, accepted ID merkle root, difficulty bits, and nonce used to generate the
// block with defaults or calculated values for the remaining fields.
func NewBlockHeader(version int32, parentHashes []*daghash.Hash, hashMerkleRoot *daghash.Hash,
idMerkleRoot *daghash.Hash, bits uint32, nonce uint64) *BlockHeader {
acceptedIDMerkleRoot *daghash.Hash, utxoCommitment *daghash.Hash, bits uint32, nonce uint64) *BlockHeader {
// Limit the timestamp to one second precision since the protocol
// Limit the timestamp to one millisecond precision since the protocol
// doesn't support better.
return &BlockHeader{
Version: version,
ParentHashes: parentHashes,
HashMerkleRoot: hashMerkleRoot,
IDMerkleRoot: idMerkleRoot,
Timestamp: time.Unix(time.Now().Unix(), 0),
Bits: bits,
Nonce: nonce,
Version: version,
ParentHashes: parentHashes,
HashMerkleRoot: hashMerkleRoot,
AcceptedIDMerkleRoot: acceptedIDMerkleRoot,
UTXOCommitment: utxoCommitment,
Timestamp: mstime.Now(),
Bits: bits,
Nonce: nonce,
}
}
// readBlockHeader reads a bitcoin block header from r. See Deserialize for
// readBlockHeader reads a kaspa block header from r. See Deserialize for
// decoding block headers stored to disk, such as in a database, as opposed to
// decoding from the wire.
// decoding from the appmessage.
func readBlockHeader(r io.Reader, pver uint32, bh *BlockHeader) error {
var numParentBlocks byte
err := readElements(r, &bh.Version, &numParentBlocks)
@@ -165,15 +167,17 @@ func readBlockHeader(r io.Reader, pver uint32, bh *BlockHeader) error {
bh.ParentHashes[i] = hash
}
bh.HashMerkleRoot = &daghash.Hash{}
bh.IDMerkleRoot = &daghash.Hash{}
return readElements(r, bh.HashMerkleRoot, bh.IDMerkleRoot, (*int64Time)(&bh.Timestamp), &bh.Bits, &bh.Nonce)
bh.AcceptedIDMerkleRoot = &daghash.Hash{}
bh.UTXOCommitment = &daghash.Hash{}
return readElements(r, bh.HashMerkleRoot, bh.AcceptedIDMerkleRoot, bh.UTXOCommitment,
(*int64Time)(&bh.Timestamp), &bh.Bits, &bh.Nonce)
}
// writeBlockHeader writes a bitcoin block header to w. See Serialize for
// writeBlockHeader writes a kaspa block header to w. See Serialize for
// encoding block headers to be stored to disk, such as in a database, as
// opposed to encoding for the wire.
// opposed to encoding for the appmessage.
func writeBlockHeader(w io.Writer, pver uint32, bh *BlockHeader) error {
sec := int64(bh.Timestamp.Unix())
timestamp := bh.Timestamp.UnixMilliseconds()
if err := writeElements(w, bh.Version, bh.NumParentBlocks()); err != nil {
return err
}
@@ -182,6 +186,5 @@ func writeBlockHeader(w io.Writer, pver uint32, bh *BlockHeader) error {
return err
}
}
return writeElements(w, bh.HashMerkleRoot, bh.IDMerkleRoot,
sec, bh.Bits, bh.Nonce)
return writeElements(w, bh.HashMerkleRoot, bh.AcceptedIDMerkleRoot, bh.UTXOCommitment, timestamp, bh.Bits, bh.Nonce)
}
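To illustrate the widened constructor, here is a minimal, hypothetical caller-side sketch of NewBlockHeader and Serialize as they look after this change; the import paths and placeholder hash values are assumptions for illustration, not taken from the diff itself.

package main

import (
	"bytes"
	"fmt"

	"github.com/kaspanet/kaspad/app/appmessage" // path assumed from the file layout in this diff
	"github.com/kaspanet/kaspad/util/daghash"
)

func main() {
	parents := []*daghash.Hash{{0x01}, {0x02}} // placeholder parent hashes
	header := appmessage.NewBlockHeader(
		1,                   // version
		parents,             // parent hashes
		&daghash.Hash{0x03}, // hash merkle root (placeholder)
		&daghash.ZeroHash,   // accepted ID merkle root (new parameter)
		&daghash.ZeroHash,   // UTXO commitment (new parameter)
		0x1d00ffff,          // bits
		12345,               // nonce
	)

	var buf bytes.Buffer
	if err := header.Serialize(&buf); err != nil {
		fmt.Println("serialize failed:", err)
		return
	}
	fmt.Println("serialized size:", buf.Len()) // 121 bytes plus 32 per parent hash
}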

View File

@@ -2,17 +2,16 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
package appmessage
import (
"bytes"
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/util/random"
"reflect"
"testing"
"time"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/util/random"
"github.com/davecgh/go-spew/spew"
)
// TestBlockHeader tests the BlockHeader API.
@@ -22,12 +21,12 @@ func TestBlockHeader(t *testing.T) {
t.Errorf("random.Uint64: Error generating nonce: %v", err)
}
hashes := []*daghash.Hash{mainNetGenesisHash, simNetGenesisHash}
hashes := []*daghash.Hash{mainnetGenesisHash, simnetGenesisHash}
merkleHash := mainNetGenesisMerkleRoot
idMerkleRoot := exampleIDMerkleRoot
merkleHash := mainnetGenesisMerkleRoot
acceptedIDMerkleRoot := exampleAcceptedIDMerkleRoot
bits := uint32(0x1d00ffff)
bh := NewBlockHeader(1, hashes, merkleHash, idMerkleRoot, bits, nonce)
bh := NewBlockHeader(1, hashes, merkleHash, acceptedIDMerkleRoot, exampleUTXOCommitment, bits, nonce)
// Ensure we get the same data back out.
if !reflect.DeepEqual(bh.ParentHashes, hashes) {
@@ -48,33 +47,34 @@ func TestBlockHeader(t *testing.T) {
}
}
// TestBlockHeaderWire tests the BlockHeader wire encode and decode for various
// TestBlockHeaderEncoding tests the BlockHeader appmessage encode and decode for various
// protocol versions.
func TestBlockHeaderWire(t *testing.T) {
func TestBlockHeaderEncoding(t *testing.T) {
nonce := uint64(123123) // 0x000000000001e0f3
pver := ProtocolVersion
// baseBlockHdr is used in the various tests as a baseline BlockHeader.
bits := uint32(0x1d00ffff)
baseBlockHdr := &BlockHeader{
Version: 1,
ParentHashes: []*daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
HashMerkleRoot: mainNetGenesisMerkleRoot,
IDMerkleRoot: exampleIDMerkleRoot,
Timestamp: time.Unix(0x495fab29, 0), // 2009-01-03 12:15:05 -0600 CST
Bits: bits,
Nonce: nonce,
Version: 1,
ParentHashes: []*daghash.Hash{mainnetGenesisHash, simnetGenesisHash},
HashMerkleRoot: mainnetGenesisMerkleRoot,
AcceptedIDMerkleRoot: exampleAcceptedIDMerkleRoot,
UTXOCommitment: exampleUTXOCommitment,
Timestamp: mstime.UnixMilliseconds(0x17315ed0f99),
Bits: bits,
Nonce: nonce,
}
// baseBlockHdrEncoded is the wire encoded bytes of baseBlockHdr.
// baseBlockHdrEncoded is the appmessage encoded bytes of baseBlockHdr.
baseBlockHdrEncoded := []byte{
0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumParentBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // mainNetGenesisHash
0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simNetGenesisHash
0xdc, 0x5f, 0x5b, 0x5b, 0x1d, 0xc2, 0xa7, 0x25, // mainnetGenesisHash
0x49, 0xd5, 0x1d, 0x4d, 0xee, 0xd7, 0xa4, 0x8b,
0xaf, 0xd3, 0x14, 0x4b, 0x56, 0x78, 0x98, 0xb1,
0x8c, 0xfd, 0x9f, 0x69, 0xdd, 0xcf, 0xbb, 0x63,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simnetGenesisHash
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,
@@ -82,20 +82,24 @@ func TestBlockHeaderWire(t *testing.T) {
0x32, 0x51, 0x8a, 0x88, 0xc3, 0x1b, 0xc8, 0x7f,
0x61, 0x8f, 0x76, 0x67, 0x3e, 0x2c, 0xc7, 0x7a,
0xb2, 0x12, 0x7b, 0x7a, 0xfd, 0xed, 0xa3, 0x3b,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63, // IDMerkleRoot
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x09, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C,
0x09, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C, // AcceptedIDMerkleRoot
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x29, 0xab, 0x5f, 0x49, 0x00, 0x00, 0x00, 0x00, // Timestamp
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x10, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C, // UTXOCommitment
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x99, 0x0f, 0xed, 0x15, 0x73, 0x01, 0x00, 0x00, // Timestamp
0xff, 0xff, 0x00, 0x1d, // Bits
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce. TODO: (Ori) Replace to a real nonce
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
}
tests := []struct {
in *BlockHeader // Data to encode
out *BlockHeader // Expected decoded data
buf []byte // Wire encoding
pver uint32 // Protocol version for wire encoding
buf []byte // Encoded data
pver uint32 // Protocol version for appmessage encoding
}{
// Latest protocol version.
{
@@ -108,7 +112,7 @@ func TestBlockHeaderWire(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to wire format.
// Encode to appmessage format.
var buf bytes.Buffer
err := writeBlockHeader(&buf, test.pver, test.in)
if err != nil {
@@ -122,18 +126,18 @@ func TestBlockHeaderWire(t *testing.T) {
}
buf.Reset()
err = test.in.BtcEncode(&buf, pver)
err = test.in.KaspaEncode(&buf, pver)
if err != nil {
t.Errorf("BtcEncode #%d error %v", i, err)
t.Errorf("KaspaEncode #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("BtcEncode #%d\n got: %s want: %s", i,
t.Errorf("KaspaEncode #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
// Decode the block header from wire format.
// Decode the block header from appmessage format.
var bh BlockHeader
rbuf := bytes.NewReader(test.buf)
err = readBlockHeader(rbuf, test.pver, &bh)
@@ -148,13 +152,13 @@ func TestBlockHeaderWire(t *testing.T) {
}
rbuf = bytes.NewReader(test.buf)
err = bh.BtcDecode(rbuf, pver)
err = bh.KaspaDecode(rbuf, pver)
if err != nil {
t.Errorf("BtcDecode #%d error %v", i, err)
t.Errorf("KaspaDecode #%d error %v", i, err)
continue
}
if !reflect.DeepEqual(&bh, test.out) {
t.Errorf("BtcDecode #%d\n got: %s want: %s", i,
t.Errorf("KaspaDecode #%d\n got: %s want: %s", i,
spew.Sdump(&bh), spew.Sdump(test.out))
continue
}
@@ -168,24 +172,25 @@ func TestBlockHeaderSerialize(t *testing.T) {
// baseBlockHdr is used in the various tests as a baseline BlockHeader.
bits := uint32(0x1d00ffff)
baseBlockHdr := &BlockHeader{
Version: 1,
ParentHashes: []*daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
HashMerkleRoot: mainNetGenesisMerkleRoot,
IDMerkleRoot: exampleIDMerkleRoot,
Timestamp: time.Unix(0x495fab29, 0), // 2009-01-03 12:15:05 -0600 CST
Bits: bits,
Nonce: nonce,
Version: 1,
ParentHashes: []*daghash.Hash{mainnetGenesisHash, simnetGenesisHash},
HashMerkleRoot: mainnetGenesisMerkleRoot,
AcceptedIDMerkleRoot: exampleAcceptedIDMerkleRoot,
UTXOCommitment: exampleUTXOCommitment,
Timestamp: mstime.UnixMilliseconds(0x17315ed0f99),
Bits: bits,
Nonce: nonce,
}
// baseBlockHdrEncoded is the wire encoded bytes of baseBlockHdr.
// baseBlockHdrEncoded is the appmessage encoded bytes of baseBlockHdr.
baseBlockHdrEncoded := []byte{
0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumParentBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // mainNetGenesisHash
0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simNetGenesisHash
0xdc, 0x5f, 0x5b, 0x5b, 0x1d, 0xc2, 0xa7, 0x25, // mainnetGenesisHash
0x49, 0xd5, 0x1d, 0x4d, 0xee, 0xd7, 0xa4, 0x8b,
0xaf, 0xd3, 0x14, 0x4b, 0x56, 0x78, 0x98, 0xb1,
0x8c, 0xfd, 0x9f, 0x69, 0xdd, 0xcf, 0xbb, 0x63,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simnetGenesisHash
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,
@@ -193,13 +198,17 @@ func TestBlockHeaderSerialize(t *testing.T) {
0x32, 0x51, 0x8a, 0x88, 0xc3, 0x1b, 0xc8, 0x7f,
0x61, 0x8f, 0x76, 0x67, 0x3e, 0x2c, 0xc7, 0x7a,
0xb2, 0x12, 0x7b, 0x7a, 0xfd, 0xed, 0xa3, 0x3b,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63, // IDMerkleRoot
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x09, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C,
0x09, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C, // AcceptedIDMerkleRoot
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x29, 0xab, 0x5f, 0x49, 0x00, 0x00, 0x00, 0x00, // Timestamp
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x10, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C, // UTXOCommitment
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x99, 0x0f, 0xed, 0x15, 0x73, 0x01, 0x00, 0x00, // Timestamp
0xff, 0xff, 0x00, 0x1d, // Bits
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce. TODO: (Ori) Replace to a real nonce
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
}
tests := []struct {
@@ -250,35 +259,37 @@ func TestBlockHeaderSerialize(t *testing.T) {
func TestBlockHeaderSerializeSize(t *testing.T) {
nonce := uint64(123123) // 0x1e0f3
bits := uint32(0x1d00ffff)
timestamp := time.Unix(0x495fab29, 0) // 2009-01-03 12:15:05 -0600 CST
timestamp := mstime.UnixMilliseconds(0x495fab29000)
baseBlockHdr := &BlockHeader{
Version: 1,
ParentHashes: []*daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
HashMerkleRoot: mainNetGenesisMerkleRoot,
IDMerkleRoot: mainNetGenesisMerkleRoot,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
Version: 1,
ParentHashes: []*daghash.Hash{mainnetGenesisHash, simnetGenesisHash},
HashMerkleRoot: mainnetGenesisMerkleRoot,
AcceptedIDMerkleRoot: &daghash.ZeroHash,
UTXOCommitment: &daghash.ZeroHash,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
}
genesisBlockHdr := &BlockHeader{
Version: 1,
ParentHashes: []*daghash.Hash{},
HashMerkleRoot: mainNetGenesisMerkleRoot,
IDMerkleRoot: mainNetGenesisMerkleRoot,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
Version: 1,
ParentHashes: []*daghash.Hash{},
HashMerkleRoot: mainnetGenesisMerkleRoot,
AcceptedIDMerkleRoot: &daghash.ZeroHash,
UTXOCommitment: &daghash.ZeroHash,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
}
tests := []struct {
in *BlockHeader // Block header to encode
size int // Expected serialized size
}{
// Block with no transactions.
{genesisBlockHdr, 89},
{genesisBlockHdr, 121},
// First block in the mainnet block DAG.
{baseBlockHdr, 153},
{baseBlockHdr, 185},
}
t.Logf("Running %d tests", len(tests))
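For reference, the updated expected sizes follow directly from the new header layout; a quick breakdown, assuming 32-byte daghash.Hash values as used throughout this diff:

    version (4) + numParentBlocks (1)
    + HashMerkleRoot (32) + AcceptedIDMerkleRoot (32) + UTXOCommitment (32)
    + Timestamp (8) + Bits (4) + Nonce (8)        = 121 bytes (genesisBlockHdr, no parents)
    + 2 parent hashes * 32                        = 185 bytes (baseBlockHdr, two parents)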
@@ -296,12 +307,12 @@ func TestBlockHeaderSerializeSize(t *testing.T) {
func TestIsGenesis(t *testing.T) {
nonce := uint64(123123) // 0x1e0f3
bits := uint32(0x1d00ffff)
timestamp := time.Unix(0x495fab29, 0) // 2009-01-03 12:15:05 -0600 CST
timestamp := mstime.UnixMilliseconds(0x495fab29000)
baseBlockHdr := &BlockHeader{
Version: 1,
ParentHashes: []*daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
HashMerkleRoot: mainNetGenesisMerkleRoot,
ParentHashes: []*daghash.Hash{mainnetGenesisHash, simnetGenesisHash},
HashMerkleRoot: mainnetGenesisMerkleRoot,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
@@ -309,7 +320,7 @@ func TestIsGenesis(t *testing.T) {
genesisBlockHdr := &BlockHeader{
Version: 1,
ParentHashes: []*daghash.Hash{},
HashMerkleRoot: mainNetGenesisMerkleRoot,
HashMerkleRoot: mainnetGenesisMerkleRoot,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,

View File

@@ -2,23 +2,27 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
package appmessage
import (
"encoding/binary"
"fmt"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
"github.com/kaspanet/kaspad/util/binaryserializer"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/pkg/errors"
"io"
"math"
"time"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/util/binaryserializer"
"github.com/daglabs/btcd/util/subnetworkid"
)
// MaxVarIntPayload is the maximum payload size for a variable length integer.
const MaxVarIntPayload = 9
// MaxInvPerMsg is the maximum number of inventory vectors that can be in any type of kaspa inv message.
const MaxInvPerMsg = 1 << 17
var (
// littleEndian is a convenience variable since binary.LittleEndian is
// quite long.
@@ -34,10 +38,13 @@ var (
var errNonCanonicalVarInt = "non-canonical varint %x - discriminant %x must " +
"encode a value greater than %x"
// int64Time represents a unix timestamp encoded with an int64. It is used as
// a way to signal the readElement function how to decode a timestamp into a Go
// time.Time since it is otherwise ambiguous.
type int64Time time.Time
// errNoEncodingForType signifies that there's no encoding for the given type.
var errNoEncodingForType = errors.New("there's no encoding for this type")
// int64Time represents a unix timestamp with milliseconds precision encoded with
// an int64. It is used as a way to signal the readElement function how to decode
// a timestamp into a Go mstime.Time since it is otherwise ambiguous.
type int64Time mstime.Time
// ReadElement reads the next sequence of bytes from r using little endian
// depending on the concrete type of element pointed to.
@@ -77,6 +84,14 @@ func ReadElement(r io.Reader, element interface{}) error {
*e = rv
return nil
case *uint8:
rv, err := binaryserializer.Uint8(r)
if err != nil {
return err
}
*e = rv
return nil
case *bool:
rv, err := binaryserializer.Uint8(r)
if err != nil {
@@ -95,7 +110,7 @@ func ReadElement(r io.Reader, element interface{}) error {
if err != nil {
return err
}
*e = int64Time(time.Unix(int64(rv), 0))
*e = int64Time(mstime.UnixMilliseconds(int64(rv)))
return nil
// Message header checksum.
@@ -107,11 +122,12 @@ func ReadElement(r io.Reader, element interface{}) error {
return nil
// Message header command.
case *[CommandSize]uint8:
_, err := io.ReadFull(r, e[:])
case *MessageCommand:
rv, err := binaryserializer.Uint32(r, littleEndian)
if err != nil {
return err
}
*e = MessageCommand(rv)
return nil
// IP address.
@@ -129,6 +145,9 @@ func ReadElement(r io.Reader, element interface{}) error {
}
return nil
case *id.ID:
return e.Deserialize(r)
case *subnetworkid.SubnetworkID:
_, err := io.ReadFull(r, e[:])
if err != nil {
@@ -144,45 +163,19 @@ func ReadElement(r io.Reader, element interface{}) error {
*e = ServiceFlag(rv)
return nil
case *InvType:
case *KaspaNet:
rv, err := binaryserializer.Uint32(r, littleEndian)
if err != nil {
return err
}
*e = InvType(rv)
return nil
case *BitcoinNet:
rv, err := binaryserializer.Uint32(r, littleEndian)
if err != nil {
return err
}
*e = BitcoinNet(rv)
return nil
case *BloomUpdateType:
rv, err := binaryserializer.Uint8(r)
if err != nil {
return err
}
*e = BloomUpdateType(rv)
return nil
case *RejectCode:
rv, err := binaryserializer.Uint8(r)
if err != nil {
return err
}
*e = RejectCode(rv)
*e = KaspaNet(rv)
return nil
}
// Fall back to the slower binary.Read if a fast path was not available
// above.
return binary.Read(r, littleEndian, element)
return errors.Wrapf(errNoEncodingForType, "couldn't find a way to read type %T", element)
}
// readElements reads multiple items from r. It is equivalent to multiple
// readElements reads multiple items from r. It is equivalent to multiple
// calls to readElement.
func readElements(r io.Reader, elements ...interface{}) error {
for _, element := range elements {
@@ -227,6 +220,13 @@ func WriteElement(w io.Writer, element interface{}) error {
}
return nil
case uint8:
err := binaryserializer.PutUint8(w, e)
if err != nil {
return err
}
return nil
case bool:
var err error
if e {
@@ -248,8 +248,8 @@ func WriteElement(w io.Writer, element interface{}) error {
return nil
// Message header command.
case [CommandSize]uint8:
_, err := w.Write(e[:])
case MessageCommand:
err := binaryserializer.PutUint32(w, littleEndian, uint32(e))
if err != nil {
return err
}
@@ -270,6 +270,9 @@ func WriteElement(w io.Writer, element interface{}) error {
}
return nil
case *id.ID:
return e.Serialize(w)
case *subnetworkid.SubnetworkID:
_, err := w.Write(e[:])
if err != nil {
@@ -284,41 +287,18 @@ func WriteElement(w io.Writer, element interface{}) error {
}
return nil
case InvType:
case KaspaNet:
err := binaryserializer.PutUint32(w, littleEndian, uint32(e))
if err != nil {
return err
}
return nil
case BitcoinNet:
err := binaryserializer.PutUint32(w, littleEndian, uint32(e))
if err != nil {
return err
}
return nil
case BloomUpdateType:
err := binaryserializer.PutUint8(w, uint8(e))
if err != nil {
return err
}
return nil
case RejectCode:
err := binaryserializer.PutUint8(w, uint8(e))
if err != nil {
return err
}
return nil
}
// Fall back to the slower binary.Write if a fast path was not available
// above.
return binary.Write(w, littleEndian, element)
return errors.Wrapf(errNoEncodingForType, "couldn't find a way to write type %T", element)
}
// writeElements writes multiple items to w. It is equivalent to multiple
// writeElements writes multiple items to w. It is equivalent to multiple
// calls to writeElement.
func writeElements(w io.Writer, elements ...interface{}) error {
for _, element := range elements {
@@ -350,7 +330,7 @@ func ReadVarInt(r io.Reader) (uint64, error) {
// encoded using fewer bytes.
min := uint64(0x100000000)
if rv < min {
return 0, messageError("ReadVarInt", fmt.Sprintf(
return 0, messageError("readVarInt", fmt.Sprintf(
errNonCanonicalVarInt, rv, discriminant, min))
}
@@ -365,7 +345,7 @@ func ReadVarInt(r io.Reader) (uint64, error) {
// encoded using fewer bytes.
min := uint64(0x10000)
if rv < min {
return 0, messageError("ReadVarInt", fmt.Sprintf(
return 0, messageError("readVarInt", fmt.Sprintf(
errNonCanonicalVarInt, rv, discriminant, min))
}
@@ -380,7 +360,7 @@ func ReadVarInt(r io.Reader) (uint64, error) {
// encoded using fewer bytes.
min := uint64(0xfd)
if rv < min {
return 0, messageError("ReadVarInt", fmt.Sprintf(
return 0, messageError("readVarInt", fmt.Sprintf(
errNonCanonicalVarInt, rv, discriminant, min))
}
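A small round-trip sketch of the exported varint helpers from the hunks above; the package import path is assumed from the file layout in this diff, and the byte values mirror the test vectors further down.

package main

import (
	"bytes"
	"fmt"

	"github.com/kaspanet/kaspad/app/appmessage" // assumed import path
)

func main() {
	var buf bytes.Buffer

	// 0xfd is the smallest value that needs the 3-byte form (0xfd marker + uint16).
	if err := appmessage.WriteVarInt(&buf, 0xfd); err != nil {
		panic(err)
	}
	fmt.Printf("encoded: %x\n", buf.Bytes()) // fdfd00

	value, err := appmessage.ReadVarInt(bytes.NewReader(buf.Bytes()))
	if err != nil {
		panic(err)
	}
	fmt.Println("decoded:", value) // 253

	// Encoding a value below 0xfd in the 3-byte form would be rejected by
	// ReadVarInt as non-canonical, per the checks above.
}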
@@ -445,9 +425,9 @@ func VarIntSerializeSize(val uint64) int {
}
// ReadVarString reads a variable length string from r and returns it as a Go
// string. A variable length string is encoded as a variable length integer
// string. A variable length string is encoded as a variable length integer
// containing the length of the string followed by the bytes that represent the
// string itself. An error is returned if the length is greater than the
// string itself. An error is returned if the length is greater than the
// maximum block payload size since it helps protect against memory exhaustion
// attacks and forced panics through malformed messages.
func ReadVarString(r io.Reader, pver uint32) (string, error) {
@@ -457,7 +437,7 @@ func ReadVarString(r io.Reader, pver uint32) (string, error) {
}
// Prevent variable length strings that are larger than the maximum
// message size. It would be possible to cause memory exhaustion and
// message size. It would be possible to cause memory exhaustion and
// panics without a sane upper bound on this count.
if count > MaxMessagePayload {
str := fmt.Sprintf("variable length string is too long "+
@@ -485,11 +465,11 @@ func WriteVarString(w io.Writer, str string) error {
return err
}
// ReadVarBytes reads a variable length byte array. A byte array is encoded
// ReadVarBytes reads a variable length byte array. A byte array is encoded
// as a varInt containing the length of the array followed by the bytes
// themselves. An error is returned if the length is greater than the
// themselves. An error is returned if the length is greater than the
// passed maxAllowed parameter which helps protect against memory exhaustion
// attacks and forced panics through malformed messages. The fieldName
// attacks and forced panics through malformed messages. The fieldName
// parameter is only used for the error message so it provides more context in
// the error.
func ReadVarBytes(r io.Reader, pver uint32, maxAllowed uint32,
@@ -500,7 +480,7 @@ func ReadVarBytes(r io.Reader, pver uint32, maxAllowed uint32,
return nil, err
}
// Prevent byte array larger than the max message size. It would
// Prevent byte array larger than the max message size. It would
// be possible to cause memory exhaustion and panics without a sane
// upper bound on this count.
if count > uint64(maxAllowed) {

View File

@@ -2,62 +2,68 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
package appmessage
import (
"bytes"
"github.com/pkg/errors"
"io"
"reflect"
"strings"
"testing"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/util/daghash"
)
// mainNetGenesisHash is the hash of the first block in the block chain for the
// mainnetGenesisHash is the hash of the first block in the block DAG for the
// main network (genesis block).
var mainNetGenesisHash = &daghash.Hash{ // Make go vet happy.
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72,
0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
var mainnetGenesisHash = &daghash.Hash{
0xdc, 0x5f, 0x5b, 0x5b, 0x1d, 0xc2, 0xa7, 0x25,
0x49, 0xd5, 0x1d, 0x4d, 0xee, 0xd7, 0xa4, 0x8b,
0xaf, 0xd3, 0x14, 0x4b, 0x56, 0x78, 0x98, 0xb1,
0x8c, 0xfd, 0x9f, 0x69, 0xdd, 0xcf, 0xbb, 0x63,
}
// simNetGenesisHash is the hash of the first block in the block chain for the
// simnetGenesisHash is the hash of the first block in the block DAG for the
// simulation test network.
var simNetGenesisHash = &daghash.Hash{ // Make go vet happy.
var simnetGenesisHash = &daghash.Hash{
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a,
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,
}
// mainNetGenesisMerkleRoot is the hash of the first transaction in the genesis
// mainnetGenesisMerkleRoot is the hash of the first transaction in the genesis
// block for the main network.
var mainNetGenesisMerkleRoot = &daghash.Hash{ // Make go vet happy.
var mainnetGenesisMerkleRoot = &daghash.Hash{
0x4a, 0x5e, 0x1e, 0x4b, 0xaa, 0xb8, 0x9f, 0x3a,
0x32, 0x51, 0x8a, 0x88, 0xc3, 0x1b, 0xc8, 0x7f,
0x61, 0x8f, 0x76, 0x67, 0x3e, 0x2c, 0xc7, 0x7a,
0xb2, 0x12, 0x7b, 0x7a, 0xfd, 0xed, 0xa3, 0x3b,
}
var exampleIDMerkleRoot = &daghash.Hash{
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
var exampleAcceptedIDMerkleRoot = &daghash.Hash{
0x09, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C,
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
}
// TestElementWire tests wire encode and decode for various element types. This
var exampleUTXOCommitment = &daghash.Hash{
0x10, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C,
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
}
// TestElementEncoding tests appmessage encode and decode for various element types. This
// is mainly to test the "fast" paths in readElement and writeElement which use
// type assertions to avoid reflection when possible.
func TestElementWire(t *testing.T) {
type writeElementReflect int32
func TestElementEncoding(t *testing.T) {
tests := []struct {
in interface{} // Value to encode
buf []byte // Wire encoding
buf []byte // Encoded value
}{
{int32(1), []byte{0x01, 0x00, 0x00, 0x00}},
{uint32(256), []byte{0x00, 0x01, 0x00, 0x00}},
@@ -82,13 +88,9 @@ func TestElementWire(t *testing.T) {
[]byte{0x01, 0x02, 0x03, 0x04},
},
{
[CommandSize]byte{
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b, 0x0c,
},
MessageCommand(0x10),
[]byte{
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b, 0x0c,
0x10, 0x00, 0x00, 0x00,
},
},
{
@@ -102,7 +104,7 @@ func TestElementWire(t *testing.T) {
},
},
{
(*daghash.Hash)(&[daghash.HashSize]byte{ // Make go vet happy.
(*daghash.Hash)(&[daghash.HashSize]byte{
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
@@ -120,23 +122,14 @@ func TestElementWire(t *testing.T) {
[]byte{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
},
{
InvType(InvTypeTx),
[]byte{0x01, 0x00, 0x00, 0x00},
},
{
BitcoinNet(MainNet),
[]byte{0xf9, 0xbe, 0xb4, 0xd9},
},
// Type not supported by the "fast" path and requires reflection.
{
writeElementReflect(1),
[]byte{0x01, 0x00, 0x00, 0x00},
KaspaNet(Mainnet),
[]byte{0x1d, 0xf7, 0xdc, 0x3d},
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Write to wire format.
// Write to appmessage format.
var buf bytes.Buffer
err := WriteElement(&buf, test.in)
if err != nil {
@@ -149,7 +142,7 @@ func TestElementWire(t *testing.T) {
continue
}
// Read from wire format.
// Read from appmessage format.
rbuf := bytes.NewReader(test.buf)
val := test.in
if reflect.ValueOf(test.in).Kind() != reflect.Ptr {
@@ -172,9 +165,11 @@ func TestElementWire(t *testing.T) {
}
}
// TestElementWireErrors performs negative tests against wire encode and decode
// TestElementEncodingErrors performs negative tests against appmessage encode and decode
// of various element types to confirm error paths work correctly.
func TestElementWireErrors(t *testing.T) {
func TestElementEncodingErrors(t *testing.T) {
type writeElementReflect int32
tests := []struct {
in interface{} // Value to encode
max int // Max size of fixed buffer to induce errors
@@ -187,10 +182,7 @@ func TestElementWireErrors(t *testing.T) {
{true, 0, io.ErrShortWrite, io.EOF},
{[4]byte{0x01, 0x02, 0x03, 0x04}, 0, io.ErrShortWrite, io.EOF},
{
[CommandSize]byte{
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b, 0x0c,
},
MessageCommand(10),
0, io.ErrShortWrite, io.EOF,
},
{
@@ -201,7 +193,7 @@ func TestElementWireErrors(t *testing.T) {
0, io.ErrShortWrite, io.EOF,
},
{
(*daghash.Hash)(&[daghash.HashSize]byte{ // Make go vet happy.
(*daghash.Hash)(&[daghash.HashSize]byte{
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
@@ -210,29 +202,30 @@ func TestElementWireErrors(t *testing.T) {
0, io.ErrShortWrite, io.EOF,
},
{ServiceFlag(SFNodeNetwork), 0, io.ErrShortWrite, io.EOF},
{InvType(InvTypeTx), 0, io.ErrShortWrite, io.EOF},
{BitcoinNet(MainNet), 0, io.ErrShortWrite, io.EOF},
{KaspaNet(Mainnet), 0, io.ErrShortWrite, io.EOF},
// Type with no supported encoding.
{writeElementReflect(0), 0, errNoEncodingForType, errNoEncodingForType},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to wire format.
// Encode to appmessage format.
w := newFixedWriter(test.max)
err := WriteElement(w, test.in)
if err != test.writeErr {
if !errors.Is(err, test.writeErr) {
t.Errorf("writeElement #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Decode from wire format.
// Decode from appmessage format.
r := newFixedReader(test.max, nil)
val := test.in
if reflect.ValueOf(test.in).Kind() != reflect.Ptr {
val = reflect.New(reflect.TypeOf(test.in)).Interface()
}
err = ReadElement(r, val)
if err != test.readErr {
if !errors.Is(err, test.readErr) {
t.Errorf("readElement #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
@@ -240,43 +233,42 @@ func TestElementWireErrors(t *testing.T) {
}
}
// TestVarIntWire tests wire encode and decode for variable length integers.
func TestVarIntWire(t *testing.T) {
// TestVarIntEncoding tests appmessage encode and decode for variable length integers.
func TestVarIntEncoding(t *testing.T) {
tests := []struct {
in uint64 // Value to encode
out uint64 // Expected decoded value
buf []byte // Wire encoding
value uint64 // Value to encode
buf []byte // Encoded value
}{
// Latest protocol version.
// Single byte
{0, 0, []byte{0x00}},
{0, []byte{0x00}},
// Max single byte
{0xfc, 0xfc, []byte{0xfc}},
{0xfc, []byte{0xfc}},
// Min 2-byte
{0xfd, 0xfd, []byte{0xfd, 0x0fd, 0x00}},
{0xfd, []byte{0xfd, 0x0fd, 0x00}},
// Max 2-byte
{0xffff, 0xffff, []byte{0xfd, 0xff, 0xff}},
{0xffff, []byte{0xfd, 0xff, 0xff}},
// Min 4-byte
{0x10000, 0x10000, []byte{0xfe, 0x00, 0x00, 0x01, 0x00}},
{0x10000, []byte{0xfe, 0x00, 0x00, 0x01, 0x00}},
// Max 4-byte
{0xffffffff, 0xffffffff, []byte{0xfe, 0xff, 0xff, 0xff, 0xff}},
{0xffffffff, []byte{0xfe, 0xff, 0xff, 0xff, 0xff}},
// Min 8-byte
{
0x100000000, 0x100000000,
0x100000000,
[]byte{0xff, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00},
},
// Max 8-byte
{
0xffffffffffffffff, 0xffffffffffffffff,
0xffffffffffffffff,
[]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to wire format.
var buf bytes.Buffer
err := WriteVarInt(&buf, test.in)
// Encode to appmessage format.
buf := &bytes.Buffer{}
err := WriteVarInt(buf, test.value)
if err != nil {
t.Errorf("WriteVarInt #%d error %v", i, err)
continue
@@ -287,27 +279,27 @@ func TestVarIntWire(t *testing.T) {
continue
}
// Decode from wire format.
// Decode from appmessage format.
rbuf := bytes.NewReader(test.buf)
val, err := ReadVarInt(rbuf)
if err != nil {
t.Errorf("ReadVarInt #%d error %v", i, err)
continue
}
if val != test.out {
t.Errorf("ReadVarInt #%d\n got: %d want: %d", i,
val, test.out)
if val != test.value {
t.Errorf("ReadVarInt #%d\n got: %x want: %x", i,
val, test.value)
continue
}
}
}
// TestVarIntWireErrors performs negative tests against wire encode and decode
// TestVarIntEncodingErrors performs negative tests against appmessage encode and decode
// of variable length integers to confirm error paths work correctly.
func TestVarIntWireErrors(t *testing.T) {
func TestVarIntEncodingErrors(t *testing.T) {
tests := []struct {
in uint64 // Value to encode
buf []byte // Wire encoding
buf []byte // Encoded value
max int // Max size of fixed buffer to induce errors
writeErr error // Expected write error
readErr error // Expected read error
@@ -327,19 +319,19 @@ func TestVarIntWireErrors(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to wire format.
// Encode to appmessage format.
w := newFixedWriter(test.max)
err := WriteVarInt(w, test.in)
if err != test.writeErr {
if !errors.Is(err, test.writeErr) {
t.Errorf("WriteVarInt #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Decode from wire format.
// Decode from appmessage format.
r := newFixedReader(test.max, test.buf)
_, err = ReadVarInt(r)
if err != test.readErr {
if !errors.Is(err, test.readErr) {
t.Errorf("ReadVarInt #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
@@ -355,7 +347,7 @@ func TestVarIntNonCanonical(t *testing.T) {
tests := []struct {
name string // Test name for easier identification
in []byte // Value to decode
pver uint32 // Protocol version for wire encoding
pver uint32 // Protocol version for appmessage encoding
}{
{
"0 encoded with 3 bytes", []byte{0xfd, 0x00, 0x00},
@@ -387,10 +379,10 @@ func TestVarIntNonCanonical(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Decode from wire format.
// Decode from appmessage format.
rbuf := bytes.NewReader(test.in)
val, err := ReadVarInt(rbuf)
if _, ok := err.(*MessageError); !ok {
if msgErr := &(MessageError{}); !errors.As(err, &msgErr) {
t.Errorf("ReadVarInt #%d (%s) unexpected error %v", i,
test.name, err)
continue
@@ -403,7 +395,7 @@ func TestVarIntNonCanonical(t *testing.T) {
}
}
// TestVarIntWire tests the serialize size for variable length integers.
// TestVarIntEncoding tests the serialize size for variable length integers.
func TestVarIntSerializeSize(t *testing.T) {
tests := []struct {
val uint64 // Value to get the serialized size for
@@ -438,8 +430,8 @@ func TestVarIntSerializeSize(t *testing.T) {
}
}
// TestVarStringWire tests wire encode and decode for variable length strings.
func TestVarStringWire(t *testing.T) {
// TestVarStringEncoding tests appmessage encode and decode for variable length strings.
func TestVarStringEncoding(t *testing.T) {
pver := ProtocolVersion
// str256 is a string that takes a 2-byte varint to encode.
@@ -448,8 +440,8 @@ func TestVarStringWire(t *testing.T) {
tests := []struct {
in string // String to encode
out string // String to decoded value
buf []byte // Wire encoding
pver uint32 // Protocol version for wire encoding
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
}{
// Latest protocol version.
// Empty string
@@ -462,7 +454,7 @@ func TestVarStringWire(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to wire format.
// Encode to appmessage format.
var buf bytes.Buffer
err := WriteVarString(&buf, test.in)
if err != nil {
@@ -475,7 +467,7 @@ func TestVarStringWire(t *testing.T) {
continue
}
// Decode from wire format.
// Decode from appmessage format.
rbuf := bytes.NewReader(test.buf)
val, err := ReadVarString(rbuf, test.pver)
if err != nil {
@@ -490,9 +482,9 @@ func TestVarStringWire(t *testing.T) {
}
}
// TestVarStringWireErrors performs negative tests against wire encode and
// TestVarStringEncodingErrors performs negative tests against appmessage encode and
// decode of variable length strings to confirm error paths work correctly.
func TestVarStringWireErrors(t *testing.T) {
func TestVarStringEncodingErrors(t *testing.T) {
pver := ProtocolVersion
// str256 is a string that takes a 2-byte varint to encode.
@@ -500,8 +492,8 @@ func TestVarStringWireErrors(t *testing.T) {
tests := []struct {
in string // Value to encode
buf []byte // Wire encoding
pver uint32 // Protocol version for wire encoding
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
max int // Max size of fixed buffer to induce errors
writeErr error // Expected write error
readErr error // Expected read error
@@ -517,19 +509,19 @@ func TestVarStringWireErrors(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to wire format.
// Encode to appmessage format.
w := newFixedWriter(test.max)
err := WriteVarString(w, test.in)
if err != test.writeErr {
if !errors.Is(err, test.writeErr) {
t.Errorf("WriteVarString #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Decode from wire format.
// Decode from appmessage format.
r := newFixedReader(test.max, test.buf)
_, err = ReadVarString(r, test.pver)
if err != test.readErr {
if !errors.Is(err, test.readErr) {
t.Errorf("ReadVarString #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
@@ -539,14 +531,14 @@ func TestVarStringWireErrors(t *testing.T) {
// TestVarStringOverflowErrors performs tests to ensure deserializing variable
// length strings intentionally crafted to use large values for the string
// length are handled properly. This could otherwise potentially be used as an
// length are handled properly. This could otherwise potentially be used as an
// attack vector.
func TestVarStringOverflowErrors(t *testing.T) {
pver := ProtocolVersion
tests := []struct {
buf []byte // Wire encoding
pver uint32 // Protocol version for wire encoding
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
err error // Expected error
}{
{[]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
@@ -557,7 +549,7 @@ func TestVarStringOverflowErrors(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Decode from wire format.
// Decode from appmessage format.
rbuf := bytes.NewReader(test.buf)
_, err := ReadVarString(rbuf, test.pver)
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
@@ -569,8 +561,8 @@ func TestVarStringOverflowErrors(t *testing.T) {
}
// TestVarBytesWire tests wire encode and decode for variable length byte array.
func TestVarBytesWire(t *testing.T) {
// TestVarBytesEncoding tests appmessage encode and decode for variable length byte array.
func TestVarBytesEncoding(t *testing.T) {
pver := ProtocolVersion
// bytes256 is a byte array that takes a 2-byte varint to encode.
@@ -578,8 +570,8 @@ func TestVarBytesWire(t *testing.T) {
tests := []struct {
in []byte // Byte Array to write
buf []byte // Wire encoding
pver uint32 // Protocol version for wire encoding
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
}{
// Latest protocol version.
// Empty byte array
@@ -592,7 +584,7 @@ func TestVarBytesWire(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to wire format.
// Encode to appmessage format.
var buf bytes.Buffer
err := WriteVarBytes(&buf, test.pver, test.in)
if err != nil {
@@ -605,7 +597,7 @@ func TestVarBytesWire(t *testing.T) {
continue
}
// Decode from wire format.
// Decode from appmessage format.
rbuf := bytes.NewReader(test.buf)
val, err := ReadVarBytes(rbuf, test.pver, MaxMessagePayload,
"test payload")
@@ -621,9 +613,9 @@ func TestVarBytesWire(t *testing.T) {
}
}
// TestVarBytesWireErrors performs negative tests against wire encode and
// TestVarBytesEncodingErrors performs negative tests against appmessage encode and
// decode of variable length byte arrays to confirm error paths work correctly.
func TestVarBytesWireErrors(t *testing.T) {
func TestVarBytesEncodingErrors(t *testing.T) {
pver := ProtocolVersion
// bytes256 is a byte array that takes a 2-byte varint to encode.
@@ -631,8 +623,8 @@ func TestVarBytesWireErrors(t *testing.T) {
tests := []struct {
in []byte // Byte Array to write
buf []byte // Wire encoding
pver uint32 // Protocol version for wire encoding
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
max int // Max size of fixed buffer to induce errors
writeErr error // Expected write error
readErr error // Expected read error
@@ -648,20 +640,20 @@ func TestVarBytesWireErrors(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to wire format.
// Encode to appmessage format.
w := newFixedWriter(test.max)
err := WriteVarBytes(w, test.pver, test.in)
if err != test.writeErr {
if !errors.Is(err, test.writeErr) {
t.Errorf("WriteVarBytes #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Decode from wire format.
// Decode from appmessage format.
r := newFixedReader(test.max, test.buf)
_, err = ReadVarBytes(r, test.pver, MaxMessagePayload,
"test payload")
if err != test.readErr {
if !errors.Is(err, test.readErr) {
t.Errorf("ReadVarBytes #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
@@ -671,14 +663,14 @@ func TestVarBytesWireErrors(t *testing.T) {
// TestVarBytesOverflowErrors performs tests to ensure deserializing variable
// length byte arrays intentionally crafted to use large values for the array
// length are handled properly. This could otherwise potentially be used as an
// length are handled properly. This could otherwise potentially be used as an
// attack vector.
func TestVarBytesOverflowErrors(t *testing.T) {
pver := ProtocolVersion
tests := []struct {
buf []byte // Wire encoding
pver uint32 // Protocol version for wire encoding
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
err error // Expected error
}{
{[]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
@@ -689,7 +681,7 @@ func TestVarBytesOverflowErrors(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Decode from wire format.
// Decode from appmessage format.
rbuf := bytes.NewReader(test.buf)
_, err := ReadVarBytes(rbuf, test.pver, MaxMessagePayload,
"test payload")

app/appmessage/doc.go Normal file (133 lines added)
View File

@@ -0,0 +1,133 @@
/*
Package appmessage implements the kaspa appmessage protocol.
At a high level, this package provides support for marshalling and unmarshalling
supported kaspa messages to and from the appmessage. This package does not deal
with the specifics of message handling such as what to do when a message is
received. This provides the caller with a high level of flexibility.
Kaspa Message Overview
The kaspa protocol consists of exchanging messages between peers. Each
message is preceded by a header which identifies information about it such as
which kaspa network it is a part of, its type, how big it is, and a checksum
to verify validity. All encoding and decoding of message headers is handled by
this package.
To accomplish this, there is a generic interface for kaspa messages named
Message which allows messages of any type to be read, written, or passed around
through channels, functions, etc. In addition, concrete implementations of most
of the currently supported kaspa messages are provided. For these supported
messages, all of the details of marshalling and unmarshalling to and from the
appmessage using kaspa encoding are handled so the caller doesn't have to concern
themselves with the specifics.
Message Interaction
The following provides a quick summary of how the kaspa messages are intended
to interact with one another. As stated above, these interactions are not
directly handled by this package.
The initial handshake consists of two peers sending each other a version message
(MsgVersion) followed by responding with a verack message (MsgVerAck). Both
peers use the information in the version message (MsgVersion) to negotiate
things such as protocol version and supported services with each other. Once
the initial handshake is complete, the following chart indicates message
interactions in no particular order.
Peer A Sends                               Peer B Responds
----------------------------------------------------------------------------
getaddr message (MsgRequestAddresses)      addr message (MsgAddresses)
getblockinvs message (MsgGetBlockInvs)     inv message (MsgInv)
inv message (MsgInv)                       getdata message (MsgGetData)
getdata message (MsgGetData)               block message (MsgBlock) -or-
                                           tx message (MsgTx) -or-
                                           notfound message (MsgNotFound)
ping message (MsgPing)                     pong message (MsgPong)
Common Parameters
There are several common parameters that arise when using this package to read
and write kaspa messages. The following sections provide a quick overview of
these parameters so the next sections can build on them.
Protocol Version
The protocol version should be negotiated with the remote peer at a higher
level than this package via the version (MsgVersion) message exchange, however,
this package provides the appmessage.ProtocolVersion constant which indicates the
latest protocol version this package supports and is typically the value to use
for all outbound connections before a potentially lower protocol version is
negotiated.
Kaspa Network
The kaspa network is a magic number which is used to identify the start of a
message and which kaspa network the message applies to. This package provides
the following constants:
appmessage.Mainnet
appmessage.Testnet (Test network)
appmessage.Simnet (Simulation test network)
appmessage.Devnet (Development network)
Determining Message Type
As discussed in the kaspa message overview section, this package reads
and writes kaspa messages using a generic interface named Message. In
order to determine the actual concrete type of the message, use a type
switch or type assertion. An example of a type switch follows:
// Assumes msg is already a valid concrete message such as one created
// via NewMsgVersion or read via ReadMessage.
switch msg := msg.(type) {
case *appmessage.MsgVersion:
// The message is a pointer to a MsgVersion struct.
fmt.Printf("Protocol version: %d", msg.ProtocolVersion)
case *appmessage.MsgBlock:
// The message is a pointer to a MsgBlock struct.
fmt.Printf("Number of tx in block: %d", msg.Header.TxnCount)
}
Reading Messages
In order to unmarshall kaspa messages from the appmessage, use the ReadMessage
function. It accepts any io.Reader, but typically this will be a net.Conn to
a remote node running a kaspa peer. Example syntax is:
// Reads and validates the next kaspa message from conn using the
// protocol version pver and the kaspa network kaspaNet. The returns
// are an appmessage.Message, a []byte which contains the unmarshalled
// raw payload, and a possible error.
msg, rawPayload, err := appmessage.ReadMessage(conn, pver, kaspaNet)
if err != nil {
// Log and handle the error
}
Writing Messages
In order to marshall kaspa messages to the appmessage, use the WriteMessage
function. It accepts any io.Writer, but typically this will be a net.Conn to
a remote node running a kaspa peer. Example syntax to request addresses
from a remote peer is:
// Create a new getaddr kaspa message.
msg := appmessage.NewMsgRequestAddresses()
// Writes a kaspa message msg to conn using the protocol version
// pver, and the kaspa network kaspaNet. The return is a possible
// error.
err := appmessage.WriteMessage(conn, msg, pver, kaspaNet)
if err != nil {
// Log and handle the error
}
Errors
Errors returned by this package are either the raw errors provided by underlying
calls to read/write from streams such as io.EOF, io.ErrUnexpectedEOF, and
io.ErrShortWrite, or of type appmessage.MessageError. This allows the caller to
differentiate between general IO errors and malformed messages through type
assertions.
*/
package appmessage
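Building on the Errors paragraph above, a short fragment in the same style as this doc comment's other examples (conn, pver and kaspaNet are assumed to be in scope, and handleMessage is a hypothetical dispatcher) showing how a caller can separate MessageError from plain IO errors using the errors.As idiom this change adopts in its tests:

msg, _, err := appmessage.ReadMessage(conn, pver, kaspaNet)
if err != nil {
	var msgErr *appmessage.MessageError
	if errors.As(err, &msgErr) {
		// A malformed or otherwise invalid message; Func and Description
		// identify where and why decoding failed.
		log.Printf("protocol error in %s: %s", msgErr.Func, msgErr.Description)
		return
	}
	// Otherwise a raw IO error such as io.EOF or io.ErrUnexpectedEOF.
	log.Printf("read error: %v", err)
	return
}
handleMessage(msg)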

View File

@@ -2,14 +2,14 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
package appmessage
import (
"fmt"
)
// MessageError describes an issue with a message.
// An example of some potential issues are messages from the wrong bitcoin
// An example of some potential issues are messages from the wrong kaspa
// network, invalid commands, mismatched checksums, and exceeding max payloads.
//
// This provides a mechanism for the caller to type assert the error to

View File

@@ -2,32 +2,32 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
package appmessage
import "io"
// fakeMessage implements the Message interface and is used to force encode
// errors in messages.
type fakeMessage struct {
command string
command MessageCommand
payload []byte
forceEncodeErr bool
forceLenErr bool
}
// BtcDecode doesn't do anything. It just satisfies the wire.Message
// KaspaDecode doesn't do anything. It just satisfies the appmessage.Message
// interface.
func (msg *fakeMessage) BtcDecode(r io.Reader, pver uint32) error {
func (msg *fakeMessage) KaspaDecode(r io.Reader, pver uint32) error {
return nil
}
// BtcEncode writes the payload field of the fake message or forces an error
// if the forceEncodeErr flag of the fake message is set. It also satisfies the
// wire.Message interface.
func (msg *fakeMessage) BtcEncode(w io.Writer, pver uint32) error {
// KaspaEncode writes the payload field of the fake message or forces an error
// if the forceEncodeErr flag of the fake message is set. It also satisfies the
// appmessage.Message interface.
func (msg *fakeMessage) KaspaEncode(w io.Writer, pver uint32) error {
if msg.forceEncodeErr {
err := &MessageError{
Func: "fakeMessage.BtcEncode",
Func: "fakeMessage.KaspaEncode",
Description: "intentional error",
}
return err
@@ -39,12 +39,12 @@ func (msg *fakeMessage) BtcEncode(w io.Writer, pver uint32) error {
// Command returns the command field of the fake message and satisfies the
// Message interface.
func (msg *fakeMessage) Command() string {
func (msg *fakeMessage) Command() MessageCommand {
return msg.command
}
// MaxPayloadLength returns the length of the payload field of fake message
// or a smaller value if the forceLenErr flag of the fake message is set. It
// or a smaller value if the forceLenErr flag of the fake message is set. It
// satisfies the Message interface.
func (msg *fakeMessage) MaxPayloadLength(pver uint32) uint32 {
lenp := uint32(len(msg.payload))

View File

@@ -2,7 +2,7 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
package appmessage
import (
"bytes"
@@ -16,7 +16,7 @@ type fixedWriter struct {
pos int
}
// Write writes the contents of p to w. When the contents of p would cause
// Write writes the contents of p to w. When the contents of p would cause
// the writer to exceed the maximum allowed size of the fixed writer,
// io.ErrShortWrite is returned and the writer is left unchanged.
//
@@ -52,7 +52,7 @@ type fixedReader struct {
iobuf *bytes.Buffer
}
// Read reads the next len(p) bytes from the fixed reader. When the number of
// Read reads the next len(p) bytes from the fixed reader. When the number of
// bytes read would exceed the maximum number of allowed bytes to be read from
// the fixed writer, an error is returned.
//

app/appmessage/message.go Normal file (89 lines added)
View File

@@ -0,0 +1,89 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"fmt"
"time"
)
// MaxMessagePayload is the maximum bytes a message can be regardless of other
// individual limits imposed by messages themselves.
const MaxMessagePayload = (1024 * 1024 * 32) // 32MB
// MessageCommand is a number in the header of a message that represents its type.
type MessageCommand uint32
func (cmd MessageCommand) String() string {
cmdString, ok := MessageCommandToString[cmd]
if !ok {
cmdString = "unknown command"
}
return fmt.Sprintf("%s [code %d]", cmdString, uint8(cmd))
}
// Commands used in kaspa message headers which describe the type of message.
const (
CmdVersion MessageCommand = iota
CmdVerAck
CmdRequestAddresses
CmdAddresses
CmdRequestIBDBlocks
CmdBlock
CmdTx
CmdPing
CmdPong
CmdRequestBlockLocator
CmdBlockLocator
CmdSelectedTip
CmdRequestSelectedTip
CmdInvRelayBlock
CmdRequestRelayBlocks
CmdInvTransaction
CmdRequestTransactions
CmdIBDBlock
CmdRequestNextIBDBlocks
CmdDoneIBDBlocks
CmdTransactionNotFound
CmdReject
)
// MessageCommandToString maps all MessageCommands to their string representation
var MessageCommandToString = map[MessageCommand]string{
CmdVersion: "Version",
CmdVerAck: "VerAck",
CmdRequestAddresses: "RequestAddresses",
CmdAddresses: "Addresses",
CmdRequestIBDBlocks: "RequestBlocks",
CmdBlock: "Block",
CmdTx: "Tx",
CmdPing: "Ping",
CmdPong: "Pong",
CmdRequestBlockLocator: "RequestBlockLocator",
CmdBlockLocator: "BlockLocator",
CmdSelectedTip: "SelectedTip",
CmdRequestSelectedTip: "RequestSelectedTip",
CmdInvRelayBlock: "InvRelayBlock",
CmdRequestRelayBlocks: "RequestRelayBlocks",
CmdInvTransaction: "InvTransaction",
CmdRequestTransactions: "RequestTransactions",
CmdIBDBlock: "IBDBlock",
CmdRequestNextIBDBlocks: "RequestNextIBDBlocks",
CmdDoneIBDBlocks: "DoneIBDBlocks",
CmdTransactionNotFound: "TransactionNotFound",
CmdReject: "Reject",
}
// Message is an interface that describes a kaspa message. A type that
// implements Message has complete control over the representation of its data
// and may therefore contain additional or fewer fields than those which
// are used directly in the protocol encoded message.
type Message interface {
Command() MessageCommand
MessageNumber() uint64
SetMessageNumber(index uint64)
ReceivedAt() time.Time
SetReceivedAt(receivedAt time.Time)
}
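As a small illustration of the MessageCommand stringer defined above, a self-contained sketch; the import path is inferred from the file paths in this diff, and the printed code value follows from CmdPing being the eighth iota constant.

package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/app/appmessage" // assumed import path
)

func main() {
	cmd := appmessage.CmdPing
	fmt.Println(cmd)                                     // Ping [code 7]
	fmt.Println(appmessage.MessageCommandToString[cmd]) // Ping
}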

View File

@@ -0,0 +1,75 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"fmt"
"github.com/kaspanet/kaspad/util/subnetworkid"
)
// MaxAddressesPerMsg is the maximum number of addresses that can be in a single
// kaspa Addresses message (MsgAddresses).
const MaxAddressesPerMsg = 1000
// MsgAddresses implements the Message interface and represents a kaspa
// Addresses message. It is used to provide a list of known active peers on the
// network. An active peer is considered one that has transmitted a message
// within the last 3 hours. Nodes which have not transmitted in that time
// frame should be forgotten. Each message is limited to a maximum number of
// addresses, which is currently 1000. As a result, multiple messages must
// be used to relay the full list.
//
// Use the AddAddress function to build up the list of known addresses when
// sending an Addresses message to another peer.
type MsgAddresses struct {
baseMessage
IncludeAllSubnetworks bool
SubnetworkID *subnetworkid.SubnetworkID
AddrList []*NetAddress
}
// AddAddress adds a known active peer to the message.
func (msg *MsgAddresses) AddAddress(na *NetAddress) error {
if len(msg.AddrList)+1 > MaxAddressesPerMsg {
str := fmt.Sprintf("too many addresses in message [max %d]",
MaxAddressesPerMsg)
return messageError("MsgAddresses.AddAddress", str)
}
msg.AddrList = append(msg.AddrList, na)
return nil
}
// AddAddresses adds multiple known active peers to the message.
func (msg *MsgAddresses) AddAddresses(netAddrs ...*NetAddress) error {
for _, na := range netAddrs {
err := msg.AddAddress(na)
if err != nil {
return err
}
}
return nil
}
// ClearAddresses removes all addresses from the message.
func (msg *MsgAddresses) ClearAddresses() {
msg.AddrList = []*NetAddress{}
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgAddresses) Command() MessageCommand {
return CmdAddresses
}
// NewMsgAddresses returns a new kaspa Addresses message that conforms to the
// Message interface. See MsgAddresses for details.
func NewMsgAddresses(includeAllSubnetworks bool, subnetworkID *subnetworkid.SubnetworkID) *MsgAddresses {
return &MsgAddresses{
IncludeAllSubnetworks: includeAllSubnetworks,
SubnetworkID: subnetworkID,
AddrList: make([]*NetAddress, 0, MaxAddressesPerMsg),
}
}

View File

@@ -0,0 +1,58 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"net"
"testing"
"github.com/davecgh/go-spew/spew"
)
// TestAddresses tests the MsgAddresses API.
func TestAddresses(t *testing.T) {
// Ensure the command is expected value.
wantCmd := MessageCommand(3)
msg := NewMsgAddresses(false, nil)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgAddresses: wrong command - got %v want %v",
cmd, wantCmd)
}
// Ensure NetAddresses are added properly.
tcpAddr := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 16111}
na := NewNetAddress(tcpAddr, SFNodeNetwork)
err := msg.AddAddress(na)
if err != nil {
t.Errorf("AddAddress: %v", err)
}
if msg.AddrList[0] != na {
t.Errorf("AddAddress: wrong address added - got %v, want %v",
spew.Sprint(msg.AddrList[0]), spew.Sprint(na))
}
// Ensure the address list is cleared properly.
msg.ClearAddresses()
if len(msg.AddrList) != 0 {
t.Errorf("ClearAddresses: address list is not empty - "+
"got %v [%v], want %v", len(msg.AddrList),
spew.Sprint(msg.AddrList[0]), 0)
}
// Ensure adding more than the max allowed addresses per message returns
// error.
for i := 0; i < MaxAddressesPerMsg+1; i++ {
err = msg.AddAddress(na)
}
if err == nil {
t.Errorf("AddAddress: expected error on too many addresses " +
"not received")
}
err = msg.AddAddresses(na)
if err == nil {
t.Errorf("AddAddresses: expected error on too many addresses " +
"not received")
}
}

View File

@@ -2,34 +2,34 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
package appmessage
import (
"bytes"
"fmt"
"io"
"github.com/daglabs/btcd/util/subnetworkid"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/kaspanet/kaspad/util/daghash"
)
// defaultTransactionAlloc is the default size used for the backing array
// for transactions. The transaction array will dynamically grow as needed, but
// for transactions. The transaction array will dynamically grow as needed, but
// this figure is intended to provide enough space for the number of
// transactions in the vast majority of blocks without needing to grow the
// backing array multiple times.
const defaultTransactionAlloc = 2048
// MaxBlocksPerMsg is the maximum number of blocks allowed per message.
const MaxBlocksPerMsg = 500
// MaxMassPerBlock is the maximum total transaction mass a block may have.
const MaxMassPerBlock = 10000000
// MaxBlockPayload is the maximum bytes a block message can be in bytes.
const MaxBlockPayload = 1000000
// MaxMassPerTx is the maximum total mass a transaction may have.
const MaxMassPerTx = MaxMassPerBlock / 2
// maxTxPerBlock is the maximum number of transactions that could
// MaxTxPerBlock is the maximum number of transactions that could
// possibly fit into a block.
const maxTxPerBlock = (MaxBlockPayload / minTxPayload) + 1
const MaxTxPerBlock = (MaxMassPerBlock / minTxPayload) + 1
// TxLoc holds locator data for the offset and length of where a transaction is
// located within a MsgBlock data buffer.
@@ -38,10 +38,11 @@ type TxLoc struct {
TxLen int
}
// MsgBlock implements the Message interface and represents a bitcoin
// block message. It is used to deliver block and transaction information in
// MsgBlock implements the Message interface and represents a kaspa
// block message. It is used to deliver block and transaction information in
// response to a getdata message (MsgGetData) for a given block hash.
type MsgBlock struct {
baseMessage
Header BlockHeader
Transactions []*MsgTx
}
@@ -56,11 +57,11 @@ func (msg *MsgBlock) ClearTransactions() {
msg.Transactions = make([]*MsgTx, 0, defaultTransactionAlloc)
}
// BtcDecode decodes r using the bitcoin protocol encoding into the receiver.
// KaspaDecode decodes r using the kaspa protocol encoding into the receiver.
// This is part of the Message interface implementation.
// See Deserialize for decoding blocks stored to disk, such as in a database, as
// opposed to decoding blocks from the wire.
func (msg *MsgBlock) BtcDecode(r io.Reader, pver uint32) error {
// opposed to decoding blocks from the appmessage.
func (msg *MsgBlock) KaspaDecode(r io.Reader, pver uint32) error {
err := readBlockHeader(r, pver, &msg.Header)
if err != nil {
return err
@@ -74,16 +75,16 @@ func (msg *MsgBlock) BtcDecode(r io.Reader, pver uint32) error {
// Prevent more transactions than could possibly fit into a block.
// It would be possible to cause memory exhaustion and panics without
// a sane upper bound on this count.
if txCount > maxTxPerBlock {
if txCount > MaxTxPerBlock {
str := fmt.Sprintf("too many transactions to fit into a block "+
"[count %d, max %d]", txCount, maxTxPerBlock)
return messageError("MsgBlock.BtcDecode", str)
"[count %d, max %d]", txCount, MaxTxPerBlock)
return messageError("MsgBlock.KaspaDecode", str)
}
msg.Transactions = make([]*MsgTx, 0, txCount)
for i := uint64(0); i < txCount; i++ {
tx := MsgTx{}
err := tx.BtcDecode(r, pver)
err := tx.KaspaDecode(r, pver)
if err != nil {
return err
}
@@ -95,18 +96,18 @@ func (msg *MsgBlock) BtcDecode(r io.Reader, pver uint32) error {
// Deserialize decodes a block from r into the receiver using a format that is
// suitable for long-term storage such as a database while respecting the
// Version field in the block. This function differs from BtcDecode in that
// BtcDecode decodes from the bitcoin wire protocol as it was sent across the
// network. The wire encoding can technically differ depending on the protocol
// Version field in the block. This function differs from KaspaDecode in that
// KaspaDecode decodes from the kaspa appmessage protocol as it was sent across the
// network. The appmessage encoding can technically differ depending on the protocol
// version and doesn't even really need to match the format of a stored block at
// all. As of the time this comment was written, the encoded block is the same
// all. As of the time this comment was written, the encoded block is the same
// in both instances, but there is a distinct difference and separating the two
// allows the API to be flexible enough to deal with changes.
func (msg *MsgBlock) Deserialize(r io.Reader) error {
// At the current time, there is no difference between the wire encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of BtcDecode.
return msg.BtcDecode(r, 0)
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of KaspaDecode.
return msg.KaspaDecode(r, 0)
}
// DeserializeTxLoc decodes r in the same manner Deserialize does, but it takes
@@ -116,9 +117,9 @@ func (msg *MsgBlock) Deserialize(r io.Reader) error {
func (msg *MsgBlock) DeserializeTxLoc(r *bytes.Buffer) ([]TxLoc, error) {
fullLen := r.Len()
// At the current time, there is no difference between the wire encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of existing wire protocol functions.
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of existing appmessage protocol functions.
err := readBlockHeader(r, 0, &msg.Header)
if err != nil {
return nil, err
@@ -132,9 +133,9 @@ func (msg *MsgBlock) DeserializeTxLoc(r *bytes.Buffer) ([]TxLoc, error) {
// Prevent more transactions than could possibly fit into a block.
// It would be possible to cause memory exhaustion and panics without
// a sane upper bound on this count.
if txCount > maxTxPerBlock {
if txCount > MaxTxPerBlock {
str := fmt.Sprintf("too many transactions to fit into a block "+
"[count %d, max %d]", txCount, maxTxPerBlock)
"[count %d, max %d]", txCount, MaxTxPerBlock)
return nil, messageError("MsgBlock.DeserializeTxLoc", str)
}
@@ -156,11 +157,11 @@ func (msg *MsgBlock) DeserializeTxLoc(r *bytes.Buffer) ([]TxLoc, error) {
return txLocs, nil
}
// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
// KaspaEncode encodes the receiver to w using the kaspa protocol encoding.
// This is part of the Message interface implementation.
// See Serialize for encoding blocks to be stored to disk, such as in a
// database, as opposed to encoding blocks for the wire.
func (msg *MsgBlock) BtcEncode(w io.Writer, pver uint32) error {
// database, as opposed to encoding blocks for the appmessage.
func (msg *MsgBlock) KaspaEncode(w io.Writer, pver uint32) error {
err := writeBlockHeader(w, pver, &msg.Header)
if err != nil {
return err
@@ -172,7 +173,7 @@ func (msg *MsgBlock) BtcEncode(w io.Writer, pver uint32) error {
}
for _, tx := range msg.Transactions {
err = tx.BtcEncode(w, pver)
err = tx.KaspaEncode(w, pver)
if err != nil {
return err
}
@@ -183,18 +184,18 @@ func (msg *MsgBlock) BtcEncode(w io.Writer, pver uint32) error {
// Serialize encodes the block to w using a format that suitable for long-term
// storage such as a database while respecting the Version field in the block.
// This function differs from BtcEncode in that BtcEncode encodes the block to
// the bitcoin wire protocol in order to be sent across the network. The wire
// This function differs from KaspaEncode in that KaspaEncode encodes the block to
// the kaspa appmessage protocol in order to be sent across the network. The appmessage
// encoding can technically differ depending on the protocol version and doesn't
// even really need to match the format of a stored block at all. As of the
// even really need to match the format of a stored block at all. As of the
// time this comment was written, the encoded block is the same in both
// instances, but there is a distinct difference and separating the two allows
// the API to be flexible enough to deal with changes.
func (msg *MsgBlock) Serialize(w io.Writer) error {
// At the current time, there is no difference between the wire encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of BtcEncode.
return msg.BtcEncode(w, 0)
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of KaspaEncode.
return msg.KaspaEncode(w, 0)
}
// SerializeSize returns the number of bytes it would take to serialize the
@@ -211,19 +212,16 @@ func (msg *MsgBlock) SerializeSize() int {
return n
}
// Command returns the protocol command string for the message. This is part
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgBlock) Command() string {
func (msg *MsgBlock) Command() MessageCommand {
return CmdBlock
}
// MaxPayloadLength returns the maximum length the payload can be for the
// receiver. This is part of the Message interface implementation.
// receiver. This is part of the Message interface implementation.
func (msg *MsgBlock) MaxPayloadLength(pver uint32) uint32 {
// Block header at 80 bytes + transaction count + max transactions
// which can vary up to the MaxBlockPayload (including the block header
// and transaction count).
return MaxBlockPayload
return MaxMessagePayload
}
// BlockHash computes the block identifier hash for this block.
@@ -242,8 +240,8 @@ func (msg *MsgBlock) ConvertToPartial(subnetworkID *subnetworkid.SubnetworkID) {
}
}
// NewMsgBlock returns a new bitcoin block message that conforms to the
// Message interface. See MsgBlock for details.
// NewMsgBlock returns a new kaspa block message that conforms to the
// Message interface. See MsgBlock for details.
func NewMsgBlock(blockHeader *BlockHeader) *MsgBlock {
return &MsgBlock{
Header: *blockHeader,


@@ -2,20 +2,20 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
package appmessage
import (
"bytes"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/pkg/errors"
"io"
"math"
"reflect"
"testing"
"time"
"github.com/daglabs/btcd/util/subnetworkid"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/util/daghash"
)
// TestBlock tests the MsgBlock API.
@@ -25,13 +25,14 @@ func TestBlock(t *testing.T) {
// Block 1 header.
parentHashes := blockOne.Header.ParentHashes
hashMerkleRoot := blockOne.Header.HashMerkleRoot
idMerkleRoot := blockOne.Header.IDMerkleRoot
acceptedIDMerkleRoot := blockOne.Header.AcceptedIDMerkleRoot
utxoCommitment := blockOne.Header.UTXOCommitment
bits := blockOne.Header.Bits
nonce := blockOne.Header.Nonce
bh := NewBlockHeader(1, parentHashes, hashMerkleRoot, idMerkleRoot, bits, nonce)
bh := NewBlockHeader(1, parentHashes, hashMerkleRoot, acceptedIDMerkleRoot, utxoCommitment, bits, nonce)
// Ensure the command is expected value.
wantCmd := "block"
wantCmd := MessageCommand(5)
msg := NewMsgBlock(bh)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgBlock: wrong command - got %v want %v",
@@ -39,8 +40,7 @@ func TestBlock(t *testing.T) {
}
// Ensure max payload is expected value for latest protocol version.
// Num addresses (varInt) + max allowed addresses.
wantPayload := uint32(1000000)
wantPayload := uint32(1024 * 1024 * 32)
maxPayload := msg.MaxPayloadLength(pver)
if maxPayload != wantPayload {
t.Errorf("MaxPayloadLength: wrong max payload length for "+
@@ -74,7 +74,7 @@ func TestBlock(t *testing.T) {
// TestBlockHash tests the ability to generate the hash of a block accurately.
func TestBlockHash(t *testing.T) {
// Block 1 hash.
hashStr := "67ec32b619b4cda3255de5318c730e2e9f696d335427adfecae884aa41156b0f"
hashStr := "55d71bd49a8233bc9f0edbcbd0ad5d3eaebffe1fc6a6443a1c1f310fd02c11a5"
wantHash, err := daghash.NewHashFromStr(hashStr)
if err != nil {
t.Errorf("NewHashFromStr: %v", err)
@@ -144,15 +144,15 @@ func TestConvertToPartial(t *testing.T) {
}
}
// TestBlockWire tests the MsgBlock wire encode and decode for various numbers
// TestBlockEncoding tests the MsgBlock appmessage encode and decode for various numbers
// of transaction inputs and outputs and protocol versions.
func TestBlockWire(t *testing.T) {
func TestBlockEncoding(t *testing.T) {
tests := []struct {
in *MsgBlock // Message to encode
out *MsgBlock // Expected decoded message
buf []byte // Wire encoding
buf []byte // Encoded value
txLocs []TxLoc // Expected transaction locations
pver uint32 // Protocol version for wire encoding
pver uint32 // Protocol version for appmessage encoding
}{
// Latest protocol version.
{
@@ -166,44 +166,44 @@ func TestBlockWire(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode the message to wire format.
// Encode the message to appmessage format.
var buf bytes.Buffer
err := test.in.BtcEncode(&buf, test.pver)
err := test.in.KaspaEncode(&buf, test.pver)
if err != nil {
t.Errorf("BtcEncode #%d error %v", i, err)
t.Errorf("KaspaEncode #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("BtcEncode #%d\n got: %s want: %s", i,
t.Errorf("KaspaEncode #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
// Decode the message from wire format.
// Decode the message from appmessage format.
var msg MsgBlock
rbuf := bytes.NewReader(test.buf)
err = msg.BtcDecode(rbuf, test.pver)
err = msg.KaspaDecode(rbuf, test.pver)
if err != nil {
t.Errorf("BtcDecode #%d error %v", i, err)
t.Errorf("KaspaDecode #%d error %v", i, err)
continue
}
if !reflect.DeepEqual(&msg, test.out) {
t.Errorf("BtcDecode #%d\n got: %s want: %s", i,
t.Errorf("KaspaDecode #%d\n got: %s want: %s", i,
spew.Sdump(&msg), spew.Sdump(test.out))
continue
}
}
}
// TestBlockWireErrors performs negative tests against wire encode and decode
// TestBlockEncodingErrors performs negative tests against appmessage encode and decode
// of MsgBlock to confirm error paths work correctly.
func TestBlockWireErrors(t *testing.T) {
func TestBlockEncodingErrors(t *testing.T) {
pver := ProtocolVersion
tests := []struct {
in *MsgBlock // Value to encode
buf []byte // Wire encoding
pver uint32 // Protocol version for wire encoding
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
max int // Max size of fixed buffer to induce errors
writeErr error // Expected write error
readErr error // Expected read error
@@ -218,37 +218,39 @@ func TestBlockWireErrors(t *testing.T) {
{&blockOne, blockOneBytes, pver, 37, io.ErrShortWrite, io.EOF},
// Force error in hash merkle root.
{&blockOne, blockOneBytes, pver, 69, io.ErrShortWrite, io.EOF},
// Force error in ID merkle root.
// Force error in accepted ID merkle root.
{&blockOne, blockOneBytes, pver, 101, io.ErrShortWrite, io.EOF},
// Force error in timestamp.
// Force error in utxo commitment.
{&blockOne, blockOneBytes, pver, 133, io.ErrShortWrite, io.EOF},
// Force error in timestamp.
{&blockOne, blockOneBytes, pver, 165, io.ErrShortWrite, io.EOF},
// Force error in difficulty bits.
{&blockOne, blockOneBytes, pver, 141, io.ErrShortWrite, io.EOF},
{&blockOne, blockOneBytes, pver, 173, io.ErrShortWrite, io.EOF},
// Force error in header nonce.
{&blockOne, blockOneBytes, pver, 145, io.ErrShortWrite, io.EOF},
{&blockOne, blockOneBytes, pver, 177, io.ErrShortWrite, io.EOF},
// Force error in transaction count.
{&blockOne, blockOneBytes, pver, 153, io.ErrShortWrite, io.EOF},
{&blockOne, blockOneBytes, pver, 185, io.ErrShortWrite, io.EOF},
// Force error in transactions.
{&blockOne, blockOneBytes, pver, 154, io.ErrShortWrite, io.EOF},
{&blockOne, blockOneBytes, pver, 186, io.ErrShortWrite, io.EOF},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to wire format.
// Encode to appmessage format.
w := newFixedWriter(test.max)
err := test.in.BtcEncode(w, test.pver)
if err != test.writeErr {
t.Errorf("BtcEncode #%d wrong error got: %v, want: %v",
err := test.in.KaspaEncode(w, test.pver)
if !errors.Is(err, test.writeErr) {
t.Errorf("KaspaEncode #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Decode from wire format.
// Decode from appmessage format.
var msg MsgBlock
r := newFixedReader(test.max, test.buf)
err = msg.BtcDecode(r, test.pver)
if err != test.readErr {
t.Errorf("BtcDecode #%d wrong error got: %v, want: %v",
err = msg.KaspaDecode(r, test.pver)
if !errors.Is(err, test.readErr) {
t.Errorf("KaspaDecode #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
}
@@ -322,7 +324,7 @@ func TestBlockSerialize(t *testing.T) {
}
}
// TestBlockSerializeErrors performs negative tests against wire encode and
// TestBlockSerializeErrors performs negative tests against appmessage encode and
// decode of MsgBlock to confirm error paths work correctly.
func TestBlockSerializeErrors(t *testing.T) {
tests := []struct {
@@ -342,18 +344,20 @@ func TestBlockSerializeErrors(t *testing.T) {
{&blockOne, blockOneBytes, 37, io.ErrShortWrite, io.EOF},
// Force error in hash merkle root.
{&blockOne, blockOneBytes, 69, io.ErrShortWrite, io.EOF},
// Force error in ID merkle root.
// Force error in accepted ID merkle root.
{&blockOne, blockOneBytes, 101, io.ErrShortWrite, io.EOF},
// Force error in timestamp.
// Force error in utxo commitment.
{&blockOne, blockOneBytes, 133, io.ErrShortWrite, io.EOF},
// Force error in timestamp.
{&blockOne, blockOneBytes, 165, io.ErrShortWrite, io.EOF},
// Force error in difficulty bits.
{&blockOne, blockOneBytes, 141, io.ErrShortWrite, io.EOF},
{&blockOne, blockOneBytes, 173, io.ErrShortWrite, io.EOF},
// Force error in header nonce.
{&blockOne, blockOneBytes, 145, io.ErrShortWrite, io.EOF},
{&blockOne, blockOneBytes, 177, io.ErrShortWrite, io.EOF},
// Force error in transaction count.
{&blockOne, blockOneBytes, 153, io.ErrShortWrite, io.EOF},
{&blockOne, blockOneBytes, 185, io.ErrShortWrite, io.EOF},
// Force error in transactions.
{&blockOne, blockOneBytes, 154, io.ErrShortWrite, io.EOF},
{&blockOne, blockOneBytes, 186, io.ErrShortWrite, io.EOF},
}
t.Logf("Running %d tests", len(tests))
@@ -361,7 +365,7 @@ func TestBlockSerializeErrors(t *testing.T) {
// Serialize the block.
w := newFixedWriter(test.max)
err := test.in.Serialize(w)
if err != test.writeErr {
if !errors.Is(err, test.writeErr) {
t.Errorf("Serialize #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
@@ -371,7 +375,7 @@ func TestBlockSerializeErrors(t *testing.T) {
var block MsgBlock
r := newFixedReader(test.max, test.buf)
err = block.Deserialize(r)
if err != test.readErr {
if !errors.Is(err, test.readErr) {
t.Errorf("Deserialize #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
@@ -380,7 +384,7 @@ func TestBlockSerializeErrors(t *testing.T) {
var txLocBlock MsgBlock
br := bytes.NewBuffer(test.buf[0:test.max])
_, err = txLocBlock.DeserializeTxLoc(br)
if err != test.readErr {
if !errors.Is(err, test.readErr) {
t.Errorf("DeserializeTxLoc #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
@@ -390,14 +394,14 @@ func TestBlockSerializeErrors(t *testing.T) {
// TestBlockOverflowErrors performs tests to ensure deserializing blocks which
// are intentionally crafted to use large values for the number of transactions
// are handled properly. This could otherwise potentially be used as an attack
// are handled properly. This could otherwise potentially be used as an attack
// vector.
func TestBlockOverflowErrors(t *testing.T) {
pver := ProtocolVersion
tests := []struct {
buf []byte // Wire encoding
pver uint32 // Protocol version for wire encoding
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
err error // Expected error
}{
// Block that claims to have ~uint64(0) transactions.
@@ -405,11 +409,11 @@ func TestBlockOverflowErrors(t *testing.T) {
[]byte{
0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumParentBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // mainNetGenesisHash
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // mainnetGenesisHash
0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simNetGenesisHash
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simnetGenesisHash
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,
@@ -417,13 +421,17 @@ func TestBlockOverflowErrors(t *testing.T) {
0x7a, 0xc7, 0x2c, 0x3e, 0x67, 0x76, 0x8f, 0x61,
0x7f, 0xc8, 0x1b, 0xc3, 0x88, 0x8a, 0x51, 0x32,
0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63, // IDMerkleRoot
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x09, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C,
0x09, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C, // AcceptedIDMerkleRoot
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x10, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C, // UTXOCommitment
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x61, 0xbc, 0x66, 0x49, 0x00, 0x00, 0x00, 0x00, // Timestamp
0xff, 0xff, 0x00, 0x1d, // Bits
0x01, 0xe3, 0x62, 0x99, 0x00, 0x00, 0x00, 0x00, // Fake Nonce. TODO: (Ori) Replace to a real nonce
0x01, 0xe3, 0x62, 0x99, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, // TxnCount
}, pver, &MessageError{},
@@ -432,17 +440,17 @@ func TestBlockOverflowErrors(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Decode from wire format.
// Decode from appmessage format.
var msg MsgBlock
r := bytes.NewReader(test.buf)
err := msg.BtcDecode(r, test.pver)
err := msg.KaspaDecode(r, test.pver)
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
t.Errorf("BtcDecode #%d wrong error got: %v, want: %v",
t.Errorf("KaspaDecode #%d wrong error got: %v, want: %v",
i, err, reflect.TypeOf(test.err))
continue
}
// Deserialize from wire format.
// Deserialize from appmessage format.
r = bytes.NewReader(test.buf)
err = msg.Deserialize(r)
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
@@ -451,7 +459,7 @@ func TestBlockOverflowErrors(t *testing.T) {
continue
}
// Deserialize with transaction location info from wire format.
// Deserialize with transaction location info from appmessage format.
br := bytes.NewBuffer(test.buf)
_, err = msg.DeserializeTxLoc(br)
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
@@ -473,9 +481,9 @@ func TestBlockSerializeSize(t *testing.T) {
size int // Expected serialized size
}{
// Block with no transactions.
{noTxBlock, 154},
{noTxBlock, 186},
// First block in the mainnet block chain.
// First block in the mainnet block DAG.
{&blockOne, len(blockOneBytes)},
}
@@ -491,22 +499,23 @@ func TestBlockSerializeSize(t *testing.T) {
}
}
// blockOne is the first block in the mainnet block chain.
// blockOne is the first block in the mainnet block DAG.
var blockOne = MsgBlock{
Header: BlockHeader{
Version: 1,
ParentHashes: []*daghash.Hash{mainNetGenesisHash, simNetGenesisHash},
HashMerkleRoot: mainNetGenesisMerkleRoot,
IDMerkleRoot: exampleIDMerkleRoot,
Timestamp: time.Unix(0x4966bc61, 0), // 2009-01-08 20:54:25 -0600 CST
Bits: 0x1d00ffff, // 486604799
Nonce: 0x9962e301, // 2573394689
Version: 1,
ParentHashes: []*daghash.Hash{mainnetGenesisHash, simnetGenesisHash},
HashMerkleRoot: mainnetGenesisMerkleRoot,
AcceptedIDMerkleRoot: exampleAcceptedIDMerkleRoot,
UTXOCommitment: exampleUTXOCommitment,
Timestamp: mstime.UnixMilliseconds(0x17315ed0f99),
Bits: 0x1d00ffff, // 486604799
Nonce: 0x9962e301, // 2573394689
},
Transactions: []*MsgTx{
NewNativeMsgTx(1,
[]*TxIn{
{
PreviousOutPoint: OutPoint{
PreviousOutpoint: Outpoint{
TxID: daghash.TxID{},
Index: 0xffffffff,
},
@@ -519,7 +528,7 @@ var blockOne = MsgBlock{
[]*TxOut{
{
Value: 0x12a05f200,
PkScript: []byte{
ScriptPubKey: []byte{
0x41, // OP_DATA_65
0x04, 0x96, 0xb5, 0x38, 0xe8, 0x53, 0x51, 0x9c,
0x72, 0x6a, 0x2c, 0x91, 0xe6, 0x1e, 0xc1, 0x16,
@@ -541,11 +550,11 @@ var blockOne = MsgBlock{
var blockOneBytes = []byte{
0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumParentBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // mainNetGenesisHash
0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simNetGenesisHash
0xdc, 0x5f, 0x5b, 0x5b, 0x1d, 0xc2, 0xa7, 0x25, // mainnetGenesisHash
0x49, 0xd5, 0x1d, 0x4d, 0xee, 0xd7, 0xa4, 0x8b,
0xaf, 0xd3, 0x14, 0x4b, 0x56, 0x78, 0x98, 0xb1,
0x8c, 0xfd, 0x9f, 0x69, 0xdd, 0xcf, 0xbb, 0x63,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simnetGenesisHash
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,
@@ -553,13 +562,17 @@ var blockOneBytes = []byte{
0x32, 0x51, 0x8a, 0x88, 0xc3, 0x1b, 0xc8, 0x7f,
0x61, 0x8f, 0x76, 0x67, 0x3e, 0x2c, 0xc7, 0x7a,
0xb2, 0x12, 0x7b, 0x7a, 0xfd, 0xed, 0xa3, 0x3b,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63, // Fake IDMerkleRoot. TODO: (Ori) Replace to a real IDMerkleRoot
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x09, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C,
0x09, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C, // AcceptedIDMerkleRoot
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x61, 0xbc, 0x66, 0x49, 0x00, 0x00, 0x00, 0x00, // Timestamp
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x10, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C, // UTXOCommitment
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x99, 0x0f, 0xed, 0x15, 0x73, 0x01, 0x00, 0x00, // Timestamp
0xff, 0xff, 0x00, 0x1d, // Bits
0x01, 0xe3, 0x62, 0x99, 0x00, 0x00, 0x00, 0x00, // Fake Nonce. TODO: (Ori) Replace to a real nonce
0x01, 0xe3, 0x62, 0x99, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
0x01, // TxnCount
0x01, 0x00, 0x00, 0x00, // Version
0x01, // Varint for number of transaction inputs
@@ -573,7 +586,7 @@ var blockOneBytes = []byte{
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // Sequence
0x01, // Varint for number of transaction outputs
0x00, 0xf2, 0x05, 0x2a, 0x01, 0x00, 0x00, 0x00, // Transaction amount
0x43, // Varint for length of pk script
0x43, // Varint for length of scriptPubKey
0x41, // OP_DATA_65
0x04, 0x96, 0xb5, 0x38, 0xe8, 0x53, 0x51, 0x9c,
0x72, 0x6a, 0x2c, 0x91, 0xe6, 0x1e, 0xc1, 0x16,
@@ -593,5 +606,5 @@ var blockOneBytes = []byte{
// Transaction location information for block one transactions.
var blockOneTxLocs = []TxLoc{
{TxStart: 154, TxLen: 162},
{TxStart: 186, TxLen: 162},
}
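
As a rough sketch of the Serialize/Deserialize round trip exercised above (illustrative only; written as if inside the appmessage package, with bytes and the daghash util package imported, and with zero hashes standing in for real merkle roots and UTXO commitment):

func roundTripEmptyBlock() error {
    header := NewBlockHeader(1, []*daghash.Hash{}, &daghash.ZeroHash,
        &daghash.ZeroHash, &daghash.ZeroHash, 0x1d00ffff, 0)
    block := NewMsgBlock(header)

    // Serialize uses the long-term storage format...
    var buf bytes.Buffer
    if err := block.Serialize(&buf); err != nil {
        return err
    }

    // ...which, at protocol version 0, matches the appmessage encoding, so
    // Deserialize reads the same bytes back.
    var decoded MsgBlock
    return decoded.Deserialize(bytes.NewReader(buf.Bytes()))
}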


@@ -0,0 +1,31 @@
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
)
// MaxBlockLocatorsPerMsg is the maximum number of block locator hashes allowed
// per message.
const MaxBlockLocatorsPerMsg = 500
// MsgBlockLocator implements the Message interface and represents a kaspa
// locator message. It is used to find the blockLocator of a peer that is
// syncing with you.
type MsgBlockLocator struct {
baseMessage
BlockLocatorHashes []*daghash.Hash
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgBlockLocator) Command() MessageCommand {
return CmdBlockLocator
}
// NewMsgBlockLocator returns a new kaspa locator message that conforms to
// the Message interface. See MsgBlockLocator for details.
func NewMsgBlockLocator(locatorHashes []*daghash.Hash) *MsgBlockLocator {
return &MsgBlockLocator{
BlockLocatorHashes: locatorHashes,
}
}


@@ -0,0 +1,34 @@
package appmessage
import (
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/util/daghash"
)
// TestBlockLocator tests the MsgBlockLocator API.
func TestBlockLocator(t *testing.T) {
hashStr := "000000000002e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0"
locatorHash, err := daghash.NewHashFromStr(hashStr)
if err != nil {
t.Errorf("NewHashFromStr: %v", err)
}
msg := NewMsgBlockLocator([]*daghash.Hash{locatorHash})
// Ensure the command is expected value.
wantCmd := MessageCommand(10)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgBlockLocator: wrong command - got %v want %v",
cmd, wantCmd)
}
// Ensure block locator hashes are added properly.
if msg.BlockLocatorHashes[0] != locatorHash {
t.Errorf("AddBlockLocatorHash: wrong block locator added - "+
"got %v, want %v",
spew.Sprint(msg.BlockLocatorHashes[0]),
spew.Sprint(locatorHash))
}
}


@@ -0,0 +1,22 @@
package appmessage
// MsgDoneIBDBlocks implements the Message interface and represents a kaspa
// DoneIBDBlocks message. It is used to notify the IBD syncing peer that the
// syncer sent all the requested blocks.
//
// This message has no payload.
type MsgDoneIBDBlocks struct {
baseMessage
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgDoneIBDBlocks) Command() MessageCommand {
return CmdDoneIBDBlocks
}
// NewMsgDoneIBDBlocks returns a new kaspa DoneIBDBlocks message that conforms to the
// Message interface.
func NewMsgDoneIBDBlocks() *MsgDoneIBDBlocks {
return &MsgDoneIBDBlocks{}
}


@@ -0,0 +1,31 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
// MsgIBDBlock implements the Message interface and represents a kaspa
// ibdblock message. It is used to deliver block and transaction information in
// response to a RequestIBDBlocks message (MsgRequestIBDBlocks).
type MsgIBDBlock struct {
baseMessage
*MsgBlock
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgIBDBlock) Command() MessageCommand {
return CmdIBDBlock
}
// MaxPayloadLength returns the maximum length the payload can be for the
// receiver. This is part of the Message interface implementation.
func (msg *MsgIBDBlock) MaxPayloadLength(pver uint32) uint32 {
return MaxMessagePayload
}
// NewMsgIBDBlock returns a new kaspa ibdblock message that conforms to the
// Message interface. See MsgIBDBlock for details.
func NewMsgIBDBlock(msgBlock *MsgBlock) *MsgIBDBlock {
return &MsgIBDBlock{MsgBlock: msgBlock}
}
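
A sketch of how an existing block is rewrapped for IBD delivery (illustrative only):

func wrapForIBD(block *MsgBlock) *MsgIBDBlock {
    // The embedded MsgBlock keeps its header and transactions; only the
    // command changes, so the receiver routes it through the IBD flow.
    return NewMsgIBDBlock(block)
}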


@@ -0,0 +1,118 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"bytes"
"reflect"
"testing"
"github.com/davecgh/go-spew/spew"
)
// TestIBDBlock tests the MsgIBDBlock API.
func TestIBDBlock(t *testing.T) {
pver := ProtocolVersion
// Block 1 header.
parentHashes := blockOne.Header.ParentHashes
hashMerkleRoot := blockOne.Header.HashMerkleRoot
acceptedIDMerkleRoot := blockOne.Header.AcceptedIDMerkleRoot
utxoCommitment := blockOne.Header.UTXOCommitment
bits := blockOne.Header.Bits
nonce := blockOne.Header.Nonce
bh := NewBlockHeader(1, parentHashes, hashMerkleRoot, acceptedIDMerkleRoot, utxoCommitment, bits, nonce)
// Ensure the command is expected value.
wantCmd := MessageCommand(17)
msg := NewMsgIBDBlock(NewMsgBlock(bh))
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgIBDBlock: wrong command - got %v want %v",
cmd, wantCmd)
}
// Ensure max payload is expected value for latest protocol version.
wantPayload := uint32(1024 * 1024 * 32)
maxPayload := msg.MaxPayloadLength(pver)
if maxPayload != wantPayload {
t.Errorf("MaxPayloadLength: wrong max payload length for "+
"protocol version %d - got %v, want %v", pver,
maxPayload, wantPayload)
}
// Ensure we get the same block header data back out.
if !reflect.DeepEqual(&msg.Header, bh) {
t.Errorf("NewMsgIBDBlock: wrong block header - got %v, want %v",
spew.Sdump(&msg.Header), spew.Sdump(bh))
}
// Ensure transactions are added properly.
tx := blockOne.Transactions[0].Copy()
msg.AddTransaction(tx)
if !reflect.DeepEqual(msg.Transactions, blockOne.Transactions) {
t.Errorf("AddTransaction: wrong transactions - got %v, want %v",
spew.Sdump(msg.Transactions),
spew.Sdump(blockOne.Transactions))
}
// Ensure transactions are properly cleared.
msg.ClearTransactions()
if len(msg.Transactions) != 0 {
t.Errorf("ClearTransactions: wrong transactions - got %v, want %v",
len(msg.Transactions), 0)
}
}
// TestIBDBlockEncoding tests the MsgIBDBlock appmessage encode and decode for various numbers
// of transaction inputs and outputs and protocol versions.
func TestIBDBlockEncoding(t *testing.T) {
tests := []struct {
in *MsgIBDBlock // Message to encode
out *MsgIBDBlock // Expected decoded message
buf []byte // Encoded value
txLocs []TxLoc // Expected transaction locations
pver uint32 // Protocol version for appmessage encoding
}{
// Latest protocol version.
{
&MsgIBDBlock{MsgBlock: &blockOne},
&MsgIBDBlock{MsgBlock: &blockOne},
blockOneBytes,
blockOneTxLocs,
ProtocolVersion,
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode the message to appmessage format.
var buf bytes.Buffer
err := test.in.KaspaEncode(&buf, test.pver)
if err != nil {
t.Errorf("KaspaEncode #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("KaspaEncode #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
// Decode the message from appmessage format.
var msg MsgIBDBlock
msg.MsgBlock = new(MsgBlock)
rbuf := bytes.NewReader(test.buf)
err = msg.KaspaDecode(rbuf, test.pver)
if err != nil {
t.Errorf("KaspaDecode #%d error %v", i, err)
continue
}
if !reflect.DeepEqual(&msg, test.out) {
t.Errorf("KaspaDecode #%d\n got: %s want: %s", i,
spew.Sdump(&msg), spew.Sdump(test.out))
continue
}
}
}


@@ -0,0 +1,27 @@
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
)
// MsgInvRelayBlock implements the Message interface and represents a kaspa
// block inventory message. It is used to notify the network about new block
// by sending their hash, and let the receiving node decide if it needs it.
type MsgInvRelayBlock struct {
baseMessage
Hash *daghash.Hash
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgInvRelayBlock) Command() MessageCommand {
return CmdInvRelayBlock
}
// NewMsgInvBlock returns a new kaspa invrelblk message that conforms to
// the Message interface. See MsgInvRelayBlock for details.
func NewMsgInvBlock(hash *daghash.Hash) *MsgInvRelayBlock {
return &MsgInvRelayBlock{
Hash: hash,
}
}


@@ -0,0 +1,31 @@
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
)
// MaxInvPerTxInvMsg is the maximum number of hashes that can
// be in a single CmdInvTransaction message.
const MaxInvPerTxInvMsg = MaxInvPerMsg
// MsgInvTransaction implements the Message interface and represents a kaspa
// TxInv message. It is used to notify the network about new transactions
// by sending their ID, and let the receiving node decide if it needs it.
type MsgInvTransaction struct {
baseMessage
TxIDs []*daghash.TxID
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgInvTransaction) Command() MessageCommand {
return CmdInvTransaction
}
// NewMsgInvTransaction returns a new kaspa TxInv message that conforms to
// the Message interface. See MsgInvTransaction for details.
func NewMsgInvTransaction(ids []*daghash.TxID) *MsgInvTransaction {
return &MsgInvTransaction{
TxIDs: ids,
}
}

app/appmessage/msgping.go

@@ -0,0 +1,37 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
// MsgPing implements the Message interface and represents a kaspa ping
// message.
//
// For versions BIP0031Version and earlier, it is used primarily to confirm
// that a connection is still valid. A transmission error is typically
// interpreted as a closed connection and that the peer should be removed.
// For versions AFTER BIP0031Version it contains an identifier which can be
// returned in the pong message to determine network timing.
//
// The payload for this message just consists of a nonce used for identifying
// it later.
type MsgPing struct {
baseMessage
// Unique value associated with message that is used to identify
// specific ping message.
Nonce uint64
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgPing) Command() MessageCommand {
return CmdPing
}
// NewMsgPing returns a new kaspa ping message that conforms to the Message
// interface. See MsgPing for details.
func NewMsgPing(nonce uint64) *MsgPing {
return &MsgPing{
Nonce: nonce,
}
}


@@ -0,0 +1,32 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"testing"
"github.com/kaspanet/kaspad/util/random"
)
// TestPing tests the MsgPing API against the latest protocol version.
func TestPing(t *testing.T) {
// Ensure we get the same nonce back out.
nonce, err := random.Uint64()
if err != nil {
t.Errorf("random.Uint64: Error generating nonce: %v", err)
}
msg := NewMsgPing(nonce)
if msg.Nonce != nonce {
t.Errorf("NewMsgPing: wrong nonce - got %v, want %v",
msg.Nonce, nonce)
}
// Ensure the command is expected value.
wantCmd := MessageCommand(7)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgPing: wrong command - got %v want %v",
cmd, wantCmd)
}
}

app/appmessage/msgpong.go

@@ -0,0 +1,31 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
// MsgPong implements the Message interface and represents a kaspa pong
// message which is used primarily to confirm that a connection is still valid
// in response to a kaspa ping message (MsgPing).
//
// This message was not added until protocol versions AFTER BIP0031Version.
type MsgPong struct {
baseMessage
// Unique value associated with message that is used to identify
// specific ping message.
Nonce uint64
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgPong) Command() MessageCommand {
return CmdPong
}
// NewMsgPong returns a new kaspa pong message that conforms to the Message
// interface. See MsgPong for details.
func NewMsgPong(nonce uint64) *MsgPong {
return &MsgPong{
Nonce: nonce,
}
}
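
A sketch of the ping/pong pairing (illustrative only; the flow that actually sends the reply is outside this diff):

func respondToPing(ping *MsgPing) *MsgPong {
    // Echo the ping nonce so the sender can match this pong to its ping and
    // measure round-trip time.
    return NewMsgPong(ping.Nonce)
}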


@@ -0,0 +1,31 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"testing"
"github.com/kaspanet/kaspad/util/random"
)
// TestPongLatest tests the MsgPong API against the latest protocol version.
func TestPongLatest(t *testing.T) {
nonce, err := random.Uint64()
if err != nil {
t.Errorf("random.Uint64: error generating nonce: %v", err)
}
msg := NewMsgPong(nonce)
if msg.Nonce != nonce {
t.Errorf("NewMsgPong: wrong nonce - got %v, want %v",
msg.Nonce, nonce)
}
// Ensure the command is expected value.
wantCmd := MessageCommand(8)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgPong: wrong command - got %v want %v",
cmd, wantCmd)
}
}


@@ -0,0 +1,22 @@
package appmessage
// MsgReject implements the Message interface and represents a kaspa
// Reject message. It is used to notify peers why they are banned.
type MsgReject struct {
baseMessage
Reason string
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgReject) Command() MessageCommand {
return CmdReject
}
// NewMsgReject returns a new kaspa Reject message that conforms to the
// Message interface.
func NewMsgReject(reason string) *MsgReject {
return &MsgReject{
Reason: reason,
}
}
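
For example (a sketch; the banning logic itself is not part of this diff):

func rejectPeer(reason string) *MsgReject {
    // Sent to a misbehaving peer so it knows why it is about to be banned.
    return NewMsgReject(reason)
}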


@@ -0,0 +1,36 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"github.com/kaspanet/kaspad/util/subnetworkid"
)
// MsgRequestAddresses implements the Message interface and represents a kaspa
// RequestAddresses message. It is used to request a list of known active peers on the
// network from a peer to help identify potential nodes. The list is returned
// via one or more addr messages (MsgAddresses).
//
// This message has no payload.
type MsgRequestAddresses struct {
baseMessage
IncludeAllSubnetworks bool
SubnetworkID *subnetworkid.SubnetworkID
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestAddresses) Command() MessageCommand {
return CmdRequestAddresses
}
// NewMsgRequestAddresses returns a new kaspa RequestAddresses message that conforms to the
// Message interface. See MsgRequestAddresses for details.
func NewMsgRequestAddresses(includeAllSubnetworks bool, subnetworkID *subnetworkid.SubnetworkID) *MsgRequestAddresses {
return &MsgRequestAddresses{
IncludeAllSubnetworks: includeAllSubnetworks,
SubnetworkID: subnetworkID,
}
}


@@ -0,0 +1,20 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"testing"
)
// TestRequestAddresses tests the MsgRequestAddresses API.
func TestRequestAddresses(t *testing.T) {
// Ensure the command is expected value.
wantCmd := MessageCommand(2)
msg := NewMsgRequestAddresses(false, nil)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgRequestAddresses: wrong command - got %v want %v",
cmd, wantCmd)
}
}


@@ -0,0 +1,31 @@
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
)
// MsgRequestBlockLocator implements the Message interface and represents a kaspa
// RequestBlockLocator message. It is used to request a block locator between high
// and low hash.
// The locator is returned via a locator message (MsgBlockLocator).
type MsgRequestBlockLocator struct {
baseMessage
HighHash *daghash.Hash
LowHash *daghash.Hash
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestBlockLocator) Command() MessageCommand {
return CmdRequestBlockLocator
}
// NewMsgRequestBlockLocator returns a new RequestBlockLocator message that conforms to the
// Message interface using the passed parameters and defaults for the remaining
// fields.
func NewMsgRequestBlockLocator(highHash, lowHash *daghash.Hash) *MsgRequestBlockLocator {
return &MsgRequestBlockLocator{
HighHash: highHash,
LowHash: lowHash,
}
}


@@ -0,0 +1,24 @@
package appmessage
import (
"testing"
"github.com/kaspanet/kaspad/util/daghash"
)
// TestRequestBlockLocator tests the MsgRequestBlockLocator API.
func TestRequestBlockLocator(t *testing.T) {
hashStr := "000000000002e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0"
highHash, err := daghash.NewHashFromStr(hashStr)
if err != nil {
t.Errorf("NewHashFromStr: %v", err)
}
// Ensure the command is expected value.
wantCmd := MessageCommand(9)
msg := NewMsgRequestBlockLocator(highHash, &daghash.ZeroHash)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgRequestBlockLocator: wrong command - got %v want %v",
cmd, wantCmd)
}
}


@@ -0,0 +1,34 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
)
// MsgRequestIBDBlocks implements the Message interface and represents a kaspa
// RequestIBDBlocks message. It is used to request a list of blocks starting after the
// low hash and until the high hash.
type MsgRequestIBDBlocks struct {
baseMessage
LowHash *daghash.Hash
HighHash *daghash.Hash
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestIBDBlocks) Command() MessageCommand {
return CmdRequestIBDBlocks
}
// NewMsgRequstIBDBlocks returns a new kaspa RequestIBDBlocks message that conforms to the
// Message interface using the passed parameters and defaults for the remaining
// fields.
func NewMsgRequstIBDBlocks(lowHash, highHash *daghash.Hash) *MsgRequestIBDBlocks {
return &MsgRequestIBDBlocks{
LowHash: lowHash,
HighHash: highHash,
}
}


@@ -0,0 +1,40 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"testing"
"github.com/kaspanet/kaspad/util/daghash"
)
// TestRequstIBDBlocks tests the MsgRequestIBDBlocks API.
func TestRequstIBDBlocks(t *testing.T) {
hashStr := "000000000002e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0"
lowHash, err := daghash.NewHashFromStr(hashStr)
if err != nil {
t.Errorf("NewHashFromStr: %v", err)
}
hashStr = "3ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506"
highHash, err := daghash.NewHashFromStr(hashStr)
if err != nil {
t.Errorf("NewHashFromStr: %v", err)
}
// Ensure we get the same data back out.
msg := NewMsgRequstIBDBlocks(lowHash, highHash)
if !msg.HighHash.IsEqual(highHash) {
t.Errorf("NewMsgRequstIBDBlocks: wrong high hash - got %v, want %v",
msg.HighHash, highHash)
}
// Ensure the command is expected value.
wantCmd := MessageCommand(4)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgRequstIBDBlocks: wrong command - got %v want %v",
cmd, wantCmd)
}
}


@@ -0,0 +1,22 @@
package appmessage
// MsgRequestNextIBDBlocks implements the Message interface and represents a kaspa
// RequestNextIBDBlocks message. It is used to notify the IBD syncer peer to send
// more blocks.
//
// This message has no payload.
type MsgRequestNextIBDBlocks struct {
baseMessage
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestNextIBDBlocks) Command() MessageCommand {
return CmdRequestNextIBDBlocks
}
// NewMsgRequestNextIBDBlocks returns a new kaspa RequestNextIBDBlocks message that conforms to the
// Message interface.
func NewMsgRequestNextIBDBlocks() *MsgRequestNextIBDBlocks {
return &MsgRequestNextIBDBlocks{}
}
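
A sketch of how the syncing side drives the IBD flow (illustrative only; the send callback stands in for whatever routing the flows use and is not an API from this diff):

func requestMoreIBDBlocks(send func(Message) error) error {
    // After processing the previous batch, ask the syncer peer for the next
    // one; the syncer eventually answers with MsgDoneIBDBlocks once all the
    // requested blocks have been sent.
    return send(NewMsgRequestNextIBDBlocks())
}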


@@ -0,0 +1,31 @@
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
)
// MsgRequestRelayBlocksHashes is the maximum number of hashes that can
// be in a single RequestRelayBlocks message.
const MsgRequestRelayBlocksHashes = MaxInvPerMsg
// MsgRequestRelayBlocks implements the Message interface and represents a kaspa
// RequestRelayBlocks message. It is used to request blocks as part of the block
// relay protocol.
type MsgRequestRelayBlocks struct {
baseMessage
Hashes []*daghash.Hash
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestRelayBlocks) Command() MessageCommand {
return CmdRequestRelayBlocks
}
// NewMsgRequestRelayBlocks returns a new kaspa RequestRelayBlocks message that conforms to
// the Message interface. See MsgRequestRelayBlocks for details.
func NewMsgRequestRelayBlocks(hashes []*daghash.Hash) *MsgRequestRelayBlocks {
return &MsgRequestRelayBlocks{
Hashes: hashes,
}
}
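
A sketch of the inv/request pairing in the block relay protocol (illustrative only; checking whether the hash is already known happens elsewhere):

func requestAnnouncedBlock(inv *MsgInvRelayBlock) *MsgRequestRelayBlocks {
    // Ask the announcing peer for the block body behind the advertised hash.
    return NewMsgRequestRelayBlocks([]*daghash.Hash{inv.Hash})
}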


@@ -0,0 +1,21 @@
package appmessage
// MsgRequestSelectedTip implements the Message interface and represents a kaspa
// RequestSelectedTip message. It is used to request the selected tip of another peer.
//
// This message has no payload.
type MsgRequestSelectedTip struct {
baseMessage
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestSelectedTip) Command() MessageCommand {
return CmdRequestSelectedTip
}
// NewMsgRequestSelectedTip returns a new kaspa RequestSelectedTip message that conforms to the
// Message interface.
func NewMsgRequestSelectedTip() *MsgRequestSelectedTip {
return &MsgRequestSelectedTip{}
}


@@ -0,0 +1,20 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"testing"
)
// TestRequestSelectedTip tests the MsgRequestSelectedTip API.
func TestRequestSelectedTip(t *testing.T) {
// Ensure the command is expected value.
wantCmd := MessageCommand(12)
msg := NewMsgRequestSelectedTip()
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgRequestSelectedTip: wrong command - got %v want %v",
cmd, wantCmd)
}
}


@@ -0,0 +1,31 @@
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
)
// MaxInvPerRequestTransactionsMsg is the maximum number of hashes that can
// be in a single CmdInvTransaction message.
const MaxInvPerRequestTransactionsMsg = MaxInvPerMsg
// MsgRequestTransactions implements the Message interface and represents a kaspa
// RequestTransactions message. It is used to request transactions as part of the
// transactions relay protocol.
type MsgRequestTransactions struct {
baseMessage
IDs []*daghash.TxID
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestTransactions) Command() MessageCommand {
return CmdRequestTransactions
}
// NewMsgRequestTransactions returns a new kaspa RequestTransactions message that conforms to
// the Message interface. See MsgRequestTransactions for details.
func NewMsgRequestTransactions(ids []*daghash.TxID) *MsgRequestTransactions {
return &MsgRequestTransactions{
IDs: ids,
}
}
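
A sketch of the matching request in the transaction relay protocol (illustrative only; filtering out IDs that are already known happens elsewhere):

func requestAnnouncedTransactions(inv *MsgInvTransaction) *MsgRequestTransactions {
    // Ask the announcing peer for the full transactions behind the IDs it advertised.
    return NewMsgRequestTransactions(inv.TxIDs)
}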


@@ -0,0 +1,28 @@
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
)
// MsgSelectedTip implements the Message interface and represents a kaspa
// selectedtip message. It is used to answer getseltip messages and tell
// the asking peer what is the selected tip of this peer.
type MsgSelectedTip struct {
baseMessage
// The selected tip hash of the generator of the message.
SelectedTipHash *daghash.Hash
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgSelectedTip) Command() MessageCommand {
return CmdSelectedTip
}
// NewMsgSelectedTip returns a new kaspa selectedtip message that conforms to the
// Message interface.
func NewMsgSelectedTip(selectedTipHash *daghash.Hash) *MsgSelectedTip {
return &MsgSelectedTip{
SelectedTipHash: selectedTipHash,
}
}
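
A sketch of answering a MsgRequestSelectedTip (illustrative only; selectedTipHash is assumed to come from the node's DAG state, which is outside this diff):

func answerSelectedTipRequest(selectedTipHash *daghash.Hash) *MsgSelectedTip {
    // Reply with this node's current selected tip.
    return NewMsgSelectedTip(selectedTipHash)
}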


@@ -0,0 +1,18 @@
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
"testing"
)
// TestSelectedTip tests the MsgSelectedTip API.
func TestSelectedTip(t *testing.T) {
// Ensure the command is expected value.
wantCmd := MessageCommand(11)
msg := NewMsgSelectedTip(&daghash.ZeroHash)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgSelectedTip: wrong command - got %v want %v",
cmd, wantCmd)
}
}


@@ -0,0 +1,30 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
)
// MsgTransactionNotFound defines a kaspa TransactionNotFound message which is sent in response to
// a RequestTransactions message if any of the requested data in not available on the peer.
type MsgTransactionNotFound struct {
baseMessage
ID *daghash.TxID
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgTransactionNotFound) Command() MessageCommand {
return CmdTransactionNotFound
}
// NewMsgTransactionNotFound returns a new kaspa transactionsnotfound message that conforms to the
// Message interface. See MsgTransactionNotFound for details.
func NewMsgTransactionNotFound(id *daghash.TxID) *MsgTransactionNotFound {
return &MsgTransactionNotFound{
ID: id,
}
}
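
And the negative reply, as a sketch (illustrative only):

func replyTransactionNotFound(id *daghash.TxID) *MsgTransactionNotFound {
    // Sent in place of the transaction when a RequestTransactions entry
    // cannot be served by this peer.
    return NewMsgTransactionNotFound(id)
}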


@@ -2,19 +2,18 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
package appmessage
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"math"
"strconv"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/util/binaryserializer"
"github.com/daglabs/btcd/util/subnetworkid"
"github.com/kaspanet/kaspad/util/binaryserializer"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/subnetworkid"
)
const (
@@ -44,21 +43,21 @@ const (
SequenceLockTimeMask = 0x0000ffff
// SequenceLockTimeGranularity is the defined time based granularity
// for seconds-based relative time locks. When converting from seconds
// for milliseconds-based relative time locks. When converting from milliseconds
// to a sequence number, the value is right shifted by this amount,
// therefore the granularity of relative time locks in 512 or 2^9
// seconds. Enforced relative lock times are multiples of 512 seconds.
SequenceLockTimeGranularity = 9
// therefore the granularity of relative time locks in 524288 or 2^19
// seconds. Enforced relative lock times are multiples of 524288 milliseconds.
SequenceLockTimeGranularity = 19
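// Worked example (illustrative, not from the original source): with a
// granularity of 2^19 milliseconds (524,288 ms, roughly 8.7 minutes), a
// relative lock of 60,000 ms right-shifts to 60000 >> 19 == 0 sequence
// units, while 1,048,576 ms (2^20) becomes 1048576 >> 19 == 2 units, i.e.
// enforced lock times are effectively rounded down to multiples of 524,288 ms.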
// defaultTxInOutAlloc is the default size used for the backing array for
// transaction inputs and outputs. The array will dynamically grow as needed,
// transaction inputs and outputs. The array will dynamically grow as needed,
// but this figure is intended to provide enough space for the number of
// inputs and outputs in a typical transaction without needing to grow the
// backing array multiple times.
defaultTxInOutAlloc = 15
// minTxInPayload is the minimum payload size for a transaction input.
// PreviousOutPoint.TxID + PreviousOutPoint.Index 4 bytes + Varint for
// PreviousOutpoint.TxID + PreviousOutpoint.Index 4 bytes + Varint for
// SignatureScript length 1 byte + Sequence 4 bytes.
minTxInPayload = 9 + daghash.HashSize
@@ -67,14 +66,14 @@ const (
maxTxInPerMessage = (MaxMessagePayload / minTxInPayload) + 1
// MinTxOutPayload is the minimum payload size for a transaction output.
// Value 8 bytes + Varint for PkScript length 1 byte.
// Value 8 bytes + Varint for ScriptPubKey length 1 byte.
MinTxOutPayload = 9
// maxTxOutPerMessage is the maximum number of transactions outputs that
// a transaction which fits into a message could possibly have.
maxTxOutPerMessage = (MaxMessagePayload / MinTxOutPayload) + 1
// minTxPayload is the minimum payload size for a transaction. Note
// minTxPayload is the minimum payload size for a transaction. Note
// that any realistically usable transaction must have at least one
// input or output, but that is a rule enforced at a higher layer, so
// it is intentionally not included here.
@@ -84,17 +83,17 @@ const (
minTxPayload = 10
// freeListMaxScriptSize is the size of each buffer in the free list
// that is used for deserializing scripts from the wire before they are
// concatenated into a single contiguous buffers. This value was chosen
// that is used for deserializing scripts from the appmessage before they are
// concatenated into a single contiguous buffers. This value was chosen
// because it is slightly more than twice the size of the vast majority
// of all "standard" scripts. Larger scripts are still deserialized
// of all "standard" scripts. Larger scripts are still deserialized
// properly as the free list will simply be bypassed for them.
freeListMaxScriptSize = 512
// freeListMaxItems is the number of buffers to keep in the free list
// to use for script deserialization. This value allows up to 100
// to use for script deserialization. This value allows up to 100
// scripts per transaction being simultaneously deserialized by 125
// peers. Thus, the peak usage of the free list is 12,500 * 512 =
// peers. Thus, the peak usage of the free list is 12,500 * 512 =
// 6,400,000 bytes.
freeListMaxItems = 12500
)
@@ -113,7 +112,7 @@ const (
// scriptFreeList defines a free list of byte slices (up to the maximum number
// defined by the freeListMaxItems constant) that have a cap according to the
// freeListMaxScriptSize constant. It is used to provide temporary buffers for
// freeListMaxScriptSize constant. It is used to provide temporary buffers for
// deserializing scripts in order to greatly reduce the number of allocations
// required.
//
@@ -122,10 +121,10 @@ const (
type scriptFreeList chan []byte
// Borrow returns a byte slice from the free list with a length according the
// provided size. A new buffer is allocated if there are any items available.
// provided size. A new buffer is allocated if there are any items available.
//
// When the size is larger than the max size allowed for items on the free list
// a new buffer of the appropriate size is allocated and returned. It is safe
// a new buffer of the appropriate size is allocated and returned. It is safe
// to attempt to return said buffer via the Return function as it will be
// ignored and allowed to go the garbage collector.
func (c scriptFreeList) Borrow(size uint64) []byte {
@@ -143,8 +142,8 @@ func (c scriptFreeList) Borrow(size uint64) []byte {
}
// Return puts the provided byte slice back on the free list when it has a cap
// of the expected length. The buffer is expected to have been obtained via
// the Borrow function. Any slices that are not of the appropriate size, such
// of the expected length. The buffer is expected to have been obtained via
// the Borrow function. Any slices that are not of the appropriate size, such
// as those whose size is greater than the largest allowed free list item size
// are simply ignored so they can go to the garbage collector.
func (c scriptFreeList) Return(buf []byte) {
@@ -154,7 +153,7 @@ func (c scriptFreeList) Return(buf []byte) {
return
}
// Return the buffer to the free list when it's not full. Otherwise let
// it be garbage collected.
select {
case c <- buf:
@@ -163,30 +162,30 @@ func (c scriptFreeList) Return(buf []byte) {
}
}
// Create the concurrent safe free list to use for script deserialization. As
// previously described, this free list is maintained to significantly reduce
// the number of allocations.
var scriptPool scriptFreeList = make(chan []byte, freeListMaxItems)
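For readers new to this pattern, here is a minimal in-package sketch (not part of the diff; the helper name copyScriptViaPool is hypothetical) of the borrow/copy/return cycle the comments above describe:
package appmessage

// copyScriptViaPool illustrates the intended use of scriptPool: borrow a
// temporary buffer sized to the script, fill it, copy the bytes into a
// caller-owned slice, and return the temporary buffer for reuse.
func copyScriptViaPool(script []byte) []byte {
	// Borrow hands back a pooled 512-byte buffer sliced to len(script), or a
	// fresh allocation when the script is larger than freeListMaxScriptSize
	// or the pool is currently empty.
	tmp := scriptPool.Borrow(uint64(len(script)))
	copy(tmp, script)

	// Hand ownership of the data to the caller...
	out := make([]byte, len(tmp))
	copy(out, tmp)

	// ...and recycle the temporary buffer. Oversized buffers are silently
	// ignored by Return and left to the garbage collector.
	scriptPool.Return(tmp)
	return out
}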
// OutPoint defines a bitcoin data type that is used to track previous
// Outpoint defines a kaspa data type that is used to track previous
// transaction outputs.
type OutPoint struct {
type Outpoint struct {
TxID daghash.TxID
Index uint32
}
// NewOutPoint returns a new bitcoin transaction outpoint point with the
// NewOutpoint returns a new kaspa transaction outpoint with the
// provided hash and index.
func NewOutPoint(txID *daghash.TxID, index uint32) *OutPoint {
return &OutPoint{
func NewOutpoint(txID *daghash.TxID, index uint32) *Outpoint {
return &Outpoint{
TxID: *txID,
Index: index,
}
}
// String returns the OutPoint in the human-readable form "txID:index".
func (o OutPoint) String() string {
// Allocate enough for ID string, colon, and 10 digits. Although
// String returns the Outpoint in the human-readable form "txID:index".
func (o Outpoint) String() string {
// Allocate enough for ID string, colon, and 10 digits. Although
// at the time of writing, the number of digits can be no greater than
// the length of the decimal representation of maxTxOutPerMessage, the
// maximum message payload may increase in the future and this
@@ -199,9 +198,9 @@ func (o OutPoint) String() string {
return string(buf)
}
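A hedged usage sketch (not part of the diff; the function name is hypothetical, and the transaction ID is reused from the test file below) of the renamed Outpoint type and its String form:
package appmessage

import (
	"fmt"

	"github.com/kaspanet/kaspad/util/daghash"
)

// outpointStringExample builds an Outpoint with NewOutpoint and prints it in
// the "txID:index" form produced by Outpoint.String.
func outpointStringExample() {
	txID, err := daghash.NewTxIDFromStr(
		"edca872f27279674c7a52192b32fd68b8b8be714bfea52d98b2c3c86c30e85c6")
	if err != nil {
		panic(err)
	}
	outpoint := NewOutpoint(txID, 1)
	fmt.Println(outpoint) // edca872f...30e85c6:1
}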
// TxIn defines a bitcoin transaction input.
// TxIn defines a kaspa transaction input.
type TxIn struct {
PreviousOutPoint OutPoint
PreviousOutpoint Outpoint
SignatureScript []byte
Sequence uint64
}
@@ -227,47 +226,48 @@ func serializeSignatureScriptSize(signatureScript []byte, encodingFlags txEncodi
return VarIntSerializeSize(0)
}
// NewTxIn returns a new bitcoin transaction input with the provided
// NewTxIn returns a new kaspa transaction input with the provided
// previous outpoint and signature script with a default sequence of
// MaxTxInSequenceNum.
func NewTxIn(prevOut *OutPoint, signatureScript []byte) *TxIn {
func NewTxIn(prevOut *Outpoint, signatureScript []byte) *TxIn {
return &TxIn{
PreviousOutPoint: *prevOut,
PreviousOutpoint: *prevOut,
SignatureScript: signatureScript,
Sequence: MaxTxInSequenceNum,
}
}
// TxOut defines a bitcoin transaction output.
// TxOut defines a kaspa transaction output.
type TxOut struct {
Value uint64
PkScript []byte
Value uint64
ScriptPubKey []byte
}
// SerializeSize returns the number of bytes it would take to serialize the
// transaction output.
func (t *TxOut) SerializeSize() int {
// Value 8 bytes + serialized varint size for the length of PkScript +
// PkScript bytes.
return 8 + VarIntSerializeSize(uint64(len(t.PkScript))) + len(t.PkScript)
// Value 8 bytes + serialized varint size for the length of ScriptPubKey +
// ScriptPubKey bytes.
return 8 + VarIntSerializeSize(uint64(len(t.ScriptPubKey))) + len(t.ScriptPubKey)
}
// NewTxOut returns a new bitcoin transaction output with the provided
// NewTxOut returns a new kaspa transaction output with the provided
// transaction value and public key script.
func NewTxOut(value uint64, pkScript []byte) *TxOut {
func NewTxOut(value uint64, scriptPubKey []byte) *TxOut {
return &TxOut{
Value: value,
PkScript: pkScript,
Value: value,
ScriptPubKey: scriptPubKey,
}
}
// MsgTx implements the Message interface and represents a bitcoin tx message.
// MsgTx implements the Message interface and represents a kaspa tx message.
// It is used to deliver transaction information in response to a getdata
// message (MsgGetData) for a given transaction.
//
// Use the AddTxIn and AddTxOut functions to build up the list of transaction
// inputs and outputs.
type MsgTx struct {
baseMessage
Version int32
TxIn []*TxIn
TxOut []*TxOut
@@ -288,70 +288,48 @@ func (msg *MsgTx) AddTxOut(to *TxOut) {
msg.TxOut = append(msg.TxOut, to)
}
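A brief sketch, assuming the constructors above, of assembling a transaction; the function name and values are illustrative only:
package appmessage

import "github.com/kaspanet/kaspad/util/daghash"

// buildSimpleTx assembles a one-input, one-output native transaction with the
// constructors above; AddTxIn/AddTxOut can append further entries afterwards.
func buildSimpleTx(prevTxID *daghash.TxID, scriptPubKey []byte) *MsgTx {
	prevOut := NewOutpoint(prevTxID, 0)
	txIn := NewTxIn(prevOut, nil) // empty signature script, default sequence
	txOut := NewTxOut(100000000, scriptPubKey)

	tx := NewNativeMsgTx(1, []*TxIn{txIn}, []*TxOut{txOut})
	tx.AddTxOut(NewTxOut(50000000, scriptPubKey)) // a second output, for illustration
	return tx
}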
// IsCoinBase determines whether or not a transaction is a coinbase. A coinbase
// is a special transaction created by miners that has no inputs. This is
// represented in the block dag by a transaction with a single input that has
// a previous output transaction index set to the maximum value along with a
// zero TxID.
func (msg *MsgTx) IsCoinBase() bool {
// A coin base must only have one transaction input.
if len(msg.TxIn) != 1 {
return false
}
// The previous output of a coinbase must have a max value index and
// a zero TxID.
prevOut := &msg.TxIn[0].PreviousOutPoint
return prevOut.Index == math.MaxUint32 && prevOut.TxID == daghash.ZeroTxID
}
// IsFeeTransaction determines whether or not a transaction is a fee transaction. A fee
// transaction is a special transaction created by miners that distributes fees to the
// previous blocks' miners. Each input of the fee transaction should set index to maximum
// IsCoinBase determines whether or not a transaction is a coinbase transaction. A coinbase
// transaction is a special transaction created by miners that distributes fees and block subsidy
// to the previous blocks' miners, and specifies the scriptPubKey that will be used to pay the current
// miner in future blocks. Each input of the coinbase transaction should set index to maximum
// value and reference the relevant block id, instead of previous transaction id.
func (msg *MsgTx) IsFeeTransaction() bool {
for _, txIn := range msg.TxIn {
// The previous output of a fee transaction have a max value index and
// a non-zero TxID (to differentiate from coinbase).
prevOut := txIn.PreviousOutPoint
if prevOut.Index != math.MaxUint32 || prevOut.TxID == daghash.ZeroTxID {
return false
}
}
return true
}
// IsBlockReward determines whether or not a transaction is a block reward (a fee transaction or a coinbase)
func (msg *MsgTx) IsBlockReward() bool {
return msg.IsFeeTransaction() || msg.IsCoinBase()
func (msg *MsgTx) IsCoinBase() bool {
// A coinbase transaction must have subnetwork id SubnetworkIDCoinbase
return msg.SubnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase)
}
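To make the new rule concrete, a small illustrative sketch (not part of the diff) mirroring the updated test: coinbase detection is now purely a subnetwork-ID check.
package appmessage

import (
	"fmt"

	"github.com/kaspanet/kaspad/util/subnetworkid"
)

// coinbaseCheckExample shows that coinbase detection is driven purely by the
// transaction's subnetwork ID after this change.
func coinbaseCheckExample() {
	coinbaseTx := NewSubnetworkMsgTx(1, nil, nil, subnetworkid.SubnetworkIDCoinbase, 0, nil)
	nativeTx := NewNativeMsgTx(1, nil, nil)

	fmt.Println(coinbaseTx.IsCoinBase()) // true
	fmt.Println(nativeTx.IsCoinBase())   // false
}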
// TxHash generates the Hash for the transaction.
func (msg *MsgTx) TxHash() *daghash.Hash {
// Encode the transaction and calculate double sha256 on the result.
// Ignore the error returns since the only way the encode could fail
// is being out of memory or due to nil pointers, both of which would
// cause a run-time panic.
buf := bytes.NewBuffer(make([]byte, 0, msg.serializeSize(txEncodingExcludePayload)))
_ = msg.serialize(buf, txEncodingExcludePayload)
writer := daghash.NewDoubleHashWriter()
err := msg.serialize(writer, txEncodingExcludePayload)
if err != nil {
// this writer never returns errors (no allocations or possible failures), so errors can only come from validity checks,
// and we assume we never construct malformed transactions.
panic(fmt.Sprintf("TxHash() failed. this should never fail for structurally-valid transactions. err: %+v", err))
}
hash := daghash.Hash(daghash.DoubleHashH(buf.Bytes()))
hash := writer.Finalize()
return &hash
}
// TxID generates the Hash for the transaction without the signature script, gas and payload fields.
func (msg *MsgTx) TxID() daghash.TxID {
func (msg *MsgTx) TxID() *daghash.TxID {
// Encode the transaction, replace signature script with zeroes, cut off
// payload and calculate double sha256 on the result. Ignore the error
// returns since the only way the encode could fail is being out of memory or
// due to nil pointers, both of which would cause a run-time panic.
// payload and calculate double sha256 on the result.
var encodingFlags txEncoding
if !msg.IsCoinBase() {
encodingFlags = txEncodingExcludeSignatureScript | txEncodingExcludePayload
}
buf := bytes.NewBuffer(make([]byte, 0, msg.serializeSize(encodingFlags)))
_ = msg.serialize(buf, encodingFlags)
return daghash.TxID(daghash.DoubleHashH(buf.Bytes()))
writer := daghash.NewDoubleHashWriter()
err := msg.serialize(writer, encodingFlags)
if err != nil {
// this writer never returns errors (no allocations or possible failures), so errors can only come from validity checks,
// and we assume we never construct malformed transactions.
panic(fmt.Sprintf("TxID() failed. this should never fail for structurally-valid transactions. err: %+v", err))
}
txID := daghash.TxID(writer.Finalize())
return &txID
}
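An illustrative in-package sketch (function name hypothetical) of the property described above: for non-coinbase transactions the ID excludes the signature script, so changing the script leaves the ID unchanged.
package appmessage

import (
	"fmt"

	"github.com/kaspanet/kaspad/util/daghash"
)

// txIDIgnoresSignatureScript demonstrates that TxID is stable under signature
// script changes for non-coinbase transactions.
func txIDIgnoresSignatureScript() {
	txIn := &TxIn{
		PreviousOutpoint: Outpoint{TxID: daghash.TxID{1}, Index: 0},
		SignatureScript:  []byte{0x01, 0x02, 0x03},
		Sequence:         MaxTxInSequenceNum,
	}
	tx := NewNativeMsgTx(1, []*TxIn{txIn}, nil)

	idBefore := tx.TxID()
	tx.TxIn[0].SignatureScript = nil
	idAfter := tx.TxID()

	fmt.Println(idBefore.IsEqual(idAfter)) // true: the signature script is excluded from the ID
}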
// Copy creates a deep copy of a transaction so that the original does not get
@@ -377,10 +355,10 @@ func (msg *MsgTx) Copy() *MsgTx {
// Deep copy the old TxIn data.
for _, oldTxIn := range msg.TxIn {
// Deep copy the old previous outpoint.
oldOutPoint := oldTxIn.PreviousOutPoint
newOutPoint := OutPoint{}
newOutPoint.TxID.SetBytes(oldOutPoint.TxID[:])
newOutPoint.Index = oldOutPoint.Index
oldOutpoint := oldTxIn.PreviousOutpoint
newOutpoint := Outpoint{}
newOutpoint.TxID.SetBytes(oldOutpoint.TxID[:])
newOutpoint.Index = oldOutpoint.Index
// Deep copy the old signature script.
var newScript []byte
@@ -393,7 +371,7 @@ func (msg *MsgTx) Copy() *MsgTx {
// Create new txIn with the deep copied data.
newTxIn := TxIn{
PreviousOutPoint: newOutPoint,
PreviousOutpoint: newOutpoint,
SignatureScript: newScript,
Sequence: oldTxIn.Sequence,
}
@@ -404,9 +382,9 @@ func (msg *MsgTx) Copy() *MsgTx {
// Deep copy the old TxOut data.
for _, oldTxOut := range msg.TxOut {
// Deep copy the old PkScript
// Deep copy the old ScriptPubKey
var newScript []byte
oldScript := oldTxOut.PkScript
oldScript := oldTxOut.ScriptPubKey
oldScriptLen := len(oldScript)
if oldScriptLen > 0 {
newScript = make([]byte, oldScriptLen)
@@ -416,8 +394,8 @@ func (msg *MsgTx) Copy() *MsgTx {
// Create new txOut with the deep copied data and append it to
// new Tx.
newTxOut := TxOut{
Value: oldTxOut.Value,
PkScript: newScript,
Value: oldTxOut.Value,
ScriptPubKey: newScript,
}
newTx.TxOut = append(newTx.TxOut, &newTxOut)
}
@@ -425,11 +403,11 @@ func (msg *MsgTx) Copy() *MsgTx {
return &newTx
}
// BtcDecode decodes r using the bitcoin protocol encoding into the receiver.
// KaspaDecode decodes r using the kaspa protocol encoding into the receiver.
// This is part of the Message interface implementation.
// See Deserialize for decoding transactions stored to disk, such as in a
// database, as opposed to decoding transactions from the wire.
func (msg *MsgTx) BtcDecode(r io.Reader, pver uint32) error {
// database, as opposed to decoding transactions from the appmessage.
func (msg *MsgTx) KaspaDecode(r io.Reader, pver uint32) error {
version, err := binaryserializer.Uint32(r, littleEndian)
if err != nil {
return err
@@ -442,18 +420,18 @@ func (msg *MsgTx) BtcDecode(r io.Reader, pver uint32) error {
}
// Prevent more input transactions than could possibly fit into a
// message. It would be possible to cause memory exhaustion and panics
// without a sane upper bound on this count.
if count > uint64(maxTxInPerMessage) {
str := fmt.Sprintf("too many input transactions to fit into "+
"max message size [count %d, max %d]", count,
maxTxInPerMessage)
return messageError("MsgTx.BtcDecode", str)
return messageError("MsgTx.KaspaDecode", str)
}
// returnScriptBuffers is a closure that returns any script buffers that
// were borrowed from the pool when there are any deserialization
// errors. This is only valid to call before the final step which
// replaces the scripts with the location in a contiguous buffer and
// returns them.
returnScriptBuffers := func() {
@@ -464,10 +442,10 @@ func (msg *MsgTx) BtcDecode(r io.Reader, pver uint32) error {
scriptPool.Return(txIn.SignatureScript)
}
for _, txOut := range msg.TxOut {
if txOut == nil || txOut.PkScript == nil {
if txOut == nil || txOut.ScriptPubKey == nil {
continue
}
scriptPool.Return(txOut.PkScript)
scriptPool.Return(txOut.ScriptPubKey)
}
}
@@ -495,14 +473,14 @@ func (msg *MsgTx) BtcDecode(r io.Reader, pver uint32) error {
}
// Prevent more output transactions than could possibly fit into a
// message. It would be possible to cause memory exhaustion and panics
// without a sane upper bound on this count.
if count > uint64(maxTxOutPerMessage) {
returnScriptBuffers()
str := fmt.Sprintf("too many output transactions to fit into "+
"max message size [count %d, max %d]", count,
maxTxOutPerMessage)
return messageError("MsgTx.BtcDecode", str)
return messageError("MsgTx.KaspaDecode", str)
}
// Deserialize the outputs.
@@ -518,7 +496,7 @@ func (msg *MsgTx) BtcDecode(r io.Reader, pver uint32) error {
returnScriptBuffers()
return err
}
totalScriptSize += uint64(len(to.PkScript))
totalScriptSize += uint64(len(to.ScriptPubKey))
}
lockTime, err := binaryserializer.Uint64(r, littleEndian)
@@ -557,13 +535,17 @@ func (msg *MsgTx) BtcDecode(r io.Reader, pver uint32) error {
msg.Payload = make([]byte, payloadLength)
_, err = io.ReadFull(r, msg.Payload)
if err != nil {
returnScriptBuffers()
return err
}
}
// Create a single allocation to house all of the scripts and set each
// input signature script and output public key script to the
// appropriate subslice of the overall contiguous buffer. Then, return
// each individual script buffer back to the pool so they can be reused
// for future deserializations. This is done because it significantly
// reduces the number of allocations the garbage collector needs to
// track, which in turn improves performance and drastically reduces the
// amount of runtime overhead that would otherwise be needed to keep
@@ -594,18 +576,18 @@ func (msg *MsgTx) BtcDecode(r io.Reader, pver uint32) error {
for i := 0; i < len(msg.TxOut); i++ {
// Copy the public key script into the contiguous buffer at the
// appropriate offset.
pkScript := msg.TxOut[i].PkScript
copy(scripts[offset:], pkScript)
scriptPubKey := msg.TxOut[i].ScriptPubKey
copy(scripts[offset:], scriptPubKey)
// Reset the public key script of the transaction output to the
// slice of the contiguous buffer where the script lives.
scriptSize := uint64(len(pkScript))
scriptSize := uint64(len(scriptPubKey))
end := offset + scriptSize
msg.TxOut[i].PkScript = scripts[offset:end:end]
msg.TxOut[i].ScriptPubKey = scripts[offset:end:end]
offset += scriptSize
// Return the temporary script buffer to the pool.
scriptPool.Return(pkScript)
scriptPool.Return(scriptPubKey)
}
return nil
@@ -613,26 +595,26 @@ func (msg *MsgTx) BtcDecode(r io.Reader, pver uint32) error {
// Deserialize decodes a transaction from r into the receiver using a format
// that is suitable for long-term storage such as a database while respecting
// the Version field in the transaction. This function differs from BtcDecode
// in that BtcDecode decodes from the bitcoin wire protocol as it was sent
// across the network. The wire encoding can technically differ depending on
// the Version field in the transaction. This function differs from KaspaDecode
// in that KaspaDecode decodes from the kaspa appmessage protocol as it was sent
// across the network. The appmessage encoding can technically differ depending on
// the protocol version and doesn't even really need to match the format of a
// stored transaction at all. As of the time this comment was written, the
// encoded transaction is the same in both instances, but there is a distinct
// difference and separating the two allows the API to be flexible enough to
// deal with changes.
func (msg *MsgTx) Deserialize(r io.Reader) error {
// At the current time, there is no difference between the wire encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of BtcDecode.
return msg.BtcDecode(r, 0)
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of KaspaDecode.
return msg.KaspaDecode(r, 0)
}
// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
// KaspaEncode encodes the receiver to w using the kaspa protocol encoding.
// This is part of the Message interface implementation.
// See Serialize for encoding transactions to be stored to disk, such as in a
// database, as opposed to encoding transactions for the wire.
func (msg *MsgTx) BtcEncode(w io.Writer, pver uint32) error {
// database, as opposed to encoding transactions for the appmessage.
func (msg *MsgTx) KaspaEncode(w io.Writer, pver uint32) error {
return msg.encode(w, pver, txEncodingFull)
}
@@ -679,9 +661,9 @@ func (msg *MsgTx) encode(w io.Writer, pver uint32, encodingFlags txEncoding) err
}
if !msg.SubnetworkID.IsEqual(subnetworkid.SubnetworkIDNative) {
if msg.SubnetworkID.IsEqual(subnetworkid.SubnetworkIDRegistry) && msg.Gas != 0 {
str := "Transactions from registry subnetwork should have 0 gas"
return messageError("MsgTx.BtcEncode", str)
if msg.SubnetworkID.IsBuiltIn() && msg.Gas != 0 {
str := "Transactions from built-in should have 0 gas"
return messageError("MsgTx.KaspaEncode", str)
}
err = binaryserializer.PutUint64(w, littleEndian, msg.Gas)
@@ -705,13 +687,13 @@ func (msg *MsgTx) encode(w io.Writer, pver uint32, encodingFlags txEncoding) err
}
} else if msg.Payload != nil {
str := "Transactions from native subnetwork should have <nil> payload"
return messageError("MsgTx.BtcEncode", str)
return messageError("MsgTx.KaspaEncode", str)
} else if msg.PayloadHash != nil {
str := "Transactions from native subnetwork should have <nil> payload hash"
return messageError("MsgTx.BtcEncode", str)
return messageError("MsgTx.KaspaEncode", str)
} else if msg.Gas != 0 {
str := "Transactions from native subnetwork should have 0 gas"
return messageError("MsgTx.BtcEncode", str)
return messageError("MsgTx.KaspaEncode", str)
}
return nil
@@ -719,24 +701,24 @@ func (msg *MsgTx) encode(w io.Writer, pver uint32, encodingFlags txEncoding) err
// Serialize encodes the transaction to w using a format that suitable for
// long-term storage such as a database while respecting the Version field in
// the transaction. This function differs from BtcEncode in that BtcEncode
// encodes the transaction to the bitcoin wire protocol in order to be sent
// across the network. The wire encoding can technically differ depending on
// the transaction. This function differs from KaspaEncode in that KaspaEncode
// encodes the transaction to the kaspa appmessage protocol in order to be sent
// across the network. The appmessage encoding can technically differ depending on
// the protocol version and doesn't even really need to match the format of a
// stored transaction at all. As of the time this comment was written, the
// encoded transaction is the same in both instances, but there is a distinct
// difference and separating the two allows the API to be flexible enough to
// deal with changes.
func (msg *MsgTx) Serialize(w io.Writer) error {
// At the current time, there is no difference between the wire encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of BtcEncode.
return msg.BtcEncode(w, 0)
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of KaspaEncode.
return msg.KaspaEncode(w, 0)
}
func (msg *MsgTx) serialize(w io.Writer, encodingFlags txEncoding) error {
// At the current time, there is no difference between the wire encoding
// at protocol version 0 and the stable long-term storage format. As
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of `encode`.
return msg.encode(w, 0, encodingFlags)
}
@@ -783,23 +765,23 @@ func (msg *MsgTx) serializeSize(encodingFlags txEncoding) int {
return n
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgTx) Command() string {
func (msg *MsgTx) Command() MessageCommand {
return CmdTx
}
// MaxPayloadLength returns the maximum length the payload can be for the
// receiver. This is part of the Message interface implementation.
func (msg *MsgTx) MaxPayloadLength(pver uint32) uint32 {
return MaxBlockPayload
return MaxMessagePayload
}
// PkScriptLocs returns a slice containing the start of each public key script
// within the raw serialized transaction. The caller can easily obtain the
// ScriptPubKeyLocs returns a slice containing the start of each public key script
// within the raw serialized transaction. The caller can easily obtain the
// length of each script by using len on the script available via the
// appropriate transaction output entry.
func (msg *MsgTx) PkScriptLocs() []int {
func (msg *MsgTx) ScriptPubKeyLocs() []int {
numTxOut := len(msg.TxOut)
if numTxOut == 0 {
return nil
@@ -819,18 +801,18 @@ func (msg *MsgTx) PkScriptLocs() []int {
}
// Calculate and set the appropriate offset for each public key script.
pkScriptLocs := make([]int, numTxOut)
scriptPubKeyLocs := make([]int, numTxOut)
for i, txOut := range msg.TxOut {
// The offset of the script in the transaction output is:
//
// Value 8 bytes + serialized varint size for the length of
// PkScript.
n += 8 + VarIntSerializeSize(uint64(len(txOut.PkScript)))
pkScriptLocs[i] = n
n += len(txOut.PkScript)
// ScriptPubKey.
n += 8 + VarIntSerializeSize(uint64(len(txOut.ScriptPubKey)))
scriptPubKeyLocs[i] = n
n += len(txOut.ScriptPubKey)
}
return pkScriptLocs
return scriptPubKeyLocs
}
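A hedged sketch (not part of the diff) of how the returned offsets line up with the bytes produced by Serialize, mirroring the serialization test later in this compare:
package appmessage

import (
	"bytes"
	"fmt"
)

// extractScriptPubKeys serializes a transaction and uses ScriptPubKeyLocs to
// slice each output's script straight out of the serialized bytes.
func extractScriptPubKeys(tx *MsgTx) ([][]byte, error) {
	var buf bytes.Buffer
	if err := tx.Serialize(&buf); err != nil {
		return nil, err
	}
	serialized := buf.Bytes()

	scripts := make([][]byte, 0, len(tx.TxOut))
	for i, loc := range tx.ScriptPubKeyLocs() {
		script := serialized[loc : loc+len(tx.TxOut[i].ScriptPubKey)]
		scripts = append(scripts, script)
		fmt.Printf("output %d script starts at offset %d\n", i, loc)
	}
	return scripts, nil
}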
// IsSubnetworkCompatible returns true iff subnetworkID is one of the following:
@@ -907,8 +889,8 @@ func NewRegistryMsgTx(version int32, txIn []*TxIn, txOut []*TxOut, gasLimit uint
return NewSubnetworkMsgTx(version, txIn, txOut, subnetworkid.SubnetworkIDRegistry, 0, payload)
}
// readOutPoint reads the next sequence of bytes from r as an OutPoint.
func readOutPoint(r io.Reader, pver uint32, version int32, op *OutPoint) error {
// readOutpoint reads the next sequence of bytes from r as an Outpoint.
func readOutpoint(r io.Reader, pver uint32, version int32, op *Outpoint) error {
_, err := io.ReadFull(r, op.TxID[:])
if err != nil {
return err
@@ -918,9 +900,9 @@ func readOutPoint(r io.Reader, pver uint32, version int32, op *OutPoint) error {
return err
}
// writeOutPoint encodes op to the bitcoin protocol encoding for an OutPoint
// writeOutpoint encodes op to the kaspa protocol encoding for an Outpoint
// to w.
func writeOutPoint(w io.Writer, pver uint32, version int32, op *OutPoint) error {
func writeOutpoint(w io.Writer, pver uint32, version int32, op *Outpoint) error {
_, err := w.Write(op.TxID[:])
if err != nil {
return err
@@ -930,10 +912,10 @@ func writeOutPoint(w io.Writer, pver uint32, version int32, op *OutPoint) error
}
// readScript reads a variable length byte array that represents a transaction
// script. It is encoded as a varInt containing the length of the array
// followed by the bytes themselves. An error is returned if the length is
// greater than the passed maxAllowed parameter which helps protect against
// memory exhaustion attacks and forced panics through malformed messages. The
// fieldName parameter is only used for the error message so it provides more
// context in the error.
func readScript(r io.Reader, pver uint32, maxAllowed uint32, fieldName string) ([]byte, error) {
@@ -942,7 +924,7 @@ func readScript(r io.Reader, pver uint32, maxAllowed uint32, fieldName string) (
return nil, err
}
// Prevent byte array larger than the max message size. It would
// be possible to cause memory exhaustion and panics without a sane
// upper bound on this count.
if count > uint64(maxAllowed) {
@@ -963,7 +945,7 @@ func readScript(r io.Reader, pver uint32, maxAllowed uint32, fieldName string) (
// readTxIn reads the next sequence of bytes from r as a transaction input
// (TxIn).
func readTxIn(r io.Reader, pver uint32, version int32, ti *TxIn) error {
err := readOutPoint(r, pver, version, &ti.PreviousOutPoint)
err := readOutpoint(r, pver, version, &ti.PreviousOutpoint)
if err != nil {
return err
}
@@ -977,10 +959,10 @@ func readTxIn(r io.Reader, pver uint32, version int32, ti *TxIn) error {
return ReadElement(r, &ti.Sequence)
}
// writeTxIn encodes ti to the bitcoin protocol encoding for a transaction
// writeTxIn encodes ti to the kaspa protocol encoding for a transaction
// input (TxIn) to w.
func writeTxIn(w io.Writer, pver uint32, version int32, ti *TxIn, encodingFlags txEncoding) error {
err := writeOutPoint(w, pver, version, &ti.PreviousOutPoint)
err := writeOutpoint(w, pver, version, &ti.PreviousOutpoint)
if err != nil {
return err
}
@@ -1005,12 +987,12 @@ func readTxOut(r io.Reader, pver uint32, version int32, to *TxOut) error {
return err
}
to.PkScript, err = readScript(r, pver, MaxMessagePayload,
to.ScriptPubKey, err = readScript(r, pver, MaxMessagePayload,
"transaction output public key script")
return err
}
// WriteTxOut encodes to into the bitcoin protocol encoding for a transaction
// WriteTxOut encodes to into the kaspa protocol encoding for a transaction
// output (TxOut) to w.
func WriteTxOut(w io.Writer, pver uint32, version int32, to *TxOut) error {
err := binaryserializer.PutUint64(w, littleEndian, uint64(to.Value))
@@ -1018,5 +1000,5 @@ func WriteTxOut(w io.Writer, pver uint32, version int32, to *TxOut) error {
return err
}
return WriteVarBytes(w, pver, to.PkScript)
return WriteVarBytes(w, pver, to.ScriptPubKey)
}
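An in-package sketch (not part of the diff; readTxOut is unexported, so this only compiles inside appmessage, and the function name is hypothetical) showing that WriteTxOut and readTxOut round-trip a TxOut:
package appmessage

import (
	"bytes"
	"fmt"
)

// roundTripTxOut encodes a TxOut with WriteTxOut and decodes it back with
// readTxOut, illustrating that the two sides agree on the encoding.
func roundTripTxOut() error {
	original := NewTxOut(5000000000, []byte{0x51}) // placeholder one-byte script

	var buf bytes.Buffer
	if err := WriteTxOut(&buf, ProtocolVersion, 1, original); err != nil {
		return err
	}

	var decoded TxOut
	if err := readTxOut(&buf, ProtocolVersion, 1, &decoded); err != nil {
		return err
	}

	fmt.Println(decoded.Value == original.Value &&
		bytes.Equal(decoded.ScriptPubKey, original.ScriptPubKey)) // true
	return nil
}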

View File

@@ -2,20 +2,21 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
package appmessage
import (
"bytes"
"fmt"
"github.com/pkg/errors"
"io"
"math"
"reflect"
"testing"
"unsafe"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/util/subnetworkid"
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/subnetworkid"
)
// TestTx tests the MsgTx API.
@@ -29,15 +30,15 @@ func TestTx(t *testing.T) {
}
// Ensure the command is expected value.
wantCmd := "tx"
wantCmd := MessageCommand(6)
msg := NewNativeMsgTx(1, nil, nil)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgAddr: wrong command - got %v want %v",
t.Errorf("NewMsgAddresses: wrong command - got %v want %v",
cmd, wantCmd)
}
// Ensure max payload is expected value for latest protocol version.
wantPayload := uint32(1000 * 1000)
wantPayload := uint32(1024 * 1024 * 32)
maxPayload := msg.MaxPayloadLength(pver)
if maxPayload != wantPayload {
t.Errorf("MaxPayloadLength: wrong max payload length for "+
@@ -45,31 +46,31 @@ func TestTx(t *testing.T) {
maxPayload, wantPayload)
}
// Ensure we get the same transaction output point data back out.
// Ensure we get the same transaction outpoint data back out.
// NOTE: This is a block hash and made up index, but we're only
// testing package functionality.
prevOutIndex := uint32(1)
prevOut := NewOutPoint(txID, prevOutIndex)
prevOut := NewOutpoint(txID, prevOutIndex)
if !prevOut.TxID.IsEqual(txID) {
t.Errorf("NewOutPoint: wrong ID - got %v, want %v",
t.Errorf("NewOutpoint: wrong ID - got %v, want %v",
spew.Sprint(&prevOut.TxID), spew.Sprint(txID))
}
if prevOut.Index != prevOutIndex {
t.Errorf("NewOutPoint: wrong index - got %v, want %v",
t.Errorf("NewOutpoint: wrong index - got %v, want %v",
prevOut.Index, prevOutIndex)
}
prevOutStr := fmt.Sprintf("%s:%d", txID.String(), prevOutIndex)
if s := prevOut.String(); s != prevOutStr {
t.Errorf("OutPoint.String: unexpected result - got %v, "+
t.Errorf("Outpoint.String: unexpected result - got %v, "+
"want %v", s, prevOutStr)
}
// Ensure we get the same transaction input back out.
sigScript := []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62}
txIn := NewTxIn(prevOut, sigScript)
if !reflect.DeepEqual(&txIn.PreviousOutPoint, prevOut) {
if !reflect.DeepEqual(&txIn.PreviousOutpoint, prevOut) {
t.Errorf("NewTxIn: wrong prev outpoint - got %v, want %v",
spew.Sprint(&txIn.PreviousOutPoint),
spew.Sprint(&txIn.PreviousOutpoint),
spew.Sprint(prevOut))
}
if !bytes.Equal(txIn.SignatureScript, sigScript) {
@@ -80,7 +81,7 @@ func TestTx(t *testing.T) {
// Ensure we get the same transaction output back out.
txValue := uint64(5000000000)
pkScript := []byte{
scriptPubKey := []byte{
0x41, // OP_DATA_65
0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
@@ -93,16 +94,16 @@ func TestTx(t *testing.T) {
0xa6, // 65-byte signature
0xac, // OP_CHECKSIG
}
txOut := NewTxOut(txValue, pkScript)
txOut := NewTxOut(txValue, scriptPubKey)
if txOut.Value != txValue {
t.Errorf("NewTxOut: wrong pk script - got %v, want %v",
t.Errorf("NewTxOut: wrong scriptPubKey - got %v, want %v",
txOut.Value, txValue)
}
if !bytes.Equal(txOut.PkScript, pkScript) {
t.Errorf("NewTxOut: wrong pk script - got %v, want %v",
spew.Sdump(txOut.PkScript),
spew.Sdump(pkScript))
if !bytes.Equal(txOut.ScriptPubKey, scriptPubKey) {
t.Errorf("NewTxOut: wrong scriptPubKey - got %v, want %v",
spew.Sdump(txOut.ScriptPubKey),
spew.Sdump(scriptPubKey))
}
// Ensure transaction inputs are added properly.
@@ -129,25 +130,25 @@ func TestTx(t *testing.T) {
// TestTxHash tests the ability to generate the hash of a transaction accurately.
func TestTxHashAndID(t *testing.T) {
txID1Str := "5b92e6ed52bc78745905e0d104069e46407f62ea8d7d2bce78cd13f80ce220dc"
txID1Str := "edca872f27279674c7a52192b32fd68b8b8be714bfea52d98b2c3c86c30e85c6"
wantTxID1, err := daghash.NewTxIDFromStr(txID1Str)
if err != nil {
t.Errorf("NewHashFromStr: %v", err)
t.Errorf("NewTxIDFromStr: %v", err)
return
}
// First transaction from block 113875.
// A coinbase transaction
txIn := &TxIn{
PreviousOutPoint: OutPoint{
PreviousOutpoint: Outpoint{
TxID: daghash.TxID{},
Index: 0xffffffff,
Index: math.MaxUint32,
},
SignatureScript: []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62},
Sequence: math.MaxUint64,
}
txOut := &TxOut{
Value: 5000000000,
PkScript: []byte{
ScriptPubKey: []byte{
0x41, // OP_DATA_65
0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
@@ -161,7 +162,7 @@ func TestTxHashAndID(t *testing.T) {
0xac, // OP_CHECKSIG
},
}
tx1 := NewNativeMsgTx(1, []*TxIn{txIn}, []*TxOut{txOut})
tx1 := NewSubnetworkMsgTx(1, []*TxIn{txIn}, []*TxOut{txOut}, subnetworkid.SubnetworkIDCoinbase, 0, nil)
// Ensure the hash produced is expected.
tx1Hash := tx1.TxHash()
@@ -180,19 +181,19 @@ func TestTxHashAndID(t *testing.T) {
hash2Str := "b11924b7eeffea821522222576c53dc5b8ddd97602f81e5e124d2626646d74ca"
wantHash2, err := daghash.NewHashFromStr(hash2Str)
if err != nil {
t.Errorf("NewHashFromStr: %v", err)
t.Errorf("NewTxIDFromStr: %v", err)
return
}
id2Str := "750499ae9e6d44961ef8bad8af27a44dd4bcbea166b71baf181e8d3997e1ff72"
wantID2, err := daghash.NewTxIDFromStr(id2Str)
if err != nil {
t.Errorf("NewHashFromStr: %v", err)
t.Errorf("NewTxIDFromStr: %v", err)
return
}
payload := []byte{1, 2, 3}
txIns := []*TxIn{&TxIn{
PreviousOutPoint: OutPoint{
txIns := []*TxIn{{
PreviousOutpoint: Outpoint{
Index: 0,
TxID: daghash.TxID{1, 2, 3},
},
@@ -212,14 +213,14 @@ func TestTxHashAndID(t *testing.T) {
txOuts := []*TxOut{
{
Value: 244623243,
PkScript: []byte{
ScriptPubKey: []byte{
0x76, 0xA9, 0x14, 0xBA, 0xDE, 0xEC, 0xFD, 0xEF, 0x05, 0x07, 0x24, 0x7F, 0xC8, 0xF7, 0x42, 0x41,
0xD7, 0x3B, 0xC0, 0x39, 0x97, 0x2D, 0x7B, 0x88, 0xAC,
},
},
{
Value: 44602432,
PkScript: []byte{
ScriptPubKey: []byte{
0x76, 0xA9, 0x14, 0xC1, 0x09, 0x32, 0x48, 0x3F, 0xEC, 0x93, 0xED, 0x51, 0xF5, 0xFE, 0x95, 0xE7,
0x25, 0x59, 0xF2, 0xCC, 0x70, 0x43, 0xF9, 0x88, 0xAC,
},
@@ -245,17 +246,16 @@ func TestTxHashAndID(t *testing.T) {
t.Errorf("tx2ID and tx2Hash shouldn't be the same for non-coinbase transaction with signature and/or payload")
}
tx2.Payload = []byte{}
tx2.TxIn[0].SignatureScript = []byte{}
newTx2Hash := tx2.TxHash()
if !tx2ID.IsEqual((*daghash.TxID)(newTx2Hash)) {
t.Errorf("tx2ID and newTx2Hash should be the same for transaction without empty signature and payload")
t.Errorf("tx2ID and newTx2Hash should be the same for transaction with an empty signature")
}
}
// TestTxWire tests the MsgTx wire encode and decode for various numbers
// TestTxEncoding tests the MsgTx appmessage encode and decode for various numbers
// of transaction inputs and outputs and protocol versions.
func TestTxWire(t *testing.T) {
func TestTxEncoding(t *testing.T) {
// Empty tx message.
noTx := NewNativeMsgTx(1, nil, nil)
noTxEncoded := []byte{
@@ -271,8 +271,8 @@ func TestTxWire(t *testing.T) {
tests := []struct {
in *MsgTx // Message to encode
out *MsgTx // Expected decoded message
buf []byte // Wire encoding
pver uint32 // Protocol version for wire encoding
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
}{
// Latest protocol version with no transactions.
{
@@ -293,44 +293,44 @@ func TestTxWire(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode the message to wire format.
// Encode the message to appmessage format.
var buf bytes.Buffer
err := test.in.BtcEncode(&buf, test.pver)
err := test.in.KaspaEncode(&buf, test.pver)
if err != nil {
t.Errorf("BtcEncode #%d error %v", i, err)
t.Errorf("KaspaEncode #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("BtcEncode #%d\n got: %s want: %s", i,
t.Errorf("KaspaEncode #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
// Decode the message from wire format.
// Decode the message from appmessage format.
var msg MsgTx
rbuf := bytes.NewReader(test.buf)
err = msg.BtcDecode(rbuf, test.pver)
err = msg.KaspaDecode(rbuf, test.pver)
if err != nil {
t.Errorf("BtcDecode #%d error %v", i, err)
t.Errorf("KaspaDecode #%d error %v", i, err)
continue
}
if !reflect.DeepEqual(&msg, test.out) {
t.Errorf("BtcDecode #%d\n got: %s want: %s", i,
t.Errorf("KaspaDecode #%d\n got: %s want: %s", i,
spew.Sdump(&msg), spew.Sdump(test.out))
continue
}
}
}
// TestTxWireErrors performs negative tests against wire encode and decode
// TestTxEncodingErrors performs negative tests against appmessage encode and decode
// of MsgTx to confirm error paths work correctly.
func TestTxWireErrors(t *testing.T) {
func TestTxEncodingErrors(t *testing.T) {
pver := ProtocolVersion
tests := []struct {
in *MsgTx // Value to encode
buf []byte // Wire encoding
pver uint32 // Protocol version for wire encoding
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
max int // Max size of fixed buffer to induce errors
writeErr error // Expected write error
readErr error // Expected read error
@@ -353,9 +353,9 @@ func TestTxWireErrors(t *testing.T) {
{multiTx, multiTxEncoded, pver, 57, io.ErrShortWrite, io.EOF},
// Force error in transaction output value.
{multiTx, multiTxEncoded, pver, 58, io.ErrShortWrite, io.EOF},
// Force error in transaction output pk script length.
// Force error in transaction output scriptPubKey length.
{multiTx, multiTxEncoded, pver, 66, io.ErrShortWrite, io.EOF},
// Force error in transaction output pk script.
// Force error in transaction output scriptPubKey.
{multiTx, multiTxEncoded, pver, 67, io.ErrShortWrite, io.EOF},
// Force error in transaction output lock time.
{multiTx, multiTxEncoded, pver, 210, io.ErrShortWrite, io.EOF},
@@ -363,21 +363,21 @@ func TestTxWireErrors(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to wire format.
// Encode to appmessage format.
w := newFixedWriter(test.max)
err := test.in.BtcEncode(w, test.pver)
if err != test.writeErr {
t.Errorf("BtcEncode #%d wrong error got: %v, want: %v",
err := test.in.KaspaEncode(w, test.pver)
if !errors.Is(err, test.writeErr) {
t.Errorf("KaspaEncode #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Decode from wire format.
// Decode from appmessage format.
var msg MsgTx
r := newFixedReader(test.max, test.buf)
err = msg.BtcDecode(r, test.pver)
if err != test.readErr {
t.Errorf("BtcDecode #%d wrong error got: %v, want: %v",
err = msg.KaspaDecode(r, test.pver)
if !errors.Is(err, test.readErr) {
t.Errorf("KaspaDecode #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
}
@@ -403,7 +403,7 @@ func TestTxSerialize(t *testing.T) {
0x00, // Varint for number of input transactions
0x00, // Varint for number of output transactions
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Lock time
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, // Sub Network ID
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Gas
@@ -435,11 +435,11 @@ func TestTxSerialize(t *testing.T) {
}
tests := []struct {
name string
in *MsgTx // Message to encode
out *MsgTx // Expected decoded message
buf []byte // Serialized data
pkScriptLocs []int // Expected output script locations
name string
in *MsgTx // Message to encode
out *MsgTx // Expected decoded message
buf []byte // Serialized data
scriptPubKeyLocs []int // Expected output script locations
}{
// No transactions.
{
@@ -474,7 +474,7 @@ func TestTxSerialize(t *testing.T) {
multiTx,
multiTx,
multiTxEncoded,
multiTxPkScriptLocs,
multiTxScriptPubKeyLocs,
},
}
@@ -508,27 +508,27 @@ func TestTxSerialize(t *testing.T) {
}
// Ensure the public key script locations are accurate.
pkScriptLocs := test.in.PkScriptLocs()
if !reflect.DeepEqual(pkScriptLocs, test.pkScriptLocs) {
t.Errorf("PkScriptLocs #%d\n got: %s want: %s", i,
spew.Sdump(pkScriptLocs),
spew.Sdump(test.pkScriptLocs))
scriptPubKeyLocs := test.in.ScriptPubKeyLocs()
if !reflect.DeepEqual(scriptPubKeyLocs, test.scriptPubKeyLocs) {
t.Errorf("ScriptPubKeyLocs #%d\n got: %s want: %s", i,
spew.Sdump(scriptPubKeyLocs),
spew.Sdump(test.scriptPubKeyLocs))
continue
}
for j, loc := range pkScriptLocs {
wantPkScript := test.in.TxOut[j].PkScript
gotPkScript := test.buf[loc : loc+len(wantPkScript)]
if !bytes.Equal(gotPkScript, wantPkScript) {
t.Errorf("PkScriptLocs #%d:%d\n unexpected "+
for j, loc := range scriptPubKeyLocs {
wantScriptPubKey := test.in.TxOut[j].ScriptPubKey
gotScriptPubKey := test.buf[loc : loc+len(wantScriptPubKey)]
if !bytes.Equal(gotScriptPubKey, wantScriptPubKey) {
t.Errorf("ScriptPubKeyLocs #%d:%d\n unexpected "+
"script got: %s want: %s", i, j,
spew.Sdump(gotPkScript),
spew.Sdump(wantPkScript))
spew.Sdump(gotScriptPubKey),
spew.Sdump(wantScriptPubKey))
}
}
}
}
// TestTxSerializeErrors performs negative tests against wire encode and decode
// TestTxSerializeErrors performs negative tests against appmessage encode and decode
// of MsgTx to confirm error paths work correctly.
func TestTxSerializeErrors(t *testing.T) {
tests := []struct {
@@ -556,9 +556,9 @@ func TestTxSerializeErrors(t *testing.T) {
{multiTx, multiTxEncoded, 57, io.ErrShortWrite, io.EOF},
// Force error in transaction output value.
{multiTx, multiTxEncoded, 58, io.ErrShortWrite, io.EOF},
// Force error in transaction output pk script length.
// Force error in transaction output scriptPubKey length.
{multiTx, multiTxEncoded, 66, io.ErrShortWrite, io.EOF},
// Force error in transaction output pk script.
// Force error in transaction output scriptPubKey.
{multiTx, multiTxEncoded, 67, io.ErrShortWrite, io.EOF},
// Force error in transaction output lock time.
{multiTx, multiTxEncoded, 210, io.ErrShortWrite, io.EOF},
@@ -569,7 +569,7 @@ func TestTxSerializeErrors(t *testing.T) {
// Serialize the transaction.
w := newFixedWriter(test.max)
err := test.in.Serialize(w)
if err != test.writeErr {
if !errors.Is(err, test.writeErr) {
t.Errorf("Serialize #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
@@ -579,7 +579,7 @@ func TestTxSerializeErrors(t *testing.T) {
var tx MsgTx
r := newFixedReader(test.max, test.buf)
err = tx.Deserialize(r)
if err != test.readErr {
if !errors.Is(err, test.readErr) {
t.Errorf("Deserialize #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
@@ -590,8 +590,8 @@ func TestTxSerializeErrors(t *testing.T) {
w := bytes.NewBuffer(make([]byte, 0, registryTx.SerializeSize()))
err := registryTx.Serialize(w)
str := "Transactions from registry subnetwork should have 0 gas"
expectedErr := messageError("MsgTx.BtcEncode", str)
str := "Transactions from built-in should have 0 gas"
expectedErr := messageError("MsgTx.KaspaEncode", str)
if err == nil || err.Error() != expectedErr.Error() {
t.Errorf("TestTxSerializeErrors: expected error %v but got %v", expectedErr, err)
}
@@ -601,7 +601,7 @@ func TestTxSerializeErrors(t *testing.T) {
err = nativeTx.Serialize(w)
str = "Transactions from native subnetwork should have 0 gas"
expectedErr = messageError("MsgTx.BtcEncode", str)
expectedErr = messageError("MsgTx.KaspaEncode", str)
if err == nil || err.Error() != expectedErr.Error() {
t.Errorf("TestTxSerializeErrors: expected error %v but got %v", expectedErr, err)
}
@@ -613,7 +613,7 @@ func TestTxSerializeErrors(t *testing.T) {
err = nativeTx.Serialize(w)
str = "Transactions from native subnetwork should have <nil> payload"
expectedErr = messageError("MsgTx.BtcEncode", str)
expectedErr = messageError("MsgTx.KaspaEncode", str)
if err == nil || err.Error() != expectedErr.Error() {
t.Errorf("TestTxSerializeErrors: expected error %v but got %v", expectedErr, err)
}
@@ -621,15 +621,15 @@ func TestTxSerializeErrors(t *testing.T) {
// TestTxOverflowErrors performs tests to ensure deserializing transactions
// which are intentionally crafted to use large values for the variable number
// of inputs and outputs are handled properly. This could otherwise potentially
// be used as an attack vector.
func TestTxOverflowErrors(t *testing.T) {
pver := ProtocolVersion
txVer := uint32(1)
tests := []struct {
buf []byte // Wire encoding
pver uint32 // Protocol version for wire encoding
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
version uint32 // Transaction version
err error // Expected error
}{
@@ -691,17 +691,17 @@ func TestTxOverflowErrors(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Decode from wire format.
// Decode from appmessage format.
var msg MsgTx
r := bytes.NewReader(test.buf)
err := msg.BtcDecode(r, test.pver)
err := msg.KaspaDecode(r, test.pver)
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
t.Errorf("BtcDecode #%d wrong error got: %v, want: %v",
t.Errorf("KaspaDecode #%d wrong error got: %v, want: %v",
i, err, reflect.TypeOf(test.err))
continue
}
// Decode from wire format.
// Decode from appmessage format.
r = bytes.NewReader(test.buf)
err = msg.Deserialize(r)
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
@@ -845,7 +845,7 @@ func underlyingArrayAddress(buf []byte) uint64 {
// multiTx is a MsgTx with an input and output and used in various tests.
var multiTxIns = []*TxIn{
{
PreviousOutPoint: OutPoint{
PreviousOutpoint: Outpoint{
TxID: daghash.TxID{},
Index: 0xffffffff,
},
@@ -858,7 +858,7 @@ var multiTxIns = []*TxIn{
var multiTxOuts = []*TxOut{
{
Value: 0x12a05f200,
PkScript: []byte{
ScriptPubKey: []byte{
0x41, // OP_DATA_65
0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
@@ -874,7 +874,7 @@ var multiTxOuts = []*TxOut{
},
{
Value: 0x5f5e100,
PkScript: []byte{
ScriptPubKey: []byte{
0x41, // OP_DATA_65
0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
@@ -891,7 +891,7 @@ var multiTxOuts = []*TxOut{
}
var multiTx = NewNativeMsgTx(1, multiTxIns, multiTxOuts)
// multiTxEncoded is the wire encoded bytes for multiTx using protocol version
// multiTxEncoded is the appmessage encoded bytes for multiTx using protocol version
// 60002 and is used in the various tests.
var multiTxEncoded = []byte{
0x01, 0x00, 0x00, 0x00, // Version
@@ -906,7 +906,7 @@ var multiTxEncoded = []byte{
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // Sequence
0x02, // Varint for number of output transactions
0x00, 0xf2, 0x05, 0x2a, 0x01, 0x00, 0x00, 0x00, // Transaction amount
0x43, // Varint for length of pk script
0x43, // Varint for length of scriptPubKey
0x41, // OP_DATA_65
0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
@@ -919,7 +919,7 @@ var multiTxEncoded = []byte{
0xa6, // 65-byte signature
0xac, // OP_CHECKSIG
0x00, 0xe1, 0xf5, 0x05, 0x00, 0x00, 0x00, 0x00, // Transaction amount
0x43, // Varint for length of pk script
0x43, // Varint for length of scriptPubKey
0x41, // OP_DATA_65
0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
@@ -937,6 +937,6 @@ var multiTxEncoded = []byte{
0x00, 0x00, 0x00, 0x00, // Sub Network ID
}
// multiTxPkScriptLocs is the location information for the public key scripts
// multiTxScriptPubKeyLocs is the location information for the public key scripts
// located in multiTx.
var multiTxPkScriptLocs = []int{67, 143}
var multiTxScriptPubKeyLocs = []int{67, 143}

View File

@@ -0,0 +1,26 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
// MsgVerAck defines a kaspa verack message which is used for a peer to
// acknowledge a version message (MsgVersion) after it has used the information
// to negotiate parameters. It implements the Message interface.
//
// This message has no payload.
type MsgVerAck struct {
baseMessage
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgVerAck) Command() MessageCommand {
return CmdVerAck
}
// NewMsgVerAck returns a new kaspa verack message that conforms to the
// Message interface.
func NewMsgVerAck() *MsgVerAck {
return &MsgVerAck{}
}

View File

@@ -0,0 +1,20 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"testing"
)
// TestVerAck tests the MsgVerAck API.
func TestVerAck(t *testing.T) {
// Ensure the command is expected value.
wantCmd := MessageCommand(1)
msg := NewMsgVerAck()
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgVerAck: wrong command - got %v want %v",
cmd, wantCmd)
}
}

View File

@@ -0,0 +1,130 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"fmt"
"github.com/kaspanet/kaspad/version"
"strings"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/subnetworkid"
)
// MaxUserAgentLen is the maximum allowed length for the user agent field in a
// version message (MsgVersion).
const MaxUserAgentLen = 256
// DefaultUserAgent for appmessage in the stack
var DefaultUserAgent = fmt.Sprintf("/kaspad:%s/", version.Version())
// MsgVersion implements the Message interface and represents a kaspa version
// message. It is used for a peer to advertise itself as soon as an outbound
// connection is made. The remote peer then uses this information along with
// its own to negotiate. The remote peer must then respond with a version
// message of its own containing the negotiated values followed by a verack
// message (MsgVerAck). This exchange must take place before any further
// communication is allowed to proceed.
type MsgVersion struct {
baseMessage
// Version of the protocol the node is using.
ProtocolVersion uint32
// The peer's network (mainnet, testnet, etc.)
Network string
// Bitfield which identifies the enabled services.
Services ServiceFlag
// Time the message was generated. This is encoded as an int64 on the appmessage.
Timestamp mstime.Time
// Address of the local peer.
Address *NetAddress
// The peer unique ID
ID *id.ID
// The user agent that generated the message. This is encoded as a varString
// on the appmessage. This has a max length of MaxUserAgentLen.
UserAgent string
// The selected tip hash of the generator of the version message.
SelectedTipHash *daghash.Hash
// Don't announce transactions to peer.
DisableRelayTx bool
// The subnetwork of the generator of the version message. Should be nil in full nodes
SubnetworkID *subnetworkid.SubnetworkID
}
// HasService returns whether the specified service is supported by the peer
// that generated the message.
func (msg *MsgVersion) HasService(service ServiceFlag) bool {
return msg.Services&service == service
}
// AddService adds service as a supported service by the peer generating the
// message.
func (msg *MsgVersion) AddService(service ServiceFlag) {
msg.Services |= service
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgVersion) Command() MessageCommand {
return CmdVersion
}
// NewMsgVersion returns a new kaspa version message that conforms to the
// Message interface using the passed parameters and defaults for the remaining
// fields.
func NewMsgVersion(addr *NetAddress, id *id.ID, network string,
selectedTipHash *daghash.Hash, subnetworkID *subnetworkid.SubnetworkID) *MsgVersion {
// Limit the timestamp to one millisecond precision since the protocol
// doesn't support better.
return &MsgVersion{
ProtocolVersion: ProtocolVersion,
Network: network,
Services: 0,
Timestamp: mstime.Now(),
Address: addr,
ID: id,
UserAgent: DefaultUserAgent,
SelectedTipHash: selectedTipHash,
DisableRelayTx: false,
SubnetworkID: subnetworkID,
}
}
// ValidateUserAgent checks userAgent length against MaxUserAgentLen
func ValidateUserAgent(userAgent string) error {
if len(userAgent) > MaxUserAgentLen {
str := fmt.Sprintf("user agent too long [len %d, max %d]",
len(userAgent), MaxUserAgentLen)
return messageError("MsgVersion", str)
}
return nil
}
// AddUserAgent adds a user agent to the user agent string for the version
// message. The version string is not defined to any strict format, although
// it is recommended to use the form "major.minor.revision" e.g. "2.6.41".
func (msg *MsgVersion) AddUserAgent(name string, version string,
comments ...string) {
newUserAgent := fmt.Sprintf("%s:%s", name, version)
if len(comments) != 0 {
newUserAgent = fmt.Sprintf("%s(%s)", newUserAgent,
strings.Join(comments, "; "))
}
newUserAgent = fmt.Sprintf("%s%s/", msg.UserAgent, newUserAgent)
msg.UserAgent = newUserAgent
}
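A short illustrative sketch (not part of the diff) of the user agent string these helpers build up; the exact kaspad version in the comment is only an example:
package appmessage

import "fmt"

// userAgentExample shows how AddUserAgent appends "name:version(comments)/"
// segments to the default "/kaspad:x.y.z/" user agent.
func userAgentExample(msg *MsgVersion) {
	msg.AddUserAgent("myclient", "1.2.3", "optional", "comments")
	msg.AddUserAgent("mygui", "3.4.5")

	// e.g. "/kaspad:0.6.8/myclient:1.2.3(optional; comments)/mygui:3.4.5/"
	fmt.Println(msg.UserAgent)

	if err := ValidateUserAgent(msg.UserAgent); err != nil {
		fmt.Println("user agent too long:", err)
	}
}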

View File

@@ -0,0 +1,96 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
"github.com/kaspanet/kaspad/util/daghash"
"net"
"reflect"
"testing"
)
// TestVersion tests the MsgVersion API.
func TestVersion(t *testing.T) {
pver := ProtocolVersion
// Create version message data.
selectedTipHash := &daghash.Hash{12, 34}
tcpAddrMe := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 16111}
me := NewNetAddress(tcpAddrMe, SFNodeNetwork)
generatedID, err := id.GenerateID()
if err != nil {
t.Fatalf("id.GenerateID: %s", err)
}
// Ensure we get the correct data back out.
msg := NewMsgVersion(me, generatedID, "mainnet", selectedTipHash, nil)
if msg.ProtocolVersion != pver {
t.Errorf("NewMsgVersion: wrong protocol version - got %v, want %v",
msg.ProtocolVersion, pver)
}
if !reflect.DeepEqual(msg.Address, me) {
t.Errorf("NewMsgVersion: wrong me address - got %v, want %v",
spew.Sdump(&msg.Address), spew.Sdump(me))
}
if msg.ID.String() != generatedID.String() {
t.Errorf("NewMsgVersion: wrong nonce - got %s, want %s",
msg.ID, generatedID)
}
if msg.UserAgent != DefaultUserAgent {
t.Errorf("NewMsgVersion: wrong user agent - got %v, want %v",
msg.UserAgent, DefaultUserAgent)
}
if !msg.SelectedTipHash.IsEqual(selectedTipHash) {
t.Errorf("NewMsgVersion: wrong selected tip hash - got %s, want %s",
msg.SelectedTipHash, selectedTipHash)
}
if msg.DisableRelayTx {
t.Errorf("NewMsgVersion: disable relay tx is not false by "+
"default - got %v, want %v", msg.DisableRelayTx, false)
}
msg.AddUserAgent("myclient", "1.2.3", "optional", "comments")
customUserAgent := DefaultUserAgent + "myclient:1.2.3(optional; comments)/"
if msg.UserAgent != customUserAgent {
t.Errorf("AddUserAgent: wrong user agent - got %s, want %s",
msg.UserAgent, customUserAgent)
}
msg.AddUserAgent("mygui", "3.4.5")
customUserAgent += "mygui:3.4.5/"
if msg.UserAgent != customUserAgent {
t.Errorf("AddUserAgent: wrong user agent - got %s, want %s",
msg.UserAgent, customUserAgent)
}
// Version message should not have any services set by default.
if msg.Services != 0 {
t.Errorf("NewMsgVersion: wrong default services - got %v, want %v",
msg.Services, 0)
}
if msg.HasService(SFNodeNetwork) {
t.Errorf("HasService: SFNodeNetwork service is set")
}
// Ensure the command is expected value.
wantCmd := MessageCommand(0)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgVersion: wrong command - got %v want %v",
cmd, wantCmd)
}
// Ensure adding the full service node flag works.
msg.AddService(SFNodeNetwork)
if msg.Services != SFNodeNetwork {
t.Errorf("AddService: wrong services - got %v, want %v",
msg.Services, SFNodeNetwork)
}
if !msg.HasService(SFNodeNetwork) {
t.Errorf("HasService: SFNodeNetwork service not set")
}
}

View File

@@ -0,0 +1,74 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"github.com/kaspanet/kaspad/util/mstime"
"net"
)
// NetAddress defines information about a peer on the network including the time
// it was last seen, the services it supports, its IP address, and port.
type NetAddress struct {
// Last time the address was seen.
Timestamp mstime.Time
// Bitfield which identifies the services supported by the address.
Services ServiceFlag
// IP address of the peer.
IP net.IP
// Port the peer is using. This is encoded in big endian on the appmessage
// which differs from most everything else.
Port uint16
}
// HasService returns whether the specified service is supported by the address.
func (na *NetAddress) HasService(service ServiceFlag) bool {
return na.Services&service == service
}
// AddService adds service as a supported service by the peer generating the
// message.
func (na *NetAddress) AddService(service ServiceFlag) {
na.Services |= service
}
// TCPAddress converts the NetAddress to *net.TCPAddr
func (na *NetAddress) TCPAddress() *net.TCPAddr {
return &net.TCPAddr{
IP: na.IP,
Port: int(na.Port),
}
}
// NewNetAddressIPPort returns a new NetAddress using the provided IP, port, and
// supported services with defaults for the remaining fields.
func NewNetAddressIPPort(ip net.IP, port uint16, services ServiceFlag) *NetAddress {
return NewNetAddressTimestamp(mstime.Now(), services, ip, port)
}
// NewNetAddressTimestamp returns a new NetAddress using the provided
// timestamp, IP, port, and supported services. The timestamp is rounded to
// single millisecond precision.
func NewNetAddressTimestamp(
timestamp mstime.Time, services ServiceFlag, ip net.IP, port uint16) *NetAddress {
// Limit the timestamp to one millisecond precision since the protocol
// doesn't support better.
na := NetAddress{
Timestamp: timestamp,
Services: services,
IP: ip,
Port: port,
}
return &na
}
// NewNetAddress returns a new NetAddress using the provided TCP address and
// supported services with defaults for the remaining fields.
func NewNetAddress(addr *net.TCPAddr, services ServiceFlag) *NetAddress {
return NewNetAddressIPPort(addr.IP, uint16(addr.Port), services)
}
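A small usage sketch (not part of the diff; the function name is hypothetical) of the round trip between net.TCPAddr and NetAddress:
package appmessage

import (
	"fmt"
	"net"
)

// netAddressRoundTrip converts a TCP address into a NetAddress and back,
// tagging it with the full-node service flag along the way.
func netAddressRoundTrip() {
	tcpAddr := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 16111}

	na := NewNetAddress(tcpAddr, SFNodeNetwork)
	fmt.Println(na.HasService(SFNodeNetwork)) // true

	back := na.TCPAddress()
	fmt.Println(back.IP.Equal(tcpAddr.IP), back.Port == tcpAddr.Port) // true true
}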

View File

@@ -0,0 +1,45 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"net"
"testing"
)
// TestNetAddress tests the NetAddress API.
func TestNetAddress(t *testing.T) {
ip := net.ParseIP("127.0.0.1")
port := 16111
// Test NewNetAddress.
na := NewNetAddress(&net.TCPAddr{IP: ip, Port: port}, 0)
// Ensure we get the same ip, port, and services back out.
if !na.IP.Equal(ip) {
t.Errorf("NetNetAddress: wrong ip - got %v, want %v", na.IP, ip)
}
if na.Port != uint16(port) {
t.Errorf("NetNetAddress: wrong port - got %v, want %v", na.Port,
port)
}
if na.Services != 0 {
t.Errorf("NetNetAddress: wrong services - got %v, want %v",
na.Services, 0)
}
if na.HasService(SFNodeNetwork) {
t.Errorf("HasService: SFNodeNetwork service is set")
}
// Ensure adding the full service node flag works.
na.AddService(SFNodeNetwork)
if na.Services != SFNodeNetwork {
t.Errorf("AddService: wrong services - got %v, want %v",
na.Services, SFNodeNetwork)
}
if !na.HasService(SFNodeNetwork) {
t.Errorf("HasService: SFNodeNetwork service not set")
}
}

View File

@@ -2,7 +2,7 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
package appmessage
import (
"fmt"
@@ -10,13 +10,16 @@ import (
"strings"
)
// XXX pedro: we will probably need to bump this.
const (
// ProtocolVersion is the latest protocol version this package supports.
ProtocolVersion uint32 = 1
// DefaultServices describes the default services that are supported by
// the server.
DefaultServices = SFNodeNetwork | SFNodeBloom | SFNodeCF
)
// ServiceFlag identifies services supported by a bitcoin peer.
// ServiceFlag identifies services supported by a kaspa peer.
type ServiceFlag uint64
const (
@@ -89,45 +92,41 @@ func (f ServiceFlag) String() string {
return s
}
// BitcoinNet represents which bitcoin network a message belongs to.
type BitcoinNet uint32
// KaspaNet represents which kaspa network a message belongs to.
type KaspaNet uint32
// Constants used to indicate the message bitcoin network. They can also be
// Constants used to indicate the message kaspa network. They can also be
// used to seek to the next message when a stream's state is unknown, but
// this package does not provide that functionality since it's generally a
// better idea to simply disconnect clients that are misbehaving over TCP.
const (
// MainNet represents the main bitcoin network.
MainNet BitcoinNet = 0xd9b4bef9
// Mainnet represents the main kaspa network.
Mainnet KaspaNet = 0x3ddcf71d
// TestNet represents the regression test network.
TestNet BitcoinNet = 0xdab5bffa
// Testnet represents the test network.
Testnet KaspaNet = 0xddb8af8f
// TestNet3 represents the test network (version 3).
TestNet3 BitcoinNet = 0x0709110b
// Simnet represents the simulation test network.
Simnet KaspaNet = 0x374dcf1c
// SimNet represents the simulation test network.
SimNet BitcoinNet = 0x12141c16
// DevNet represents the development test network.
DevNet BitcoinNet = 0x01020304
// Devnet represents the development test network.
Devnet KaspaNet = 0x732d87e1
)
// bnStrings is a map of bitcoin networks back to their constant names for
// bnStrings is a map of kaspa networks back to their constant names for
// pretty printing.
var bnStrings = map[BitcoinNet]string{
MainNet: "MainNet",
TestNet: "TestNet",
TestNet3: "TestNet3",
SimNet: "SimNet",
DevNet: "DevNet",
var bnStrings = map[KaspaNet]string{
Mainnet: "Mainnet",
Testnet: "Testnet",
Simnet: "Simnet",
Devnet: "Devnet",
}
// String returns the BitcoinNet in human-readable form.
func (n BitcoinNet) String() string {
// String returns the KaspaNet in human-readable form.
func (n KaspaNet) String() string {
if s, ok := bnStrings[n]; ok {
return s
}
return fmt.Sprintf("Unknown BitcoinNet (%d)", uint32(n))
return fmt.Sprintf("Unknown KaspaNet (%d)", uint32(n))
}
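
A minimal sketch of how the KaspaNet constants and their Stringer might be used, for example when checking an incoming message's network magic. The constants come from the file above; the check itself is illustrative.

package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/app/appmessage"
)

func main() {
	// Network magic read from an incoming message header (illustrative value).
	received := appmessage.KaspaNet(0x3ddcf71d)

	if received != appmessage.Mainnet {
		fmt.Printf("unexpected network %s, expected %s\n", received, appmessage.Mainnet)
		return
	}
	fmt.Println("message belongs to", received) // message belongs to Mainnet
}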

View File

@@ -2,7 +2,7 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
package appmessage
import "testing"
@@ -33,17 +33,16 @@ func TestServiceFlagStringer(t *testing.T) {
}
}
// TestBitcoinNetStringer tests the stringized output for bitcoin net types.
func TestBitcoinNetStringer(t *testing.T) {
// TestKaspaNetStringer tests the stringized output for kaspa net types.
func TestKaspaNetStringer(t *testing.T) {
tests := []struct {
in BitcoinNet
in KaspaNet
want string
}{
{MainNet, "MainNet"},
{TestNet, "TestNet"},
{TestNet3, "TestNet3"},
{SimNet, "SimNet"},
{0xffffffff, "Unknown BitcoinNet (4294967295)"},
{Mainnet, "Mainnet"},
{Testnet, "Testnet"},
{Simnet, "Simnet"},
{0xffffffff, "Unknown KaspaNet (4294967295)"},
}
t.Logf("Running %d tests", len(tests))

app/log.go
View File

@@ -0,0 +1,14 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Copyright (c) 2017 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package app
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log, _ = logger.Get(logger.SubsystemTags.KASD)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -0,0 +1,64 @@
// Copyright (c) 2015-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blocklogger
import (
"github.com/kaspanet/kaspad/util/mstime"
"sync"
"time"
"github.com/kaspanet/kaspad/util"
)
var (
receivedLogBlocks int64
receivedLogTx int64
lastBlockLogTime = mstime.Now()
mtx sync.Mutex
)
// LogBlock logs a new block blue score as an information message
// to show progress to the user. In order to prevent spam, it limits logging to
// one message every 10 seconds with duration and totals included.
func LogBlock(block *util.Block) error {
mtx.Lock()
defer mtx.Unlock()
receivedLogBlocks++
receivedLogTx += int64(len(block.MsgBlock().Transactions))
now := mstime.Now()
duration := now.Sub(lastBlockLogTime)
if duration < time.Second*10 {
return nil
}
// Truncate the duration to 10s of milliseconds.
tDuration := duration.Round(10 * time.Millisecond)
// Log information about new block blue score.
blockStr := "blocks"
if receivedLogBlocks == 1 {
blockStr = "block"
}
txStr := "transactions"
if receivedLogTx == 1 {
txStr = "transaction"
}
blueScore, err := block.BlueScore()
if err != nil {
return err
}
log.Infof("Processed %d %s in the last %s (%d %s, blue score %d, %s)",
receivedLogBlocks, blockStr, tDuration, receivedLogTx,
txStr, blueScore, block.MsgBlock().Header.Timestamp)
receivedLogBlocks = 0
receivedLogTx = 0
lastBlockLogTime = now
return nil
}
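
The throttling pattern used by LogBlock above — keep accumulating counters, but only emit a log line when at least 10 seconds have passed since the last one — can be sketched in isolation roughly as follows (a standalone example, not the package's actual code):

package main

import (
	"fmt"
	"sync"
	"time"
)

var (
	processed   int
	lastLogTime = time.Now()
	mu          sync.Mutex
)

// logProgress counts one processed item and logs at most once every 10 seconds.
func logProgress() {
	mu.Lock()
	defer mu.Unlock()

	processed++
	if time.Since(lastLogTime) < 10*time.Second {
		return
	}
	fmt.Printf("Processed %d items in the last %s\n",
		processed, time.Since(lastLogTime).Round(10*time.Millisecond))
	processed = 0
	lastLogTime = time.Now()
}

func main() {
	for i := 0; i < 5; i++ {
		logProgress() // nothing is printed until 10 seconds have elapsed
	}
}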

View File

@@ -0,0 +1,11 @@
// Copyright (c) 2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blocklogger
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
)
var log, _ = logger.Get(logger.SubsystemTags.PROT)

View File

@@ -0,0 +1,14 @@
package common
import (
"time"
"github.com/pkg/errors"
)
// DefaultTimeout is the default duration to wait for enqueuing/dequeuing
// to/from routes.
const DefaultTimeout = 30 * time.Second
// ErrPeerWithSameIDExists signifies that a peer with the same ID already exist.
var ErrPeerWithSameIDExists = errors.New("ready peer with the same ID already exists")

View File

@@ -0,0 +1,10 @@
package flowcontext
import (
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
)
// AddressManager returns the address manager associated to the flow context.
func (f *FlowContext) AddressManager() *addressmanager.AddressManager {
return f.addressManager
}

View File

@@ -0,0 +1,74 @@
package flowcontext
import (
"sync/atomic"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/flows/blockrelay"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
)
// OnNewBlock updates the mempool after a new block arrives, relays newly
// unorphaned transactions, and possibly rebroadcasts manually added
// transactions when not in IBD.
func (f *FlowContext) OnNewBlock(block *util.Block) error {
transactionsAcceptedToMempool, err := f.txPool.HandleNewBlock(block)
if err != nil {
return err
}
return f.broadcastTransactionsAfterBlockAdded(block, transactionsAcceptedToMempool)
}
func (f *FlowContext) broadcastTransactionsAfterBlockAdded(block *util.Block, transactionsAcceptedToMempool []*util.Tx) error {
f.updateTransactionsToRebroadcast(block)
// Don't relay transactions when in IBD.
if atomic.LoadUint32(&f.isInIBD) != 0 {
return nil
}
var txIDsToRebroadcast []*daghash.TxID
if f.shouldRebroadcastTransactions() {
txIDsToRebroadcast = f.txIDsToRebroadcast()
}
txIDsToBroadcast := make([]*daghash.TxID, len(transactionsAcceptedToMempool)+len(txIDsToRebroadcast))
for i, tx := range transactionsAcceptedToMempool {
txIDsToBroadcast[i] = tx.ID()
}
offset := len(transactionsAcceptedToMempool)
for i, txID := range txIDsToRebroadcast {
txIDsToBroadcast[offset+i] = txID
}
if len(txIDsToBroadcast) == 0 {
return nil
}
if len(txIDsToBroadcast) > appmessage.MaxInvPerTxInvMsg {
txIDsToBroadcast = txIDsToBroadcast[:appmessage.MaxInvPerTxInvMsg]
}
inv := appmessage.NewMsgInvTransaction(txIDsToBroadcast)
return f.Broadcast(inv)
}
// SharedRequestedBlocks returns a *blockrelay.SharedRequestedBlocks for sharing
// data about requested blocks between different peers.
func (f *FlowContext) SharedRequestedBlocks() *blockrelay.SharedRequestedBlocks {
return f.sharedRequestedBlocks
}
// AddBlock adds the given block to the DAG and propagates it.
func (f *FlowContext) AddBlock(block *util.Block, flags blockdag.BehaviorFlags) error {
_, _, err := f.DAG().ProcessBlock(block, flags)
if err != nil {
return err
}
err = f.OnNewBlock(block)
if err != nil {
return err
}
return f.Broadcast(appmessage.NewMsgInvBlock(block.Hash()))
}

View File

@@ -0,0 +1,8 @@
package flowcontext
import "github.com/kaspanet/kaspad/infrastructure/config"
// Config returns an instance of *config.Config associated to the flow context.
func (f *FlowContext) Config() *config.Config {
return f.cfg
}

View File

@@ -0,0 +1,8 @@
package flowcontext
import "github.com/kaspanet/kaspad/domain/blockdag"
// DAG returns the DAG associated to the flow context.
func (f *FlowContext) DAG() *blockdag.BlockDAG {
return f.dag
}

View File

@@ -0,0 +1,31 @@
package flowcontext
import (
"errors"
"sync/atomic"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
)
// HandleError handles an error from a flow.
// It sends the error to errChan if isStopping == 0 and increments isStopping.
//
// If this is ErrRouteClosed - forwards it to errChan
// If this is a ProtocolError - logs the error and forwards it to errChan
// Otherwise - panics
func (*FlowContext) HandleError(err error, flowName string, isStopping *uint32, errChan chan<- error) {
isErrRouteClosed := errors.Is(err, router.ErrRouteClosed)
if !isErrRouteClosed {
if protocolErr := &(protocolerrors.ProtocolError{}); !errors.As(err, &protocolErr) {
panic(err)
}
log.Errorf("error from %s: %s", flowName, err)
}
if atomic.AddUint32(isStopping, 1) == 1 {
errChan <- err
}
}
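
A hypothetical wrapper showing how a flow might report errors through this policy: ErrRouteClosed and ProtocolError end up on errChan, anything else panics inside HandleError. The runFlow helper, its flow parameter, and the example package are illustrative; only the import path (assumed to be app/protocol/flowcontext, matching the surrounding paths) and the HandleError signature come from this file.

package example

import (
	"github.com/kaspanet/kaspad/app/protocol/flowcontext"
)

// runFlow runs a single flow function and forwards any error according to
// FlowContext.HandleError. Only the first error is sent on errChan.
func runFlow(context *flowcontext.FlowContext, flowName string,
	flow func() error, errChan chan<- error) {

	isStopping := uint32(0)
	if err := flow(); err != nil {
		context.HandleError(err, flowName, &isStopping, errChan)
	}
}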

View File

@@ -0,0 +1,63 @@
package flowcontext
import (
"sync"
"time"
"github.com/kaspanet/kaspad/app/protocol/flows/blockrelay"
"github.com/kaspanet/kaspad/app/protocol/flows/relaytransactions"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/domain/mempool"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/connmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
)
// FlowContext holds state that is relevant to more than one flow or one peer, and allows communication between
// different flows that can be associated to different peers.
type FlowContext struct {
cfg *config.Config
netAdapter *netadapter.NetAdapter
txPool *mempool.TxPool
dag *blockdag.BlockDAG
addressManager *addressmanager.AddressManager
connectionManager *connmanager.ConnectionManager
transactionsToRebroadcastLock sync.Mutex
transactionsToRebroadcast map[daghash.TxID]*util.Tx
lastRebroadcastTime time.Time
sharedRequestedTransactions *relaytransactions.SharedRequestedTransactions
sharedRequestedBlocks *blockrelay.SharedRequestedBlocks
isInIBD uint32
startIBDMutex sync.Mutex
ibdPeer *peerpkg.Peer
peers map[id.ID]*peerpkg.Peer
peersMutex sync.RWMutex
}
// New returns a new instance of FlowContext.
func New(cfg *config.Config, dag *blockdag.BlockDAG, addressManager *addressmanager.AddressManager,
txPool *mempool.TxPool, netAdapter *netadapter.NetAdapter,
connectionManager *connmanager.ConnectionManager) *FlowContext {
return &FlowContext{
cfg: cfg,
netAdapter: netAdapter,
dag: dag,
addressManager: addressManager,
connectionManager: connectionManager,
txPool: txPool,
sharedRequestedTransactions: relaytransactions.NewSharedRequestedTransactions(),
sharedRequestedBlocks: blockrelay.NewSharedRequestedBlocks(),
peers: make(map[id.ID]*peerpkg.Peer),
transactionsToRebroadcast: make(map[daghash.TxID]*util.Tx),
}
}

View File

@@ -0,0 +1,89 @@
package flowcontext
import (
"sync/atomic"
"time"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain/blockdag"
)
// StartIBDIfRequired selects a peer and starts IBD against it
// if required
func (f *FlowContext) StartIBDIfRequired() {
f.startIBDMutex.Lock()
defer f.startIBDMutex.Unlock()
if f.IsInIBD() {
return
}
peer := f.selectPeerForIBD(f.dag)
if peer == nil {
spawn("StartIBDIfRequired-requestSelectedTipsIfRequired", f.requestSelectedTipsIfRequired)
return
}
atomic.StoreUint32(&f.isInIBD, 1)
f.ibdPeer = peer
spawn("StartIBDIfRequired-peer.StartIBD", peer.StartIBD)
}
// IsInIBD returns true if IBD is currently running
func (f *FlowContext) IsInIBD() bool {
return atomic.LoadUint32(&f.isInIBD) != 0
}
// selectPeerForIBD returns the first peer whose selected tip
// hash is not in our DAG
func (f *FlowContext) selectPeerForIBD(dag *blockdag.BlockDAG) *peerpkg.Peer {
f.peersMutex.RLock()
defer f.peersMutex.RUnlock()
for _, peer := range f.peers {
peerSelectedTipHash := peer.SelectedTipHash()
if !dag.IsInDAG(peerSelectedTipHash) {
return peer
}
}
return nil
}
func (f *FlowContext) requestSelectedTipsIfRequired() {
if f.isDAGTimeCurrent() {
return
}
f.requestSelectedTips()
}
func (f *FlowContext) isDAGTimeCurrent() bool {
const minDurationToRequestSelectedTips = time.Minute
return f.dag.Now().Sub(f.dag.SelectedTipHeader().Timestamp) > minDurationToRequestSelectedTips
}
func (f *FlowContext) requestSelectedTips() {
f.peersMutex.RLock()
defer f.peersMutex.RUnlock()
for _, peer := range f.peers {
peer.RequestSelectedTipIfRequired()
}
}
// FinishIBD finishes the current IBD flow and starts a new one if required.
func (f *FlowContext) FinishIBD() {
f.ibdPeer = nil
atomic.StoreUint32(&f.isInIBD, 0)
f.StartIBDIfRequired()
}
// IBDPeer returns the currently active IBD peer.
// Returns nil if we aren't currently in IBD
func (f *FlowContext) IBDPeer() *peerpkg.Peer {
if !f.IsInIBD() {
return nil
}
return f.ibdPeer
}
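
The single-flight guard used here — a mutex around the start decision plus an atomic flag that other flows can read cheaply — can be sketched on its own as follows (illustrative standalone code, not the package's API):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type singleFlight struct {
	startMutex sync.Mutex
	running    uint32
}

// tryStart marks the operation as running and reports whether it was started.
// A second call returns false until finish is called, like StartIBDIfRequired above.
func (s *singleFlight) tryStart() bool {
	s.startMutex.Lock()
	defer s.startMutex.Unlock()

	if atomic.LoadUint32(&s.running) != 0 {
		return false
	}
	atomic.StoreUint32(&s.running, 1)
	return true
}

// isRunning can be read cheaply from any goroutine, like IsInIBD above.
func (s *singleFlight) isRunning() bool {
	return atomic.LoadUint32(&s.running) != 0
}

// finish clears the flag, like FinishIBD above.
func (s *singleFlight) finish() {
	atomic.StoreUint32(&s.running, 0)
}

func main() {
	var s singleFlight
	fmt.Println(s.tryStart())  // true
	fmt.Println(s.tryStart())  // false - already running
	s.finish()
	fmt.Println(s.isRunning()) // false
}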

View File

@@ -0,0 +1,9 @@
package flowcontext
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log, _ = logger.Get(logger.SubsystemTags.PROT)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -0,0 +1,74 @@
package flowcontext
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/infrastructure/network/connmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/pkg/errors"
)
// NetAdapter returns the net adapter that is associated to the flow context.
func (f *FlowContext) NetAdapter() *netadapter.NetAdapter {
return f.netAdapter
}
// ConnectionManager returns the connection manager that is associated to the flow context.
func (f *FlowContext) ConnectionManager() *connmanager.ConnectionManager {
return f.connectionManager
}
// AddToPeers marks this peer as ready and adds it to the ready peers list.
func (f *FlowContext) AddToPeers(peer *peerpkg.Peer) error {
f.peersMutex.Lock()
defer f.peersMutex.Unlock()
if _, ok := f.peers[*peer.ID()]; ok {
return errors.Wrapf(common.ErrPeerWithSameIDExists, "peer with ID %s already exists", peer.ID())
}
f.peers[*peer.ID()] = peer
return nil
}
// RemoveFromPeers removes this peer from the peers list.
func (f *FlowContext) RemoveFromPeers(peer *peerpkg.Peer) {
f.peersMutex.Lock()
defer f.peersMutex.Unlock()
delete(f.peers, *peer.ID())
}
// readyPeerConnections returns the NetConnections of all the ready peers.
func (f *FlowContext) readyPeerConnections() []*netadapter.NetConnection {
f.peersMutex.RLock()
defer f.peersMutex.RUnlock()
peerConnections := make([]*netadapter.NetConnection, len(f.peers))
i := 0
for _, peer := range f.peers {
peerConnections[i] = peer.Connection()
i++
}
return peerConnections
}
// Broadcast broadcasts the given message to all the ready peers.
func (f *FlowContext) Broadcast(message appmessage.Message) error {
return f.netAdapter.Broadcast(f.readyPeerConnections(), message)
}
// Peers returns the currently active peers
func (f *FlowContext) Peers() []*peerpkg.Peer {
f.peersMutex.RLock()
defer f.peersMutex.RUnlock()
peers := make([]*peerpkg.Peer, len(f.peers))
i := 0
for _, peer := range f.peers {
peers[i] = peer
i++
}
return peers
}

View File

@@ -0,0 +1,71 @@
package flowcontext
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/flows/relaytransactions"
"github.com/kaspanet/kaspad/domain/mempool"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/pkg/errors"
"time"
)
// AddTransaction adds the given transaction to the mempool and propagates it.
func (f *FlowContext) AddTransaction(tx *util.Tx) error {
f.transactionsToRebroadcastLock.Lock()
defer f.transactionsToRebroadcastLock.Unlock()
transactionsAcceptedToMempool, err := f.txPool.ProcessTransaction(tx, false)
if err != nil {
return err
}
if len(transactionsAcceptedToMempool) > 1 {
return errors.New("got more than one accepted transactions when no orphans were allowed")
}
f.transactionsToRebroadcast[*tx.ID()] = tx
inv := appmessage.NewMsgInvTransaction([]*daghash.TxID{tx.ID()})
log.Criticalf("~~~~~ FlowContext.AddTransaction() broadcasting %s", tx.ID())
return f.Broadcast(inv)
}
func (f *FlowContext) updateTransactionsToRebroadcast(block *util.Block) {
f.transactionsToRebroadcastLock.Lock()
defer f.transactionsToRebroadcastLock.Unlock()
// Note: if the block is red, its transactions won't be rebroadcasted
// anymore, although they are not included in the UTXO set.
// This is probably ok, since red blocks are quite rare.
for _, tx := range block.Transactions() {
delete(f.transactionsToRebroadcast, *tx.ID())
}
}
func (f *FlowContext) shouldRebroadcastTransactions() bool {
const rebroadcastInterval = 30 * time.Second
return time.Since(f.lastRebroadcastTime) > rebroadcastInterval
}
func (f *FlowContext) txIDsToRebroadcast() []*daghash.TxID {
f.transactionsToRebroadcastLock.Lock()
defer f.transactionsToRebroadcastLock.Unlock()
txIDs := make([]*daghash.TxID, len(f.transactionsToRebroadcast))
i := 0
for _, tx := range f.transactionsToRebroadcast {
txIDs[i] = tx.ID()
i++
}
return txIDs
}
// SharedRequestedTransactions returns a *relaytransactions.SharedRequestedTransactions for sharing
// data about requested transactions between different peers.
func (f *FlowContext) SharedRequestedTransactions() *relaytransactions.SharedRequestedTransactions {
return f.sharedRequestedTransactions
}
// TxPool returns the transaction pool associated to the manager.
func (f *FlowContext) TxPool() *mempool.TxPool {
return f.txPool
}

View File

@@ -0,0 +1,57 @@
package addressexchange
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// ReceiveAddressesContext is the interface for the context needed for the ReceiveAddresses flow.
type ReceiveAddressesContext interface {
Config() *config.Config
AddressManager() *addressmanager.AddressManager
}
// ReceiveAddresses asks a peer for more addresses if needed.
func ReceiveAddresses(context ReceiveAddressesContext, incomingRoute *router.Route, outgoingRoute *router.Route,
peer *peerpkg.Peer) error {
if !context.AddressManager().NeedMoreAddresses() {
return nil
}
subnetworkID := peer.SubnetworkID()
msgGetAddresses := appmessage.NewMsgRequestAddresses(false, subnetworkID)
err := outgoingRoute.Enqueue(msgGetAddresses)
if err != nil {
return err
}
message, err := incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return err
}
msgAddresses := message.(*appmessage.MsgAddresses)
if len(msgAddresses.AddrList) > addressmanager.GetAddressesMax {
return protocolerrors.Errorf(true, "address count exceeded %d", addressmanager.GetAddressesMax)
}
if msgAddresses.IncludeAllSubnetworks {
return protocolerrors.Errorf(true, "got unexpected "+
"IncludeAllSubnetworks=true in [%s] command", msgAddresses.Command())
}
if !msgAddresses.SubnetworkID.IsEqual(context.Config().SubnetworkID) && msgAddresses.SubnetworkID != nil {
return protocolerrors.Errorf(false, "only full nodes and %s subnetwork IDs "+
"are allowed in [%s] command, but got subnetwork ID %s",
context.Config().SubnetworkID, msgAddresses.Command(), msgAddresses.SubnetworkID)
}
sourceAddress := peer.Connection().NetAddress()
context.AddressManager().AddAddresses(msgAddresses.AddrList, sourceAddress, msgAddresses.SubnetworkID)
return nil
}

View File

@@ -0,0 +1,52 @@
package addressexchange
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"math/rand"
)
// SendAddressesContext is the interface for the context needed for the SendAddresses flow.
type SendAddressesContext interface {
AddressManager() *addressmanager.AddressManager
}
// SendAddresses sends addresses to a peer that requests it.
func SendAddresses(context SendAddressesContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
message, err := incomingRoute.Dequeue()
if err != nil {
return err
}
msgGetAddresses := message.(*appmessage.MsgRequestAddresses)
addresses := context.AddressManager().AddressCache(msgGetAddresses.IncludeAllSubnetworks,
msgGetAddresses.SubnetworkID)
msgAddresses := appmessage.NewMsgAddresses(msgGetAddresses.IncludeAllSubnetworks, msgGetAddresses.SubnetworkID)
err = msgAddresses.AddAddresses(shuffleAddresses(addresses)...)
if err != nil {
return err
}
return outgoingRoute.Enqueue(msgAddresses)
}
// shuffleAddresses randomizes the order of the given addresses and truncates them if there are more than the maximum allowed in one message.
func shuffleAddresses(addresses []*appmessage.NetAddress) []*appmessage.NetAddress {
addressCount := len(addresses)
if addressCount < appmessage.MaxAddressesPerMsg {
return addresses
}
shuffleAddresses := make([]*appmessage.NetAddress, addressCount)
copy(shuffleAddresses, addresses)
rand.Shuffle(addressCount, func(i, j int) {
shuffleAddresses[i], shuffleAddresses[j] = shuffleAddresses[j], shuffleAddresses[i]
})
// Truncate it to the maximum size.
shuffleAddresses = shuffleAddresses[:appmessage.MaxAddressesPerMsg]
return shuffleAddresses
}
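
The shuffle-and-truncate step can be illustrated in isolation. This standalone sketch applies the same idea to a plain string slice, with an assumed cap of 3 entries per message (the real code uses appmessage.MaxAddressesPerMsg):

package main

import (
	"fmt"
	"math/rand"
)

const maxPerMessage = 3 // illustrative cap

func shuffleAndTruncate(items []string) []string {
	if len(items) < maxPerMessage {
		return items
	}
	shuffled := make([]string, len(items))
	copy(shuffled, items)
	rand.Shuffle(len(shuffled), func(i, j int) {
		shuffled[i], shuffled[j] = shuffled[j], shuffled[i]
	})
	// Truncate to the maximum size, like the flow above.
	return shuffled[:maxPerMessage]
}

func main() {
	addresses := []string{"a", "b", "c", "d", "e"}
	fmt.Println(shuffleAndTruncate(addresses)) // 3 random entries; the original slice is untouched
}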

View File

@@ -0,0 +1,55 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)
// RelayBlockRequestsContext is the interface for the context needed for the HandleRelayBlockRequests flow.
type RelayBlockRequestsContext interface {
DAG() *blockdag.BlockDAG
}
// HandleRelayBlockRequests listens to appmessage.MsgRequestRelayBlocks messages and sends
// their corresponding blocks to the requesting peer.
func HandleRelayBlockRequests(context RelayBlockRequestsContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
for {
message, err := incomingRoute.Dequeue()
if err != nil {
return err
}
getRelayBlocksMessage := message.(*appmessage.MsgRequestRelayBlocks)
for _, hash := range getRelayBlocksMessage.Hashes {
// Fetch the block from the database.
block, err := context.DAG().BlockByHash(hash)
if blockdag.IsNotInDAGErr(err) {
return protocolerrors.Errorf(true, "block %s not found", hash)
} else if err != nil {
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
}
msgBlock := block.MsgBlock()
// If we are a full node and the peer is a partial node, we must convert
// the block to a partial block.
nodeSubnetworkID := context.DAG().SubnetworkID()
peerSubnetworkID := peer.SubnetworkID()
isNodeFull := nodeSubnetworkID == nil
isPeerFull := peerSubnetworkID == nil
if isNodeFull && !isPeerFull {
msgBlock.ConvertToPartial(peerSubnetworkID)
}
err = outgoingRoute.Enqueue(msgBlock)
if err != nil {
return err
}
}
}
}

View File

@@ -0,0 +1,245 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/blocklogger"
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
mathUtil "github.com/kaspanet/kaspad/util/math"
"github.com/pkg/errors"
)
// RelayInvsContext is the interface for the context needed for the HandleRelayInvs flow.
type RelayInvsContext interface {
NetAdapter() *netadapter.NetAdapter
DAG() *blockdag.BlockDAG
OnNewBlock(block *util.Block) error
SharedRequestedBlocks() *SharedRequestedBlocks
StartIBDIfRequired()
IsInIBD() bool
Broadcast(message appmessage.Message) error
}
type handleRelayInvsFlow struct {
RelayInvsContext
incomingRoute, outgoingRoute *router.Route
peer *peerpkg.Peer
invsQueue []*appmessage.MsgInvRelayBlock
}
// HandleRelayInvs listens to appmessage.MsgInvRelayBlock messages, requests their corresponding blocks if they
// are missing, adds them to the DAG and propagates them to the rest of the network.
func HandleRelayInvs(context RelayInvsContext, incomingRoute *router.Route, outgoingRoute *router.Route,
peer *peerpkg.Peer) error {
flow := &handleRelayInvsFlow{
RelayInvsContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
invsQueue: make([]*appmessage.MsgInvRelayBlock, 0),
}
return flow.start()
}
func (flow *handleRelayInvsFlow) start() error {
for {
inv, err := flow.readInv()
if err != nil {
return err
}
log.Debugf("Got relay inv for block %s", inv.Hash)
if flow.DAG().IsKnownBlock(inv.Hash) {
if flow.DAG().IsKnownInvalid(inv.Hash) {
return protocolerrors.Errorf(true, "sent inv of an invalid block %s",
inv.Hash)
}
continue
}
flow.StartIBDIfRequired()
if flow.IsInIBD() {
// Block relay is disabled during IBD
continue
}
requestQueue := newHashesQueueSet()
requestQueue.enqueueIfNotExists(inv.Hash)
for requestQueue.len() > 0 {
err := flow.requestBlocks(requestQueue)
if err != nil {
return err
}
}
}
}
func (flow *handleRelayInvsFlow) readInv() (*appmessage.MsgInvRelayBlock, error) {
if len(flow.invsQueue) > 0 {
var inv *appmessage.MsgInvRelayBlock
inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:]
return inv, nil
}
msg, err := flow.incomingRoute.Dequeue()
if err != nil {
return nil, err
}
inv, ok := msg.(*appmessage.MsgInvRelayBlock)
if !ok {
return nil, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+
"expecting an inv message", msg.Command())
}
return inv, nil
}
func (flow *handleRelayInvsFlow) requestBlocks(requestQueue *hashesQueueSet) error {
numHashesToRequest := mathUtil.MinInt(appmessage.MsgRequestRelayBlocksHashes, requestQueue.len())
hashesToRequest := requestQueue.dequeue(numHashesToRequest)
pendingBlocks := map[daghash.Hash]struct{}{}
var filteredHashesToRequest []*daghash.Hash
for _, hash := range hashesToRequest {
exists := flow.SharedRequestedBlocks().addIfNotExists(hash)
if exists {
continue
}
// The block can become known from another peer in the process of orphan resolution
if flow.DAG().IsKnownBlock(hash) {
continue
}
pendingBlocks[*hash] = struct{}{}
filteredHashesToRequest = append(filteredHashesToRequest, hash)
}
// Exit early if we've filtered out all the hashes
if len(filteredHashesToRequest) == 0 {
return nil
}
// In case the function returns earlier than expected, we want to make sure requestedBlocks is
// cleared of any pending blocks.
defer flow.SharedRequestedBlocks().removeSet(pendingBlocks)
getRelayBlocksMsg := appmessage.NewMsgRequestRelayBlocks(filteredHashesToRequest)
err := flow.outgoingRoute.Enqueue(getRelayBlocksMsg)
if err != nil {
return err
}
for len(pendingBlocks) > 0 {
msgBlock, err := flow.readMsgBlock()
if err != nil {
return err
}
block := util.NewBlock(msgBlock)
blockHash := block.Hash()
if _, ok := pendingBlocks[*blockHash]; !ok {
return protocolerrors.Errorf(true, "got unrequested block %s", block.Hash())
}
err = flow.processAndRelayBlock(requestQueue, block)
if err != nil {
return err
}
delete(pendingBlocks, *blockHash)
flow.SharedRequestedBlocks().remove(blockHash)
}
return nil
}
// readMsgBlock returns the next MsgBlock from the incoming route, and populates invsQueue with any inv messages that arrive in the meantime.
//
// Note: this function assumes the incoming route can contain only appmessage.MsgInvRelayBlock and appmessage.MsgBlock messages.
func (flow *handleRelayInvsFlow) readMsgBlock() (
msgBlock *appmessage.MsgBlock, err error) {
for {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, err
}
switch message := message.(type) {
case *appmessage.MsgInvRelayBlock:
flow.invsQueue = append(flow.invsQueue, message)
case *appmessage.MsgBlock:
return message, nil
default:
return nil, errors.Errorf("unexpected message %s", message.Command())
}
}
}
func (flow *handleRelayInvsFlow) processAndRelayBlock(requestQueue *hashesQueueSet, block *util.Block) error {
blockHash := block.Hash()
isOrphan, isDelayed, err := flow.DAG().ProcessBlock(block, blockdag.BFNone)
if err != nil {
if !errors.As(err, &blockdag.RuleError{}) {
return errors.Wrapf(err, "failed to process block %s", blockHash)
}
log.Infof("Rejected block %s from %s: %s", blockHash, flow.peer, err)
return protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
}
if isDelayed {
return nil
}
if isOrphan {
blueScore, err := block.BlueScore()
if err != nil {
return protocolerrors.Errorf(true, "received an orphan "+
"block %s with malformed blue score", blockHash)
}
const maxOrphanBlueScoreDiff = 10000
selectedTipBlueScore := flow.DAG().SelectedTipBlueScore()
if blueScore > selectedTipBlueScore+maxOrphanBlueScoreDiff {
log.Infof("Orphan block %s has blue score %d and the selected tip blue score is "+
"%d. Ignoring orphans with a blue score difference from the selected tip greater than %d",
blockHash, blueScore, selectedTipBlueScore, maxOrphanBlueScoreDiff)
return nil
}
// Request the parents for the orphan block from the peer that sent it.
missingAncestors := flow.DAG().GetOrphanMissingAncestorHashes(blockHash)
for _, missingAncestor := range missingAncestors {
requestQueue.enqueueIfNotExists(missingAncestor)
}
return nil
}
err = blocklogger.LogBlock(block)
if err != nil {
return err
}
err = flow.Broadcast(appmessage.NewMsgInvBlock(blockHash))
if err != nil {
return err
}
flow.StartIBDIfRequired()
err = flow.OnNewBlock(block)
if err != nil {
return err
}
return nil
}

View File

@@ -0,0 +1,35 @@
package blockrelay
import "github.com/kaspanet/kaspad/util/daghash"
type hashesQueueSet struct {
queue []*daghash.Hash
set map[daghash.Hash]struct{}
}
func (r *hashesQueueSet) enqueueIfNotExists(hash *daghash.Hash) {
if _, ok := r.set[*hash]; ok {
return
}
r.queue = append(r.queue, hash)
r.set[*hash] = struct{}{}
}
func (r *hashesQueueSet) dequeue(numItems int) []*daghash.Hash {
var hashes []*daghash.Hash
hashes, r.queue = r.queue[:numItems], r.queue[numItems:]
for _, hash := range hashes {
delete(r.set, *hash)
}
return hashes
}
func (r *hashesQueueSet) len() int {
return len(r.queue)
}
func newHashesQueueSet() *hashesQueueSet {
return &hashesQueueSet{
set: make(map[daghash.Hash]struct{}),
}
}
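
Since hashesQueueSet is unexported, here is a self-contained analogue of the same queue-plus-set idea over strings, showing why the set prevents duplicate enqueues while the queue preserves request order (illustrative code, not the package's type):

package main

import "fmt"

type queueSet struct {
	queue []string
	set   map[string]struct{}
}

func newQueueSet() *queueSet {
	return &queueSet{set: make(map[string]struct{})}
}

// enqueueIfNotExists appends an item only if it isn't already queued.
func (q *queueSet) enqueueIfNotExists(item string) {
	if _, ok := q.set[item]; ok {
		return
	}
	q.queue = append(q.queue, item)
	q.set[item] = struct{}{}
}

// dequeue removes the first n items from both the queue and the set.
func (q *queueSet) dequeue(n int) []string {
	items := q.queue[:n]
	q.queue = q.queue[n:]
	for _, item := range items {
		delete(q.set, item)
	}
	return items
}

func main() {
	q := newQueueSet()
	q.enqueueIfNotExists("block1")
	q.enqueueIfNotExists("block2")
	q.enqueueIfNotExists("block1") // duplicate, ignored
	fmt.Println(q.dequeue(2))      // [block1 block2]
}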

View File

@@ -0,0 +1,9 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log, _ = logger.Get(logger.SubsystemTags.PROT)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -0,0 +1,46 @@
package blockrelay
import (
"sync"
"github.com/kaspanet/kaspad/util/daghash"
)
// SharedRequestedBlocks is a data structure that is shared between peers and
// holds the hashes of all requested blocks, to prevent redundant requests.
type SharedRequestedBlocks struct {
blocks map[daghash.Hash]struct{}
sync.Mutex
}
func (s *SharedRequestedBlocks) remove(hash *daghash.Hash) {
s.Lock()
defer s.Unlock()
delete(s.blocks, *hash)
}
func (s *SharedRequestedBlocks) removeSet(blockHashes map[daghash.Hash]struct{}) {
s.Lock()
defer s.Unlock()
for hash := range blockHashes {
delete(s.blocks, hash)
}
}
func (s *SharedRequestedBlocks) addIfNotExists(hash *daghash.Hash) (exists bool) {
s.Lock()
defer s.Unlock()
_, ok := s.blocks[*hash]
if ok {
return true
}
s.blocks[*hash] = struct{}{}
return false
}
// NewSharedRequestedBlocks returns a new instance of SharedRequestedBlocks.
func NewSharedRequestedBlocks() *SharedRequestedBlocks {
return &SharedRequestedBlocks{
blocks: make(map[daghash.Hash]struct{}),
}
}

View File

@@ -0,0 +1,116 @@
package handshake
import (
"sync"
"sync/atomic"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util/locks"
"github.com/pkg/errors"
)
// HandleHandshakeContext is the interface for the context needed for the HandleHandshake flow.
type HandleHandshakeContext interface {
Config() *config.Config
NetAdapter() *netadapter.NetAdapter
DAG() *blockdag.BlockDAG
AddressManager() *addressmanager.AddressManager
StartIBDIfRequired()
AddToPeers(peer *peerpkg.Peer) error
HandleError(err error, flowName string, isStopping *uint32, errChan chan<- error)
}
// HandleHandshake sets up the handshake protocol: it sends a version message and waits for an incoming
// version message, as well as a verack for the sent version.
func HandleHandshake(context HandleHandshakeContext, netConnection *netadapter.NetConnection,
receiveVersionRoute *routerpkg.Route, sendVersionRoute *routerpkg.Route, outgoingRoute *routerpkg.Route,
) (*peerpkg.Peer, error) {
// For HandleHandshake to finish, we need to receive version and verack
// messages from the other node, so we increase the wait group by 2
// and block HandleHandshake with wg.Wait().
wg := sync.WaitGroup{}
wg.Add(2)
isStopping := uint32(0)
errChan := make(chan error)
peer := peerpkg.New(netConnection)
var peerAddress *appmessage.NetAddress
spawn("HandleHandshake-ReceiveVersion", func() {
address, err := ReceiveVersion(context, receiveVersionRoute, outgoingRoute, peer)
if err != nil {
handleError(err, "ReceiveVersion", &isStopping, errChan)
return
}
peerAddress = address
wg.Done()
})
spawn("HandleHandshake-SendVersion", func() {
err := SendVersion(context, sendVersionRoute, outgoingRoute, peer)
if err != nil {
handleError(err, "SendVersion", &isStopping, errChan)
return
}
wg.Done()
})
select {
case err := <-errChan:
if err != nil {
return nil, err
}
return nil, nil
case <-locks.ReceiveFromChanWhenDone(func() { wg.Wait() }):
}
err := context.AddToPeers(peer)
if err != nil {
if errors.As(err, &common.ErrPeerWithSameIDExists) {
return nil, protocolerrors.Wrap(false, err, "peer already exists")
}
return nil, err
}
if peerAddress != nil {
subnetworkID := peer.SubnetworkID()
context.AddressManager().AddAddress(peerAddress, peerAddress, subnetworkID)
context.AddressManager().Good(peerAddress, subnetworkID)
}
context.StartIBDIfRequired()
return peer, nil
}
// Handshake is different from other flows, since in it router.ErrRouteClosed should be forwarded to errChan.
// Therefore we implement a separate handleError for handshake
func handleError(err error, flowName string, isStopping *uint32, errChan chan error) {
if errors.Is(err, routerpkg.ErrRouteClosed) {
if atomic.AddUint32(isStopping, 1) == 1 {
errChan <- err
}
return
}
if protocolErr := &(protocolerrors.ProtocolError{}); errors.As(err, &protocolErr) {
log.Errorf("Handshake protocol error from %s: %s", flowName, err)
if atomic.AddUint32(isStopping, 1) == 1 {
errChan <- err
}
return
}
panic(err)
}
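
The concurrency pattern above — run two tasks in parallel, wait for both with a WaitGroup, but bail out early if either reports an error — can be sketched generically as follows. The runBoth helper and its tasks are illustrative, and locks.ReceiveFromChanWhenDone is approximated by closing a done channel after wg.Wait:

package main

import (
	"errors"
	"fmt"
	"sync"
)

// runBoth runs taskA and taskB concurrently and returns the first error, if any.
func runBoth(taskA, taskB func() error) error {
	wg := sync.WaitGroup{}
	wg.Add(2)
	errChan := make(chan error, 2) // buffered so a failed task never blocks

	run := func(task func() error) {
		if err := task(); err != nil {
			errChan <- err
			return // wg.Done is intentionally skipped on failure, as above
		}
		wg.Done()
	}
	go run(taskA)
	go run(taskB)

	// Turn wg.Wait into a selectable channel.
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	select {
	case err := <-errChan:
		return err
	case <-done:
		return nil
	}
}

func main() {
	err := runBoth(
		func() error { return nil },
		func() error { return errors.New("verack timed out") },
	)
	fmt.Println(err) // verack timed out
}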

View File

@@ -0,0 +1,9 @@
package handshake
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log, _ = logger.Get(logger.SubsystemTags.PROT)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -0,0 +1,101 @@
package handshake
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
var (
// allowSelfConnections is only used to allow the tests to bypass the
// self-connection detection and disconnection logic, since they
// intentionally connect to themselves for testing purposes.
allowSelfConnections bool
// minAcceptableProtocolVersion is the lowest protocol version that a
// connected peer may support.
minAcceptableProtocolVersion = appmessage.ProtocolVersion
)
type receiveVersionFlow struct {
HandleHandshakeContext
incomingRoute, outgoingRoute *router.Route
peer *peerpkg.Peer
}
// ReceiveVersion waits for the peer to send a version message, sends a
// verack in response, and updates its info accordingly.
func ReceiveVersion(context HandleHandshakeContext, incomingRoute *router.Route, outgoingRoute *router.Route,
peer *peerpkg.Peer) (*appmessage.NetAddress, error) {
flow := &receiveVersionFlow{
HandleHandshakeContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
}
return flow.start()
}
func (flow *receiveVersionFlow) start() (*appmessage.NetAddress, error) {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, err
}
msgVersion, ok := message.(*appmessage.MsgVersion)
if !ok {
return nil, protocolerrors.New(true, "a version message must precede all others")
}
if !allowSelfConnections && flow.NetAdapter().ID().IsEqual(msgVersion.ID) {
return nil, protocolerrors.New(true, "connected to self")
}
// Disconnect and ban peers from a different network
if msgVersion.Network != flow.Config().ActiveNetParams.Name {
return nil, protocolerrors.Errorf(true, "wrong network")
}
// Notify and disconnect clients that have a protocol version that is
// too old.
//
// NOTE: If minAcceptableProtocolVersion is raised to be higher than
// appmessage.RejectVersion, this should send a reject packet before
// disconnecting.
if msgVersion.ProtocolVersion < minAcceptableProtocolVersion {
return nil, protocolerrors.Errorf(false, "protocol version must be %d or greater",
minAcceptableProtocolVersion)
}
// Disconnect from partial nodes in networks that don't allow them
if !flow.DAG().Params.EnableNonNativeSubnetworks && msgVersion.SubnetworkID != nil {
return nil, protocolerrors.New(true, "partial nodes are not allowed")
}
// Disconnect if:
// - we are a full node and the outbound connection we've initiated is a partial node
// - the remote node is partial and our subnetwork doesn't match their subnetwork
localSubnetworkID := flow.Config().SubnetworkID
isLocalNodeFull := localSubnetworkID == nil
isRemoteNodeFull := msgVersion.SubnetworkID == nil
isOutbound := flow.peer.Connection().IsOutbound()
if (isLocalNodeFull && !isRemoteNodeFull && isOutbound) ||
(!isLocalNodeFull && !isRemoteNodeFull && !msgVersion.SubnetworkID.IsEqual(localSubnetworkID)) {
return nil, protocolerrors.New(false, "incompatible subnetworks")
}
flow.peer.UpdateFieldsFromMsgVersion(msgVersion)
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgVerAck())
if err != nil {
return nil, err
}
flow.peer.Connection().SetID(msgVersion.ID)
return msgVersion.Address, nil
}
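
The subnetwork compatibility rule above can be restated as a small predicate. This sketch uses plain strings in place of *subnetworkid.SubnetworkID, with an empty string standing in for nil (a full node); the function and values are illustrative only:

package main

import "fmt"

// incompatibleSubnetworks mirrors the disconnect rule above: a full node drops an
// outbound connection to a partial node, and two partial nodes must share a subnetwork.
func incompatibleSubnetworks(local, remote string, isOutbound bool) bool {
	isLocalFull := local == ""
	isRemoteFull := remote == ""
	return (isLocalFull && !isRemoteFull && isOutbound) ||
		(!isLocalFull && !isRemoteFull && local != remote)
}

func main() {
	fmt.Println(incompatibleSubnetworks("", "subnet-a", true))          // true: full node dialed a partial node
	fmt.Println(incompatibleSubnetworks("", "subnet-a", false))         // false: inbound partial peers are allowed
	fmt.Println(incompatibleSubnetworks("subnet-a", "subnet-b", false)) // true: mismatched partial nodes
	fmt.Println(incompatibleSubnetworks("subnet-a", "", true))          // false: a remote full node is always fine
}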

View File

@@ -0,0 +1,78 @@
package handshake
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/version"
)
var (
// userAgentName is the user agent name and is used to help identify
// ourselves to other kaspa peers.
userAgentName = "kaspad"
// userAgentVersion is the user agent version and is used to help
// identify ourselves to other kaspa peers.
userAgentVersion = version.Version()
// defaultServices describes the default services that are supported by
// the server.
defaultServices = appmessage.DefaultServices
// defaultRequiredServices describes the default services that are
// required to be supported by outbound peers.
defaultRequiredServices = appmessage.SFNodeNetwork
)
type sendVersionFlow struct {
HandleHandshakeContext
incomingRoute, outgoingRoute *router.Route
peer *peerpkg.Peer
}
// SendVersion sends a version to a peer and waits for verack.
func SendVersion(context HandleHandshakeContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
flow := &sendVersionFlow{
HandleHandshakeContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
}
return flow.start()
}
func (flow *sendVersionFlow) start() error {
selectedTipHash := flow.DAG().SelectedTipHash()
subnetworkID := flow.Config().SubnetworkID
// Version message.
localAddress := flow.AddressManager().GetBestLocalAddress(flow.peer.Connection().NetAddress())
msg := appmessage.NewMsgVersion(localAddress, flow.NetAdapter().ID(),
flow.Config().ActiveNetParams.Name, selectedTipHash, subnetworkID)
msg.AddUserAgent(userAgentName, userAgentVersion, flow.Config().UserAgentComments...)
// Advertise the services flag
msg.Services = defaultServices
// Advertise our max supported protocol version.
msg.ProtocolVersion = appmessage.ProtocolVersion
// Advertise if inv messages for transactions are desired.
msg.DisableRelayTx = flow.Config().BlocksOnly
err := flow.outgoingRoute.Enqueue(msg)
if err != nil {
return err
}
// Wait for verack
_, err = flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return err
}
return nil
}

View File

@@ -0,0 +1,72 @@
package ibd
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util/daghash"
)
// RequestBlockLocatorContext is the interface for the context needed for the HandleRequestBlockLocator flow.
type RequestBlockLocatorContext interface {
DAG() *blockdag.BlockDAG
}
type handleRequestBlockLocatorFlow struct {
RequestBlockLocatorContext
incomingRoute, outgoingRoute *router.Route
}
// HandleRequestBlockLocator handles getBlockLocator messages
func HandleRequestBlockLocator(context RequestBlockLocatorContext, incomingRoute *router.Route,
outgoingRoute *router.Route) error {
flow := &handleRequestBlockLocatorFlow{
RequestBlockLocatorContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *handleRequestBlockLocatorFlow) start() error {
for {
lowHash, highHash, err := flow.receiveGetBlockLocator()
if err != nil {
return err
}
locator, err := flow.DAG().BlockLocatorFromHashes(highHash, lowHash)
if err != nil || len(locator) == 0 {
return protocolerrors.Errorf(true, "couldn't build a block "+
"locator between blocks %s and %s", lowHash, highHash)
}
err = flow.sendBlockLocator(locator)
if err != nil {
return err
}
}
}
func (flow *handleRequestBlockLocatorFlow) receiveGetBlockLocator() (lowHash *daghash.Hash,
highHash *daghash.Hash, err error) {
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return nil, nil, err
}
msgGetBlockLocator := message.(*appmessage.MsgRequestBlockLocator)
return msgGetBlockLocator.LowHash, msgGetBlockLocator.HighHash, nil
}
func (flow *handleRequestBlockLocatorFlow) sendBlockLocator(locator blockdag.BlockLocator) error {
msgBlockLocator := appmessage.NewMsgBlockLocator(locator)
err := flow.outgoingRoute.Enqueue(msgBlockLocator)
if err != nil {
return err
}
return nil
}

View File

@@ -0,0 +1,126 @@
package ibd
import (
"errors"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util/daghash"
)
const ibdBatchSize = router.DefaultMaxMessages
// RequestIBDBlocksContext is the interface for the context needed for the HandleRequestIBDBlocks flow.
type RequestIBDBlocksContext interface {
DAG() *blockdag.BlockDAG
}
type handleRequestBlocksFlow struct {
RequestIBDBlocksContext
incomingRoute, outgoingRoute *router.Route
}
// HandleRequestIBDBlocks handles getBlocks messages
func HandleRequestIBDBlocks(context RequestIBDBlocksContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
flow := &handleRequestBlocksFlow{
RequestIBDBlocksContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *handleRequestBlocksFlow) start() error {
for {
lowHash, highHash, err := receiveRequestIBDBlocks(flow.incomingRoute)
if err != nil {
return err
}
msgIBDBlocks, err := flow.buildMsgIBDBlocks(lowHash, highHash)
if err != nil {
return err
}
for offset := 0; offset < len(msgIBDBlocks); offset += ibdBatchSize {
end := offset + ibdBatchSize
if end > len(msgIBDBlocks) {
end = len(msgIBDBlocks)
}
blocksToSend := msgIBDBlocks[offset:end]
err = flow.sendMsgIBDBlocks(blocksToSend)
if err != nil {
return err
}
// Exit the loop and don't wait for the GetNextIBDBlocks message if the last batch
// contained fewer than ibdBatchSize blocks.
if len(blocksToSend) < ibdBatchSize {
break
}
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return err
}
if _, ok := message.(*appmessage.MsgRequestNextIBDBlocks); !ok {
return protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestNextIBDBlocks, message.Command())
}
}
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgDoneIBDBlocks())
if err != nil {
return err
}
}
}
func receiveRequestIBDBlocks(incomingRoute *router.Route) (lowHash *daghash.Hash,
highHash *daghash.Hash, err error) {
message, err := incomingRoute.Dequeue()
if err != nil {
return nil, nil, err
}
msgRequestIBDBlocks := message.(*appmessage.MsgRequestIBDBlocks)
return msgRequestIBDBlocks.LowHash, msgRequestIBDBlocks.HighHash, nil
}
func (flow *handleRequestBlocksFlow) buildMsgIBDBlocks(lowHash *daghash.Hash,
highHash *daghash.Hash) ([]*appmessage.MsgIBDBlock, error) {
const maxHashesInMsgIBDBlocks = appmessage.MaxInvPerMsg
blockHashes, err := flow.DAG().AntiPastHashesBetween(lowHash, highHash, maxHashesInMsgIBDBlocks)
if err != nil {
if errors.Is(err, blockdag.ErrInvalidParameter) {
return nil, protocolerrors.Wrapf(true, err, "could not get antiPast between "+
"%s and %s", lowHash, highHash)
}
return nil, err
}
msgIBDBlocks := make([]*appmessage.MsgIBDBlock, len(blockHashes))
for i, blockHash := range blockHashes {
block, err := flow.DAG().BlockByHash(blockHash)
if err != nil {
return nil, err
}
msgIBDBlocks[i] = appmessage.NewMsgIBDBlock(block.MsgBlock())
}
return msgIBDBlocks, nil
}
func (flow *handleRequestBlocksFlow) sendMsgIBDBlocks(msgIBDBlocks []*appmessage.MsgIBDBlock) error {
for _, msgIBDBlock := range msgIBDBlocks {
err := flow.outgoingRoute.Enqueue(msgIBDBlock)
if err != nil {
return err
}
}
return nil
}
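
The batching arithmetic in start above — send ibdBatchSize blocks, then wait for a request-next message unless the final batch was short — can be illustrated with a standalone slice-batching sketch, using an assumed batch size of 4 (the real flow uses router.DefaultMaxMessages):

package main

import "fmt"

const batchSize = 4 // illustrative batch size

func sendInBatches(items []int, send func(batch []int)) {
	for offset := 0; offset < len(items); offset += batchSize {
		end := offset + batchSize
		if end > len(items) {
			end = len(items)
		}
		batch := items[offset:end]
		send(batch)

		// A short batch means there is nothing more to send, so the real flow
		// stops waiting for MsgRequestNextIBDBlocks at this point.
		if len(batch) < batchSize {
			break
		}
	}
}

func main() {
	items := []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	sendInBatches(items, func(batch []int) { fmt.Println(batch) })
	// prints [1 2 3 4], then [5 6 7 8], then [9 10]
}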

View File

@@ -0,0 +1,212 @@
package ibd
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/blocklogger"
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/pkg/errors"
)
// HandleIBDContext is the interface for the context needed for the HandleIBD flow.
type HandleIBDContext interface {
DAG() *blockdag.BlockDAG
OnNewBlock(block *util.Block) error
StartIBDIfRequired()
FinishIBD()
}
type handleIBDFlow struct {
HandleIBDContext
incomingRoute, outgoingRoute *router.Route
peer *peerpkg.Peer
}
// HandleIBD waits for IBD start and handles it when IBD is triggered for this peer
func HandleIBD(context HandleIBDContext, incomingRoute *router.Route, outgoingRoute *router.Route,
peer *peerpkg.Peer) error {
flow := &handleIBDFlow{
HandleIBDContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
}
return flow.start()
}
func (flow *handleIBDFlow) start() error {
for {
err := flow.runIBD()
if err != nil {
return err
}
}
}
func (flow *handleIBDFlow) runIBD() error {
flow.peer.WaitForIBDStart()
defer flow.FinishIBD()
peerSelectedTipHash := flow.peer.SelectedTipHash()
log.Debugf("Trying to find highest shared chain block with peer %s with selected tip %s", flow.peer, peerSelectedTipHash)
highestSharedBlockHash, err := flow.findHighestSharedBlockHash(peerSelectedTipHash)
if err != nil {
return err
}
log.Debugf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)
if flow.DAG().IsKnownFinalizedBlock(highestSharedBlockHash) {
return protocolerrors.Errorf(false, "cannot initiate "+
"IBD with peer %s because the highest shared chain block (%s) is "+
"below the finality point", flow.peer, highestSharedBlockHash)
}
return flow.downloadBlocks(highestSharedBlockHash, peerSelectedTipHash)
}
func (flow *handleIBDFlow) findHighestSharedBlockHash(peerSelectedTipHash *daghash.Hash) (lowHash *daghash.Hash,
err error) {
lowHash = flow.DAG().Params.GenesisHash
highHash := peerSelectedTipHash
for {
err := flow.sendGetBlockLocator(lowHash, highHash)
if err != nil {
return nil, err
}
blockLocatorHashes, err := flow.receiveBlockLocator()
if err != nil {
return nil, err
}
// We check whether the locator's highest hash is in the local DAG.
// If it is, return it. If it isn't, we need to narrow our
// getBlockLocator request and try again.
locatorHighHash := blockLocatorHashes[0]
if flow.DAG().IsInDAG(locatorHighHash) {
return locatorHighHash, nil
}
highHash, lowHash = flow.DAG().FindNextLocatorBoundaries(blockLocatorHashes)
}
}
func (flow *handleIBDFlow) sendGetBlockLocator(lowHash *daghash.Hash, highHash *daghash.Hash) error {
msgGetBlockLocator := appmessage.NewMsgRequestBlockLocator(highHash, lowHash)
return flow.outgoingRoute.Enqueue(msgGetBlockLocator)
}
func (flow *handleIBDFlow) receiveBlockLocator() (blockLocatorHashes []*daghash.Hash, err error) {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, err
}
msgBlockLocator, ok := message.(*appmessage.MsgBlockLocator)
if !ok {
return nil,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdBlockLocator, message.Command())
}
return msgBlockLocator.BlockLocatorHashes, nil
}
func (flow *handleIBDFlow) downloadBlocks(highestSharedBlockHash *daghash.Hash,
peerSelectedTipHash *daghash.Hash) error {
err := flow.sendGetBlocks(highestSharedBlockHash, peerSelectedTipHash)
if err != nil {
return err
}
blocksReceived := 0
for {
msgIBDBlock, doneIBD, err := flow.receiveIBDBlock()
if err != nil {
return err
}
if doneIBD {
return nil
}
err = flow.processIBDBlock(msgIBDBlock)
if err != nil {
return err
}
blocksReceived++
if blocksReceived%ibdBatchSize == 0 {
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextIBDBlocks())
if err != nil {
return err
}
}
}
}
func (flow *handleIBDFlow) sendGetBlocks(highestSharedBlockHash *daghash.Hash,
peerSelectedTipHash *daghash.Hash) error {
msgGetBlockInvs := appmessage.NewMsgRequstIBDBlocks(highestSharedBlockHash, peerSelectedTipHash)
return flow.outgoingRoute.Enqueue(msgGetBlockInvs)
}
func (flow *handleIBDFlow) receiveIBDBlock() (msgIBDBlock *appmessage.MsgIBDBlock, doneIBD bool, err error) {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, false, err
}
switch message := message.(type) {
case *appmessage.MsgIBDBlock:
return message, false, nil
case *appmessage.MsgDoneIBDBlocks:
return nil, true, nil
default:
return nil, false,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdIBDBlock, message.Command())
}
}
func (flow *handleIBDFlow) processIBDBlock(msgIBDBlock *appmessage.MsgIBDBlock) error {
block := util.NewBlock(msgIBDBlock.MsgBlock)
if flow.DAG().IsInDAG(block.Hash()) {
log.Debugf("IBD block %s is already in the DAG. Skipping...", block.Hash())
return nil
}
isOrphan, isDelayed, err := flow.DAG().ProcessBlock(block, blockdag.BFNone)
if err != nil {
if !errors.As(err, &blockdag.RuleError{}) {
return errors.Wrapf(err, "failed to process block %s during IBD", block.Hash())
}
log.Infof("Rejected block %s from %s during IBD: %s", block.Hash(), flow.peer, err)
return protocolerrors.Wrapf(true, err, "got invalid block %s during IBD", block.Hash())
}
if isOrphan {
return protocolerrors.Errorf(true, "received orphan block %s "+
"during IBD", block.Hash())
}
if isDelayed {
return protocolerrors.Errorf(false, "received delayed block %s "+
"during IBD", block.Hash())
}
err = flow.OnNewBlock(block)
if err != nil {
return err
}
err = blocklogger.LogBlock(block)
if err != nil {
return err
}
return nil
}

View File

@@ -0,0 +1,9 @@
package ibd
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log, _ = logger.Get(logger.SubsystemTags.IBDS)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -0,0 +1,61 @@
package selectedtip
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)
// HandleRequestSelectedTipContext is the interface for the context needed for the HandleRequestSelectedTip flow.
type HandleRequestSelectedTipContext interface {
DAG() *blockdag.BlockDAG
}
type handleRequestSelectedTipFlow struct {
HandleRequestSelectedTipContext
incomingRoute, outgoingRoute *router.Route
}
// HandleRequestSelectedTip handles getSelectedTip messages
func HandleRequestSelectedTip(context HandleRequestSelectedTipContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
flow := &handleRequestSelectedTipFlow{
HandleRequestSelectedTipContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *handleRequestSelectedTipFlow) start() error {
for {
err := flow.receiveGetSelectedTip()
if err != nil {
return err
}
err = flow.sendSelectedTipHash()
if err != nil {
return err
}
}
}
func (flow *handleRequestSelectedTipFlow) receiveGetSelectedTip() error {
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return err
}
_, ok := message.(*appmessage.MsgRequestSelectedTip)
if !ok {
return errors.Errorf("received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestSelectedTip, message.Command())
}
return nil
}
func (flow *handleRequestSelectedTipFlow) sendSelectedTipHash() error {
msgSelectedTip := appmessage.NewMsgSelectedTip(flow.DAG().SelectedTipHash())
return flow.outgoingRoute.Enqueue(msgSelectedTip)
}

View File

@@ -0,0 +1,79 @@
package selectedtip
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util/daghash"
)
// RequestSelectedTipContext is the interface for the context needed for the RequestSelectedTip flow.
type RequestSelectedTipContext interface {
DAG() *blockdag.BlockDAG
StartIBDIfRequired()
}
type requestSelectedTipFlow struct {
RequestSelectedTipContext
incomingRoute, outgoingRoute *router.Route
peer *peerpkg.Peer
}
// RequestSelectedTip waits for selected tip requests and handles them
func RequestSelectedTip(context RequestSelectedTipContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
flow := &requestSelectedTipFlow{
RequestSelectedTipContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
}
return flow.start()
}
func (flow *requestSelectedTipFlow) start() error {
for {
err := flow.runSelectedTipRequest()
if err != nil {
return err
}
}
}
func (flow *requestSelectedTipFlow) runSelectedTipRequest() error {
flow.peer.WaitForSelectedTipRequests()
defer flow.peer.FinishRequestingSelectedTip()
err := flow.requestSelectedTip()
if err != nil {
return err
}
peerSelectedTipHash, err := flow.receiveSelectedTip()
if err != nil {
return err
}
flow.peer.SetSelectedTipHash(peerSelectedTipHash)
flow.StartIBDIfRequired()
return nil
}
func (flow *requestSelectedTipFlow) requestSelectedTip() error {
msgGetSelectedTip := appmessage.NewMsgRequestSelectedTip()
return flow.outgoingRoute.Enqueue(msgGetSelectedTip)
}
func (flow *requestSelectedTipFlow) receiveSelectedTip() (selectedTipHash *daghash.Hash, err error) {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, err
}
msgSelectedTip := message.(*appmessage.MsgSelectedTip)
return msgSelectedTip.SelectedTipHash, nil
}

View File

@@ -0,0 +1,42 @@
package ping
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// ReceivePingsContext is the interface for the context needed for the ReceivePings flow.
type ReceivePingsContext interface {
}
type receivePingsFlow struct {
ReceivePingsContext
incomingRoute, outgoingRoute *router.Route
}
// ReceivePings handles all ping messages coming through incomingRoute.
// This function assumes that incomingRoute will only return MsgPing.
func ReceivePings(context ReceivePingsContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
flow := &receivePingsFlow{
ReceivePingsContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *receivePingsFlow) start() error {
for {
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return err
}
pingMessage := message.(*appmessage.MsgPing)
pongMessage := appmessage.NewMsgPong(pingMessage.Nonce)
err = flow.outgoingRoute.Enqueue(pongMessage)
if err != nil {
return err
}
}
}

Some files were not shown because too many files have changed in this diff.