Compare commits

296 Commits

Author SHA1 Message Date
Ori Newman
a32a9011c7 [NOD-1305] Close client connection on disconnect (#909) 2020-08-31 15:57:11 +03:00
stasatdaglabs
5da957f16e Update to version 0.6.8 2020-08-30 11:31:43 +03:00
Ori Newman
505d264603 [NOD-1322] Fix compilation on windows (#905) 2020-08-27 18:04:54 +03:00
Ori Newman
883361fea3 [NOD-1323] Always save new block reachability data (#906) 2020-08-27 18:03:50 +03:00
stasatdaglabs
13a6872a45 Update to version 0.6.7 2020-08-26 12:13:43 +03:00
Elichai Turkel
c82a951a24 [NOD-1316] Refactor TestGHOSTDAG to enable arbitrary DAGs (#899)
* Add VirtualBlueHashes to BlockDAG

* Refactor TestGHOSTDAG to read DAGs from json files

* Added a new DAG for the ghostdag test suite

* Pass BehaviorFlags to delayed blocks
2020-08-25 14:00:43 +03:00
Ori Newman
bbb9dfa4cd [NOD-1318] Check if relay block is known before requesting it (#900) 2020-08-25 09:18:03 +03:00
stasatdaglabs
86d51fa1cb [NOD-1307] Fix duplicate connections (#897)
* [NOD-1307] Lock peersMutex in methods that don't.

* [NOD-1307] Fix duplicate connections.

* [NOD-1307] Use RLock instead of Lock.

* [NOD-1307] Simplify IsEqual.
2020-08-24 16:11:32 +03:00
Ori Newman
8dd7b95423 [NOD-1308] Don't call wg.done() on handshake if flow failed (#896) 2020-08-24 14:03:58 +03:00
stasatdaglabs
b668d98942 [NOD-1095] Fix data races in gRPCConnection.stream. (#895) 2020-08-24 12:56:19 +03:00
stasatdaglabs
e9602cc777 [NOD-1304] Fix nil deference originating in HandleHandshake. (#894) 2020-08-24 11:45:33 +03:00
stasatdaglabs
5fd164bf66 [NOD-1095] RLock the dagLock in SelectedTipHeader. (#893) 2020-08-24 11:31:12 +03:00
Ori Newman
83e7c9e8e4 [NOD-1303] Fix concurent access to UTXO set from RPC (#892) 2020-08-23 18:54:03 +03:00
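
Several of the fixes above ([NOD-1095] on gRPCConnection.stream and the dagLock, [NOD-1303] on the UTXO set) deal with concurrent reads of shared state. The sketch below shows the general Go read-write-mutex pattern they rely on, using a hypothetical utxoSet type rather than kaspad's actual structures: readers take RLock and may run in parallel, while writers take the exclusive Lock.

```go
package example

import "sync"

// utxoSet is a hypothetical structure guarded by a read-write mutex;
// kaspad's real UTXO set and dagLock are more involved.
type utxoSet struct {
	lock    sync.RWMutex
	entries map[string]uint64
}

// Get takes a read lock, so concurrent RPC readers never race with each other.
func (u *utxoSet) Get(outpoint string) (uint64, bool) {
	u.lock.RLock()
	defer u.lock.RUnlock()
	value, ok := u.entries[outpoint]
	return value, ok
}

// Add takes the exclusive write lock before mutating the map.
func (u *utxoSet) Add(outpoint string, value uint64) {
	u.lock.Lock()
	defer u.lock.Unlock()
	u.entries[outpoint] = value
}
```
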
Ori Newman
a6b8eea369 [NOD-1301] Add MsgReject to protowire mapping (#891) 2020-08-23 18:29:41 +03:00
stasatdaglabs
15b545ee2b [NOD-592] Remove TODOs and XXXs from the codebase (#890)
* [NOD-592] Remove TODOs related to fake nonces.

* [NOD-592] Remove irrelevant TODOs from handleRescanBlocks and parseTxAcceptedVerboseNtfnParams.

* [NOD-592] Fix TODO in handleGetTxOut.

* [NOD-592] Remove irrelevant TODO from updateAddress.

* [NOD-592] Move StandardVerifyFlags to a separate file.

* [NOD-592] Remove TODOs in sign.go.

* [NOD-592] Remove TODO in scriptval_test.go.

* [NOD-592] Remove TODO in reachabilitystore.go.

* [NOD-592] Remove XXXs.

* [NOD-592] Fix a comment.

* [NOD-557] Move AddAddressByIP out of AddressManager since it's used only for tests..

* [NOD-557] Remove rescan blocks.

* [NOD-592] Fix handleGetTxOut.
2020-08-23 17:17:06 +03:00
stasatdaglabs
667b2d46e9 [NOD-557] Remove RegTest (#889)
* [NOD-557] Remove regTest network.

* [NOD-557] Remove remaining references to regTest.

* [NOD-557] Move newHashFromStr from params.go to params_test.go.

* [NOD-557] Rename test to network in register_test.go.

* [NOD-557] Replaced removed tests in TestDecodeAddressErrorConditions.
2020-08-23 15:38:27 +03:00
stasatdaglabs
53ab906ea8 [NOD-1279] Handle ruleErrors properly in processIBDBlock. (#887) 2020-08-23 13:42:21 +03:00
stasatdaglabs
5d20772f94 [NOD-1293] Fix kaspad sending 127.0.0.1 in its msgVersion (#886)
* [NOD-1293] Use addressManager's GetBestLocalAddress.

* [NOD-1293] Copy the initListeners function from the old p2p to the address manager.

* [NOD-1293] Remove debug logs.

* [NOD-1293] Remove unused import.

* [NOD-1293] Fix a comment.
2020-08-23 13:11:48 +03:00
stasatdaglabs
d4728bd9b6 Update to version 0.6.6 2020-08-23 11:22:13 +03:00
Ori Newman
4dbd64478c [NOD-1294] In TestTxRelay return after tx is found in the mempool (#885) 2020-08-20 19:05:53 +03:00
stasatdaglabs
7756baf9a9 [NOD-1290] Add blocklogger.LogBlock to IBD. (#884) 2020-08-20 12:29:11 +03:00
Ori Newman
c331293a2e [NOD-1289] Check if connection exists before establishing another one with the same address (#883) 2020-08-20 11:50:29 +03:00
Ori Newman
fcae491e6d [NOD-1286] Close router from netConnection.Disconnect (#881)
* [NOD-1286] Close router from netConnection.Disconnect

* [NOD-1286] Close router in grpc errors as well

* [NOD-1286] Fix typo

* [NOD-1286] Rename isConnected->isRouterClosed
2020-08-19 17:28:01 +03:00
stasatdaglabs
5a4cafe342 Update to version 0.6.5 2020-08-19 15:00:12 +03:00
Ori Newman
8dae378bd9 [NOD-1285] Fix deadlock on connection manager (#880) 2020-08-19 13:24:20 +03:00
stasatdaglabs
8dd409dc1c [NOD-1223] Rename executables package back to cmd. (#879) 2020-08-19 11:45:11 +03:00
Ori Newman
74110a2e49 [NOD-1282] Remove peer after disconnect (#878) 2020-08-19 11:10:10 +03:00
Ori Newman
ce876a7c44 Merge remote-tracking branch 'origin/v0.6.3-dev' into v0.6.4-dev 2020-08-18 19:03:52 +03:00
stasatdaglabs
d14809694f [NOD-1223] Reorganize directory structure (#874)
* [NOD-1223] Delete unused files/packages.

* [NOD-1223] Move signal and limits to the os package.

* [NOD-1223] Put database and dbaccess into the db package.

* [NOD-1223] Fold the logs package into the logger package.

* [NOD-1223] Rename domainmessage to appmessage.

* [NOD-1223] Rename to/from DomainMessage to AppMessage.

* [NOD-1223] Move appmessage to the app packge.

* [NOD-1223] Move protocol to the app packge.

* [NOD-1223] Move the network package to the infrastructure packge.

* [NOD-1223] Rename cmd to executables.

* [NOD-1223] Fix go.doc in the logger package.
2020-08-18 10:26:39 +03:00
stasatdaglabs
450ff81f86 [NOD-1275] Fix onNewBlock not being called from RPC submitBlock (#873)
* [NOD-1275] Fix onNewBlock not being called from from RPC submitBlock.

* [NOD-1275] Rename tx to txID.
2020-08-17 15:24:00 +03:00
Ori Newman
1f04f30ea7 [NOD-1273] Order parents in PrepareBlockForTest (#872) 2020-08-17 14:26:56 +03:00
Ori Newman
3e4e8d8b6b Merge remote-tracking branch 'origin/v0.6.2-dev' into v0.6.3-dev 2020-08-16 18:13:46 +03:00
Ori Newman
31c0399484 Update to version 0.6.4 2020-08-16 17:55:49 +03:00
Ori Newman
8cac582f6d Update to version 0.6.4 2020-08-16 17:30:09 +03:00
Ori Newman
f2a3ccd9ab [NOD-1271] Move version package to the top level (#871)
* [NOD-1271] Move version package to the top level

* [NOD-1271] Fix imports
2020-08-16 17:16:11 +03:00
Ori Newman
31b5cd8d28 Fix merge errors from v0.6.2-rc2 to v0.6.3-dev 2020-08-16 15:35:47 +03:00
Ori Newman
96bd1fa99b [NOD-1262] Add network name to MinimalNetAdapter handshake (#867) 2020-08-16 15:30:04 +03:00
Svarog
48d498e820 [NOD-1259] Do not panic on non-protocol errors from RPC (#863)
* [NOD-1259] All rule-errors should be protocol-errors

* [NOD-1259] Handle submitting of coinbase transactions properly

* Revert "[NOD-1259] All rule-errors should be protocol-errors"

This reverts commit 2fd30c1856.

* [NOD-1259] Don't panic on non-protocol errors in ProtocolManager.AddTransaction/AddBlock

* [NOD-1259] Implement subnetworkid.IsBuiltInOrNative and use where appropriate
2020-08-16 15:29:23 +03:00
Ori Newman
32c5cfeaf5 [NOD-1204] Add timestamp and message number to domain messages (#854) 2020-08-16 15:26:02 +03:00
stasatdaglabs
d55f4e8164 [NOD-1220] Add network string field to Version message (#852)
* [NOD-1220] Add network name to the version message.

* [NOD-1220] Ban peers from the wrong network.

* [NOD-1220] Add the network parameter to protowire.

* [NOD-1220] Add "kaspa-" to network names.
2020-08-16 15:25:25 +03:00
stasatdaglabs
1927e81202 [NOD-1129] Fix NewBlockTemplate creating incesous blocks (#870)
* [NOD-1129] Implement TestIncestousNewBlockTemplate.

* [NOD-1129] Add some debug logs to TestIncestousNewBlockTemplate.

* [NOD-1129] Fix merge errors.

* [NOD-1129] Narrow down on the failure.

* [NOD-1129] Fix bad initial value for child.interval in reachabilityTreeNode.addChild.

* [NOD-1129] Rewrite the test to be specific to reachability.
2020-08-16 13:14:44 +03:00
stasatdaglabs
8a4ece1101 [NOD-1223] Reorganize project (#868)
* [NOD-1223] Move all network stuff into a new network package.

* [NOD-1223] Delete the unused package testutil.

* [NOD-1223] Move infrastructure stuff into a new instrastructure package.

* [NOD-1223] Move domain stuff into a new domain package.
2020-08-13 17:27:25 +03:00
Elichai Turkel
0bf1052abf [NOD-1101] Hash data without serializing into a buffer first (#779)
* Add Hash Writers

* Add the hash writers to the tests

* Add the DoubleHash Writer to the benchmarks

* Remove buffers from hashing by using the Hash Writer

* Replace empty slice with nil in mempool test payload
2020-08-13 15:40:54 +03:00
Ori Newman
2af03c1ccf [NOD-1207] Send reject messages (#855)
* [NOD-1207] Send reject messages

* [NOD-1207] Empty outgoing route before disconnecting

* [NOD-1207] Renumber fields in RejectMessage

* [NOD-1207] Use more accurate log messages

* [NOD-1207] Call registerRejectsFlow

* [NOD-1207] Panic if outgoingRoute.Enqueue returns unexpected error

* [NOD-1207] Fix comment and rename variables

* [NOD-1207] Fix comment

* [NOD-1207] add baseMessage to MsgReject

* [NOD-1207] Fix comments and add block hash to error if it's rejected
2020-08-13 15:32:41 +03:00
Ori Newman
a2aa58c8a4 [NOD-1201] Panic if callbacks are not set (#856)
* [NOD-1201] Panic if necessary callback are not set in gRPCConnection and gRPCServer

* [NOD-1201] Fix comment and change return order

* [NOD-1201] Return nil instead of error on gRPCServer.Start

* [NOD-1201] Fix typo
2020-08-13 15:21:52 +03:00
oudeis
7e74fc0b2b [NOD-1248] netadapter unit test (#865)
* [NOD-1246/NOD-1248] Add unit test for NetAdapter

* [NOD-1246/NOD-1248] Do not ignore OK

* [NOD-1248] Lint code

- Move `t *testing.T` to be first parameter in test-helper function
- Rename `getRouterInitializer` to `routerInitializerForTest`
- Make test data constants

Co-authored-by: Yaroslav Reshetnyk <yaroslav.r@it-dimension.com>
2020-08-13 15:07:20 +03:00
stasatdaglabs
0653e59e16 [NOD-1190] Refactor process.go (#858)
* [NOD-1190] Move non-processBlock stuff out of process.go.

* [NOD-1190] Move everything out of accept.go.

* [NOD-1190] Move all processBlock functions to process.go.

* [NOD-1190] Move orphan stuff to orphan.go.

* [NOD-1190] Remove thresholdstate stuff.

* [NOD-1190] Move isSynced to sync_rate.go.

* [NOD-1190] Move delayed block stuff to delayed_blocks.go.

* [NOD-1190] Rename orphans.go to orphaned_blocks.go.

* [NOD-1190] Move non-BlockDAG structs out of dag.go.

* [NOD-1190] Remove unused fields.

* [NOD-1190] Fixup BlockDAG.New a bit.

* [NOD-1190] Move sequence lock stuff to sequence_lock.go

* [NOD-1190] Move some multiset stuff out of dag.go.

* [NOD-1190] Move finality stuff out of dag.go.

* [NOD-1190] Move blocklocator stuff out of dag.go.

* [NOD-1190] Move confirmation stuff out of dag.go.

* [NOD-1190] Move utxo and selected parent chain stuff out of dag.go.

* [NOD-1190] Move BlockDAG lock functions to the beginning of dag.go.

* [NOD-1190] Move verifyAndBuildUTXO out of process.go.

* [NOD-1190] Extract handleProcessBlockError to a function.

* [NOD-1190] Remove daglock unlock in notifyBlockAccepted.

* [NOD-1190] Extract checkDuplicateBlock to a method.

* [NOD-1190] Fix merge errors.

* [NOD-1190] Remove unused parameter from CalcSequenceLock.

* [NOD-1190] Extract processBlock contents into functions.

* [NOD-1190] Fix parent delayed blocks not marking their children as delayed

* [NOD-1190] Fix TestProcessDelayedBlocks.

* [NOD-1190] Extract stuff in maybeAcceptBlock to separate functions.

* [NOD-1190] Rename handleProcessBlockError to handleConnectBlockError.

* [NOD-1190] Remove some comments.

* [NOD-1190] Use lowercase in error messages.

* [NOD-1190] Rename createNewBlockNode to createBlockNodeFromBlock.

* [NOD-1190] Rename orphaned_blocks.go to orpan_blocks.go.

* [NOD-1190] Extract validateUTXOCommitment to a separate function.

* [NOD-1190] Fix a bug in validateUTXOCommitment.

* [NOD-1190] Rename checkBlockTxsFinalized to checkBlockTransactionsFinalized.

* [NOD-1190] Add a comment over createBlockNodeFromBlock.

* [NOD-1190] Fold validateAllTxsFinalized into checkBlockTransactionsFinalized.

* [NOD-1190] Return parents from checkBlockParents.

* [NOD-1190] Remove the processBlock prefix from the functions that had it.

* [NOD-1190] Begin extracting functions out of checkTransactionSanity.

* [NOD-1190] Finish extracting functions out of checkTransactionSanity.

* [NOD-1190] Remove an unused parameter.

* [NOD-1190] Fix merge errors.

* [NOD-1190] Added an explanation as to why we change the nonce in TestProcessDelayedBlocks.

* [NOD-1190] Fix a comment.

* [NOD-1190] Fix a comment.

* [NOD-1190] Fix a typo.

* [NOD-1190] Replace checkBlockParents with handleLookupParentNodesError.
2020-08-13 13:33:43 +03:00
oudeis
32463ce906 [NOD-1247] Add check for routerInitializer presence (#864)
Co-authored-by: Yaroslav Reshetnyk <yaroslav.r@it-dimension.com>
2020-08-13 12:04:43 +03:00
stasatdaglabs
23a3594c18 [NOD-1233] Go over all TODO(libp2p)s and either fix them or create tickets for them (#860)
* [NOD-1233] Remove HandleNewBlockOld.

* [NOD-1233] Make ErrRouteClosed not a protocol error.

* [NOD-1233] Fix ambiguous comments.

* [NOD-1233] Remove a no-longer-relevant comment.

* [NOD-1233] Remove some of the TODOs.

* [NOD-1233] Replace fakeSourceAddress with a real sourceAddress.

* [NOD-1233] Remove a no-longer-relevant TODO.

* [NOD-1233] Remove TODO from handleGetNetTotals.

* [NOD-1233] Remove a no-longer-relevant TODO.

* [NOD-1233] Disconnect if connected to wrong partial/full type.

* [NOD-1233] Get rid of mempool tags.

* [NOD-1233] Remove TODOs.

* [NOD-1233] Simplify a test.

* [NOD-1190] Remove getNetTotals.
2020-08-13 09:41:02 +03:00
Ori Newman
ffe153efa7 [NOD-1262] Add network name to MinimalNetAdapter handshake (#867) 2020-08-12 17:55:58 +03:00
Ori Newman
ca3172dad0 [NOD-1239] Delete app.WaitForShutdown() (#866) 2020-08-12 16:38:52 +03:00
Ori Newman
22dc3f998f [NOD-1256] Optimize PrepareBlockForTest (#861)
* [NOD-1256] Optimize PrepareBlockForTest

* [NOD-1256] Remove redundant comment
2020-08-12 16:25:42 +03:00
Svarog
91f4ed9825 [NOD-1259] Do not panic on non-protocol errors from RPC (#863)
* [NOD-1259] All rule-errors should be protocol-errors

* [NOD-1259] Handle submitting of coinbase transactions properly

* Revert "[NOD-1259] All rule-errors should be protocol-errors"

This reverts commit 2fd30c1856.

* [NOD-1259] Don't panic on non-protocol errors in ProtocolManager.AddTransaction/AddBlock

* [NOD-1259] Implement subnetworkid.IsBuiltInOrNative and use where appropriate
2020-08-12 12:29:58 +03:00
Ori Newman
aa9556aa59 [NOD-1257] Disable difficulty adjustment on simnet (#862)
* [NOD-1257] Disable difficulty adjustment on simnet

* [NOD-1257] Explictly set DisableDifficultyAdjustment everywhere
2020-08-12 12:24:37 +03:00
stasatdaglabs
91f0fe5740 [NOD-1238] Fix acceptance index never being initialized. (#859) 2020-08-11 16:53:34 +03:00
Ori Newman
b0fecc9f87 [NOD-1204] Add timestamp and message number to domain messages (#854) 2020-08-10 12:55:24 +03:00
stasatdaglabs
53cccd405f [NOD-1220] Add network string field to Version message (#852)
* [NOD-1220] Add network name to the version message.

* [NOD-1220] Ban peers from the wrong network.

* [NOD-1220] Add the network parameter to protowire.

* [NOD-1220] Add "kaspa-" to network names.
2020-08-09 18:11:13 +03:00
stasatdaglabs
5b84184921 [NOD-1221] Explicitly add a maximum message size in gRPC (#851)
* [NOD-1221] Explicitly add a maximum message size in gRPC.

* [NOD-1221] Limit sent message size and print a debug log on start.
2020-08-09 17:56:26 +03:00
Yuval Shaul
af1df425a2 update to version v0.6.2 2020-08-09 15:16:21 +03:00
Ori Newman
8e170cf327 [NOD-1225] Rename wire to domainmessage and get rid of InvType (#853)
* [NOD-1225] Rename wire to domainmessage

* [NOD-1225] Get rid of references to package wire in the code, and get rid of InvType
2020-08-09 12:39:15 +03:00
stasatdaglabs
b55cfee8c8 [NOD-1229] Fix node crashing if AntiPastHashesBetween lowHigh or highHash are not found in the DAG (#849)
* [NOD-1229] Fix node crashing if AntiPastHashesBetween lowHigh or highHash are not found in the DAG

* [NOD-1229] Rename InvalidParameterError to ErrInvalidParameter.

* [NOD-1229] Lowercasify errors.
2020-08-09 09:36:29 +03:00
stasatdaglabs
420c3d4258 [NOD-1222] Turn on gzip in gRPC. (#850) 2020-08-06 17:28:40 +03:00
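
[NOD-1222] turns on gzip compression for gRPC. With grpc-go, a client can opt into the registered gzip compressor roughly like this (a sketch only; the address and dial options are illustrative):

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding/gzip" // importing this package registers the gzip compressor
)

func main() {
	// Ask gRPC to gzip-compress every call made over this connection.
	conn, err := grpc.Dial(
		"localhost:16110", // placeholder address
		grpc.WithInsecure(),
		grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)),
	)
	if err != nil {
		log.Fatalf("dial failed: %s", err)
	}
	defer conn.Close()
	// ... use conn with a generated client stub ...
}
```
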
Mike Zak
b92943a98c Update to version v0.6.1 2020-08-06 15:16:05 +03:00
Mike Zak
e1318aa326 [NOD-1208] Labels should be lower case 2020-08-06 11:34:22 +03:00
Mike Zak
2bd4a71913 [NOD-1208] Continue the correct loop 2020-08-06 11:11:44 +03:00
Mike Zak
5b206f4c9d [NOD-1208] Use lower case for errors + omit hard-coded numbers 2020-08-06 10:17:20 +03:00
Mike Zak
3f969a2921 [NOD-1208] Add comment to handlePingPong to explain it's one-sided. 2020-08-06 10:10:31 +03:00
Mike Zak
90be14fd57 [NOD-1208] Added comment explaining why Version.Address is optional 2020-08-06 10:00:25 +03:00
Mike Zak
1a5d9fc65c [NOD-1208] Renamed NetAdapterMock to standalone.MinimalNetAdapter 2020-08-05 15:59:23 +03:00
Mike Zak
ec03a094e5 [NOD-1208] Use ID netAdapter generates in netAdapterMock handshake 2020-08-05 15:50:46 +03:00
Mike Zak
9d60bb1ee7 [NOD-1208] Use netConnection.Disconnect in Routes.Disconnect 2020-08-05 15:46:32 +03:00
Mike Zak
cd10de2dce [NOD-1208] Add NetAdapterMock 2020-08-05 10:40:33 +03:00
Mike Zak
658fb08c02 [NOD-1208] Do not ban if DisableBanning was turned on 2020-08-05 10:40:33 +03:00
Mike Zak
3b40488877 [NOD-1208] Allow sending Version message without an address 2020-08-05 10:40:33 +03:00
Svarog
d3d0ad0cf3 [NOD-1226] Properly handle ErrIsClosed during handshake (#843)
* [NOD-1226] Consider ErrIsClosed as protocol error during handshake

* Revert "[NOD-1226] Consider ErrIsClosed as protocol error during handshake"

This reverts commit 74bb07b6cd.

* [NOD-1226] handle errors separately for handshake

* [NOD-1226] Wrap ErrRouteClosed as protocol error in handshake

* [NOD-1226] return if ErrRouteClosed

* [NOD-1226] Use atomic

* [NOD-1226] Don't wrap with protocol error

* [NOD-1226] Fix comment

* [NOD-1226] Fix comment
2020-08-05 10:37:19 +03:00
Svarog
473cc37a75 [NOD-1224] Fix duplicate connections and duplicate blocks bugs (#842)
* [NOD-1224] Make block already existing a ruleError

* [NOD-1224] Remove block from pendingBlocks list only after it was processed

* [NOD-1224] AddToPeers should have a Write Lock, not Read Lock

* [NOD-1224] Check for unrequested before processing
2020-08-04 11:07:21 +03:00
Svarog
966cba4a4e [NOD-1218] KaspadMessage_Pong.toWireMessage should return MsgPong, not MsgPing (#841) 2020-08-03 18:18:06 +03:00
Svarog
da90755530 [NOD-1215] Added integration test for address exchange (#839)
* [NOD-1215] Added integration test for address exchange

* [NOD-1215] Fix error message
2020-08-03 10:34:21 +03:00
Svarog
fa58623815 [NOD-1214] Added test for 64 incoming connections to single node (#838)
* [NOD-1214] Added test for 64 incoming connections to single node

* [NOD-1214] Expand comments, and a small rename

* [NOD-1214] Make sure no bully reports blockAdded twice
2020-08-03 10:30:38 +03:00
Svarog
26af4da507 [NOD-1217] MethodUsageText should take a write lock, to protect methodToInfo (#840) 2020-08-03 10:29:04 +03:00
Svarog
b527470153 [NOD-1211] Transaction relay integration test + fixes to flow (#836)
* [NOD-1162] Separate kaspad to it's own package, so that I can use it out of integration test

* [NOD-1162] Begin integration tests

* [NOD-1162] [FIX] Assign cfg to RPCServer

* [NOD-1162] Basic integration test ready

* [NOD-1162] Wait for connection for real

* [NOD-1162] [FIX] Connection manager should run the moment it adds a request

* [NOD-1162] Make connect something that can be invoked in middle of test

* [NOD-1162] Complete first integration test

* [NOD-1162] Undo refactor error

* [NOD-1162] Rename Kaspad to App

* [NOD-1162] Convert checking connection to polling

* [NOD-1162] [FIX] Set peerID on handshake

* [NOD-1162] [FIX] Broadcast should send to outgoing route, not incoming

* [NOD-1162] [FIX] Add CmdInvRelayBlock to MakeEmptyMessage

* [NOD-1162] [FIX] Initialize Hash before decoding MsgInvRelayBlock

* [NOD-1162] [FIX] Invert condition

* [NOD-1162] [FIX] Fixes to encoding of MsgGetRelayBlocks

* [NOD-1162] [FIX] Add MsgGetRelayBlocks to MakeEmptyMessage

* [NOD-1162] [FIX] Connection manager should run the moment it adds a request

* [NOD-1162] [FIX] Set peerID on handshake

* [NOD-1162] [FIX] Broadcast should send to outgoing route, not incoming

* [NOD-1162] [FIX] Add CmdInvRelayBlock to MakeEmptyMessage

* [NOD-1162] [FIX] Initialize Hash before decoding MsgInvRelayBlock

* [NOD-1162] [FIX] Invert condition

* [NOD-1162] [FIX] Fixes to encoding of MsgGetRelayBlocks

* [NOD-1162] [FIX] Add MsgGetRelayBlocks to MakeEmptyMessage

* [NOD-1162] Add comment

* [NOD-1162] Added support for 3 nodes and clients in integration tests

* [NOD-1162] Add third node to integration test

* [NOD-1192] Use lock-less functions in TxPool.HandleNewBlock

* [NOD-1192] Broadcast transactions only if there's more then 0

* [NOD-1162] Removed double waitTillNextIteration

* [NOD-1192] Rename: broadcastTransactions -> broadcastTransactionsAfterBlockAdded

* [NOD-1162] Call NotifyBlocks on client3 as well

* [NOD-1162] ErrTimeout and ErrRouteClosed should be ProtocolErrors

* [NOD-1162] Added comment and removed redundant type PeerAddedCallback

* [NOD-1162] Revert overly eager rename

* [NOD-1162] Move DisalbeTLS to common config + minimize call for ioutil.TempDir()

* [NOD-1162] Add some clarifications in code

* [NOD-1193] Skip closed connections in NetAdapter.Broadcast

* [NOD-1193] Make sure to protect connectionsToRouters from concurrent access

* [NOD-1162] Add _test to all files in integration package

* [NOD-1162] Introduced appHarness to better encapsulate a single node

* [NOD-1162] Removed onChainChanged handler

* [NOD-1162] Remove redundant closure

* [NOD-1162] Correctly mark integration_test config as Simnet

* [NOD-1162] Rename app.ID -> app.P2PNodeID

* [NOD-1162] Move TestIntegrationBasicSync to basic_sync_test.go

* [NOD-1210] Made it possible to setup any number of harnesses needed

* [NOD-1210] Rename appHarness1/2 to incoming/outgoing in connect function

* [NOD-1210] Add the 117-incoming-connections integration test

* [NOD-1210] Delete 117-incoming-connections test because it opens too much files

* [NOD-1210] Added function to notify of blocks conveniently

* [NOD-1210] Added function to mine a block from-A-to-Z

* [NOD-1210] Added IBD integration test

* [NOD-1210] Finish test for IBD and fix bug where
requestSelectedTipsIfRequired ran in handshake's goroutine

* [NOD-1210] Set log level to debug

* [NOD-1211] Add test for transaction relay

* [NOD-1211] Compare fix incorrect comaprison in KaspadMessage_RequestTransactions.fromWireMessage

* [NOD-1211] Return ok instead of err from FetchTxDesc and FetchTransaction

* [NOD-1211] Added MsgTransactionNotFound type

* [NOD-1211] Added HandlRequestedTransactions flow

* [NOD-1211] Wait for blocks to be accepted before moving forward

* [NOD-1211] Rename CmdNotFound to CmdTransactionNotFound

* [NOD-1211] Rename: requestAndSolveTemplate -> mineNextBlock

* [NOD-1211] Renamed incoming/outgoing to appHarness1/appHarness2 in isConnected

* [NOD-1211] Move check of Hash == nil to outside wireHashToProto

* [NOD-1211] Instantiate payloadHash before *x
2020-08-02 16:11:16 +03:00
Ori Newman
e70561141d [NOD-1212] Request IBD blocks in batches (#835)
* [NOD-1212] Request IBD blocks in batches

* [NOD-1212] Fix condition

* [NOD-1212] Remove redundant functions

* [NOD-1212] gofmt

* [NOD-1212] Fix condition

* [NOD-1212] Fix off by one error and add messages to messages.proto

* [NOD-1212] Refactor downloadBlocks

* [NOD-1212] Fix comment

* [NOD-1212] Return DefaultTimeout to original value
2020-08-02 13:46:07 +03:00
Ori Newman
20b547984e [NOD-1191] Fix erroneous condition (#837)
* [NOD-1191] Convert wire protocol to 100% protobuf

* [NOD-1191] Simplify wire interface and remove redundant messages

* [NOD-1191] Map all proto to wire conversions

* [NOD-1203] Create netadapter outside of protocol manager

* [NOD-1191] Fix nil errors

* [NOD-1191] Fix comments

* [NOD-1191] Add converter interface

* [NOD-1191] Add missing GetBlockLocator message

* [NOD-1191] Change message names that starts with 'get' to 'request'

* [NOD-1191] Change message commands values

* [NOD-1191] Remove redundant methods

* [NOD-1191] Rename message constructors

* [NOD-1191] Change message commands to use iota

* [NOD-1191] Add missing outputs to protobuf conversion

* [NOD-1191] Make block header a required field

* [NOD-1191] Rename variables

* [NOD-1212] Fix test names

* [NOD-1191] Rename flow names

* [NOD-1191] Fix infinite loop

* [NOD-1191] Fix wrong condition
2020-08-02 11:10:56 +03:00
Svarog
16a658a5be [NOD-1210] Add integration test for IBD and fix bug where requestSelectedTipsIfRequired ran in handshake's goroutine (#834)
* [NOD-1162] Separate kaspad to it's own package, so that I can use it out of integration test

* [NOD-1162] Begin integration tests

* [NOD-1162] [FIX] Assign cfg to RPCServer

* [NOD-1162] Basic integration test ready

* [NOD-1162] Wait for connection for real

* [NOD-1162] [FIX] Connection manager should run the moment it adds a request

* [NOD-1162] Make connect something that can be invoked in middle of test

* [NOD-1162] Complete first integration test

* [NOD-1162] Undo refactor error

* [NOD-1162] Rename Kaspad to App

* [NOD-1162] Convert checking connection to polling

* [NOD-1162] [FIX] Set peerID on handshake

* [NOD-1162] [FIX] Broadcast should send to outgoing route, not incoming

* [NOD-1162] [FIX] Add CmdInvRelayBlock to MakeEmptyMessage

* [NOD-1162] [FIX] Initialize Hash before decoding MsgInvRelayBlock

* [NOD-1162] [FIX] Invert condition

* [NOD-1162] [FIX] Fixes to encoding of MsgGetRelayBlocks

* [NOD-1162] [FIX] Add MsgGetRelayBlocks to MakeEmptyMessage

* [NOD-1162] [FIX] Connection manager should run the moment it adds a request

* [NOD-1162] [FIX] Set peerID on handshake

* [NOD-1162] [FIX] Broadcast should send to outgoing route, not incoming

* [NOD-1162] [FIX] Add CmdInvRelayBlock to MakeEmptyMessage

* [NOD-1162] [FIX] Initialize Hash before decoding MsgInvRelayBlock

* [NOD-1162] [FIX] Invert condition

* [NOD-1162] [FIX] Fixes to encoding of MsgGetRelayBlocks

* [NOD-1162] [FIX] Add MsgGetRelayBlocks to MakeEmptyMessage

* [NOD-1162] Add comment

* [NOD-1162] Added support for 3 nodes and clients in integration tests

* [NOD-1162] Add third node to integration test

* [NOD-1192] Use lock-less functions in TxPool.HandleNewBlock

* [NOD-1192] Broadcast transactions only if there's more then 0

* [NOD-1162] Removed double waitTillNextIteration

* [NOD-1192] Rename: broadcastTransactions -> broadcastTransactionsAfterBlockAdded

* [NOD-1162] Call NotifyBlocks on client3 as well

* [NOD-1162] ErrTimeout and ErrRouteClosed should be ProtocolErrors

* [NOD-1162] Added comment and removed redundant type PeerAddedCallback

* [NOD-1162] Revert overly eager rename

* [NOD-1162] Move DisalbeTLS to common config + minimize call for ioutil.TempDir()

* [NOD-1162] Add some clarifications in code

* [NOD-1193] Skip closed connections in NetAdapter.Broadcast

* [NOD-1193] Make sure to protect connectionsToRouters from concurrent access

* [NOD-1162] Add _test to all files in integration package

* [NOD-1162] Introduced appHarness to better encapsulate a single node

* [NOD-1162] Removed onChainChanged handler

* [NOD-1162] Remove redundant closure

* [NOD-1162] Correctly mark integration_test config as Simnet

* [NOD-1162] Rename app.ID -> app.P2PNodeID

* [NOD-1162] Move TestIntegrationBasicSync to basic_sync_test.go

* [NOD-1210] Made it possible to setup any number of harnesses needed

* [NOD-1210] Rename appHarness1/2 to incoming/outgoing in connect function

* [NOD-1210] Add the 117-incoming-connections integration test

* [NOD-1210] Delete 117-incoming-connections test because it opens too much files

* [NOD-1210] Added function to notify of blocks conveniently

* [NOD-1210] Added function to mine a block from-A-to-Z

* [NOD-1210] Added IBD integration test

* [NOD-1210] Finish test for IBD and fix bug where
requestSelectedTipsIfRequired ran in handshake's goroutine

* [NOD-1210] Set log level to debug

* [NOD-1210] A bunch of renamings
2020-08-02 09:42:27 +03:00
Ori Newman
42e50e6dc2 [NOD-1191] Convert wire protocol to proto (#831)
* [NOD-1191] Convert wire protocol to 100% protobuf

* [NOD-1191] Simplify wire interface and remove redundant messages

* [NOD-1191] Map all proto to wire conversions

* [NOD-1203] Create netadapter outside of protocol manager

* [NOD-1191] Fix nil errors

* [NOD-1191] Fix comments

* [NOD-1191] Add converter interface

* [NOD-1191] Add missing GetBlockLocator message

* [NOD-1191] Change message names that starts with 'get' to 'request'

* [NOD-1191] Change message commands values

* [NOD-1191] Remove redundant methods

* [NOD-1191] Rename message constructors

* [NOD-1191] Change message commands to use iota

* [NOD-1191] Add missing outputs to protobuf conversion

* [NOD-1191] Make block header a required field

* [NOD-1191] Rename variables

* [NOD-1212] Fix test names

* [NOD-1191] Rename flow names

* [NOD-1191] Fix infinite loop
2020-07-30 18:19:55 +03:00
Svarog
3d942ce355 [NOD-1162] Integration test (#822)
* [NOD-1162] Separate kaspad to it's own package, so that I can use it out of integration test

* [NOD-1162] Begin integration tests

* [NOD-1162] [FIX] Assign cfg to RPCServer

* [NOD-1162] Basic integration test ready

* [NOD-1162] Wait for connection for real

* [NOD-1162] [FIX] Connection manager should run the moment it adds a request

* [NOD-1162] Make connect something that can be invoked in middle of test

* [NOD-1162] Complete first integration test

* [NOD-1162] Undo refactor error

* [NOD-1162] Rename Kaspad to App

* [NOD-1162] Convert checking connection to polling

* [NOD-1162] [FIX] Set peerID on handshake

* [NOD-1162] [FIX] Broadcast should send to outgoing route, not incoming

* [NOD-1162] [FIX] Add CmdInvRelayBlock to MakeEmptyMessage

* [NOD-1162] [FIX] Initialize Hash before decoding MsgInvRelayBlock

* [NOD-1162] [FIX] Invert condition

* [NOD-1162] [FIX] Fixes to encoding of MsgGetRelayBlocks

* [NOD-1162] [FIX] Add MsgGetRelayBlocks to MakeEmptyMessage

* [NOD-1162] [FIX] Connection manager should run the moment it adds a request

* [NOD-1162] [FIX] Set peerID on handshake

* [NOD-1162] [FIX] Broadcast should send to outgoing route, not incoming

* [NOD-1162] [FIX] Add CmdInvRelayBlock to MakeEmptyMessage

* [NOD-1162] [FIX] Initialize Hash before decoding MsgInvRelayBlock

* [NOD-1162] [FIX] Invert condition

* [NOD-1162] [FIX] Fixes to encoding of MsgGetRelayBlocks

* [NOD-1162] [FIX] Add MsgGetRelayBlocks to MakeEmptyMessage

* [NOD-1162] Add comment

* [NOD-1162] Added support for 3 nodes and clients in integration tests

* [NOD-1162] Add third node to integration test

* [NOD-1192] Use lock-less functions in TxPool.HandleNewBlock

* [NOD-1192] Broadcast transactions only if there's more then 0

* [NOD-1162] Removed double waitTillNextIteration

* [NOD-1192] Rename: broadcastTransactions -> broadcastTransactionsAfterBlockAdded

* [NOD-1162] Call NotifyBlocks on client3 as well

* [NOD-1162] ErrTimeout and ErrRouteClosed should be ProtocolErrors

* [NOD-1162] Added comment and removed redundant type PeerAddedCallback

* [NOD-1162] Revert overly eager rename

* [NOD-1162] Move DisalbeTLS to common config + minimize call for ioutil.TempDir()

* [NOD-1162] Add some clarifications in code

* [NOD-1193] Skip closed connections in NetAdapter.Broadcast

* [NOD-1193] Make sure to protect connectionsToRouters from concurrent access

* [NOD-1162] Add _test to all files in integration package

* [NOD-1162] Introduced appHarness to better encapsulate a single node

* [NOD-1162] Removed onChainChanged handler

* [NOD-1162] Remove redundant closure

* [NOD-1162] Correctly mark integration_test config as Simnet

* [NOD-1162] Rename app.ID -> app.P2PNodeID
2020-07-30 10:47:56 +03:00
Ori Newman
94f617b06a [NOD-1206] Call peer.StartIBD in new goroutine (#833) 2020-07-29 12:03:59 +03:00
Svarog
211c4d05e8 [NOD-1120] Separate registration of routes, and the starting of flows (#832)
* [NOD-1120] Separate flow registration and running

* [NOD-1120] Extract executeFunc to separate function

* [NOD-1120] Move the registration of flows out of goroutine

* [NOD-1120] Return after handleError

* [NOD-1120] Rename: addXXXFlow -> registerXXXFlow

* Rename: stop -> errChan

* [NOD-1120] Fix name of goroutine
2020-07-29 12:02:04 +03:00
Ori Newman
a9f3bdf4ab [NOD-1203] Create netadapter outside of protocol manager (#830) 2020-07-29 10:17:13 +03:00
Svarog
2303aecab4 [NOD-1198] Make router a property of netConnection, and remove map from connection to router in netAdapter (#829)
* [NOD-1198] Make router a property of netConnection, and remove map from connection to router in netAdapter

* [NOD-1198] Moved all router logic from netAdapter to netConnection

* [NOD-1198] Move disconnect to NetConnection

* [NOD-1198] Unexport netConnection.start

* [NOD-1198] Remove error from Disconnect functions

* [NOD-1198] Make sure OnDisconnectedHandler doesn't run when it shouldn't
2020-07-28 11:27:48 +03:00
Svarog
7655841e9f [NOD-1194] Make error handling more centralized, and ignore ErrRouteClosed (#828)
* [NOD-1194] Make error handling more centralized, and ignore ErrRouteClosed

* [NOD-1194] Ignore ErrRouteClosed in connection_loops as well

* [NOD-1194] Enhance comment

* [NOD-1194] Return after any HandleError

* [NOD-1194] Rephrased comment
2020-07-27 15:07:28 +03:00
stasatdaglabs
c4bbcf9de6 [NOD-1181] Mark banned peers in address manager and persist bans to disk (#826)
* [NOD-1079] Fix block rejects over "Already have block" (#783)

* [NOD-1079] Return regular error instead of ruleError on already-have-block in ProcessBlock.

* [NOD-1079] Fix bad implementation of IsSelectedTipKnown.

* [NOD-1079] In shouldQueryPeerSelectedTips use selected DAG tip timestamp instead of past median time.

* [NOD-1079] Remove redundant (and possibly buggy) clearing of sm.requestedBlocks.

* [NOD-684] change simnet block rate to block per ms (#782)

* [NOD-684] Get rid of dag.targetTimePerBlock and use finality duration in dag params

* [NOD-684] Fix regtest genesis block

* [NOD-684] Set simnet's TargetTimePerBlock to 1ms

* [NOD-684] Shorten simnet finality duration

* [NOD-684] Change isDAGCurrentMaxDiff to be written as number of blocks

* [NOD-684] Fix NextBlockMinimumTime to be add one millisecond after past median time

* [NOD-1004] Make AddrManager.getAddress use only 1 loop to check all address chances and pick one of them (#741)

* [NOD-1004] Remove code duplication in Good().

* [NOD-1004] Remove some more code duplication in Good().

* [NOD-1004] Remove some more code duplication in Good().

* [NOD-1004] Remove code duplication in GetAddress().

* [NOD-1004] Remove code duplication in updateAddress.

* [NOD-1004] Remove some more code duplication in updateAddress.

* [NOD-1004] Remove redundant check in expireNew.

* [NOD-1004] Remove superfluous existence check from updateAddress.

* [NOD-1004] Make triedBucket use a slice instead of a list.

* [NOD-1004] Remove code duplication in getAddress.

* [NOD-1004] Remove infinite loops out of getAddress.

* [NOD-1004] Made impossible branch panic.

* [NOD-1004] Remove a mystery comment.

* [NOD-1004] Remove an unnecessary sort.

* [NOD-1004] Make AddressKey a type alias.

* [NOD-1004] Added comment for AddressKey

* [NOD-1004] Fix merge errors.

* [NOD-1004] Fix merge errors.

* [NOD-1004] Do some renaming.

* [NOD-1004] Do some more renaming.

* [NOD-1004] Rename AddrManager to AddressManager.

* [NOD-1004] Rename AddrManager to AddressManager.

* [NOD-1004] Do some more renaming.

* [NOD-1004] Rename bucket to addressBucketArray.

* [NOD-1004] Fix a comment.

* [NOD-1004] Rename na to netAddress.

* [NOD-1004] Bring back an existence check.

* [NOD-1004] Fix an error message.

* [NOD-1004] Fix a comment.

* [NOD-1004] Use a boolean instead of -1.

* [NOD-1004] Use a boolean instead of -1 in another place.

Co-authored-by: Mike Zak <feanorr@gmail.com>

* Fix merge errors.

* [NOD-1181] Move isBanned logic into addressManager.

* [NOD-1181] Persist bans to disk.

* [NOD-1181] Add comments.

* [NOD-1181] Add an additional exit condition to the connection loop.

* [NOD-1181] Add a TODO.

* [NOD-1181] Wrap not-found errors in addressManager.

* [NOD-1181] Fix a comment.

* [NOD-1181] Rename banned to isBanned.

* [NOD-1181] Fix bad error handling in routerInitializer.

* [NOD-1181] Remove a TODO.

Co-authored-by: Ori Newman <orinewman1@gmail.com>
Co-authored-by: Mike Zak <feanorr@gmail.com>
2020-07-27 14:45:18 +03:00
stasatdaglabs
0cec1ce23e [NOD-1189] Exit early if we've filtered out all the hashes in block relay. (#827) 2020-07-27 12:23:43 +03:00
Svarog
089fe828aa [NOD-1193] Skip closed connections in NetAdapter.Broadcast (#825)
* [NOD-1193] Skip closed connections in NetAdapter.Broadcast

* [NOD-1193] Make sure to protect connectionsToRouters from concurrent access
2020-07-27 10:32:07 +03:00
stasatdaglabs
24a09fb3df Merge 0.6.0-dev into 0.6.0-libp2p (#824)
* [NOD-1079] Fix block rejects over "Already have block" (#783)

* [NOD-1079] Return regular error instead of ruleError on already-have-block in ProcessBlock.

* [NOD-1079] Fix bad implementation of IsSelectedTipKnown.

* [NOD-1079] In shouldQueryPeerSelectedTips use selected DAG tip timestamp instead of past median time.

* [NOD-1079] Remove redundant (and possibly buggy) clearing of sm.requestedBlocks.

* [NOD-684] change simnet block rate to block per ms (#782)

* [NOD-684] Get rid of dag.targetTimePerBlock and use finality duration in dag params

* [NOD-684] Fix regtest genesis block

* [NOD-684] Set simnet's TargetTimePerBlock to 1ms

* [NOD-684] Shorten simnet finality duration

* [NOD-684] Change isDAGCurrentMaxDiff to be written as number of blocks

* [NOD-684] Fix NextBlockMinimumTime to be add one millisecond after past median time

* [NOD-1004] Make AddrManager.getAddress use only 1 loop to check all address chances and pick one of them (#741)

* [NOD-1004] Remove code duplication in Good().

* [NOD-1004] Remove some more code duplication in Good().

* [NOD-1004] Remove some more code duplication in Good().

* [NOD-1004] Remove code duplication in GetAddress().

* [NOD-1004] Remove code duplication in updateAddress.

* [NOD-1004] Remove some more code duplication in updateAddress.

* [NOD-1004] Remove redundant check in expireNew.

* [NOD-1004] Remove superfluous existence check from updateAddress.

* [NOD-1004] Make triedBucket use a slice instead of a list.

* [NOD-1004] Remove code duplication in getAddress.

* [NOD-1004] Remove infinite loops out of getAddress.

* [NOD-1004] Made impossible branch panic.

* [NOD-1004] Remove a mystery comment.

* [NOD-1004] Remove an unnecessary sort.

* [NOD-1004] Make AddressKey a type alias.

* [NOD-1004] Added comment for AddressKey

* [NOD-1004] Fix merge errors.

* [NOD-1004] Fix merge errors.

* [NOD-1004] Do some renaming.

* [NOD-1004] Do some more renaming.

* [NOD-1004] Rename AddrManager to AddressManager.

* [NOD-1004] Rename AddrManager to AddressManager.

* [NOD-1004] Do some more renaming.

* [NOD-1004] Rename bucket to addressBucketArray.

* [NOD-1004] Fix a comment.

* [NOD-1004] Rename na to netAddress.

* [NOD-1004] Bring back an existence check.

* [NOD-1004] Fix an error message.

* [NOD-1004] Fix a comment.

* [NOD-1004] Use a boolean instead of -1.

* [NOD-1004] Use a boolean instead of -1 in another place.

Co-authored-by: Mike Zak <feanorr@gmail.com>

* Fix merge errors.

Co-authored-by: Ori Newman <orinewman1@gmail.com>
Co-authored-by: Mike Zak <feanorr@gmail.com>
2020-07-26 15:23:18 +03:00
Svarog
b2901454d6 [NOD-1192] Use lock-less functions in TxPool.HandleNewBlock (#823)
* [NOD-1192] Use lock-less functions in TxPool.HandleNewBlock

* [NOD-1192] Broadcast transactions only if there's more then 0

* [NOD-1192] Rename: broadcastTransactions -> broadcastTransactionsAfterBlockAdded
2020-07-26 14:42:59 +03:00
stasatdaglabs
6cf589dc9b [NOD-1145] Normalize panics in flows (#819)
* [NOD-1145] Remove panics from regular flows.

* [NOD-1145] Remove panics from the handshake flow.

* [NOD-1045] Fix merge errors.

* [NOD-1045] Remove a comment.

* [NOD-1045] Handle errors properly in AddTransaction and AddBlock.

* [NOD-1045] Remove a comment.

* [NOD-1045] Wrap ErrPeerWithSameIDExists with ProtocolError.

* [NOD-1145] Add TODOs.
2020-07-26 13:46:59 +03:00
stasatdaglabs
683ceda3a7 [NOD-1152] Move banning from netAdapter to connectionManager (#820)
* [NOD-1152] Move banning out of netadapter.

* [NOD-1152] Add a comment.

* [NOD-1152] Fix a comment.
2020-07-26 13:42:48 +03:00
Svarog
6a18b56587 [NOD-1162] Fixes from integration test (#821)
* [NOD-1162] [FIX] Connection manager should run the moment it adds a request

* [NOD-1162] [FIX] Set peerID on handshake

* [NOD-1162] [FIX] Broadcast should send to outgoing route, not incoming

* [NOD-1162] [FIX] Add CmdInvRelayBlock to MakeEmptyMessage

* [NOD-1162] [FIX] Initialize Hash before decoding MsgInvRelayBlock

* [NOD-1162] [FIX] Invert condition

* [NOD-1162] [FIX] Fixes to encoding of MsgGetRelayBlocks

* [NOD-1162] [FIX] Add MsgGetRelayBlocks to MakeEmptyMessage

* [NOD-1162] Add comment
2020-07-26 11:44:16 +03:00
Ori Newman
2c9e5be816 [NOD-1177] In handleGetConnectedPeerInfo, populate the missing result fields (#818) 2020-07-23 13:35:30 +03:00
stasatdaglabs
5d5a0ef335 [NOD-1153] Remove redundant maps from NetAdapter (#817)
* [NOD-1153] Remove redundant maps from NetAdapter.

* [NOD-1153] Fix a comment.

* [NOD-1153] Fix a comment.
2020-07-23 12:07:53 +03:00
Ori Newman
428f16ffef [NOD-1185] Broadcast blocks submitted through RPC (#816)
* [NOD-1185] Broadcast blocks submitted through RPC

* [NOD-1185] Send inv instead of block

* [NOD-1185] Fix go.sum and go.mod
2020-07-23 11:50:44 +03:00
stasatdaglabs
f93e54b63c [NOD-1184] Protect incomingRoutes from concurrent read/write. (#815) 2020-07-23 11:17:52 +03:00
Ori Newman
c30b350e8e [NOD-1183] Fix rpc server config (#814) 2020-07-23 10:49:46 +03:00
stasatdaglabs
8fdb5aa024 [NOD-1123] Implement banning (#812)
* [NOD-1123] Bubble bad-message errors up to the protocol level.

* [NOD-1123] Implement Banning.

* [NOD-1123] Properly use &stopped.

* [NOD-1123] Ban by IP rather than IP and port.

* [NOD-1123] Don't initiate connections to banned peers.

* [NOD-1123] Fix infinite loop in checkOutgoingConnections.

* [NOD-1123] Fix bannedAddresses key.

* [NOD-1123] Rename onBadMessageHandler to onInvalidMessageHandler.
2020-07-23 10:47:28 +03:00
Ori Newman
83a3c30d01 [NOD-1176] Implement a struct for each flow to share flow data (#811)
* [NOD-1176] Implement a struct for each flow to share flow data

* [NOD-1178] Add empty contexts to flow structs for consistency
2020-07-22 15:12:54 +03:00
stasatdaglabs
63646c8c92 [NOD-1175] Implement AddBlock (#809)
* [NOD-1175] Get rid of something weird.

* [NOD-1175] Implement AddBlock.

* [NOD-1175] Implement BFDisallowOrphans.

* [NOD-1175] Pass flags into AddBlock.

* [NOD-1175] Remove isOrphan and isDelayed handling from AddBlock.

* [NOD-1175] Use default return values in error.

* [NOD-1175] Bring back a comment.

* [NOD-1175] Add ErrOrphanBlockIsNotAllowed to errorCodeStrings.
2020-07-22 13:47:38 +03:00
stasatdaglabs
097e7ab42a [NOD-1155] Always use NetConnection for disconnection. (#810) 2020-07-22 12:18:08 +03:00
stasatdaglabs
3d45c8de50 [NOD-1130] Integrate RPC with the new architecture (#807)
* [NOD-1130] Delete rpcadapters.go.

* [NOD-1130] Delete p2p. Move rpc to top level.

* [NOD-1130] Remove DAGParams from rpcserverConfig.

* [NOD-1130] Remove rpcserverPeer, rpcserverConnManager, rpcserverSyncManager, and rpcserverConfig.

* [NOD-1130] Remove wallet RPC commands.

* [NOD-1130] Remove wallet RPC commands.

* [NOD-1130] Remove connmgr and peer.

* [NOD-1130] Move rpcmodel into rpc.

* [NOD-1130] Implement ConnectionCount.

* [NOD-1130] Remove ping and node RPC commands.

* [NOD-1130] Dummify handleGetNetTotals.

* [NOD-1130] Add NetConnection to Peer.

* [NOD-1130] Fix merge errors.

* [NOD-1130] Implement Peers.

* [NOD-1130] Fix HandleGetConnectedPeerInfo.

* [NOD-1130] Fix SendRawTransaction.

* [NOD-1130] Rename addManualNode to connect and removeManualNode to disconnect.

* [NOD-1130] Add a stub for AddBlock.

* [NOD-1130] Fix tests.

* [NOD-1130] Replace half-baked contents of RemoveConnection with a stub.

* [NOD-1130] Fix merge errors.

* [NOD-1130] Make golint happy.

* [NOD-1130] Get rid of something weird.

* [NOD-1130] Rename minerClient back to client.

* [NOD-1130] Add a few fields to GetConnectedPeerInfoResult.

* [NOD-1130] Rename oneTry to isPermanent.

* [NOD-1130] Implement ConnectionCount in NetAdapter.

* [NOD-1130] Move RawMempoolVerbose out of mempool.

* [NOD-1130] Move isSynced into the mining package.

* [NOD-1130] Fix a compilation error.

* [NOD-1130] Make golint happy.

* [NOD-1130] Fix merge errors.
2020-07-22 10:26:39 +03:00
Ori Newman
8e1958c20b [NOD-1168] Add context interfaces for flows (#808)
* [NOD-1168] Add context interfaces to flows

* [NOD-1168] Move IBD state to protocol manager

* [NOD-1168] Move ready peers to protocol manager

* [NOD-1168] Add comments

* [NOD-1168] Separate context interfaces for send and receive pings

* [NOD-1168] Add protocol shared state to FlowContext

* [NOD-1168] Fix comment

* [NOD-1168] Rename Context->HandleHandshakeContext

* [NOD-1168] Initialize readyPeers and transactionsToRebroadcast

* [NOD-1168] Rename readyPeers -> peers
2020-07-21 18:02:33 +03:00
Ori Newman
3e6c1792ef [NOD-1170] Return a custom error when a route is closed (#805)
* [NOD-1170] Return a custom error when a route is closed

* [NOD-1170] Return ErrRouteClosed directly from route methods

* [NOD-1170] Fix comment location
2020-07-21 12:06:11 +03:00
Svarog
6b5b4bfb2a [NOD-1164] Remove the singleton from dbaccess, to enable multiple db connections in same run (#806)
* [NOD-1164] Defined DatabaseContext as the basic object of dbaccess

* [NOD-1164] Update everything to use databaseContext

* [NOD-1164] Fix tests

* [NOD-1164] Add comments

* [NOD-1164] Removed databaseContext from blockNode

* [NOD-1164] Enforce DatabaseContext != nil

* [NOD-1164] Remove redundant and wrong comment line
2020-07-21 12:02:44 +03:00
Ori Newman
b797436884 [NOD-1127] Implement transaction propagation (#803)
* [NOD-1128] Add all flows to a directory names flows

* [NOD-1128] Make everything in protocol package a manager method

* [NOD-1128] Add AddTransaction mechanism to protocol manager

* [NOD-1128] Add mempool related flows

* [NOD-1128] Add mempool related flows

* [NOD-1128] Add mempool related flows

* [NOD-1127] Fix router message types

* [NOD-1127] Inline updateQueues

* [NOD-1127] Rename acceptedTxs->transactionsAcceptedToMempool

* [NOD-1127] Add TODOs to notify transactions to RPC

* [NOD-1127] Fix comment

* [NOD-1127] Rename acceptedTxs->transactionsAcceptedToMempool

* [NOD-1127] Rename MsgTxInv->MsgInvTransaction

* [NOD-1127] Rename MsgTxInv.TXIDs->TxIDS

* [NOD-1127] Change flow name

* [NOD-1127] Call m.addTransactionRelayFlow

* [NOD-1127] Remove redundant line

* [NOD-1127] Use common.DefaultTimeout

* [NOD-1127] Return early if len(idsToRequest) == 0

* [NOD-1127] Add NewBlockHandler to IBD
2020-07-20 16:01:35 +03:00
Svarog
2de3c1d0d4 [NOD-1160] Convert *config.Config from singleton to an object that is being passed around (#802)
* [NOD-1160] remove activeConfig from config package + update main

* [NOD-1160] Update main and addrmanager

* [NOD-1160] Update netAdapater

* [NOD-1160] Update connmanager

* [NOD-1160] Fix connmgr package

* [NOD-1160] Fixed DNSSeed functions

* [NOD-1160] Fixed protocol package and subpackages

* [NOD-1160] Fix p2p package

* [NOD-1160] Fix rpc package

* [NOD-1160] Fix kaspad a final time

* [NOD-1160] Make dnsseed.SeedFromDNS callable outside kaspad

* [NOD-1160] Fix tests

* [NOD-1160] Pass cfg to kaspad

* [NOD-1160] Add comment and remove redundant object

* [NOD-1160] Fix typo
2020-07-20 14:33:35 +03:00
Ori Newman
7e81757e2f [NOD-1161] Name goroutines and log them by the name (#804)
* [NOD-1161] Name goroutines and log them by the name

* [NOD-1161] Fix some goroutine names
2020-07-20 13:00:23 +03:00
stasatdaglabs
4773f87875 [NOD-1125] Implement the IBD flow (#800)
* [NOD-1125] Write a skeleton for starting IBD.

* [NOD-1125] Add WaitForIBDStart to Peer.

* [NOD-1125] Move functions around.

* [NOD-1125] Fix merge errors.

* [NOD-1125] Fix a comment.

* [NOD-1125] Implement sendGetBlockLocator.

* [NOD-1125] Begin implementing findIBDLowHash.

* [NOD-1125] Finish implementing findIBDLowHash.

* [NOD-1125] Rename findIBDLowHash to findHighestSharedBlockHash.

* [NOD-1125] Implement downloadBlocks.

* [NOD-1125] Implement msgIBDBlock.

* [NOD-1125] Implement msgIBDBlock.

* [NOD-1125] Fix message types for HandleIBD.

* [NOD-1125] Write a skeleton for requesting selected tip hashes.

* [NOD-1125] Write a skeleton for the rest of the IBD requests.

* [NOD-1125] Implement HandleGetBlockLocator.

* [NOD-1125] Fix wrong timeout.

* [NOD-1125] Fix compilation error.

* [NOD-1125] Implement HandleGetBlocks.

* [NOD-1125] Fix compilation errors.

* [NOD-1125] Fix merge errors.

* [NOD-1125] Implement selectPeerForIBD.

* [NOD-1125] Implement RequestSelectedTip.

* [NOD-1125] Implement HandleGetSelectedTip.

* [NOD-1125] Make go lint happy.

* [NOD-1125] Add minGetSelectedTipInterval.

* [NOD-1125] Call StartIBDIfRequired where needed.

* [NOD-1125] Fix merge errors.

* [NOD-1125] Remove a redundant line.

* [NOD-1125] Rename shouldContinue to shouldStop.

* [NOD-1125] Lowercasify an error message.

* [NOD-1125] Shuffle statements around in findHighestSharedBlockHash.

* [NOD-1125] Rename hasRecentlyReceivedBlock to isDAGTimeCurrent.

* [NOD-1125] Scope minGetSelectedTipInterval.

* [NOD-1125] Handle an unhandled error.

* [NOD-1125] Use AddUint32 instead of LoadUint32 + StoreUint32.

* [NOD-1125] Use AddUint32 instead of LoadUint32 + StoreUint32.

* [NOD-1125] Use SwapUint32 instead of AddUint32.

* [NOD-1125] Remove error from requestSelectedTips.

* [NOD-1125] Actually stop IBD when it should stop.

* [NOD-1125] Actually stop RequestSelectedTip when it should stop.

* [NOD-1125] Don't ban peers that send us delayed blocks during IBD.

* [NOD-1125] Make unexpected message type messages nicer.

* [NOD-1125] Remove Peer.ready and make HandleHandshake return it to guarantee we never operate on a non-initialized peer.

* [NOD-1125] Remove errors associated with Peer.ready.

* [NOD-1125] Extract maxHashesInMsgIBDBlocks to a const.

* [NOD-1125] Move the ibd package into flows.

* [NOD-1125] Start IBD if required after getting an unknown block inv.

* [NOD-1125] Don't request blocks during relay if we're in the middle of IBD.

* [NOD-1125] Remove AddBlockLocatorHash.

* [NOD-1125] Extract runIBD to a seperate function.

* [NOD-1125] Extract runSelectedTipRequest to a seperate function.

* [NOD-1125] Remove EnqueueWithTimeout.

* [NOD-1125] Increase the capacity of the outgoingRoute.

* [NOD-1125] Fix some bad names.

* [NOD-1125] Fix a comment.

* [NOD-1125] Simplify a comment.

* [NOD-1125] Move WaitFor... functions into their respective run... functions.

* [NOD-1125] Return default values in case of error.

* [NOD-1125] Use CmdXXX in error messages.

* [NOD-1125] Use MaxInvPerMsg in outgoingRouteMaxMessages instead of MaxBlockLocatorsPerMsg.

* [NOD-1125] Fix a comment.

* [NOD-1125] Disconnect a peer that sends us a delayed block during IBD.

* [NOD-1125] Use StoreUint32 instead of SwapUint32.

* [NOD-1125] Add a comment.

* [NOD-1125] Don't ban peers that send us delayed blocks.
2020-07-20 12:52:23 +03:00
Svarog
aa5bc34280 [NOD-1148] P2P stabilization (#798)
* [NOD-1148] Add lock around route's close operation

* [NOD-1148] Added tracing of incoming and outgoing messages

* [NOD-1148] Cast to MsgPing should have been to MsgPong

* [NOD-1148] Check for NeedMoreAddresses before sending GetAddr message
and invert condition
2020-07-19 14:57:34 +03:00
Svarog
b9a25c1141 [NOD-1163] Combine seperated flows into single packages (#801)
* [NOD-1163] Combine seperated flows into single packages

* [NOD-1163] Move handshake.go to handshake package

* [NOD-1163] Use single logger prefix for everything under protocol

* [NOD-1163] Add comment

* [NOD-1163] Fix refactor error
2020-07-19 11:24:25 +03:00
Svarog
b42b8b16fd [NOD-1120] Connection Manager (#796)
* [NOD-1120] Removed closure in NetAdapter.onConnectedHanlder

* [NOD-1120] Implement all connection manager methods

* [NOD-1120] Integrated connmanager into kaspad + added call for dnsseeder

* [NOD-1120] Allow buffer to not be bytes.Buffer

* [NOD-1120] Added timeout to connect

* [NOD-1120] Don't enter connections to  add loop if none needed

* [NOD-1120] Add call for addressManager.Good

* [NOD-1120] Minor bug fixes

* [NOD-1120] Remove errChan from grpcConnection

* [NOD-1120] Add comments to exported methods

* [NOD-1120] cancel the context for DialContext in gRPCServer.Connect

* [NOD-1120] Don't try to remove from connSet a connection that doesn't exist

* [NOD-1120] add ok bool to connectionSet.get

* [NOD-1120] Remove overuse of if-else in checkConnectionRequests

* [NOD-1120] Made some order in ConnectionManager

* [NOD-1120] Moved checkIncomingConnections to it's own file

* [NOD-1120] cleanup in checkOutgoingConnections

* [NOD-1120] Cleanup in SeedDNS, and move call outside of connection manager

* [NOD-1120] Add check that both --connect and --addpeer aren't used

* [NOD-1120] Move dial timeout to constant

* [NOD-1120] Enhance comment

* [NOD-1120] Log connection failure out of initiateConnection

* [NOD-1148] Reshuffle checkRequestedConnections to make more sense

* [NOD-1120] Move continue to correct place + reshuffle logging code

* [NOD-1120] Don't expose server.Connection outside netAdapter - expose a wrapper instead

* [NOD-1120] Add comments

* [NOD-1120] Don't return the connection from netAdapter.Connect()

* [NOD-1120] Use .Address as key for connectionSet

* [NOD-1120] Fix minRetryDuration usage

* [NOD-1120] Remove the correct number of incoming connections

* [NOD-1120] Add comment

* [NOD-1120] Rename connSet -> incomingConnectionSet

* [NOD-1120] fix grammar
2020-07-16 17:15:58 +03:00
Ori Newman
e0aac68759 [NOD-1128] Convert message type to uint32 (#799)
* [NOD-1128] Change message command to uint32

* [NOD-1128] Don't use iota

* [NOD-1128] Remove redundant line
2020-07-16 17:11:05 +03:00
Ori Newman
9939671ccc [NOD-1147] Implement address exchange (#795)
* [NOD-1147] Implement address exchange

* [NOD-1147] Put placeholder for source address

* [NOD-1147] Fix tests

* [NOD-1147] Add comment

* [NOD-1147] Remove needAddresses from MsgGetAddr

* [NOD-1147] Use rand.Shuffle

* [NOD-1147] Remove redundant const

* [NOD-1147] Move defer to its correct place

* [NOD-1147] Fix typo

* [NOD-1147] Use EnqueueWithTimeout for outgoingRoute

* [NOD-1147] Rename MsgGetAddr->MsgGetAddresses

* [NOD-1147] Rename MsgGetAddr->MsgGetAddresses

* [NOD-1147] Rename MsgAddr->MsgAddresses

* [NOD-1147] Rename fakeSrcAddr->fakeSourceAddress

* [NOD-1147] Remove redundant files

* [NOD-1147] CmdAddr -> CmdAddress

* [NOD-1147] Rename addr to address in protocol package
2020-07-15 17:19:46 +03:00
Ori Newman
eaa8515442 [NOD-1150] Add in netadapter function to disconnect router (#797)
* [NOD-1150] Add in netadapter function to disconnect router

* [NOD-1150] Fix comment
2020-07-15 14:42:17 +03:00
Ori Newman
04b578cee1 [NOD-1137] Implement handshake protocol (#792)
* [NOD-1126] Implement block relay flow

* [NOD-1126] Implement block relay flow

* [NOD-1126] Add StartGetRelayBlocksListener

* [NOD-1126] Integrate with new interface

* [NOD-1126] Fix comments

* [NOD-1126] Refactor protocol.go

* [NOD-1126] Split long lines

* [NOD-1126] Fix comment

* [NOD-1126] move sharedRequestedBlocks to a separate file

* [NOD-1126] Fix error message

* [NOD-1126] Move handleInv to StartBlockRelay

* [NOD-1126] Create hashesQueueSet type

* [NOD-1126] Make deleteFromRequestedBlocks a method

* [NOD-1126] Fix comment

* [NOD-1126] Add block logger

* [NOD-1126] Rename advertisedProtoVer->advertisedProtocolVer

* [NOD-1126] Fix comment and an error message

* [NOD-1126] Remove redundant loop

* [NOD-1126] Move requestBlocks upper

* [NOD-1126] Remove exiting blocks in requestedBlocks from hashesToRequest

* [NOD-1126] Change comment

* [NOD-1126] Rename stallResponseTimeout->timeout

* [NOD-1126] Use switch inside readMsgBlock

* [NOD-1126] Fix error message and remove redundant log

* [NOD-1126] Rename pacakge names

* [NOD-1126] Fix comment

* [NOD-1126] Change file names

* [NOD-1126] Convert block to partial if needed

* [NOD-1126] Remove function redeclaration

* [NOD-1126] continue instead of return

* [NOD-1126] Rename LogBlockBlueScore->LogBlock

* [NOD-1126] Add minimum functions to utils

* [NOD-1126] Flip condition on readInv

* [NOD-1126] Rename utilMath->mathUtil

* [NOD-1126] Fix comment

* [NOD-1137] Implement handshake

* [NOD-1137] Replace version's nonce with ID

* [NOD-1137] Remove redundant function

* [NOD-1137] Move handshake to a separate file

* [NOD-1137] Add todo

* [NOD-1137] Replace peer internal id with global peer ID

* [NOD-1137] Add serializer/deserializer to ID

* [NOD-1137] Remove validation from AddUserAgent

* [NOD-1137] Add missing id package

* [NOD-1137] Rename variables

* [NOD-1137] Add comment

* [NOD-1137] Implement GetBestLocalAddress

* [NOD-1137] Implement TODOs

* [NOD-1137] Rename variables

* [NOD-1137] Move errors.Is inside err!=nil branch

* [NOD-1137] Fix erroneous condition on Dequeue

* [NOD-1137] Fix bug in GetReadyPeerIDs

* [NOD-1137] Handle external IP on GetBestLocalAddress

* [NOD-1137] Remove version and verack message types when handshake is over

* [NOD-1137] Add FromBytes to id package

* [NOD-1137] Add protocol error

* [NOD-1137] Add ErrTimeout

* [NOD-1137] Log error only if exists

* [NOD-1137] Replace idFromBytes->id.FromBytes

* [NOD-1137] Add comments

* [NOD-1137] Remove ErrTimeout

* [NOD-1137] Unremove ErrTimeout

* [NOD-1137] Change comment

* [NOD-1137] Use EnqueueWithTimeout everywhere in protocol
2020-07-14 17:20:29 +03:00
stasatdaglabs
f8e53d309c [NOD-1142] Implement EnqueueWithTimeout and DequeueWithTimeout (#794)
* [NOD-1142] Implement EnqueueWithTimeout and DequeueWithTimeout.

* [NOD-1142] Use DequeueWithTimeout in readMsgBlock.

* [NOD-1142] Add comments about the new methods.
2020-07-14 16:14:27 +03:00
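Editor's note: NOD-1142 above adds EnqueueWithTimeout and DequeueWithTimeout. A minimal sketch of how a channel-backed route can implement both with select and time.After; the Route shape and error text are assumptions for illustration only.

```go
package router

import (
	"time"

	"github.com/pkg/errors"
)

// Route is a simplified, channel-backed message route used only to illustrate
// the timeout variants; the real kaspad type differs.
type Route struct {
	channel chan interface{}
}

// ErrTimeout is returned when an enqueue or dequeue does not complete in time.
var ErrTimeout = errors.New("route operation timed out")

// EnqueueWithTimeout pushes a message, giving up after the given duration.
func (r *Route) EnqueueWithTimeout(message interface{}, timeout time.Duration) error {
	select {
	case r.channel <- message:
		return nil
	case <-time.After(timeout):
		return errors.Wrapf(ErrTimeout, "could not enqueue within %s", timeout)
	}
}

// DequeueWithTimeout pops a message, giving up after the given duration.
func (r *Route) DequeueWithTimeout(timeout time.Duration) (interface{}, error) {
	select {
	case message := <-r.channel:
		return message, nil
	case <-time.After(timeout):
		return nil, errors.Wrapf(ErrTimeout, "could not dequeue within %s", timeout)
	}
}
```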
stasatdaglabs
6076309b3e [NOD-1142] Implement ping flow (#793)
* [NOD-1124] Move Router to the router package.

* [NOD-1124] Implement SetOnRouteCapacityReachedHandler.

* [NOD-1124] Use Routes instead of bare channels.

* [NOD-1124] Fix merge errors.

* [NOD-1124] Connect the Router to the Connection.

* [NOD-1124] Fix merge errors.

* [NOD-1124] Move some variables around.

* [NOD-1124] Fix unreachable code.

* [NOD-1124] Fix a variable name.

* [NOD-1142] Implement ping flows.

* [NOD-1142] Add ping flows to startFlows.

* [NOD-1142] Fix merge errors.

* [NOD-1142] Fix a typo.

* [NOD-1142] Add comments to exported functions.

* [NOD-1142] Fix bad flow name.

* [NOD-1142] Remove a redundant empty line.

* [NOD-1142] Fix a typo.

* [NOD-1142] Simplify for loop.

* [NOD-1142] Rename HandlePing to HandleIncomingPings and StartPingLoop to StartSendingPings.

* [NOD-1142] Fix no-longer-infinite loop.

* [NOD-1142] Represent ping duration as time.Duration instead of an int64.

* [NOD-1142] Rename HandleIncomingPings to ReceivePings and StartSendingPings to SendPings.

* [NOD-1142] Move pingInterval to within SendPings.

* [NOD-1142] Rephrase a comment.
2020-07-14 12:41:37 +03:00
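Editor's note: the ping-flow commit above ends up with two loops, ReceivePings and SendPings, with pingInterval kept inside SendPings and the round trip measured as a time.Duration. A hedged sketch over an assumed route interface; every name and the interval value below are illustrative, not kaspad's actual API.

```go
package ping

import "time"

// pingRoute is the small surface the two loops need. It is an assumption made
// for this sketch, not the real kaspad interface.
type pingRoute interface {
	DequeuePing() (nonce uint64, ok bool)
	EnqueuePong(nonce uint64) bool
	EnqueuePing(nonce uint64) bool
	DequeuePong() (nonce uint64, ok bool)
}

// ReceivePings answers every incoming ping with a pong carrying the same nonce.
func ReceivePings(route pingRoute) {
	for {
		nonce, ok := route.DequeuePing()
		if !ok {
			return // route closed
		}
		if !route.EnqueuePong(nonce) {
			return
		}
	}
}

// SendPings periodically sends a ping, waits for the pong, and reports the
// round trip as a time.Duration.
func SendPings(route pingRoute, onPingDuration func(time.Duration)) {
	const pingInterval = 2 * time.Minute // illustrative value
	ticker := time.NewTicker(pingInterval)
	defer ticker.Stop()

	var nonce uint64
	for range ticker.C {
		nonce++
		start := time.Now()
		if !route.EnqueuePing(nonce) {
			return
		}
		if _, ok := route.DequeuePong(); !ok {
			return
		}
		onPingDuration(time.Since(start))
	}
}
```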
stasatdaglabs
05db135d23 [NOD-1124] Implement the Flow thread model and architecture (#791)
* [NOD-1124] Move Router to the router package.

* [NOD-1124] Implement SetOnRouteCapacityReachedHandler.

* [NOD-1124] Use Routes instead of bare channels.

* [NOD-1124] Fix merge errors.

* [NOD-1124] Connect the Router to the Connection.

* [NOD-1124] Fix merge errors.

* [NOD-1124] Move some variables around.

* [NOD-1124] Fix unreachable code.

* [NOD-1124] Fix a variable name.

* [NOD-1124] Rename AddRoute to AddIncomingRoute.

* [NOD-1124] Rename SetRouter to Start.

* [NOD-1124] Make AddIncomingRoute create a Route by itself.

* [NOD-1124] Replace IncomingRoute with EnqueueIncomingMessage.

* [NOD-1124] Make Enqueue and Dequeue return isOpen instead of err.

* [NOD-1124] Remove writeDuringDisconnectLock.

* [NOD-1124] In sendLoop, move outgoingRoute to outside the loop.

* [NOD-1124] Start the connection loops only when Start is called.

* [NOD-1124] Replace OnIDReceivedHandler with AssociateRouterID.

* [NOD-1124] Add isOpen to Enqueue and Dequeue.

* [NOD-1124] Protect errChan from writing during disconnect.
2020-07-13 16:51:13 +03:00
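Editor's note: the flow-architecture bullets above describe a Router that owns Routes, where AddIncomingRoute creates the Route itself and Enqueue/Dequeue return an isOpen flag instead of an error. A minimal sketch of that shape, assuming a generic message interface; none of this is kaspad's exact code.

```go
package router

// message stands in for a wire message; only its command matters here.
type message interface {
	Command() uint32
}

// Route is a channel-backed route whose operations report isOpen.
type Route struct {
	channel   chan message
	closeChan chan struct{}
}

func newRoute() *Route {
	return &Route{channel: make(chan message, 100), closeChan: make(chan struct{})}
}

// Enqueue pushes a message and reports whether the route is still open.
func (r *Route) Enqueue(msg message) (isOpen bool) {
	select {
	case r.channel <- msg:
		return true
	case <-r.closeChan:
		return false
	}
}

// Dequeue pops a message and reports whether the route is still open.
func (r *Route) Dequeue() (message, bool) {
	select {
	case msg := <-r.channel:
		return msg, true
	case <-r.closeChan:
		return nil, false
	}
}

// Close closes the route; pending Enqueue/Dequeue calls return isOpen=false.
func (r *Route) Close() {
	close(r.closeChan)
}

// Router dispatches incoming messages to the route registered for their command.
type Router struct {
	incomingRoutes map[uint32]*Route
}

func NewRouter() *Router {
	return &Router{incomingRoutes: make(map[uint32]*Route)}
}

// AddIncomingRoute creates a Route by itself and registers it for the commands.
func (r *Router) AddIncomingRoute(commands []uint32) *Route {
	route := newRoute()
	for _, command := range commands {
		r.incomingRoutes[command] = route
	}
	return route
}

// EnqueueIncomingMessage routes a message by its command, reporting isOpen.
func (r *Router) EnqueueIncomingMessage(msg message) (isOpen bool) {
	route, ok := r.incomingRoutes[msg.Command()]
	if !ok {
		return true // a real router would reject unknown commands
	}
	return route.Enqueue(msg)
}
```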
Ori Newman
433cdb6006 [NOD-1126] implement block relay flow (#786)
* [NOD-1126] Implement block relay flow

* [NOD-1126] Add StartGetRelayBlocksListener

* [NOD-1126] Implement block relay flow

* [NOD-1126] Integrate with new interface

* [NOD-1126] Fix comments

* [NOD-1126] Refactor protocol.go

* [NOD-1126] Split long lines

* [NOD-1126] Fix comment

* [NOD-1126] move sharedRequestedBlocks to a separate file

* [NOD-1126] Fix error message

* [NOD-1126] Move handleInv to StartBlockRelay

* [NOD-1126] Create hashesQueueSet type

* [NOD-1126] Make deleteFromRequestedBlocks a method

* [NOD-1126] Fix comment

* [NOD-1126] Add block logger

* [NOD-1126] Rename advertisedProtoVer->advertisedProtocolVer

* [NOD-1126] Fix comment and an error message

* [NOD-1126] Remove redundant loop

* [NOD-1126] Move requestBlocks up

* [NOD-1126] Remove existing blocks in requestedBlocks from hashesToRequest

* [NOD-1126] Change comment

* [NOD-1126] Rename stallResponseTimeout->timeout

* [NOD-1126] Use switch inside readMsgBlock

* [NOD-1126] Fix error message and remove redundant log

* [NOD-1126] Rename package names

* [NOD-1126] Fix comment

* [NOD-1126] Change file names

* [NOD-1126] Convert block to partial if needed

* [NOD-1126] Remove function redeclaration

* [NOD-1126] continue instead of return

* [NOD-1126] Rename LogBlockBlueScore->LogBlock

* [NOD-1126] Add minimum functions to utils

* [NOD-1126] Flip condition on readInv

* [NOD-1126] Rename utilMath->mathUtil

* [NOD-1126] Fix comment
2020-07-12 16:11:42 +03:00
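Editor's note: the block-relay commit above creates a hashesQueueSet type. A hedged sketch of the idea: a FIFO queue with O(1) membership checks, so a relayed hash is never queued (and therefore never requested) twice. The hash type and method names are assumptions.

```go
package blockrelay

// hash stands in for a block hash; kaspad uses a fixed-size array type.
type hash [32]byte

// hashesQueueSet is a FIFO queue of hashes backed by a set for fast lookups.
type hashesQueueSet struct {
	queue []hash
	set   map[hash]struct{}
}

func newHashesQueueSet() *hashesQueueSet {
	return &hashesQueueSet{set: make(map[hash]struct{})}
}

// enqueueIfNotExists adds a hash only if it isn't already queued.
func (h *hashesQueueSet) enqueueIfNotExists(value hash) {
	if _, ok := h.set[value]; ok {
		return
	}
	h.queue = append(h.queue, value)
	h.set[value] = struct{}{}
}

// dequeue pops up to numItems hashes from the front of the queue.
func (h *hashesQueueSet) dequeue(numItems int) []hash {
	if numItems > len(h.queue) {
		numItems = len(h.queue)
	}
	popped := h.queue[:numItems]
	h.queue = h.queue[numItems:]
	for _, value := range popped {
		delete(h.set, value)
	}
	return popped
}

// exists reports whether a hash is currently queued.
func (h *hashesQueueSet) exists(value hash) bool {
	_, ok := h.set[value]
	return ok
}
```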
Svarog
4a4dca1926 [NOD-1118] Implement gRPC basic connectivity (#790)
* [NOD-1118] Added protobufs for the MessageStream

* [NOD-1118] Implement some of the basic grpc methods

* [NOD-1118] Implemented gRPCConnection send and receive

* [NOD-1118] Implemented basic connection loops

* [NOD-1118] gRPC server implementation ready

* [NOD-1118] Add connection management

* [NOD-1118] Sort out the connection loops

* [NOD-1118] Add temporary testConnection

* [NOD-1118] Send to c.errChan whether error was received or not

* [NOD-1118] Call OnConnectHandler in time

* [NOD-1118] Handle closing connections properly

* [NOD-1118] Add comments to exported functions

* [NOD-1118] Call server.addConnection on newConnection

* [NOD-1118] Add a TODO comment

* [NOD-1118] Add a TODO comment

* [NOD-1118] Make connection a Stringer

* [NOD-1118] Made the connection loops 100% synchronic

* [NOD-1118] Make connection.isConnected uint32

* [NOD-1118] Move the Add/Remove connection from grpcConnection to register/unregister connection

* [NOD-1118] Convert error messages to lower case

* [NOD-1118] Remove protoc inline dependency

* [NOD-1118] Fix comment

* [NOD-1118] Exit if there was an error starting the protocol manager

* [NOD-1118] Fix error message

* [NOD-1118] Fixed a few comments

* [NOD-1118] Extract listenOn to a method

* [NOD-1118] Use !=0 for isConnected

* [NOD-1118] Refactor listenOn

* [NOD-1118] Add lock on channelWrites in gRPCConnection

* [NOD-1118] Rename channelWriteLock -> writeDuringDisconnectLock

* [NOD-1118] Reshuffle a comment

* [NOD-1118] Add a TODO comment
2020-07-12 15:22:49 +03:00
stasatdaglabs
6d591dde74 [NOD-1124] Implement the Flow thread model and architecture (#789)
* [NOD-1124] Rename Peer to Connection (because Peer is a business logic term)

* [NOD-1124] Implement Close for Router.

* [NOD-1124] Add SetPeerDisconnectedHandler.

* [NOD-1124] Remove mentions of "peer" from the netadapter package.

* [NOD-1124] Handle errors/stopping in netadapter.

* [NOD-1124] Remove netadapter.Connection.

* [NOD-1124] Add startSendLoop.

* [NOD-1124] Implement network IDs.

* [NOD-1124] Implement a map between IDs and routes.

* [NOD-1124] Implement Broadcast.

* [NOD-1124] Fix rename error.

* [NOD-1124] Fix copy+paste error.

* [NOD-1124] Change the type of NetAdapter.stop to uint32.

* [NOD-1124] If NetAdapter is stopped more than once, return an error.

* [NOD-1124] Add an error case to RouteInputMessage.

* [NOD-1124] Rename CreateID to NewID.

* [NOD-1124] Spawn from outside startReceiveLoop and startSendLoop.

* [NOD-1124] Fix a comment.

* [NOD-1124] Replace break with for condition.

* [NOD-1124] Don't disconnect from disconnected peers.

* [NOD-1124] Fix a for condition.

* [NOD-1124] Handle an error.
2020-07-09 09:34:28 +03:00
Svarog
8e624e057e [NOD-1134] Integrate Protocol into main (#788)
* [NOD-1134] Integrate Protocol into main

* [NOD-1134] Fix typo

* [NOD-1134] Added comments

* [NOD-1134] A series of renames to protocol

* [NOD-1134] Fix comment

* [NOD-1134] protocol.ProtocolManager -> Manager

* [NOD-1134] Update comment

* [NOD-1134] protocol.New() -> protocol.NewManager()
2020-07-08 11:52:53 +03:00
stasatdaglabs
eb2642ba90 [NOD-1124] Implement the Flow thread model and architecture (#787)
* [NOD-1124] Begin implementing netadapter.

* [NOD-1124] Implement a stub gRPC server.

* [NOD-1124] Construct the server inside the netadapter.

* [NOD-1124] Rewrite protocol.go to fit with the new netAdapter model.

* [NOD-1124] Wrap a connection in Peer.

* [NOD-1124] Add a peerstate object.

* [NOD-1124] Remove the peerstate object.

* [NOD-1124] Remove router out of Peer.

* [NOD-1124] Tag a TODO.

* [NOD-1124] Return an error out of AddRoute if a route already exists for some message type.

* [NOD-1124] Rename the package grpc to grpcserver.

* [NOD-1124] Extracted newConnectionHandler into a type.

* [NOD-1124] Extract routerInitializer into a type.

* [NOD-1124] Panic/Add TODOs everywhere that isn't implemented.

* [NOD-1124] Improve the NetAdapter comment.

* [NOD-1124] Rename NewConnectionHandler to PeerConnectedHandler.

* [NOD-1124] Rename buildRouterInitializer to newRouterInitializer.

* [NOD-1124] Remove unreachable code.

* [NOD-1124] Make go vet happy.
2020-07-08 10:14:52 +03:00
Svarog
1a43cabfb9 [NOD-1119] Refactor main, and remove p2p layer from it (#785)
* [NOD-1119] Removed all p2p server from all the initialization of server

* [NOD-1119] Removed any calling for p2p server in main

* [NOD-1119] Simplified some functions to not take both dag and dagParams

* [NOD-1119] Simplify creation of mempool and rpc server

* [NOD-1119] Setup indexes in separate function

* [NOD-1119] Some cleanup in NewServer

* [NOD-1119] Fix mempool test

* [NOD-1119] Fix go format

* [NOD-1119] Unexport dag.timeSource

* [NOD-1119] Removed server package + renamed the Server object to Kaspad, and made it minimal

* [NOD-1119] Delete redundant functions

* Unexported kaspad and related methods

* [NOD-1119] Unexported newKaspad

* [NOD-1119] Revise comments and remove redundant function

* [NOD-1119] Make comments of unexported methods lower-case

* [NOD-1119] Some more refactoring in newKaspad
2020-07-06 18:00:28 +03:00
Ori Newman
580e37943b [NOD-1117] Write interfaces for P2P layer (#784)
* [NOD-1117] Write interfaces for P2P layer

* [NOD-1117] Add logs
2020-07-05 12:10:01 +03:00
Ori Newman
749775c7ea [NOD-1098] Change timestamp precision to one millisecond (#778)
* [NOD-1098] Change timestamps to be millisecond precision

* [NOD-1098] Change lock times to use milliseconds

* [NOD-1098] Use milliseconds precision everywhere

* [NOD-1098] Implement type mstime.Time

* [NOD-1098] Fix block 100000 timestamp

* [NOD-1098] Change orphan child to be one millisecond delay after its parent

* [NOD-1098] Remove test that checks if header timestamps have the right precision, and instead add tests for mstime, and fix genesis for testnet and devnet

* [NOD-1098] Fix comment

* [NOD-1098] Fix comment

* [NOD-1098] Fix testnet genesis

* [NOD-1098] Rename UnixMilli->UnixMilliseconds
2020-07-01 16:09:04 +03:00
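Editor's note: NOD-1098 above moves timestamps and lock times to one-millisecond precision and introduces an mstime.Time type with UnixMilliseconds. A hedged sketch of such a wrapper over time.Time; the constructor names are assumptions, not kaspad's exact API.

```go
package mstime

import "time"

// Time wraps time.Time truncated to whole milliseconds, so a timestamp can
// round-trip through its millisecond Unix representation without losing data.
type Time struct {
	time time.Time
}

// Now returns the current time at millisecond precision.
func Now() Time {
	return Time{time: time.Now().Truncate(time.Millisecond)}
}

// UnixMilliseconds returns the milliseconds elapsed since the Unix epoch.
func (t Time) UnixMilliseconds() int64 {
	return t.time.UnixNano() / int64(time.Millisecond)
}

// FromUnixMilliseconds constructs a Time from a millisecond Unix timestamp.
func FromUnixMilliseconds(ms int64) Time {
	return Time{time: time.Unix(ms/1000, (ms%1000)*int64(time.Millisecond))}
}
```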
Mike Zak
8ff8c30fb4 Merge remote-tracking branch 'origin/v0.5.0-dev' into v0.6.0-dev 2020-07-01 15:05:17 +03:00
stasatdaglabs
9893b7396c [NOD-1105] When recovering acceptance index, use a database transaction per block instead of for the entire recovery (#781)
* [NOD-1105] Don't use a database transaction when recovering acceptance index.

* Revert "[NOD-1105] Don't use a database transaction when recovering acceptance index."

This reverts commit da550f8e

* [NOD-1105] When recovering acceptance index, use a database transaction per block instead of for the entire recovery.
2020-07-01 13:43:51 +03:00
Svarog
8c90344f28 [NOD-1103] Fix testnetGenesisTxPayload with 8-byte blue-score (#780)
* [NOD-1103] Fix testnetGenesisTxPayload with 8-byte blue-score

* [NOD-1103] Fix genesis block bytes
2020-07-01 09:21:42 +03:00
Mike Zak
e4955729d2 Merge remote-tracking branch 'origin/v0.5.0-dev' into v0.6.0-dev 2020-06-30 08:48:31 +03:00
Mike Zak
8a7b0314e5 Merge branch 'v0.5.0-dev' of github.com:kaspanet/kaspad into v0.5.0-dev 2020-06-29 12:17:41 +03:00
Mike Zak
e87d00c9cf [NOD-1063] Fix a bug in which a block is pointing directly to a block in the selected parent chain below the reindex root
commit e303efef42
Author: stasatdaglabs <stas@daglabs.com>
Date:   Mon Jun 29 11:59:36 2020 +0300

    [NOD-1063] Rename a test.

commit bfecd57470
Author: stasatdaglabs <stas@daglabs.com>
Date:   Mon Jun 29 11:57:36 2020 +0300

    [NOD-1063] Fix a comment.

commit b969e5922d
Author: stasatdaglabs <stas@daglabs.com>
Date:   Sun Jun 28 18:14:44 2020 +0300

    [NOD-1063] Convert modifiedTreeNode to an out param.

commit 170f9872f4
Author: stasatdaglabs <stas@daglabs.com>
Date:   Sun Jun 28 17:05:01 2020 +0300

    [NOD-1063] Fix a bug in which a block is added to the selected parent chain below the reindex root.
2020-06-29 12:16:47 +03:00
stasatdaglabs
336347b3c5 [NOD-1063] Fix a bug in which a block is pointing directly to a block in the selected parent chain below the reindex root (#777)
* [NOD-1063] Fix a bug in which a block is added to the selected parent chain below the reindex root.

* [NOD-1063] Convert modifiedTreeNode to an out param.

* [NOD-1063] Fix a comment.

* [NOD-1063] Rename a test.
2020-06-29 12:13:51 +03:00
Mike Zak
15d0899406 Update to version v0.6.0 2020-06-29 09:17:45 +03:00
Mike Zak
ad096f9781 Update to version 0.5.0 2020-06-29 08:59:56 +03:00
Elichai Turkel
d3c6a3dffc [NOD-1093] Add hashMerkleRoot to GetBlockTemplateResult (#776)
* Add hashMerkleRoot field to GetBlockTemplateResult

* Use hashMerkleRoot from template instead of recalculating

* Move ParseBlock from kaspaminer into rpcclient

* Rename ParseBlock to ConvertGetBlockTemplateResultToBlock and wrap errors
2020-06-28 16:53:09 +03:00
stasatdaglabs
57b1653383 [NOD-1063] Optimize deep reachability tree insertions (#773)
* [NOD-1055] Give higher priority for requesting missing ancestors when sending a getdata message (#767)

* [NOD-1063] Remove the remainingInterval field.

* [NOD-1063] Add helper functions to reachabilityTreeNode.

* [NOD-1063] Add reachabilityReindexRoot.

* [NOD-1063] Start implementing findNextReachabilityReindexRoot.

* [NOD-1063] Implement findCommonAncestor.

* [NOD-1063] Implement findReachabilityTreeAncestorInChildren.

* [NOD-1063] Add reachabilityReindexWindow.

* [NOD-1063] Fix findReachabilityTreeAncestorInChildren.

* [NOD-1063] Remove BlockDAG reference in findReachabilityTreeAncestorInChildren.

* [NOD-1063] Extract updateReachabilityReindexRoot to a separate function.

* [NOD-1063] Add reachabilityReindexSlack.

* [NOD-1063] Implement splitReindexRootChildrenAroundChosen.

* [NOD-1063] Implement calcReachabilityTreeNodeSizes.

* [NOD-1063] Implement propagateChildIntervals.

* [NOD-1063] Extract tightenReachabilityTreeIntervalsBeforeChosenReindexRootChild and tightenReachabilityTreeIntervalsAfterChosenReindexRootChild to separate functions.

* [NOD-1063] Implement expandReachabilityTreeIntervalInChosenReindexRootChild.

* [NOD-1063] Finished implementing concentrateReachabilityTreeIntervalAroundReindexRootChild.

* [NOD-1063] Begin implementing reindexIntervalsBeforeReindexRoot.

* [NOD-1063] Implement top-level logic of reindexIntervalsBeforeReindexRoot.

* [NOD-1063] Implement reclaimIntervalBeforeChosenChild.

* [NOD-1063] Add a debug log for reindexIntervalsBeforeReindexRoot.

* [NOD-1063] Rename reindexIntervalsBeforeReindexRoot to reindexIntervalsEarlierThanReindexRoot.

* [NOD-1063] Implement reclaimIntervalAfterChosenChild.

* [NOD-1063] Add a debug log for updateReachabilityReindexRoot.

* [NOD-1063] Convert modifiedTreeNodes from slices to sets.

* [NOD-1063] Fix findCommonAncestor.

* [NOD-1063] Fix reindexIntervalsEarlierThanReindexRoot.`

* [NOD-1063] Remove redundant nil conditions.

* [NOD-1063] Make map[*reachabilityTreeNode]struct{} into a type alias with a copyAllFrom method.

* [NOD-1063] Remove setInterval.

* [NOD-1063] Create a new struct to hold reachability stuff called reachabilityTree.

* [NOD-1063] Rename functions under reachabilityTree.

* [NOD-1063] Move reachabilityStore into reachabilityTree.

* [NOD-1063] Move the rest of the functions in reachability.go into the reachabilityTree struct.

* [NOD-1063] Update newReachabilityTree to take an instance of reachabilityStore.

* [NOD-1063] Fix merge errors.

* [NOD-1063] Fix merge errors.

* [NOD-1063] Pass a reference to the dag into reachabilityTree.

* [NOD-1063] Use Wrapf instead of Errorf.

* [NOD-1063] Merge assignments.

* [NOD-1063] Disambiguate a variable name.

* [NOD-1063] Add a test case for intervalBefore.

* [NOD-1063] Simplify splitChildrenAroundChosenChild.

* [NOD-1063] Fold temporary variables into newReachabilityInterval.

* [NOD-1063] Fold more temporary variables into newReachabilityInterval.

* [NOD-1063] Fix a bug in expandIntervalInReindexRootChosenChild.

* [NOD-1063] Remove blockNode from futureCoveringBlock.

* [NOD-1063] Get rid of futureCoveringBlock.

* [NOD-1063] Use findIndex directly in findAncestorAmongChildren.

* [NOD-1063] Make findIndex a bit nicer to use. Also rename it to findAncestorIndexOfNode.

* [NOD-1063] Rename childIntervalAllocationRange to intervalRangeForChildAllocation.

* [NOD-1063] Optimize findCommonAncestor.

* [NOD-1063] In reindexIntervalsBeforeChosenChild, use chosenChild.interval.start - 1 instead of childrenBeforeChosen[len(childrenBeforeChosen)-1].interval.end + 1.

* [NOD-1063] Rename reindexIntervalsBeforeChosenChild to reindexIntervalsBeforeNode.

* [NOD-1063] Add a comment explaining what "the chosen child" is.

* [NOD-1063] In concentrateIntervalAroundReindexRootChosenChild, rename modifiedTreeNodes to allModifiedTreeNodes.

* [NOD-1063] Extract propagateIntervals to a function.

* [NOD-1063] Extract interval "contains" logic to a separate function.

* [NOD-1063] Simplify "looping up" logic in reclaimIntervalXXXChosenChild.

* [NOD-1063] Add comments to reclaimIntervalXXXChosenChild.

* [NOD-1063] Rename copyAllFrom to addAll.

* [NOD-1063] Rename reachabilityStore (the variable) to just store.

* [NOD-1063] Fix an error message.

* [NOD-1063] Reword a comment.

* [NOD-1063] Don't return -1 from findAncestorIndexOfNode.

* [NOD-1063] Extract slackReachabilityIntervalForReclaiming to a constant.

* [NOD-1063] Add a missing condition.

* [NOD-1063] Call isAncestorOf directly in insertNode.

* [NOD-1063] Rename chosenReindexRootChild to reindexRootChosenChild.

* [NOD-1063] Rename treeNodeSet to orderedTreeNodeSet.

* [NOD-1063] Add a disclaimer to orderedTreeNodeSet.

* [NOD-1063] Implement StoreReachabilityReindexRoot and FetchReachabilityReindexRoot.

* [NOD-1063] Move storing the reindex root to within reachabilityTree.

* [NOD-1063] Remove isAncestorOf from reachabilityInterval.

* [NOD-1063] Add a comment about graph theory conventions.

* [NOD-1063] Fix tests.

* [NOD-1063] Change inclusion in isAncestorOf functions.

* [NOD-1063] Rename a test.

* [NOD-1063] Implement TestIsInFuture.

* [NOD-1063] Fix error messages in TestIsInFuture.

* [NOD-1063] Fix error messages in TestIsInFuture.

* [NOD-1063] Rename isInSelectedParentChain to isInSelectedParentChainOf.

* [NOD-1063] Rename isInFuture to isInPast.

* [NOD-1063] Expand on a comment.

* [NOD-1063] Rename modifiedTreeNodes.

* [NOD-1063] Implement test: TestReindexIntervalsEarlierThanReindexRoot.

* [NOD-1063] Implement test: TestUpdateReindexRoot.

* [NOD-1063] Explain a check.

* [NOD-1063] Use a method instead of calling reachabilityStore.loaded directly.

* [NOD-1063] Lowercased an error message.

* [NOD-1063] Fix failing test.

Co-authored-by: Ori Newman <orinewman1@gmail.com>
2020-06-28 14:27:01 +03:00
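Editor's note: the reachability work above allots each tree node an index interval such that every descendant's interval nests inside its ancestor's, which is what makes ancestry checks cheap. A hedged sketch of the containment check only; the structs are simplified stand-ins, and whether the check is inclusive or exclusive is exactly what several bullets above debate.

```go
package reachability

// reachabilityInterval is the [start, end] range allotted to a tree node;
// every descendant's interval is contained within its ancestor's interval.
type reachabilityInterval struct {
	start uint64
	end   uint64
}

// contains reports whether other lies entirely inside ri.
func (ri *reachabilityInterval) contains(other *reachabilityInterval) bool {
	return ri.start <= other.start && other.end <= ri.end
}

type reachabilityTreeNode struct {
	interval reachabilityInterval
}

// isAncestorOf reports whether node is an ancestor of other by interval
// containment. Treating a node as not its own ancestor (the exclusive
// convention) is one of the choices the commits above discuss.
func (node *reachabilityTreeNode) isAncestorOf(other *reachabilityTreeNode) bool {
	if node == other {
		return false
	}
	return node.interval.contains(&other.interval)
}
```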
Ori Newman
a86255ba51 [NOD-1088] Rename RejectReasion to RejectReason (#775) 2020-06-25 18:08:58 +03:00
Ori Newman
0a7a4ce7d6 [NOD-1085] Use all nonce space in kaspaminer (#774) 2020-06-24 11:12:57 +03:00
Ori Newman
4c3735a897 [NOD-906] Move transaction mass limit from CheckTransactionSanity to mempool (#772)
* [NOD-906] Move transaction mass limit from CheckTransactionSanity to mempool

* [NOD-906] Fix tests

* [NOD-906] Add spaces to comments
2020-06-22 17:20:17 +03:00
Ori Newman
22fd38c053 [NOD-1060] Don't sync from misbehaving peer (#768)
* [NOD-1038] Give higher priority for requesting missing ancestors when sending a getdata message (#767)

* [NOD-1060] Don't sync from peers that break the netsync protocol
2020-06-22 17:15:03 +03:00
Ori Newman
895f67a8d4 [NOD-1035] Use reachability to check finality (#763)
* [NOD-1034] Use reachability to check finality

* [NOD-1034] Add comments and rename variables

* [NOD-1034] Fix comments

* [NOD-1034] Rename checkFinalityRules->checkFinalityViolation

* [NOD-1034] Change isAncestorOf to be exclusive

* [NOD-1034] Make isAncestorOf exclusive and also more explicit, and add TestReachabilityTreeNodeIsAncestorOf
2020-06-22 17:14:59 +03:00
Svarog
56e807b663 [NOD-999] Set TargetOutbound=0 to prevent rogue connectionRequests except the one requested (#771) 2020-06-22 14:20:12 +03:00
Mike Zak
af64c7dc2d Merge remote-tracking branch 'origin/v0.4.1-dev' into v0.5.0-dev 2020-06-21 16:15:05 +03:00
Svarog
1e6458973b [NOD-1064] Don't send GetBlockInvsMsg with lowHash = nil (#769) 2020-06-21 09:09:07 +03:00
Ori Newman
7bf8bb5436 [NOD-1017] Move peers.json to db (#733)
* [NOD-1017] Move peers.json to db

* [NOD-1017] Fix tests

* [NOD-1017] Change comments and rename variables

* [NOD-1017] Separate to smaller functions

* [NOD-1017] Renames

* [NOD-1017] Name newAddrManagerForTest return params

* [NOD-1017] Fix handling of non existing peersState

* [NOD-1017] Add getPeersState rpc command

* [NOD-1017] Fix comment

* [NOD-1017] Split long line

* [NOD-1017] Rename getPeersState->getPeerAddresses

* [NOD-1017] Rename getPeerInfo->getConnectedPeerInfo
2020-06-18 12:12:49 +03:00
Svarog
1358911d95 [NOD-1064] Don't send GetBlockInvsMsg with lowHash = nil (#769) 2020-06-17 14:18:00 +03:00
Ori Newman
1271d2f113 [NOD-1038] Give higher priority for requesting missing ancestors when sending a getdata message (#767) 2020-06-17 11:52:10 +03:00
Ori Newman
bc0227b49b [NOD-1059] Always call sm.restartSyncIfNeeded() when getting selectedTip message (#766) 2020-06-16 16:51:38 +03:00
Ori Newman
dc643c2d76 [NOD-833] Remove getBlockTemplate capabilites and move mining address to getBlockTemplate (#762)
* [NOD-833] Remove getBlockTemplate capabilites and move mining address to getBlockTemplate

* [NOD-833] Fix tests

* [NOD-833] Break long lines
2020-06-16 11:01:06 +03:00
Ori Newman
0744e8ebc0 [NOD-1042] Ignore very high orphans (#761)
* [NOD-530] Remove coinbase inputs and add blue score to payload

* [NOD-1042] Ignore very high orphans

* [NOD-1042] Add ban score to an orphan with malformed blue score

* [NOD-1042] Fix log
2020-06-15 16:08:25 +03:00
Ori Newman
d4c9fdf6ac [NOD-614] Add ban score (#760)
* [NOD-614] Copy bitcoin-core ban score policy

* [NOD-614] Add ban score to disconnects

* [NOD-614] Fix wrong branch of AddBanScore

* [NOD-614] Add ban score on sending too many addresses

* [NOD-614] Add comments

* [NOD-614] Remove redundant reject messages

* [NOD-614] Fix log message

* [NOD-614] Ban every node that sends invalid invs

* [NOD-614] Make constants for ban scores
2020-06-15 12:12:38 +03:00
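Editor's note: NOD-614 above copies the bitcoin-core-style ban-score policy: a persistent component that only grows plus a transient component that decays over time. A hedged sketch of that decay; the half-life, field names, and the idea that a peer is banned once the combined score crosses some constant threshold are assumptions for illustration.

```go
package banscore

import (
	"math"
	"time"
)

// halfLife controls how fast the transient part decays; illustrative only.
const halfLife = time.Minute

// DynamicBanScore combines a persistent score with a decaying transient score.
type DynamicBanScore struct {
	persistent uint32
	transient  float64
	lastUnix   int64
}

// Increase raises both components and returns the combined score.
func (s *DynamicBanScore) Increase(persistent, transient uint32) uint32 {
	now := time.Now().Unix()
	s.persistent += persistent
	s.transient = s.decayedTransient(now) + float64(transient)
	s.lastUnix = now
	return s.persistent + uint32(s.transient)
}

// Int returns the current combined score.
func (s *DynamicBanScore) Int() uint32 {
	return s.persistent + uint32(s.decayedTransient(time.Now().Unix()))
}

// decayedTransient applies exponential decay with the configured half-life.
func (s *DynamicBanScore) decayedTransient(nowUnix int64) float64 {
	if s.transient == 0 {
		return 0
	}
	elapsed := float64(nowUnix - s.lastUnix)
	return s.transient * math.Pow(0.5, elapsed/halfLife.Seconds())
}
```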
stasatdaglabs
829979b6c7 [NOD-1007] Split checkBlockSanity subroutines (#743)
* [NOD-1007] Split checkBlockSanity subroutines.

* [NOD-1007] Put back the comments about performance.

* [NOD-1007] Make all the functions in checkBlockSanity take a *util.Block.

* [NOD-1007] Rename checkBlockTransactionsOrderedBySubnetwork to checkBlockTransactionOrder.

* [NOD-1007] Move a comment up a scope level.
2020-06-15 11:07:52 +03:00
Mike Zak
32cd29bf70 Merge remote-tracking branch 'origin/v0.4.1-dev' into v0.5.0-dev 2020-06-14 12:51:59 +03:00
stasatdaglabs
03cb6cbd4d [NOD-1048] Use a smaller writeBuffer and use disableSeeksCompaction directly. (#759) 2020-06-11 16:11:22 +03:00
Ori Newman
ba4a89488e [NOD-530] Remove coinbase inputs and add blue score to payload (#752)
* [NOD-530] Remove coinbase inputs and add blue score to payload

* [NOD-530] Fix comment

* [NOD-530] Change util.Block private fields comments
2020-06-11 15:54:11 +03:00
Ori Newman
b0d4a92e47 [NOD-1046] Delete redundant conversion from rule error (#755) 2020-06-11 12:19:49 +03:00
stasatdaglabs
3e5a840c5a [NOD-1052] Add a lock around clearOldEntries to protect against concurrent access of utxoDiffStore.loaded. (#758) 2020-06-11 11:56:25 +03:00
Ori Newman
d6d34238d2 [NOD-1049] Allow empty addr messages (#753) 2020-06-10 16:13:13 +03:00
Ori Newman
8bbced5925 [NOD-1051] Don't disconnect from sync peer if it sends an orphan (#757) 2020-06-10 16:05:48 +03:00
stasatdaglabs
20da1b9c9a [NOD-1048] Make leveldb compaction much less frequent (#756)
* [NOD-1048] Make leveldb compaction much less frequent. Also, allocate an entire gigabyte for leveldb's blockCache and writeBuffer.

* [NOD-1048] Implement changing the options for testing purposes.

* [NOD-1048] Rename originalOptions to originalLDBOptions.

* [NOD-1048] Add a comment.
2020-06-10 16:05:02 +03:00
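Editor's note: the NOD-1048 commits above (this one and the later "smaller writeBuffer / disableSeeksCompaction" follow-up) tune goleveldb to compact less often. A hedged sketch of what such tuning looks like with goleveldb's opt.Options; the concrete sizes below are illustrative, not kaspad's actual values.

```go
package ldb

import (
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

// Options returns tuned goleveldb options in the spirit of NOD-1048: a large
// block cache, a moderate write buffer, and seek-triggered compaction disabled.
func Options() *opt.Options {
	return &opt.Options{
		BlockCacheCapacity:     512 * opt.MiB,
		WriteBuffer:            128 * opt.MiB,
		DisableSeeksCompaction: true,
	}
}

// Open opens (or creates) a leveldb database at path with the tuned options.
func Open(path string) (*leveldb.DB, error) {
	return leveldb.OpenFile(path, Options())
}
```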
Ori Newman
b6a6e577c4 [NOD-1013] Don't block handleBlockDAGNotification when calling peerNotifier (#749)
* [NOD-1013] Don't block handleBlockDAGNotification when calling peerNotifier

* [NOD-1013] Add comment
2020-06-09 12:12:18 +03:00
Mike Zak
84888221ae Merge remote-tracking branch 'origin/v0.4.1-dev' into v0.5.0-dev 2020-06-08 12:23:33 +03:00
stasatdaglabs
222477b33e [NOD-1040] Don't remove DAG tips from the diffStore's loaded set (#750)
* [NOD-1040] Don't remove DAG tips from the diffStore's loaded set

* [NOD-1040] Remove a debug log.
2020-06-08 12:14:58 +03:00
Mike Zak
4a50d94633 Update to v0.4.1 2020-06-07 17:54:30 +03:00
stasatdaglabs
b4dba782fb [NOD-1040] Increase maxBlueScoreDifferenceToKeepLoaded to 1500 (#746)
* [NOD-1040] Don't remove DAG tips from the diffStore's loaded set

* [NOD-1040] Fix TestClearOldEntries.

* Revert "[NOD-1040] Fix TestClearOldEntries."

This reverts commit e0705814

* Revert "[NOD-1040] Don't remove DAG tips from the diffStore's loaded set"

This reverts commit d3eba1c1

* [NOD-1040] Increase maxBlueScoreDifferenceToKeepLoaded to 1500.
2020-06-07 17:50:57 +03:00
stasatdaglabs
9c78a797e4 [NOD-1041] Call outboundPeerConnected and outboundPeerConnectionFailed directly instead of routing them through peerHandler (#748)
* [NOD-1041] Fix a deadlock between connHandler and peerHandler.

* [NOD-1041] Simplified the fix.
2020-06-07 16:35:48 +03:00
Ori Newman
35c733a4c1 [NOD-970] Add isSyncing flag (#747)
* [NOD-970] Add isSyncing flag

* [NOD-970] Rename shouldSendSelectedTip->peerShouldSendSelectedTip
2020-06-07 16:31:17 +03:00
Mike Zak
e5810d023e Merge remote-tracking branch 'origin/v0.4.1-dev' into v0.5.0-dev 2020-06-07 14:21:49 +03:00
stasatdaglabs
96930bd6ea [NOD-1039] Remove the call to SetGCPercent. (#745) 2020-06-07 09:19:28 +03:00
Mike Zak
e09ce32146 Merge remote-tracking branch 'origin/v0.4.1-dev' into v0.5.0-dev 2020-06-04 15:11:40 +03:00
stasatdaglabs
d15c009b3c [NOD-1030] Disconnect from syncPeers that send orphan blocks (#744)
* [NOD-1030] Disconnect from syncPeers that send orphan blocks.

* [NOD-1030] Remove debug log.

* [NOD-1030] Remove unnecessary call to stopSyncFromPeer.
2020-06-04 15:11:05 +03:00
stasatdaglabs
95c8b8e9d8 [NOD-1023] Rename isCurrent/current to isSynced/synced (#742)
* [NOD-1023] Rename BlockDAG.isCurrent to isSynced.

* [NOD-1023] Rename SyncManager.current to synced.

* [NOD-1023] Fix comments.
2020-06-03 16:04:14 +03:00
stasatdaglabs
2d798a5611 [NOD-1020] Do send addr response to getaddr messages even if there aren't any addresses to send. (#740) 2020-06-01 14:09:18 +03:00
stasatdaglabs
3a22249be9 [NOD-1012] Fix erroneous partial node check (#739)
* [NOD-1012] Fix bad partial node check.

* [NOD-1012] Fix unit tests.
2020-05-31 14:13:30 +03:00
stasatdaglabs
a4c1898624 [NOD-1012] Disable subnetworks (#731)
* [NOD-1012] Disallow non-native/coinbase transactions.

* [NOD-1012] Fix logic error.

* [NOD-1012] Fix/skip tests and remove --subnetwork.

* [NOD-1012] Disconnect from non-native peers.

* [NOD-1012] Don't skip subnetwork tests.

* [NOD-1012] Use EnableNonNativeSubnetworks in peer.go.

* [NOD-1012] Set EnableNonNativeSubnetworks = true in the tests that need them rather than by default in Simnet.
2020-05-31 10:50:46 +03:00
Ori Newman
672f02490a [NOD-763] Change genesis version (#737) 2020-05-28 09:55:59 +03:00
stasatdaglabs
fc00275d9c [NOD-553] Get rid of base58 (#735)
* [NOD-553] Get rid of wif.

* [NOD-553] Get rid of base58.
2020-05-27 17:37:03 +03:00
Ori Newman
6219b93430 [NOD-1018] Exit after 2 minutes if graceful shutdown fails (#732)
* [NOD-1018] Exit after 2 minutes if graceful shutdown fails

* [NOD-1018] Change time.Tick to time.After
2020-05-25 14:30:43 +03:00
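Editor's note: NOD-1018 above exits after 2 minutes if graceful shutdown fails, and swaps time.Tick for time.After. A hedged sketch of that pattern; the function name is an assumption, and time.After is the natural fit for a one-shot deadline since time.Tick leaks a ticker that can never be stopped, which may be what the second bullet refers to.

```go
package main

import (
	"fmt"
	"os"
	"time"
)

// gracefulShutdownTimeout matches the two-minute limit described in NOD-1018.
const gracefulShutdownTimeout = 2 * time.Minute

// waitForShutdownOrTimeout waits for the normal shutdown path to finish; if it
// takes longer than gracefulShutdownTimeout, the process is terminated.
func waitForShutdownOrTimeout(shutdownDone <-chan struct{}) {
	select {
	case <-shutdownDone:
		// Graceful shutdown completed in time.
	case <-time.After(gracefulShutdownTimeout):
		fmt.Fprintln(os.Stderr, "graceful shutdown timed out - terminating")
		os.Exit(1)
	}
}
```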
Ori Newman
3a4571d671 [NOD-965] Make LookupNode return boolean (#729)
* [NOD-965] Make dag.index.LookupNode return false if node is not found

* [NOD-965] Rename blockDAG->dag

* [NOD-965] Remove irrelevant test

* [NOD-965] Use bi.index's ok in LookupNode
2020-05-25 12:51:30 +03:00
Ori Newman
96052ac69a [NOD-809] Change fee rate to fee per megagram (#730) 2020-05-24 16:59:37 +03:00
Ori Newman
6463a4b5d0 [NOD-1011] Don't cache isSynced on getBlockTemplate (#728) 2020-05-20 14:38:24 +03:00
Svarog
0ca127853d [NOD-974] UTXO-Commitments shouldn't include the new block's transactions (#727)
* [NOD-975] Don't include block transactions inside its UTXO commitment (#711)

* [NOD-975] Don't include block transactions inside its UTXO commitment.

* Revert "[NOD-975] Don't include block transactions inside its UTXO commitment."

This reverts commit b1a2ae66

* [NOD-975] Implement a (currently failing) TestUTXOCommitment.

* [NOD-975] Remove the block's own transactions from calcMultiset.

* [NOD-975] Simplify calcMultiset.

* [NOD-975] Add a comment on top of selectedParentMultiset.

* [NOD-975] Use pastUTXO instead of selectedParentUTXO in calcMultiset.

* [NOD-975] Use selected parent's pastUTXO instead of this block's pastUTXO in calcMultiset.

* [NOD-975] Extract selectedParentPastUTXO to a separate function.

* [NOD-975] Remove selectedParentUTXO from pastUTXO's return values.

* [NOD-975] Add txs to TestUTXOCommitment.

* [NOD-975] Remove debug code.

* [NOD-975] In pastUTXOMultiSet, copy the multiset to avoid modifying the original.

* [NOD-975] Add a test: TestPastUTXOMultiSet.

* [NOD-975] Improve TestPastUTXOMultiSet.

* [NOD-976] Implement tests for UTXO commitments (#715)

* [NOD-975] Don't include block transactions inside its UTXO commitment.

* Revert "[NOD-975] Don't include block transactions inside its UTXO commitment."

This reverts commit b1a2ae66

* [NOD-975] Implement a (currently failing) TestUTXOCommitment.

* [NOD-975] Remove the block's own transactions from calcMultiset.

* [NOD-975] Simplify calcMultiset.

* [NOD-975] Add a comment on top of selectedParentMultiset.

* [NOD-975] Use pastUTXO instead of selectedParentUTXO in calcMultiset.

* [NOD-975] Use selected parent's pastUTXO instead of this block's pastUTXO in calcMultiset.

* [NOD-975] Extract selectedParentPastUTXO to a separate function.

* [NOD-975] Remove selectedParentUTXO from pastUTXO's return values.

* [NOD-975] Add txs to TestUTXOCommitment.

* [NOD-976] Generate new blockDB blocks for tests.

* [NOD-976] Fix TestBlueBlockWindow.

* [NOD-976] Fix TestIsKnownBlock.

* [NOD-976] Fix TestGHOSTDAG.

* [NOD-976] Fix TestUTXOCommitment.

* [NOD-976] Remove kaka.

* [NOD-990] Save utxo diffs of past UTXO (#724)

* [NOD-990] Save UTXO diffs of past UTXO

* [NOD-990] Check for block double spends with its past instead of building its UTXO

* [NOD-990] Call resetExtraNonceForTest in TestUTXOCommitment

* [NOD-990] Remove redundant functions diffFromTx and diffFromAcceptedTx

* [NOD-990] Rename i->j to avoid confusion

* [NOD-990] Break long lines

* [NOD-990] Rename ErrDoubleSpendsWithBlockTransaction -> ErrDoubleSpendInSameBlock

* [NOD-990] Make ErrDoubleSpendInSameBlock more detailed

* [NOD-990] Add testProcessBlockRuleError

* [NOD-990] Fix comment

* [NOD-990] Add test for duplicate transactions on the same block

* [NOD-990] Use pkg/errors on panic

* [NOD-990] Make cloneWithoutBase method

* [NOD-990] Break long lines

* [NOD-990] Fix comment

* [NOD-990] Fix wrong variable names

* [NOD-990] Fix comment

* [NOD-974] Generate new test blocks.

* [NOD-974] Fix TestIsKnownBlock and TestGHOSTDAG.

* [NOD-974] Fix TestUTXOCommitment.

* [NOD-974] Fix comments

Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
Co-authored-by: stasatdaglabs <stas@daglabs.com>
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2020-05-20 12:43:52 +03:00
stasatdaglabs
b884ba128e [NOD-1008] In utxoDiffStore, keep diffData in memory for blocks whose blueScore is at least virtualBlueScore - X (#726)
* [NOD-1008] Use *blockNode as keys in utxoDiffStore.loaded and .dirty.

* [NOD-1008] Implement clearOldEntries.

* [NOD-1008] Increase maxBlueScoreDifferenceToKeepLoaded to 100.

* [NOD-1008] Fix a typo.

* [NOD-1008] Add clearOldEntries to saveChangesFromBlock.

* [NOD-1008] Begin implementing TestClearOldEntries.

* [NOD-1008] Finish implementing TestClearOldEntries.

* [NOD-1008] Fix a comment.

* [NOD-1008] Rename diffDataByHash to diffDataByBlockNode.

* [NOD-1008] Use dag.TipHashes instead of tracking tips manually.
2020-05-20 10:47:01 +03:00
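Editor's note: NOD-1008 above keeps diffData in memory only for blocks whose blue score is within some distance of the virtual's blue score, and a later NOD-1040 commit additionally keeps the DAG tips loaded. A hedged sketch of that eviction rule; the types and the constant value are stand-ins (the commits themselves move it from 100 to 1500).

```go
package blockdag

// maxBlueScoreDifferenceToKeepLoaded mirrors the constant the commits tune;
// the value here is illustrative only.
const maxBlueScoreDifferenceToKeepLoaded = 1500

// blockNode and diffData are simplified stand-ins for the real kaspad types.
type blockNode struct {
	blueScore uint64
}

type diffData struct{}

type utxoDiffStore struct {
	loaded map[*blockNode]*diffData
}

// clearOldEntries evicts loaded diff data for blocks that fell too far below
// the virtual block's blue score, while keeping the current DAG tips loaded.
func (s *utxoDiffStore) clearOldEntries(virtualBlueScore uint64, tips map[*blockNode]struct{}) {
	var minBlueScoreToKeep uint64
	if virtualBlueScore > maxBlueScoreDifferenceToKeepLoaded {
		minBlueScoreToKeep = virtualBlueScore - maxBlueScoreDifferenceToKeepLoaded
	}
	for node := range s.loaded {
		if _, isTip := tips[node]; isTip {
			continue
		}
		if node.blueScore < minBlueScoreToKeep {
			delete(s.loaded, node)
		}
	}
}
```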
Svarog
fe25ea3d8c [NOD-1001] Make an error in Peer.start() stop the connection process from continuing. (#723)
* [NOD-1001] Move side-effects of connection out of OnVersion

* [NOD-1001] Make AssociateConnection synchronous

* [NOD-1001] Wait for 2 veracks in TestPeerListeners

* [NOD-1001] Made AssociateConnection return error

* [NOD-1001] Remove temporary logs

* [NOD-1001] Fix typos and find-and-replace errors

* [NOD-1001] Move example_test back out of peer package + fix some typos

* [NOD-1001] Use correct remote address in setupPeersWithConns and return to address string literals

* [NOD-1001] Use separate verack channels for inPeer and outPeer

* [NOD-1001] Make verack channels buffered

* [NOD-1001] Removed temporary sleep of 1 second

* [NOD-1001] Removed redundant //
2020-05-20 10:36:44 +03:00
stasatdaglabs
e0f587f599 [NOD-877] Separate UTXO header code to two fields in serialization: blue score and packed flags (#725)
* [NOD-877] In UTXOEntry serialization, extract packedFlags out to a separate Uint8.

* [NOD-877] Generate new test blocks.

* [NOD-877] Fix TestIsKnownBlock.

* [NOD-877] Fix TestBlueBlockWindow.

* [NOD-877] Fix TestUTXOSerialization and TestGHOSTDAG.

* [NOD-877] Fix TestVirtualBlock.
2020-05-19 17:56:07 +03:00
stasatdaglabs
e9e1ef4772 [NOD-1006] Make use of a pool to avoid excessive allocation of big.Ints (#722)
* [NOD-1006] Make CompactToBig take an out param so that we can reuse the same big.Int in averageTarget.

* [NOD-1006] Fix merge errors.

* [NOD-1006] Use CompactToBigWithDestination only in averageTarget.

* [NOD-1006] Fix refactor errors.

* [NOD-1006] Fix refactor errors.

* [NOD-1006] Optimize averageTarget with a big.Int pool.

* [NOD-1006] Defer releasing bigInts.

* [NOD-1006] Use a pool for requiredDifficulty as well.

* [NOD-1006] Move the big int pool to utils.

* [NOD-1006] Remove unnecessary line.
2020-05-19 16:29:21 +03:00
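Editor's note: NOD-1006 above reuses big.Int allocations in averageTarget and the required-difficulty calculation via a pool, releasing values (with defer) when done. A minimal sketch of such a pool built on sync.Pool; the function names are assumptions, not kaspad's exact API.

```go
package bigintpool

import (
	"math/big"
	"sync"
)

var pool = sync.Pool{
	New: func() interface{} {
		return new(big.Int)
	},
}

// Acquire returns a big.Int from the pool, reset to the given int64 value.
func Acquire(value int64) *big.Int {
	result := pool.Get().(*big.Int)
	result.SetInt64(value)
	return result
}

// Release puts a big.Int back into the pool so later calculations can reuse
// its allocated storage. Callers typically `defer Release(x)` right after
// acquiring, matching the "Defer releasing bigInts" bullet above.
func Release(toRelease *big.Int) {
	toRelease.SetInt64(0)
	pool.Put(toRelease)
}
```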
Svarog
eb8b841850 [NOD-1005] Use sm.isSynced to check whether should request blocks from invs (#721)
* [NOD-1005] Moved isSyncedForMining to netsync manager, and renamed to isSynced + removed isCurrent

* [NOD-1005] Use sm.isSynced to check whether should request blocks from invs

* [NOD-1005] Use private version of isSynced to avoid infinite loop

* [NOD-1005] Fix a few typos
2020-05-18 10:42:58 +03:00
Svarog
28681affda [NOD-994] Greatly increase the amount of logs kaspad keeps before rotating them away (#720)
* [NOD-994] Greatly increased the amount of logs kaspad keeps before rotating them away

* [NOD-994] Actually increase the log file

* [NOD-994] Update comments

* [NOD-994] Fix typo
2020-05-14 10:58:46 +03:00
Svarog
378f0b659a [NOD-993] Get rid of redundant error types + Use %+v when printing startup errors (#719)
* [NOD-993] Use %+v when printing errors

* [NOD-993] Get rid of AssertError

* [NOD-993] Made ruleError use github.com/pkg/errors

* [NOD-993] remove redundant TODO

* [NOD-993] remove redundant Comment

* [NOD-993] Removed DeploymentError
2020-05-13 17:27:53 +03:00
stasatdaglabs
35b943e04f [NOD-996] Disable kaspad logs in TestScripts (#718)
* [NOD-996] Disable kaspad logs in TestScripts.

* [NOD-996] Return the log level to its original state after TestScripts is done.
2020-05-13 15:57:30 +03:00
stasatdaglabs
65f75c17fc [NOD-982] Log message with level WARN when getting MsgReject (#717)
* [NOD-982] Log message with level WARN when getting MsgReject.

* [NOD-982] Fix wrong logLevel in Write and Writef.

* [NOD-982] Use Write and Writef inside Trace, Tracef, Debug, Debugf, etc...

* [NOD-982] Move peer message logging to a separate file.
2020-05-13 10:03:37 +03:00
stasatdaglabs
806eab817c [NOD-820] When the node isn't synced, make getBlockTemplate return a boolean isSynced instead of an error (#716)
* [NOD-820] Add IsSynced to GetBlockTemplateResult.

* [NOD-820] Add isSynced to the help file.

* [NOD-820] Add MineWhenNotSynced to the kaspaminer config.

* [NOD-820] Implement miner MineWhenNotSynced logic.

* [NOD-820] Fixed capitalization in an error message.
2020-05-12 15:08:24 +03:00
Ori Newman
585510d76c [NOD-847] Fix CIDR protection and prevent connecting to the same address twice (#714)
* [NOD-847] Fix CIDR protection and prevent connecting to the same address twice

* [NOD-847] Fix Tests

* [NOD-847] Add TestDuplicateOutboundConnections and TestSameOutboundGroupConnections

* [NOD-847] Fix TestRetryPermanent, TestNetworkFailure and wait 10 ms before restoring the previous active config

* [NOD-847] Add "is" before boolean methods

* [NOD-847] Fix Connect's lock

* [NOD-847] Make numAddressesInAddressManager an argument

* [NOD-847] Add teardown function for address manager

* [NOD-847] Add stack trace to ConnManager errors

* [NOD-847] Change emptyAddressManagerForTest->createEmptyAddressManagerForTest and fix typos

* [NOD-847] Fix wrong test name for addressManagerForTest

* [NOD-847] Change error message if New fails

* [NOD-847] Add new line on releaseAddress

* [NOD-847] Always try to reconnect on disconnect
2020-05-12 13:47:15 +03:00
Svarog
c8a381d5bb [NOD-981] Fixed error message when both --notls and --rpccert are omitted (#713) 2020-05-06 13:05:48 +03:00
Ori Newman
3d04e6bded [NOD-943] Add acceptedBlockHashes to GetBlockVerboseResult (#708)
* [NOD-943] Add acceptedBlockHashes to GetBlockVerboseResult

* [NOD-943] Remove intermediate variables

* [NOD-943] Add block hash to error message

* [NOD-943] Change comment
2020-05-05 17:26:54 +03:00
Svarog
f8e851a6ed [NOD-968] Wrap all ldb errors with pkg/errors (#712) 2020-05-04 16:33:23 +03:00
stasatdaglabs
e70a615135 [NOD-872] Defer all currently undeferred unlocks in the database package (#706)
* [NOD-872] Defer unlocks in write.go.

* [NOD-872] Defer unlocks in rollback.go.

* [NOD-872] Defer unlocks in read.go.

* [NOD-872] Fix duplicate RUnlock.

* [NOD-872] Remove a redundant empty line.

* [NOD-872] Extract closeCurrentWriteCursorFile to a separate method.
2020-05-04 13:07:40 +03:00
Ori Newman
73ad0adf72 [NOD-913] Use sync rate in getBlockTemplate (#705)
* [NOD-913] Use sync rate in getBlockTemplate

* [NOD-913] Rename addBlockProcessTime->addBlockProcessTimestamp, maxDiff->maxTipAge

* [NOD-913] Pass maxDeviation as an argument

* [NOD-913] Change maxDeviation to +5%

* [NOD-913] Rename variables

* [NOD-913] Rename variables and functions and change comments

* [NOD-913] Split addBlockProcessingTimestamp
2020-05-04 09:09:23 +03:00
stasatdaglabs
5b74e51db1 [NOD-956] Increase K to 15. (#710) 2020-05-03 14:56:47 +03:00
stasatdaglabs
2e2492cc5d [NOD-849] Database tests (#695)
* [NOD-849] Cover ffldb/transaction with tests.

* [NOD-849] Cover cursor.go with tests.

* [NOD-849] Cover ldb/transaction with tests.

* [NOD-849] Cover location.go with tests.

* [NOD-849] Write TestFlatFileMultiFileRollback.

* [NOD-849] Fix merge errors.

* [NOD-849] Fix a comment.

* [NOD-849] Fix a comment.

* [NOD-849] Add a test that makes sure that files get deleted on rollback.

* [NOD-849] Add a test that makes sure that serializeLocation serialized to an expected value.

* [NOD-849] Improve TestFlatFileLocationDeserializationErrors.

* [NOD-849] Fix a copy+paste error.

* [NOD-849] Explain maxFileSize = 16.

* [NOD-849] Remove redundant RollbackUnlessClosed call.

* [NOD-849] Extract bucket to a variable in TestCursorSanity.

* [NOD-849] Rename TestKeyValueTransactionCommit to TestTransactionCommitForLevelDBMethods.

* [NOD-849] Extract prepareXXX into separate functions.

* [NOD-849] Simplify function calls in TestTransactionCloseErrors.

* [NOD-849] Extract validateCurrentCursorKeyAndValue to a separate function.

* [NOD-849] Add a comment over TestCursorSanity.

* [NOD-849] Add a comment over function in TestCursorCloseErrors.

* [NOD-849] Add a comment over function in TestTransactionCloseErrors.

* [NOD-849] Separate TestTransactionCloseErrors to TestTransactionCommitErrors and TestTransactionRollbackErrors.

* [NOD-849] Separate TestTransactionCloseErrors to TestTransactionCommitErrors and TestTransactionRollbackErrors.

* [NOD-849] Fix copy+paste error in comments.

* [NOD-849] Fix merge errors.

* [NOD-849] Merge TestTransactionCommitErrors and TestTransactionRollbackErrors into TestTransactionCloseErrors.

* [NOD-849] Move prepareDatabaseForTest into ffldb_test.go.

* [NOD-849] Add cursorKey to Value error messages in validateCurrentCursorKeyAndValue.
2020-05-03 12:19:09 +03:00
Ori Newman
2ef5c2cbac [NOD-915] Check if lockableFile's underlying file is nil before closing it (#709) 2020-04-30 14:43:38 +03:00
Ori Newman
3c89e1f7b3 [NOD-952] Fix nil dereference bug on outboundPeerConnectionFailed (#704) 2020-04-27 13:50:09 +03:00
stasatdaglabs
2910724b49 [NOD-934] Fix addresses not getting their retry attempt counter incremented if they fail to connect (#702)
* [NOD-934] Fix addresses not getting their retry attempt counter incremented if they fail to connect.

* [NOD-922] Inline parseNetAddress.

* [NOD-922] Fix debug logs.
2020-04-23 17:01:09 +03:00
stasatdaglabs
3af945692e [NOD-922] Panic from cursor Next and First (#703)
* [NOD-922] Panic in Cursor First and Next if the cursor is closed.

* [NOD-922] Fix broken tests.

* [NOD-922] Fix a comment.
2020-04-23 16:55:25 +03:00
stasatdaglabs
5fe9dae557 [NOD-863] Write interface tests for the new database (#697)
* [NOD-863] Write TestCursorNext.

* [NOD-863] Write TestCursorFirst.

* [NOD-863] Fix merge errors.

* [NOD-863] Add TestCursorSeek.

* [NOD-863] Add TestCursorCloseErrors.

* [NOD-863] Add TestCursorCloseFirstAndNext.

* [NOD-863] Add TestDataAccessorPut.

* [NOD-863] Add TestDataAccessorGet.

* [NOD-863] Add TestDataAccessorHas.

* [NOD-863] Add TestDatabaseDelete.

* [NOD-863] Add TestDatabaseAppendToStoreAndRetrieveFromStore.

* [NOD-863] Add TestTransactionAppendToStoreAndRetrieveFromStore.

* [NOD-863] Add TestTransactionDelete.

* [NOD-863] Add TestTransactionHas.

* [NOD-863] Add TestTransactionGet.

* [NOD-863] Add TestTransactionPut.

* [NOD-863] Move cursor tests to the bottom of interface_test.go.

* [NOD-863] Move interface_test.go to a database_test package.

* [NOD-863] Make each test in interface_test.go run for every database driver. Currently, only ffldb.

* [NOD-863] Make each cursor test in interface_test.go run for every database driver. Currently, only ffldb.

* [NOD-863] Split interface_test.go into separate files.

* [NOD-863] Rename interface_test.go to common_test.go.

* [NOD-863] Extract testForAllDatabaseTypes to a separate function.

* [NOD-863] Reorganize how test data gets added to the database.

* [NOD-863] Add explanations about testForAllDatabaseTypes.

* [NOD-863] Add tests that make sure that database changes don't affect previously opened transactions.

* [NOD-863] Extract databasePrepareFunc to a type alias.

* [NOD-863] Fix comments.

* [NOD-863] Add cursor exhaustion test to testCursorFirst.

* [NOD-863] Add cursor Next clause to testCursorSeek.

* [NOD-863] Add additional verification to testDatabasePut.

* [NOD-863] Add additional verification to testTransactionGet.

* [NOD-863] Add TestTransactionCommit.

* [NOD-863] Add TestTransactionRollback.

* [NOD-863] Add TestTransactionRollbackUnlessClosed.

* [NOD-863] Remove equals sign from databasePrepareFunc declaration.
2020-04-20 12:14:55 +03:00
Svarog
42c53ec3e2 [NOD-869] Add a print after os.Exit(1) to see if it is ever called (#701) 2020-04-16 16:08:32 +03:00
Ori Newman
291df8bfef [NOD-858] Don't switch sync peer if the syncing process hasn't yet started with the current sync peer (#700)
* [NOD-858] Don't switch sync peer if the syncing process hasn't yet started with the current sync peer

* [NOD-858] SetShouldSendBlockLocator(false) on OnBlockLocator

* [NOD-858] Rename shouldSendBlockLocator->wasBlockLocatorRequested

* [NOD-858] Move panic to shouldReplaceSyncPeer
2020-04-13 15:50:55 +03:00
Ori Newman
d015286f65 [NOD-909] Add tests for double spends (#694)
* [NOD-909] Add tests for double spends

* [NOD-909] Add prepareAndProcessBlock that gets parent hashes and transactions as argument

* [NOD-909] Use PrepareAndProcessBlockForTest where possible

* [NOD-909] Use more meaningful names

* [NOD-909] Change a comment

* [NOD-909] Fix comment

* [NOD-909] Fix comment
2020-04-13 12:28:59 +03:00
Ori Newman
fe91b4c878 [NOD-914] Make LevelDB.Cursor receive bucket instead of prefix (#696) 2020-04-12 09:25:40 +03:00
Ori Newman
7609c50641 [NOD-885] Use database.Key and database.Bucket instead of byte slices (#692)
* [NOD-885] Create database.Key type

* [NOD-885] Rename FullKey()->FullKeyBytes() and Key()->KeyBytes()

* [NOD-885] Make Key.String return a hex string

* [NOD-885] Rename key parts

* [NOD-885] Rename separator->bucketSeparator

* [NOD-885] Rename SuffixBytes->Suffix and PrefixBytes->Prefix

* [NOD-885] Change comments

* [NOD-885] Change key prefix to bucket

* [NOD-885] Don't use database.NewKey inside dbaccess

* [NOD-885] Fix nil bug in Bucket.Path()

* [NOD-885] Rename helpers.go -> keys.go

* [NOD-885] Unexport database.NewKey

* [NOD-885] Remove redundant code in Bucket.Path()
2020-04-08 12:12:21 +03:00
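Editor's note: NOD-885 above replaces raw byte slices with database.Key and database.Bucket types, including a bucketSeparator, a Bucket.Path() method, and a hex-encoded Key.String(). A hedged sketch of the path-building idea; the separator byte and method names are assumptions made for illustration.

```go
package database

import "encoding/hex"

// bucketSeparator delimits bucket names inside a full key path (assumed value).
const bucketSeparator = byte('/')

// Bucket is a nested namespace for keys, built from an ordered list of names.
type Bucket struct {
	path [][]byte
}

// MakeBucket creates a bucket from the given path of sub-bucket names.
func MakeBucket(path ...[]byte) *Bucket {
	return &Bucket{path: path}
}

// Bucket returns the sub-bucket of the current bucket with the given name.
func (b *Bucket) Bucket(name []byte) *Bucket {
	newPath := make([][]byte, len(b.path)+1)
	copy(newPath, b.path)
	newPath[len(b.path)] = name
	return MakeBucket(newPath...)
}

// Path returns the full, separator-joined byte path of the bucket.
func (b *Bucket) Path() []byte {
	var path []byte
	for _, name := range b.path {
		path = append(path, name...)
		path = append(path, bucketSeparator)
	}
	return path
}

// Key is a key that lives inside some bucket.
type Key struct {
	bucket *Bucket
	suffix []byte
}

// Key returns a key with the given suffix inside this bucket.
func (b *Bucket) Key(suffix []byte) *Key {
	return &Key{bucket: b, suffix: suffix}
}

// Bytes returns the fully-qualified key: the bucket path followed by the suffix.
func (k *Key) Bytes() []byte {
	return append(k.bucket.Path(), k.suffix...)
}

// String returns the key as a hex string, as the commit describes.
func (k *Key) String() string {
	return hex.EncodeToString(k.Bytes())
}
```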
Ori Newman
df934990d7 [NOD-822] Don't return rule errors from utxoset code (#693)
* [NOD-822] Remove rule errors from the UTXO diff code

* [NOD-822] Rename applyTransactions -> applyAndVerifyBlockTransactionsToPastUTXO

* [NOD-822] Fix comment
2020-04-07 12:45:12 +03:00
stasatdaglabs
3c4a80f16d [NOD-899] Inside the database, in case we're out of disk space, panic without printing the stack trace (#691)
* [NOD-899] Inside the database, in case we're out of disk space, panic without printing the stack trace.

* [NOD-899] Fix bad variable name.

* [NOD-899] Reduce code duplication.
2020-04-06 16:00:48 +03:00
stasatdaglabs
a31139d4a5 [NOD-895] Break down initDAGState to sub-routines (#690) 2020-04-06 11:08:57 +03:00
Mike Zak
6da3606721 Update to version 0.4.0 2020-04-05 16:23:01 +03:00
Ori Newman
bfbc72724d [NOD-873] Reuse allocated space when updating the UTXO set in database (#688) 2020-04-05 11:46:16 +03:00
stasatdaglabs
956b6f7d95 [NOD-900] Fix bad key in Seek (#687)
* [NOD-900] Fix Seek not working as expected.

* [NOD-900] Wrap error messages.

* [NOD-900] Use ldbIterator.Key instead of LevelDBCursor.Key.

* [NOD-900] Add a comment.
2020-04-02 17:47:51 +03:00
stasatdaglabs
c1a039de3f [NOD-900] Fix Seek not working as expected (#686)
* [NOD-900] Fix Seek not working as expected.

* [NOD-900] Wrap error messages.
2020-04-02 17:05:58 +03:00
stasatdaglabs
f8b18e09d6 [NOD-805] Redesign the database (#685)
* [NOD-828] Reimplement FFLDB (#663)

* [NOD-828] Create the database2 package that will some day replace the database package.

* [NOD-828] Implement a "bucket" key mechanism.

* [NOD-828] Move bucket.go into the ffldb2 package.

* [NOD-828] Delete the un-interfaced ffldb package from database2, since we aren't going to be using it anyway.

* [NOD-828] Copy over + fixup flat file structs from the old ffldb.

* [NOD-828] Implement flatFilePath.

* [NOD-828] Implement flatFileStore.write().

* [NOD-828] Implement flatFileStore.read().

* [NOD-828] Implement flatFileStore.rollback().

* [NOD-828] Sync the file to disk at the end of write().

* [NOD-828] Extract crc32ByteOrder to a separate variable.

* [NOD-828] Add a sanity test.

* [NOD-828] Remove context-unrelated methods from the Database interface.

* [NOD-828] Create an ffldb object. Simply work against a context.

* [NOD-828] Open the new database on start.

* [NOD-828] Create the leveldb package.

* [NOD-828] Implement opening/closing leveldb.

* [NOD-828] Implement get/put out of/into leveldb.

* [NOD-828] Implement transactions and make them implement a generic database interface.

* [NOD-828] Write sanity tests for leveldb with and without transactions.

* [NOD-828] Add another case to the transaction sanity test.

* [NOD-828] Implement AppendBlock/RetrieveBlock.

* [NOD-828] Refactor so that concepts such as "block" and "metadata" don't leak into the database package.

* [NOD-828] Add RollbackFlatData to the database interface.

* [NOD-828] Remove anything from dbaccess that I'm not planning to implement as part of this ticket.

* [NOD-828] Implement StoreBlock.

* [NOD-828] Implement FetchBlock.

* [NOD-828] Implement HasBlock.

* [NOD-828] Write a sanity test for block insertion.

* [NOD-828] Implement CurrentFlatDataLocation.

* [NOD-828] Implement storing the current block location.

* [NOD-828] Implement initializing/syncing the flat file block store and the "metadata".

* [NOD-828] Add InitBlockStore to TestBlockStoreSanity.

* [NOD-828] Fix rename errors.

* [NOD-828] Fix lint errors in the root database package.

* [NOD-828] Fix lint errors in the ffldb.go.

* [NOD-828] Fix lint errors in the flatfile/db.go.

* [NOD-828] Rename packages in such a way as to make the linter happy.

* [NOD-828] Finish satisfying the linter.

* [NOD-828] Fix doc.go.

* [NOD-828] Fix comments in block.go.

* [NOD-828] Move dbaccess out of the database package.

* [NOD-828] Move opening/closing the database to dbaccess.

* [NOD-828] Move the Database interface to the root database package, since it's meant to be generic.

* [NOD-828] Make ffldb generic to later support additional databases.

* [NOD-828] Make ffldb.Open return DatabaseHandle, since ffldb is no longer exported.

* [NOD-828] Fix comments.

* [NOD-828] Rename AppendFlatData to AppendToStore and RetrieveFlatData to RetrieveFromStore.

* [NOD-828] Make buckets nicer to use.

* [NOD-828] Implement cursors that iterate over some bucket.

* [NOD-828] Generalize flat-file repairing and move block database repairing into ffldb.

* [NOD-828] Write a test making sure that flat file repair works.

* [NOD-828] Properly close the database in TestRepairFlatFiles.

* [NOD-828] Add a comment warning against putting and getting the same data within the same transaction.

* [NOD-828] Fix the flatFilesBucket description.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Fix the ffldb description.

* [NOD-828] Rename Database to DataAccessor and rename Handle to Database.

* [NOD-828] Make Get return nil if the value doesn't exist.

* [NOD-828] Attempt to close leveldb even if closing ffldb failed.

* [NOD-828] Fix a bug where the wrong location would be written to the current store location bucket.

* [NOD-828] Fix not updating the store location in ffldb transactions.

* [NOD-828] Make scanFlatFiles return an error if os.Stat fails for any reason other than file-not-found.

* [NOD-828] Update the README and doc.go.

* [NOD-828] Simplify Bucket.Path().

* [NOD-828] Since LevelDBCursor satisfied the database2.Cursor interface, use it directly.

* [NOD-828] Combine two lines into one.

* [NOD-828] Combine another two lines into one.

* [NOD-828] Move a misplaced comment.

* [NOD-828] Use Wrapf instead of Errorf where appropriate.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Fix Wrapf calls.

* [NOD-828] Fix comments.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Add a comment explaining the use of batches and snapshots.

* [NOD-828] Implement RollbackUnlessClosed().

* [NOD-828] Return both errors in StoreBlock rollback.

* [NOD-828] Move rollback-on-error logic into ffldb. Remove CurrentStoreLocation and RollbackStore from DataAccessor.

* [NOD-828] Make bucket a type alias instead of a struct.

* [NOD-828] Fix a typo.

* [NOD-828] Use copy instead of append in Bucket.

* [NOD-828] Extract flatFileLocationSerializedSize to a const.

* [NOD-828] Debugf -> Warnf in rollback.go.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Remove data length from flat file data format.

* [NOD-828] Rearrange TestLevelDBTransactionSanity a bit.

* [NOD-828] Add stack traces to all errors that come out of library functions.

* [NOD-828] Return errors from rollback().

* [NOD-828] Remove an irrelevant comment.

* [NOD-828] Remove redundant whitespace.

* [NOD-828] Handle nil in FetchBlock.

* [NOD-828] Move the explanation about batches and snapshots to the LevelDBTransaction struct.

* Revert "[NOD-828] Make bucket a type alias instead of a struct."

This reverts commit 1fd39652

* [NOD-828] Fix revert errors.

* Revert "[NOD-828] Remove data length from flat file data format."

This reverts commit ef408e32

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-828] Add Delete to DataAccessor.

* [NOD-828] Fix a couple of places that erroneously referenced blocks.

* [NOD-828] Add a comment on top of flatFileLocationSerializedSize.

* [NOD-828] Add Seek to Cursor.

* [NOD-828] Add First to Cursor.

* [NOD-828] Rename db to accessor in Context.

* [NOD-828] Make Get/Fetch calls return a boolean to indicate whether the requested item was found.

* [NOD-828] Name the output parameters of all Get functions.

* [NOD-828] Make RetrieveFromStore return whether the data was found.

* [NOD-887] Add a couple of QoL features to Cursor (#674)

* [NOD-887] Changed First to not return an error.

* [NOD-887] Fix merge error.

* [NOD-887] Make Cursor.Key not return the entire key path.

* [NOD-888] Add RollbackUnlessClosed to Context (#676)

* [NOD-888] Add RollbackUnlessClosed to Context.

* [NOD-888] Fix copy+paste error.

* [NOD-889] Instead of returning a boolean for not-found, return an error (#677)

* [NOD-889] Instead of returning a boolean for not-found, return an error.

* [NOD-889] Wrapped ErrNotFound for Get calls with nicer error messages.

* [NOD-889] Fix format.

* [NOD-889] Fix double space in a comment.

* [NOD-889] Add IsNotFoundError to dbaccess.

* [NOD-862] Replace calls to Tx.StoreBlock, Tx.HasBlock, Tx.FetchBlock with appropriate calls in dbaccess (#672)

* [NOD-828] Create the database2 package that will some day replace the database package.

* [NOD-828] Implement a "bucket" key mechanism.

* [NOD-828] Move bucket.go into the ffldb2 package.

* [NOD-828] Delete the un-interfaced ffldb package from database2, since we aren't going to be using it anyway.

* [NOD-828] Copy over + fixup flat file structs from the old ffldb.

* [NOD-828] Implement flatFilePath.

* [NOD-828] Implement flatFileStore.write().

* [NOD-828] Implement flatFileStore.read().

* [NOD-828] Implement flatFileStore.rollback().

* [NOD-828] Sync the file to disk at the end of write().

* [NOD-828] Extract crc32ByteOrder to a separate variable.

* [NOD-828] Add a sanity test.

* [NOD-828] Remove context-unrelated methods from the Database interface.

* [NOD-828] Create an ffldb object. Simply work against a context.

* [NOD-828] Open the new database on start.

* [NOD-828] Create the leveldb package.

* [NOD-828] Implement opening/closing leveldb.

* [NOD-828] Implement get/put out of/into leveldb.

* [NOD-828] Implement transactions and make them implement a generic database interface.

* [NOD-828] Write sanity tests for leveldb with and without transactions.

* [NOD-828] Add another case to the transaction sanity test.

* [NOD-828] Implement AppendBlock/RetrieveBlock.

* [NOD-828] Refactor so that concepts such as "block" and "metadata" don't leak into the database package.

* [NOD-828] Add RollbackFlatData to the database interface.

* [NOD-828] Remove anything from dbaccess that I'm not planning to implement as part of this ticket.

* [NOD-828] Implement StoreBlock.

* [NOD-828] Implement FetchBlock.

* [NOD-828] Implement HasBlock.

* [NOD-828] Write a sanity test for block insertion.

* [NOD-828] Implement CurrentFlatDataLocation.

* [NOD-828] Implement storing the current block location.

* [NOD-828] Implement initializing/syncing the flat file block store and the "metadata".

* [NOD-828] Add InitBlockStore to TestBlockStoreSanity.

* [NOD-828] Fix rename errors.

* [NOD-828] Fix lint errors in the root database package.

* [NOD-828] Fix lint errors in the ffldb.go.

* [NOD-828] Fix lint errors in the flatfile/db.go.

* [NOD-828] Rename packages in such a way to make the linter happy.

* [NOD-828] Finish satisfying the linter.

* [NOD-828] Fix doc.go.

* [NOD-828] Fix comments in block.go.

* [NOD-828] Move dbaccess out of the database package.

* [NOD-828] Move opening/closing the database to dbaccess.

* [NOD-828] Move the Database interface to the root database package, since it's meant to be generic.

* [NOD-828] Make ffldb generic to later support additional databases.

* [NOD-828] Make ffldb.Open return DatabaseHandle, since ffldb is no longer exported.

* [NOD-828] Fix comments.

* [NOD-828] Rename AppendFlatData to AppendToStore and RetrieveFlatData to RetrieveFromStore.

* [NOD-828] Make buckets nicer to use.

* [NOD-828] Implement cursors that iterate over some bucket.

* [NOD-828] Generalize flat-file repairing and move block database repairing into ffldb.

* [NOD-828] Write a test making sure that flat file repair works.

* [NOD-828] Properly close the database in TestRepairFlatFiles.

* [NOD-828] Add a comment warning against putting and getting the same data within the same transaction.

* [NOD-862] Use dbaccess.HasBlock instead of Tx.HasBlock in initDAGState.

* [NOD-862] Use dbaccess.StoreBlock instead of dbStoreBlock.

* [NOD-862] Use dbaccess.FetchBlock instead of various block fetching mechanisms.

* [NOD-828] Fix the flatFilesBucket description.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Fix the ffldb description.

* [NOD-828] Rename Database to DataAccessor and rename Handle to Database.

* [NOD-828] Make Get return nil if the value doesn't exist.

* [NOD-828] Attempt to close leveldb even if closing ffdb failed.

* [NOD-828] Fix a bug where the wrong location would be written to the current store location bucket.

* [NOD-828] Fix not updating the store location in ffldb transactions.

* [NOD-828] Make scanFlatFiles return an error if os.Stat fails for any reason other than file-not-found.

* [NOD-828] Update the README and doc.go.

* [NOD-828] Simplify Bucket.Path().

* [NOD-828] Since LevelDBCursor satisfies the database2.Cursor interface, use it directly.

* [NOD-828] Combine two lines into one.

* [NOD-828] Combine another two lines into one.

* [NOD-828] Move a misplaced comment.

* [NOD-828] Use Wrapf instead of Errorf where appropriate.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Fix Wrapf calls.

* [NOD-828] Fix comments.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Add a comment explaining the use of batches and snapshots.

* [NOD-828] Implement RollbackUnlessClosed().

* [NOD-828] Return both errors in StoreBlock rollback.

* [NOD-828] Move rollback-on-error logic into ffldb. Remove CurrentStoreLocation and RollbackStore from DataAccessor.

* [NOD-828] Make bucket a type alias instead of a struct.

* [NOD-828] Fix a typo.

* [NOD-828] Use copy instead of append in Bucket.

* [NOD-828] Extract flatFileLocationSerializedSize to a const.

* [NOD-828] Debugf -> Warnf in rollback.go.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Remove data length from flat file data format.

* [NOD-828] Rearrange TestLevelDBTransactionSanity a bit.

* [NOD-828] Add stack traces to all errors that come out of library functions.

* [NOD-828] Return errors from rollback().

* [NOD-828] Remove an irrelevant comment.

* [NOD-828] Remove redundant whitespace.

* [NOD-828] Handle nil in FetchBlock.

* [NOD-828] Implement a dbaccess.BlockNode struct.

* [NOD-828] Move the explanation about batches and snapshots to the LevelDBTransaction struct.

* [NOD-828] Implement toDBBlockNode and fromDBBlockNode.

* Revert "[NOD-828] Make bucket a type alias instead of a struct."

This reverts commit 1fd39652

* [NOD-828] Fix revert errors.

* Revert "[NOD-828] Remove data length from flat file data format."

This reverts commit ef408e32

* [NOD-862] Implement storing index blocks.

* [NOD-862] Use database transactions where appropriate.

* [NOD-862] Fix tests failing on DAGSetup.

* [NOD-862] Fix bad make call.

* [NOD-862] Fix remaining database opening problems in tests.

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-862] Iterate over the new block index in dagio.

* [NOD-862] Fix block index key.

* [NOD-828] Add Delete to DataAccessor.

* [NOD-862] Pass byte slices to dbaccess instead of objects.

* [NOD-862] Fix errors.

* [NOD-862] Fix maybeAcceptBlock not checking block existence.

* [NOD-862] Fix TestAcceptanceIndexRecover.

* [NOD-862] Add comments to StoreBlockIndex and BlockIndexCursor.

* [NOD-828] Fix a couple of places that erroneously referenced blocks.

* [NOD-828] Add a comment on top of flatFileLocationSerializedSize.

* [NOD-828] Add Seek to Cursor.

* [NOD-828] Add First to Cursor.

* [NOD-828] Rename db to accessor in Context.

* [NOD-828] Make Get/Fetch calls return a boolean to indicate whether the requested item was found.

* [NOD-828] Name the output parameters of all Get functions.

* [NOD-828] Make RetrieveFromStore return whether the data was found.

* [NOD-862] Fix merge errors.

* [NOD-862] Fix DAGSetup using bad temp directories.

* [NOD-862] Fix TestProcessDelayedBlocks not closing the database properly.

* [NOD-862] Fix merge errors.

* [NOD-862] Merge flushToDBWithContext and flushToDB.

* [NOD-862] Remove TODO.

* [NOD-862] Add prefix to the temp dir in DAGSetup.

* [NOD-862] Bring back dbFetchBlockByHash.

* [NOD-862] Use BlockDAG.BlockByHash in p2p and rpc.

* [NOD-862] Use daghash.Hash in dbaccess.

* [NOD-862] Add defer to RollbackUnlessClosed after NewTx().

* [NOD-862] Extract dbStoreBlock to a separate function.

* [NOD-862] Fix grammar in comment.

* [NOD-862] Fix merge errors.

* [NOD-867] Migrate database logic in blockdag/dagio.go to dbaccess (#675)

* [NOD-828] Create the database2 package that will some day replace the database package.

* [NOD-828] Implement a "bucket" key mechanism.

* [NOD-828] Move bucket.go into the ffldb2 package.

* [NOD-828] Delete the un-interfaced ffldb package from database2, since we aren't going to be using it anyway.

* [NOD-828] Copy over + fixup flat file structs from the old ffldb.

* [NOD-828] Implement flatFilePath.

* [NOD-828] Implement flatFileStore.write().

* [NOD-828] Implement flatFileStore.read().

* [NOD-828] Implement flatFileStore.rollback().

* [NOD-828] Sync the file to disk at the end of write().

* [NOD-828] Extract crc32ByteOrder to a separate variable.

* [NOD-828] Add a sanity test.

* [NOD-828] Remove context-unrelated methods from the Database interface.

* [NOD-828] Create an ffldb object. Simply work against a context.

* [NOD-828] Open the new database on start.

* [NOD-828] Create the leveldb package.

* [NOD-828] Implement opening/closing leveldb.

* [NOD-828] Implement get/put out of/into leveldb.

* [NOD-828] Implement transactions and make them implement a generic database interface.

* [NOD-828] Write sanity tests for leveldb with and without transactions.

* [NOD-828] Add another case to the transaction sanity test.

* [NOD-828] Implement AppendBlock/RetrieveBlock.

* [NOD-828] Refactor so that concepts such as "block" and "metadata" don't leak into the database package.

* [NOD-828] Add RollbackFlatData to the database interface.

* [NOD-828] Remove anything from dbaccess that I'm not planning to implement as part of this ticket.

* [NOD-828] Implement StoreBlock.

* [NOD-828] Implement FetchBlock.

* [NOD-828] Implement HasBlock.

* [NOD-828] Write a sanity test for block insertion.

* [NOD-828] Implement CurrentFlatDataLocation.

* [NOD-828] Implement storing the current block location.

* [NOD-828] Implement initializing/syncing the flat file block store and the "metadata".

* [NOD-828] Add InitBlockStore to TestBlockStoreSanity.

* [NOD-828] Fix rename errors.

* [NOD-828] Fix lint errors in the root database package.

* [NOD-828] Fix lint errors in the ffldb.go.

* [NOD-828] Fix lint errors in the flatfile/db.go.

* [NOD-828] Rename packages in such a way to make the linter happy.

* [NOD-828] Finish satisfying the linter.

* [NOD-828] Fix doc.go.

* [NOD-828] Fix comments in block.go.

* [NOD-828] Move dbaccess out of the database package.

* [NOD-828] Move opening/closing the database to dbaccess.

* [NOD-828] Move the Database interface to the root database package, since it's meant to be generic.

* [NOD-828] Make ffldb generic to later support additional databases.

* [NOD-828] Make ffldb.Open return DatabaseHandle, since ffldb is no longer exported.

* [NOD-828] Fix comments.

* [NOD-828] Rename AppendFlatData to AppendToStore and RetrieveFlatData to RetrieveFromStore.

* [NOD-828] Make buckets nicer to use.

* [NOD-828] Implement cursors that iterate over some bucket.

* [NOD-828] Generalize flat-file repairing and move block database repairing into ffldb.

* [NOD-828] Write a test making sure that flat file repair works.

* [NOD-828] Properly close the database in TestRepairFlatFiles.

* [NOD-828] Add a comment warning against putting and getting the same data within the same transaction.

* [NOD-862] Use dbaccess.HasBlock instead of Tx.HasBlock in initDAGState.

* [NOD-862] Use dbaccess.StoreBlock instead of dbStoreBlock.

* [NOD-862] Use dbaccess.FetchBlock instead of various block fetching mechanisms.

* [NOD-828] Fix the flatFilesBucket description.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Fix the ffldb description.

* [NOD-828] Rename Database to DataAccessor and rename Handle to Database.

* [NOD-828] Make Get return nil if the value doesn't exist.

* [NOD-828] Attempt to close leveldb even if closing ffdb failed.

* [NOD-828] Fix a bug where the wrong location would be written to the current store location bucket.

* [NOD-828] Fix not updating the store location in ffldb transactions.

* [NOD-828] Make scanFlatFiles return an error if os.Stat fails for any reason other than file-not-found.

* [NOD-828] Update the README and doc.go.

* [NOD-828] Simplify Bucket.Path().

* [NOD-828] Since LevelDBCursor satisfies the database2.Cursor interface, use it directly.

* [NOD-828] Combine two lines into one.

* [NOD-828] Combine another two lines into one.

* [NOD-828] Move a misplaced comment.

* [NOD-828] Use Wrapf instead of Errorf where appropriate.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Fix Wrapf calls.

* [NOD-828] Fix comments.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Add a comment explaining the use of batches and snapshots.

* [NOD-828] Implement RollbackUnlessClosed().

* [NOD-828] Return both errors in StoreBlock rollback.

* [NOD-828] Move rollback-on-error logic into ffldb. Remove CurrentStoreLocation and RollbackStore from DataAccessor.

* [NOD-828] Make bucket a type alias instead of a struct.

* [NOD-828] Fix a typo.

* [NOD-828] Use copy instead of append in Bucket.

* [NOD-828] Extract flatFileLocationSerializedSize to a const.

* [NOD-828] Debugf -> Warnf in rollback.go.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Remove data length from flat file data format.

* [NOD-828] Rearrange TestLevelDBTransactionSanity a bit.

* [NOD-828] Add stack traces to all errors that come out of library functions.

* [NOD-828] Return errors from rollback().

* [NOD-828] Remove an irrelevant comment.

* [NOD-828] Remove redundant whitespace.

* [NOD-828] Handle nil in FetchBlock.

* [NOD-828] Implement a dbaccess.BlockNode struct.

* [NOD-828] Move the explanation about batches and snapshots to the LevelDBTransaction struct.

* [NOD-828] Implement toDBBlockNode and fromDBBlockNode.

* Revert "[NOD-828] Make bucket a type alias instead of a struct."

This reverts commit 1fd39652

* [NOD-828] Fix revert errors.

* Revert "[NOD-828] Remove data length from flat file data format."

This reverts commit ef408e32

* [NOD-862] Implement storing index blocks.

* [NOD-862] Use database transactions where appropriate.

* [NOD-862] Fix tests failing on DAGSetup.

* [NOD-862] Fix bad make call.

* [NOD-862] Fix remaining database opening problems in tests.

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-862] Iterate over the new block index in dagio.

* [NOD-862] Fix block index key.

* [NOD-828] Add Delete to DataAccessor.

* [NOD-862] Pass byte slices to dbaccess instead of objects.

* [NOD-862] Fix errors.

* [NOD-862] Fix maybeAcceptBlock not checking block existence.

* [NOD-862] Fix TestAcceptanceIndexRecover.

* [NOD-862] Add comments to StoreBlockIndex and BlockIndexCursor.

* [NOD-828] Fix a couple of places that erroneously referenced blocks.

* [NOD-828] Add a comment on top of flatFileLocationSerializedSize.

* [NOD-828] Add Seek to Cursor.

* [NOD-828] Add First to Cursor.

* [NOD-828] Rename db to accessor in Context.

* [NOD-828] Make Get/Fetch calls return a boolean to indicate whether the requested item was found.

* [NOD-828] Name the output parameters of all Get functions.

* [NOD-828] Make RetrieveFromStore return whether the data was found.

* [NOD-862] Fix merge errors.

* [NOD-862] Fix DAGSetup using bad temp directories.

* [NOD-862] Fix TestProcessDelayedBlocks not closing the database properly.

* [NOD-867] Remove blockIndexBucket from dagio.

* [NOD-867] Fix wrong key in StoreIndexBucket.

* [NOD-867] Migrate DAG state to dbaccess.

* [NOD-867] Remove utxoSetVersionKeyName.

* [NOD-862] Fix merge errors.

* [NOD-867] Move localSubnetworkID into dagState.

* [NOD-867] Fix a comment.

* [NOD-867] Remove an unused function.

* [NOD-867] Migrate the database's UTXO set to dbaccess.

* [NOD-867] Add missing error check.

* [NOD-867] Changed First to not return an error.

* [NOD-867] Make Cursor.Key not return the entire key path.

* [NOD-887] Fix the comment above BlockIndexCursorFrom.

* [NOD-862] Merge flushToDBWithContext and flushToDB.

* [NOD-862] Remove TODO.

* [NOD-862] Add prefix to the temp dir in DAGSetup.

* [NOD-862] Bring back dbFetchBlockByHash.

* [NOD-862] Use BlockDAG.BlockByHash in p2p and rpc.

* [NOD-862] Use daghash.Hash in dbaccess.

* [NOD-862] Add defer to RollbackUnlessClosed after NewTx().

* [NOD-862] Extract dbStoreBlock to a separate function.

* [NOD-867] Remove TODOs.

* [NOD-867] Fix merge errors.

* [NOD-867] Fix comments and errors.

* [NOD-867] Unexport blockIndexKey.

* [NOD-867] Fix merge errors.

* [NOD-867] Move a misplaced comment.

* [NOD-867] Fix an error message.

* [NOD-867] Remove preallocation in initDAGState.

* [NOD-866] Migrate database logic in blockdag/indexers package to dbaccess (#682)

* [NOD-865] Delete blockidhash.go.

* [NOD-865] Remove a lot of no-longer relevant logic from indexers.

* [NOD-865] Pass TxContext to ConnectBlock.

* [NOD-865] Migrate the acceptance index to dbaccess.

* [NOD-865] Fix a block not being sent to ConnectBlock.

* [NOD-865] Pass the block's hash instead of the whole block.

* [NOD-865] Add forgotten Commit call.

* [NOD-865] Add comments.

* [NOD-866] Fix a comment.

* [NOD-866] Fix a comment.

* [NOD-866] Remove pointless indirection in acceptanceindex.

* [NOD-866] Fix comment over ForEachHash.

* [NOD-866] Rename ClearAcceptanceIndex to DropAcceptanceIndex.

* [NOD-866] Explain collecting keys before deleting them.

* [NOD-865] Move misc db logic to db access (#681)

* [NOD-828] Create the database2 package that will some day replace the database package.

* [NOD-828] Implement a "bucket" key mechanism.

* [NOD-828] Move bucket.go into the ffldb2 package.

* [NOD-828] Delete the un-interfaced ffldb package from database2, since we aren't going to be using it anyway.

* [NOD-828] Copy over + fixup flat file structs from the old ffldb.

* [NOD-828] Implement flatFilePath.

* [NOD-828] Implement flatFileStore.write().

* [NOD-828] Implement flatFileStore.read().

* [NOD-828] Implement flatFileStore.rollback().

* [NOD-828] Sync the file to disk at the end of write().

* [NOD-828] Extract crc32ByteOrder to a separate variable.

* [NOD-828] Add a sanity test.

* [NOD-828] Remove context-unrelated methods from the Database interface.

* [NOD-828] Create an ffldb object. Simply work against a context.

* [NOD-828] Open the new database on start.

* [NOD-828] Create the leveldb package.

* [NOD-828] Implement opening/closing leveldb.

* [NOD-828] Implement get/put out of/into leveldb.

* [NOD-828] Implement transactions and make them implement a generic database interface.

* [NOD-828] Write sanity tests for leveldb with and without transactions.

* [NOD-828] Add another case to the transaction sanity test.

* [NOD-828] Implement AppendBlock/RetrieveBlock.

* [NOD-828] Refactor so that concepts such as "block" and "metadata" don't leak into the database package.

* [NOD-828] Add RollbackFlatData to the database interface.

* [NOD-828] Remove anything from dbaccess that I'm not planning to implement as part of this ticket.

* [NOD-828] Implement StoreBlock.

* [NOD-828] Implement FetchBlock.

* [NOD-828] Implement HasBlock.

* [NOD-828] Write a sanity test for block insertion.

* [NOD-828] Implement CurrentFlatDataLocation.

* [NOD-828] Implement storing the current block location.

* [NOD-828] Implement initializing/syncing the flat file block store and the "metadata".

* [NOD-828] Add InitBlockStore to TestBlockStoreSanity.

* [NOD-828] Fix rename errors.

* [NOD-828] Fix lint errors in the root database package.

* [NOD-828] Fix lint errors in the ffldb.go.

* [NOD-828] Fix lint errors in the flatfile/db.go.

* [NOD-828] Rename packages in such a way to make the linter happy.

* [NOD-828] Finish satisfying the linter.

* [NOD-828] Fix doc.go.

* [NOD-828] Fix comments in block.go.

* [NOD-828] Move dbaccess out of the database package.

* [NOD-828] Move opening/closing the database to dbaccess.

* [NOD-828] Move the Database interface to the root database package, since it's meant to be generic.

* [NOD-828] Make ffldb generic to later support additional databases.

* [NOD-828] Make ffldb.Open return DatabaseHandle, since ffldb is no longer exported.

* [NOD-828] Fix comments.

* [NOD-828] Rename AppendFlatData to AppendToStore and RetrieveFlatData to RetrieveFromStore.

* [NOD-828] Make buckets nicer to use.

* [NOD-828] Implement cursors that iterate over some bucket.

* [NOD-828] Generalize flat-file repairing and move block database repairing into ffldb.

* [NOD-828] Write a test making sure that flat file repair works.

* [NOD-828] Properly close the database in TestRepairFlatFiles.

* [NOD-828] Add a comment warning against putting and getting the same data within the same transaction.

* [NOD-828] Fix the flatFilesBucket description.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Fix the ffldb description.

* [NOD-828] Rename Database to DataAccessor and rename Handle to Database.

* [NOD-828] Make Get return nil if the value doesn't exist.

* [NOD-828] Attempt to close leveldb even if closing ffdb failed.

* [NOD-828] Fix a bug where the wrong location would be written to the current store location bucket.

* [NOD-828] Fix not updating the store location in ffldb transactions.

* [NOD-828] Make scanFlatFiles return an error if os.Stat fails for any reason other than file-not-found.

* [NOD-828] Update the README and doc.go.

* [NOD-828] Simplify Bucket.Path().

* [NOD-828] Since LevelDBCursor satisfies the database2.Cursor interface, use it directly.

* [NOD-828] Combine two lines into one.

* [NOD-828] Combine another two lines into one.

* [NOD-828] Move a misplaced comment.

* [NOD-828] Use Wrapf instead of Errorf where appropriate.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Fix Wrapf calls.

* [NOD-828] Fix comments.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Add a comment explaining the use of batches and snapshots.

* [NOD-828] Implement RollbackUnlessClosed().

* [NOD-828] Return both errors in StoreBlock rollback.

* [NOD-828] Move rollback-on-error logic into ffldb. Remove CurrentStoreLocation and RollbackStore from DataAccessor.

* [NOD-828] Make bucket a type alias instead of a struct.

* [NOD-828] Fix a typo.

* [NOD-828] Use copy instead of append in Bucket.

* [NOD-828] Extract flatFileLocationSerializedSize to a const.

* [NOD-828] Debugf -> Warnf in rollback.go.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Remove data length from flat file data format.

* [NOD-828] Rearrange TestLevelDBTransactionSanity a bit.

* [NOD-828] Add stack traces to all errors that come out of library functions.

* [NOD-828] Return errors from rollback().

* [NOD-828] Remove an irrelevant comment.

* [NOD-828] Remove redundant whitespace.

* [NOD-828] Handle nil in FetchBlock.

* [NOD-828] Move the explanation about batches and snapshots to the LevelDBTransaction struct.

* Revert "[NOD-828] Make bucket a type alias instead of a struct."

This reverts commit 1fd39652

* [NOD-828] Fix revert errors.

* Revert "[NOD-828] Remove data length from flat file data format."

This reverts commit ef408e32

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-828] Add Delete to DataAccessor.

* [NOD-865] Move fee data db operations to dbaccess

* [NOD-865] Move reachability data db operations to dbaccess

* [NOD-865] Move UTXO diff data db operations to dbaccess

* [NOD-865] Move subnetwork data db operations to dbaccess

* [NOD-865] Fix createDAGState

* [NOD-865] Remove old Get signature with "exists"

* [NOD-865] Move multiset db operations to dbaccess

* [NOD-865] Use dbaccess transactions where possible

* [NOD-865] Remove old Get signature with "exists"

* [NOD-881] Recover TestGHOSTDAGErrors

* [NOD-865] Create function for db keys

* [NOD-865] Change Exists to Has, and use accessor.Has where possible

* [NOD-865] Make ClearReachabilityData transactive

* [NOD-865] Don't iterate cursors while changing db data

* [NOD-865] Rename RegisterSubnetwork -> StoreSubnetwork

* [NOD-865] Change bucket from utxodiffs to utxo-diffs

* [NOD-865] Rename SubnetworkExists->HasSubnetwork

* [NOD-865] Change a comment

* [NOD-865] Fix tests

* [NOD-865] Fix comment

* [NOD-865] Remove the prefix "db" from some functions

* [NOD-865] Remove redundant comments

* [NOD-865] Make clearBucket function

* [NOD-865] Make clear functions get a dbTx as an arg

* [NOD-865] Remove erroneous tx commit

Co-authored-by: stasatdaglabs <stas@daglabs.com>

* [NOD-868] Delete the old database package (#683)

* [NOD-828] Create the database2 package that will some day replace the database package.

* [NOD-828] Implement a "bucket" key mechanism.

* [NOD-828] Move bucket.go into the ffldb2 package.

* [NOD-828] Delete the un-interfaced ffldb package from database2, since we aren't going to be using it anyway.

* [NOD-828] Copy over + fixup flat file structs from the old ffldb.

* [NOD-828] Implement flatFilePath.

* [NOD-828] Implement flatFileStore.write().

* [NOD-828] Implement flatFileStore.read().

* [NOD-828] Implement flatFileStore.rollback().

* [NOD-828] Sync the file to disk at the end of write().

* [NOD-828] Extract crc32ByteOrder to a separate variable.

* [NOD-828] Add a sanity test.

* [NOD-828] Remove context-unrelated methods from the Database interface.

* [NOD-828] Create an ffldb object. Simply work against a context.

* [NOD-828] Open the new database on start.

* [NOD-828] Create the leveldb package.

* [NOD-828] Implement opening/closing leveldb.

* [NOD-828] Implement get/put out of/into leveldb.

* [NOD-828] Implement transactions and make them implement a generic database interface.

* [NOD-828] Write sanity tests for leveldb with and without transactions.

* [NOD-828] Add another case to the transaction sanity test.

* [NOD-828] Implement AppendBlock/RetrieveBlock.

* [NOD-828] Refactor so that concepts such as "block" and "metadata" don't leak into the database package.

* [NOD-828] Add RollbackFlatData to the database interface.

* [NOD-828] Remove anything from dbaccess that I'm not planning to implement as part of this ticket.

* [NOD-828] Implement StoreBlock.

* [NOD-828] Implement FetchBlock.

* [NOD-828] Implement HasBlock.

* [NOD-828] Write a sanity test for block insertion.

* [NOD-828] Implement CurrentFlatDataLocation.

* [NOD-828] Implement storing the current block location.

* [NOD-828] Implement initializing/syncing the flat file block store and the "metadata".

* [NOD-828] Add InitBlockStore to TestBlockStoreSanity.

* [NOD-828] Fix rename errors.

* [NOD-828] Fix lint errors in the root database package.

* [NOD-828] Fix lint errors in the ffldb.go.

* [NOD-828] Fix lint errors in the flatfile/db.go.

* [NOD-828] Rename packages in such a way to make the linter happy.

* [NOD-828] Finish satisfying the linter.

* [NOD-828] Fix doc.go.

* [NOD-828] Fix comments in block.go.

* [NOD-828] Move dbaccess out of the database package.

* [NOD-828] Move opening/closing the database to dbaccess.

* [NOD-828] Move the Database interface to the root database package, since it's meant to be generic.

* [NOD-828] Make ffldb generic to later support additional databases.

* [NOD-828] Make ffldb.Open return DatabaseHandle, since ffldb is no longer exported.

* [NOD-828] Fix comments.

* [NOD-828] Rename AppendFlatData to AppendToStore and RetrieveFlatData to RetrieveFromStore.

* [NOD-828] Make buckets nicer to use.

* [NOD-828] Implement cursors that iterate over some bucket.

* [NOD-828] Generalize flat-file repairing and move block database repairing into ffldb.

* [NOD-828] Write a test making sure that flat file repair works.

* [NOD-828] Properly close the database in TestRepairFlatFiles.

* [NOD-828] Add a comment warning against putting and getting the same data within the same transaction.

* [NOD-828] Fix the flatFilesBucket description.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Fix the ffldb description.

* [NOD-828] Rename Database to DataAccessor and rename Handle to Database.

* [NOD-828] Make Get return nil if the value doesn't exist.

* [NOD-828] Attempt to close leveldb even if closing ffdb failed.

* [NOD-828] Fix a bug where the wrong location would be written to the current store location bucket.

* [NOD-828] Fix not updating the store location in ffldb transactions.

* [NOD-828] Make scanFlatFiles return an error if os.Stat fails for any reason other than file-not-found.

* [NOD-828] Update the README and doc.go.

* [NOD-828] Simplify Bucket.Path().

* [NOD-828] Since LevelDBCursor satisfies the database2.Cursor interface, use it directly.

* [NOD-828] Combine two lines into one.

* [NOD-828] Combine another two lines into one.

* [NOD-828] Move a misplaced comment.

* [NOD-828] Use Wrapf instead of Errorf where appropriate.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Fix Wrapf calls.

* [NOD-828] Fix comments.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Add a comment explaining the use of batches and snapshots.

* [NOD-828] Implement RollbackUnlessClosed().

* [NOD-828] Return both errors in StoreBlock rollback.

* [NOD-828] Move rollback-on-error logic into ffldb. Remove CurrentStoreLocation and RollbackStore from DataAccessor.

* [NOD-828] Make bucket a type alias instead of a struct.

* [NOD-828] Fix a typo.

* [NOD-828] Use copy instead of append in Bucket.

* [NOD-828] Extract flatFileLocationSerializedSize to a const.

* [NOD-828] Debugf -> Warnf in rollback.go.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Remove data length from flat file data format.

* [NOD-828] Rearrange TestLevelDBTransactionSanity a bit.

* [NOD-828] Add stack traces to all errors that come out of library functions.

* [NOD-828] Return errors from rollback().

* [NOD-828] Remove an irrelevant comment.

* [NOD-828] Remove redundant whitespace.

* [NOD-828] Handle nil in FetchBlock.

* [NOD-828] Move the explanation about batches and snapshots to the LevelDBTransaction struct.

* Revert "[NOD-828] Make bucket a type alias instead of a struct."

This reverts commit 1fd39652

* [NOD-828] Fix revert errors.

* Revert "[NOD-828] Remove data length from flat file data format."

This reverts commit ef408e32

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-828] Add Delete to DataAccessor.

* [NOD-865] Move fee data db operations to dbaccess

* [NOD-865] Move reachability data db operations to dbaccess

* [NOD-865] Move UTXO diff data db operations to dbaccess

* [NOD-865] Move subnetwork data db operations to dbaccess

* [NOD-865] Fix createDAGState

* [NOD-865] Remove old Get signature with "exists"

* [NOD-865] Move multiset db operations to dbaccess

* [NOD-865] Use dbaccess transactions where possible

* [NOD-865] Remove old Get signature with "exists"

* [NOD-881] Recover TestGHOSTDAGErrors

* [NOD-865] Create function for db keys

* [NOD-865] Change Exists to Has, and use accessor.Has where possible

* [NOD-865] Make ClearReachabilityData transactive

* [NOD-865] Don't iterate cursors while changing db data

* [NOD-865] Rename RegisterSubnetwork -> StoreSubnetwork

* [NOD-865] Change bucket from utxodiffs to utxo-diffs

* [NOD-865] Rename SubnetworkExists->HasSubnetwork

* [NOD-865] Change a comment

* [NOD-868] Remove all tests from old database.

* [NOD-868] Remove all unused methods from the old database's interfaces.

* [NOD-865] Fix tests

* [NOD-868] Remove references to DB.

* [NOD-865] Fix comment

* [NOD-868] Remove the old ffldb besides the interface and errors.go.

* [NOD-868] Remove errors.go.

* [NOD-868] Remove the old database package.

* [NOD-868] Add openDB to DAGSetup to emulate the old dbpath in dag.config.

* [NOD-868] Rename database2 to database.

* [NOD-868] Use NewTx instead of NoTx where required.

* [NOD-868] Fix merge errors.

* [NOD-868] Rename dbXXX functions to just xxx.

* [NOD-868] Rename putDAGState to saveDAGState.

* [NOD-868] Replace comments in initDAGState with logs.

* [NOD-868] Explain the openDB parameter in DAGSetup.

* [NOD-868] Fixup doc.go and README.md.

* [NOD-868] Remove pointless transactions.

Co-authored-by: Ori Newman <orinewman1@gmail.com>

* [NOD-805] Fix merge errors.

* [NOD-805] Fix a comment.

* [NOD-805] Don't return virtualTxsAcceptanceData from applyDAGChanges.

* [NOD-805] Add missing error handling in TestAcceptanceDataIndexRecover.

* [NOD-805] Rename blockDAG to dag in indexers/manager.go.

* [NOD-805] Defer cursor.Close() everywhere.

* [NOD-805] Rename scanFlatFiles to findCurrentLocation.

* [NOD-805] Extract crc32ChecksumLength and dataLengthLength to constants.

* [NOD-805] Handle open files properly in rollback.go.

* [NOD-805] Remove unnecessary func wrapper.

* [NOD-805] Remove unnecessary trimming in initialize.

* [NOD-805] Made StoreBlock accept only TxContext.

* [NOD-805] Changed the log level of an error message to Error.

* [NOD-805] Add a note about holding mutexes over deleteFile.

* [NOD-805] Remove a false comment.

* [NOD-805] Fix a comment.

* [NOD-805] Rename blk to block.

* [NOD-805] Extract utxoKey to a separate function.

* [NOD-805] Move dbaccess.xxxKey functions to the tops of their respective files.

* [NOD-805] Fix grammar in dbaccess/db.go.

* [NOD-805] Wrap a failed database corruption recovery error.

* [NOD-805] Split lines with WithStack in them.

* [NOD-805] Fix the comment over initialize.

* [NOD-805] Rename ffdb to flatFileDB and ldb to levelDB.

* [NOD-805] Fix a comment.

* [NOD-805] Fix a comment.

* [NOD-805] Use s.writeCursor instead of cursor.

* [NOD-805] Embed file in lockableFile.

* [NOD-805] the the -> the

* [NOD-805] openDB -> db

* [NOD-805] Use TxContext in all flushToDB functions.

* [NOD-805] Rename context -> dbContext.

* [NOD-805] Reword the comment at the beginning on initDAGState.

* [NOD-805] Explain cursor key trimming.

* [NOD-805] Remove Error from Cursor.

* [NOD-805] Return ErrNotFound from done Cursor Key and Value.

* [NOD-805] Add missing error handling.

* [NOD-805] Fix a comment.

* [NOD-805] Fix a variable name.

* [NOD-805] Remove pointless underscore.

* [NOD-805] Fix a comment.

* [NOD-805] Fix a variable name.

Co-authored-by: Mike Zak <feanorr@gmail.com>
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2020-04-02 13:56:32 +03:00
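
The database rewrite in the entry above keeps returning to a handful of concepts: buckets that build key prefixes, a generic DataAccessor, and cursors over a bucket. Below is a minimal, hypothetical sketch of the bucket idea only; the names (Bucket, MakeBucket, Key) and the '/' separator are illustrative assumptions, not the exact kaspad API.

```go
package main

import "fmt"

var separator = []byte("/")

// Bucket is a key prefix under which related database entries are grouped.
type Bucket struct {
	path []byte
}

// MakeBucket builds a bucket path out of the given tokens.
func MakeBucket(tokens ...[]byte) *Bucket {
	var path []byte
	for _, token := range tokens {
		path = append(path, token...)
		path = append(path, separator...)
	}
	return &Bucket{path: path}
}

// Path returns a defensive copy of the bucket's prefix
// ("use copy instead of append", as one commit above puts it).
func (b *Bucket) Path() []byte {
	path := make([]byte, len(b.path))
	copy(path, b.path)
	return path
}

// Key returns the full database key for the given suffix.
func (b *Bucket) Key(suffix []byte) []byte {
	return append(b.Path(), suffix...)
}

func main() {
	blockIndex := MakeBucket([]byte("block-index"))
	fmt.Println(string(blockIndex.Key([]byte("00ff")))) // block-index/00ff
}
```

Keys built this way would then be what the Get/Put/Has/Delete calls on the DataAccessor operate on, regardless of whether the backing store is leveldb or the flat-file store.
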
Ori Newman
b20a7a679b [NOD-874] If the node is not current call sm.restartSyncIfNeeded() on handleInvMsg (#684)
* [NOD-874] If the node is not current call sm.restartSyncIfNeeded() on handleInvMsg

* [NOD-874] Check haveUnknownInvBlock before restartSyncIfNeeded

* [NOD-874] Fix comment

* [NOD-874] Fix comment

* [NOD-874] Fix comment
2020-04-01 12:56:10 +03:00
Ori Newman
36d866375e [NOD-881] Don't recalculate subtreesize for children (#678)
* [NOD-881] Don't recalculate subtreesize for children

* [NOD-881] Make BenchmarkReindexInterval clearer

* [NOD-881] Use b.ResetTimer

* [NOD-881] Fix BenchmarkReindexInterval to use b.N
2020-03-31 12:43:02 +03:00
Svarog
024edc30a3 [NOD-857] Add generalized profiler package and use it everywhere (#679)
* [NOD-857] Add generalized profiler package and use it everywhere

* [NOD-857] Dependency-inject log into profiling.Start()
2020-03-31 12:41:21 +03:00
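
The [NOD-857] entry above adds a generalized profiler package with the logger dependency-injected into profiling.Start(). A hedged sketch of what such a helper could look like, built on Go's standard net/http/pprof; the Logger interface and the exact signature are assumptions.

```go
package profiling

import (
	"net"
	"net/http"
	_ "net/http/pprof" // registers the /debug/pprof handlers on the default mux
)

// Logger is the minimal logging surface the profiler needs injected.
type Logger interface {
	Infof(format string, args ...interface{})
	Errorf(format string, args ...interface{})
}

// Start serves the pprof endpoints on the given port in a background goroutine.
func Start(port string, log Logger) {
	go func() {
		addr := net.JoinHostPort("", port)
		log.Infof("Profiling server listening on %s", addr)
		if err := http.ListenAndServe(addr, nil); err != nil {
			log.Errorf("Profiling server failed: %s", err)
		}
	}()
}
```

A caller would then invoke something like profiling.Start("6060", log) behind a profile flag and inspect the node with `go tool pprof`.
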
Ori Newman
6aa5e0b5a8 [NOD-882] Remove ecc and hdkeychain (#680)
* [NOD-882] Remove ecc and hdkeychain

* [NOD-882] Remove HDCoinType from dagParams
2020-03-31 10:58:11 +03:00
Mike Zak
1a38550fdd Update to version 0.3.0 2020-03-29 14:15:17 +03:00
stasatdaglabs
3e7ebb5a84 [NOD-861] Get rid of dbtool/fetchblockregion.go. (#667) 2020-03-29 12:47:13 +03:00
Svarog
4bca7342d3 [NOD-883] Fix dockerfile in kaspaminer + set real version for go-libsecp256k1 (#673) 2020-03-26 17:50:09 +02:00
Elichai Turkel
f80908fb4e [NOD-876] Replace ecc with go-secp256k1 for public keys (#670)
* Replace ecc with go-secp256k1 in txscript

* Replace ecc with go-secp256k1 in util and cmd

* Replace ecc.Multiset with secp256k1.MultiSet
2020-03-26 17:03:39 +02:00
stasatdaglabs
e000e10738 [NOD-880] Remove CGO_ENABLED=0 from Dockerfile. (#671) 2020-03-26 14:02:57 +02:00
Ori Newman
d83862f36c [NOD-855] Save ECMH for block utxo and not diff utxo (#669)
* [NOD-855] Save ECMH for each block UTXO

* [NOD-855] Remove UpdateExtraNonce method

* [NOD-855] Remove multiset data from UTXO diffs

* [NOD-855] Fix to fetch multiset of selected parent

* [NOD-855] Don't remove coinbase inputs from multiset

* [NOD-855] Create multisetBucketName on startup

* [NOD-855] Remove multiset from UTXO diff tests

* [NOD-855] clear new entries from multisetstore on saveChangesFromBlock

* [NOD-855] Fix tests

* [NOD-855] Use UnacceptedBlueScore when adding current block transactions to multiset

* [NOD-855] Hash utxo before adding it to multiset

* [NOD-855] Pass isCoinbase to NewUTXOEntry

* [NOD-855] Do not use hash when adding entries to multiset

* [NOD-855] When calculating multiset, replace the unaccepted blue score of selected parent transaction with the block blue score

* [NOD-855] Manually add a chained transaction to a block in TestChainedTransactions

* [NOD-855] Change name and comments

* [NOD-855] Use FindAcceptanceData to find a specific block acceptance data

* [NOD-855] Remove redundant copy of txIn.PreviousOutpoint

* [NOD-855] Use fmt.Sprintf when creating internalRPCError
2020-03-26 13:06:12 +02:00
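
The [NOD-855] entry above moves the ECMH multiset from UTXO diffs to per-block UTXO sets: a block's multiset is derived from its selected parent's by removing every spent entry (coinbase inputs spend nothing) and adding every created entry, with serialized entries added directly rather than hashed first. A rough sketch of that flow follows; MultiSet and the transaction fields here are stand-ins, not the real secp256k1.MultiSet or kaspad types.

```go
package utxomultiset

// MultiSet is a stand-in for an ECMH multiset: order-independent and
// supporting removal, so the set hash can be maintained incrementally.
type MultiSet interface {
	Add(data []byte)
	Remove(data []byte)
	Clone() MultiSet
}

type transaction struct {
	isCoinbase     bool
	spentEntries   [][]byte // serialized outpoint+entry pairs this tx spends
	createdEntries [][]byte // serialized outpoint+entry pairs this tx creates
}

// blockMultiset derives a block's UTXO multiset from its selected parent's.
func blockMultiset(selectedParentMultiset MultiSet, transactions []transaction) MultiSet {
	ms := selectedParentMultiset.Clone()
	for _, tx := range transactions {
		if !tx.isCoinbase {
			for _, spent := range tx.spentEntries {
				ms.Remove(spent)
			}
		}
		for _, created := range tx.createdEntries {
			ms.Add(created)
		}
	}
	return ms
}
```
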
Svarog
1020402b34 [NOD-869] Close panicHandlerDone instead of sending an empty struct + use time.After instead of time.Tick (#668) 2020-03-25 16:14:08 +02:00
Mike Zak
bc6ce6ed53 Update version to v0.2.0 2020-03-25 11:51:14 +02:00
Ori Newman
d3b1953deb [NOD-848] optimize utxo diffs serialize allocations (#666)
* [NOD-848] Optimize allocations when serializing UTXO diffs

* [NOD-848] Use same UTXO serialization everywhere, and use compression as well

* [NOD-848] Fix usage of wrong buffer

* [NOD-848] Fix tests

* [NOD-848] Fix wire tests

* [NOD-848] Fix tests

* [NOD-848] Remove VLQ

* [NOD-848] Fix comments

* [NOD-848] Add varint for big endian encoding

* [NOD-848] In TestVarIntWire, assume the expected decoded value is the same as the serialization input

* [NOD-848] Serialize outpoint index with big endian varint

* [NOD-848] Remove p2pk from compression support

* [NOD-848] Fix comments

* [NOD-848] Remove p2pk from decompression support

* [NOD-848] Make entry compression optional

* [NOD-848] Fix tests

* [NOD-848] Fix comments and var names

* [NOD-848] Remove UTXO compression

* [NOD-848] Fix tests

* [NOD-848] Remove big endian varint

* [NOD-848] Fix comments

* [NOD-848] Rename ReadVarIntLittleEndian->ReadVarInt and fix WriteVarInt comment

* [NOD-848] Add outpointIndexByteOrder variable

* [NOD-848] Remove redundant comment

* [NOD-848] Fix outpointMaxSerializeSize to the correct value

* [NOD-848] Move subBuffer to utils
2020-03-24 16:44:41 +02:00
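
The [NOD-848] entry above is mostly about trimming allocations while serializing UTXO diffs: size one buffer up front, write fixed-width fields, and pin the outpoint index to a single declared byte order. A toy illustration follows; the 32-byte TxID plus uint32 index layout and the little-endian choice are assumptions, not the exact kaspad wire format.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// outpointIndexByteOrder pins the index encoding in one place,
// echoing the variable the commits above mention.
var outpointIndexByteOrder = binary.LittleEndian

type outpoint struct {
	txID  [32]byte
	index uint32
}

const outpointSerializeSize = 32 + 4

// serializeOutpoints writes all outpoints into one preallocated buffer
// instead of growing a fresh buffer per entry.
func serializeOutpoints(outpoints []outpoint) []byte {
	buf := bytes.NewBuffer(make([]byte, 0, len(outpoints)*outpointSerializeSize))
	for _, o := range outpoints {
		buf.Write(o.txID[:])
		var index [4]byte
		outpointIndexByteOrder.PutUint32(index[:], o.index)
		buf.Write(index[:])
	}
	return buf.Bytes()
}

func main() {
	serialized := serializeOutpoints([]outpoint{{index: 7}, {index: 8}})
	fmt.Println(len(serialized)) // 72
}
```
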
Svarog
3c67215e76 [NOD-796] Upgrade to go 1.14 (#665) 2020-03-22 14:50:13 +02:00
Svarog
586624c836 [NOD-853] Add profiler server to kaspaminer (#664) 2020-03-19 17:19:31 +02:00
Svarog
49855e6333 [NOD-823] Use WithDiffInPlace for the implementation of WithDiff (#657)
* [NOD-823] Use WithDiffInPlace for the implementation of WithDiff

* [NOD-823] Unexport withDiffInPlace
2020-03-17 11:19:02 +02:00
Ori Newman
624249c0f3 [NOD-842] Use flushToDB with the same transaction as everything else in saveChangesFromBlock and never ignore flushToDB errors (#662) 2020-03-16 11:05:17 +02:00
Ori Newman
1cf443a63b [NOD-841] Fix tests to not be dependent on block rate (#661)
* [NOD-841] Fix TestDifficulty

* [NOD-841] Fix TestProcessDelayedBlocks

* [NOD-841] Fix TestCheckBlockSanity

* [NOD-841] Fix TestProcessDelayedBlocks

* [NOD-841] Shorten long lines
2020-03-15 18:08:03 +02:00
Ori Newman
8909679f44 [NOD-818] Remove time adjustment (#658)
* [NOD-818] Remove time adjustment

* [NOD-818] Remove interface ensuring and copyright message

* [NOD-818] Update comment
2020-03-15 17:37:01 +02:00
Ori Newman
e58efbf0ea [NOD-839] Panic from non-rule error from ProcessBlock (#660) 2020-03-15 17:26:53 +02:00
Ori Newman
34fb066590 [NOD-518] Implement getmempoolentry (#656) 2020-03-12 16:00:18 +02:00
stasatdaglabs
299826f392 [NOD-827] Get rid of dbtools insecureimport.go and loadheaders.go (#655)
* [NOD-827] Get rid of dbtools insecureimport.go and loadheaders.go

* [NOD-827] Remove commands from realMain().
2020-03-10 16:31:13 +02:00
stasatdaglabs
3d8dd8724d [NOD-816] Remove TxIndex and AddrIndex (#653)
* [NOD-816] Remove TxIndex.

* [NOD-816] Remove AddrIndex.

* [NOD-816] Remove mentions of TxIndex and AddrIndex.

* [NOD-816] Remove mentions of getrawtransaction.

* [NOD-816] Remove mentions of searchrawtransaction.

* [NOD-816] Remove cmd/addsubnetwork.

* [NOD-816] Fix a comment.

* [NOD-816] Fix a comment.

* [NOD-816] Implement BlockDAG.TxConfirmations.

* [NOD-816] Return confirmations in getTxOut.

* [NOD-816] Rename TxConfirmations to UTXOConfirmations.

* [NOD-816] Rename txConfirmations to utxoConfirmations.

* [NOD-816] Fix capitalization in variable names.

* [NOD-816] Add acceptance index to addblock.

* [NOD-816] Get rid of txrawresult-confirmations.

* [NOD-816] Fix config flag.
2020-03-10 16:09:31 +02:00
Svarog
b8a00f7519 [NOD-778] Optimize RestoreUTXO (#652)
* [NOD-778] Add WithDiffInPlace

* [NOD-778] Fix bug in WithDiffInPlace

* [NOD-778] Add comment to WithDiffInPlace

* [NOD-778] Add double dag.restoreUTXO to benchmark, to remove time for hard-disk loading

* [NOD-778] Also test WithDiffInPlace in TestUTXODiffRules

* [NOD-778] Add tests for all cases possible in TestUTXODiffRules

* [NOD-778] Fix test-case 'first in toAdd in this, second in toRemove in this and toAdd in other'

* [NOD-778] Fixed in WithDiffInPlace

* [NOD-778] Update error messages when diffFrom(withDiffResult) fails in TestUTXODiffRules

* [NOD-778] diffFrom: disallow utxos both in d.toAdd, other.toAdd, and only one of d.toRemove and other.toRemove

* [NOD-778] Fix expected value in 'first in toRemove in this, second in toRemove in other'

* [NOD-778] diffFrom: Disallow situations where utxo both in d.toRemove and other.toRemove with different blue scores and no corresponding utxo in d.toAdd

* [NOD-778] WithDiff: Fix faulty logic that allows updates to blue scores

* [NOD-778] Fix WithDiffInPlace to pass all tests

* [NOD-778] Deleted temporary prints

* [NOD-778] Sorted TestUTXODiffRules tests according to spreadsheet

* [NOD-778] Delete deeputxo_test.go

* [NOD-778] Updated comments

* [NOD-778] Re-order

* [NOD-778] Re-order test-cases to be according to spreadsheet

* [NOD-778] Simplified case when both d.toRemove and other.toRemove have the same outpoint in diffFrom

* [NOD-778] Change a few error messages that say 'transaction' instead of 'outpoint'

* [NOD-778] Rename: utxoToAdd/Remove -> entryToAdd/Remove

* [NOD-788] Remove redundant else

* [NOD-778] Rename: existingUTXO -> existingEntry + remove redundant else

* [NOD-778] Correct test name
2020-03-10 15:32:19 +02:00
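
The [NOD-778] entry above speeds up RestoreUTXO by composing diffs in place rather than allocating a new diff per step. The core rule, stripped of the blue-score consistency checks the bullets go into, is sketched below: an entry the applied diff removes cancels a matching toAdd entry, and an entry it adds cancels a matching toRemove entry.

```go
package main

import "fmt"

type outpoint struct {
	txID  string
	index uint32
}

type utxoEntry struct {
	amount    uint64
	blueScore uint64
}

type utxoDiff struct {
	toAdd    map[outpoint]utxoEntry
	toRemove map[outpoint]utxoEntry
}

// withDiffInPlace applies other on top of d without allocating a new diff.
func (d *utxoDiff) withDiffInPlace(other *utxoDiff) {
	for op, entry := range other.toRemove {
		if _, ok := d.toAdd[op]; ok {
			// d added it and other removes it: the two cancel out.
			delete(d.toAdd, op)
		} else {
			d.toRemove[op] = entry
		}
	}
	for op, entry := range other.toAdd {
		if _, ok := d.toRemove[op]; ok {
			// d removed it and other re-adds it: the two cancel out.
			delete(d.toRemove, op)
		} else {
			d.toAdd[op] = entry
		}
	}
}

func main() {
	d := &utxoDiff{
		toAdd:    map[outpoint]utxoEntry{{"a", 0}: {100, 1}},
		toRemove: map[outpoint]utxoEntry{},
	}
	other := &utxoDiff{
		toAdd:    map[outpoint]utxoEntry{{"b", 1}: {50, 2}},
		toRemove: map[outpoint]utxoEntry{{"a", 0}: {100, 1}},
	}
	d.withDiffInPlace(other)
	fmt.Println(len(d.toAdd), len(d.toRemove)) // 1 0
}
```
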
stasatdaglabs
4dfc8cf5b0 [NOD-816] Remove addsubnetwork. (#654) 2020-03-10 11:09:33 +02:00
Ori Newman
5a99e4d2f3 [NOD-806] Exit early after panic (#650)
* [NOD-806] After panic, gracefully stop logs, and then exit immediately

* [NOD-806] Convert non-kaspad applications to use the new spawn

* [NOD-806] Fix disabled log at rpcclient

* [NOD-806] Refactor HandlePanic

* [NOD-806] Cancel Logger interface

* [NOD-806] Remove redundant spawn checks from waitgroup_test.go

* [NOD-806] Use caller subsystem when logging panics

* [NOD-806] Fix go vet errors
2020-03-08 11:24:37 +02:00
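
The [NOD-806] entry above changes panic handling so the process logs the panic, gives the log backend a bounded window to flush, and then exits immediately rather than limping on. A hedged sketch of that shape follows; the flushLogs callback and the timeout value are illustrative assumptions.

```go
package main

import (
	"fmt"
	"os"
	"runtime/debug"
	"time"
)

const logFlushTimeout = 2 * time.Second

// HandlePanic is meant to be deferred at the top of every spawned goroutine.
func HandlePanic(flushLogs func(done chan<- struct{})) {
	err := recover()
	if err == nil {
		return
	}
	fmt.Fprintf(os.Stderr, "Fatal panic: %+v\n%s", err, debug.Stack())

	// Let the log backend flush, but never hang the shutdown on it.
	done := make(chan struct{})
	go flushLogs(done)
	select {
	case <-done:
	case <-time.After(logFlushTimeout):
	}
	os.Exit(1)
}

func main() {
	flushLogs := func(done chan<- struct{}) { close(done) } // stand-in for a real log backend
	defer HandlePanic(flushLogs)
	panic("boom")
}
```
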
Svarog
606cd668ff [NOD-810] Fix error text in lookupParentNodes (#651) 2020-03-05 15:49:36 +02:00
Ori Newman
dd537f5143 [NOD-808] Use syndtr/goleveldb instead of btcsuite/goleveldb. (#649) 2020-03-05 12:26:48 +02:00
stasatdaglabs
a1c631be62 [NOD-798] Disconnect from a peer if a block received from it gets rejected (#648)
* [NOD-798] Disconnect from a peer if its block gets rejected.

* [NOD-798] Make a comment less ambiguous.
2020-03-03 09:47:22 +02:00
Ori Newman
707a728656 [NOD-552] Add NormalizeRPCServerAddress and use it where needed (#643)
* [NOD-552] Add NormalizeRPCServerAddress and use it where needed

* [NOD-552] Make NormalizeAddress return an error for an invalid address

* [NOD-552] Use longer lines for a comment
2020-03-01 16:37:26 +02:00
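
The [NOD-552] entry above adds RPC-address normalization that appends the network's default port when none is given and returns an error for addresses that cannot be parsed at all. A small sketch under those assumptions; names and exact behaviour may differ from kaspad's NormalizeRPCServerAddress.

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// NormalizeAddress returns addr with defaultPort appended if no port is present.
func NormalizeAddress(addr, defaultPort string) (string, error) {
	if addr == "" {
		return "", fmt.Errorf("address is empty")
	}
	if _, _, err := net.SplitHostPort(addr); err == nil {
		return addr, nil // already has a port
	}
	// Assume the port is missing and retry with the default one.
	withPort := net.JoinHostPort(strings.Trim(addr, "[]"), defaultPort)
	if _, _, err := net.SplitHostPort(withPort); err != nil {
		return "", fmt.Errorf("invalid address %q: %s", addr, err)
	}
	return withPort, nil
}

func main() {
	fmt.Println(NormalizeAddress("localhost", "16110"))       // localhost:16110 <nil>
	fmt.Println(NormalizeAddress("127.0.0.1:16111", "16110")) // 127.0.0.1:16111 <nil>
}
```
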
stasatdaglabs
80b5631a48 [NOD-726] Only print "no sync peer" message when not current (#646)
* [NOD-726] Only print "no sync peer" message when not current.

* [NOD-726] Shorten duration in which "no sync peer" messages would not print.
2020-02-27 17:38:39 +02:00
Ori Newman
2373965551 [NOD-576] Rename NextHashes to ChildHashes in GetBlock/GetBlockHeaders rpc call (#645)
* [NOD-576] Rename NextHashes to ChildHashes in GetBlock/GetBlockHeaders rpc call

* [NOD-576] Fix typo
2020-02-27 17:34:38 +02:00
Ori Newman
65cbb6655b [NOD-661] Change BCDB subsystem tag (for logs) to KSDB (#644) 2020-02-27 17:30:08 +02:00
Ori Newman
cdd96d0670 [NOD-664] Remove version from everything inside kaspad/cmd - use kaspad version instead (#642)
* [NOD-664] Remove version from everything inside kaspad/cmd - use kaspad version instead

* [NOD-664] Fix broken import
2020-02-27 13:26:22 +02:00
Dan Aharoni
ad04bbde83 [NOD-782] Make sure errors.As gets parameter that implements error interface (#641)
* [NOD-782] Make sure errors.As gets parameter that implements error interface.

* [NOD-782] Pass pointer to errors.As
2020-02-27 12:27:38 +02:00
Ori Newman
5374d95416 [NOD-656] Log hashrate in kaspaminer (#632)
* [NOD-656] Log hashrate in kaspaminer

* [NOD-656] Measure hash rate in kilohashes

* [NOD-656] Show hash rate once in 10 seconds

* [NOD-656] Put hash rate logic in a separate function

* [NOD-656] Create logHashRateInterval constant
2020-02-24 11:59:02 +02:00
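
The [NOD-656] entry above has kaspaminer report its hash rate in kilohashes once every ten seconds, with the period pulled into a logHashRateInterval constant. One possible shape, with an atomic counter standing in for the real mining loop:

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

const logHashRateInterval = 10 * time.Second

var hashesTried uint64

// logHashRate reports kilohashes per second once per interval.
func logHashRate() {
	go func() {
		lastCheck := time.Now()
		for range time.Tick(logHashRateInterval) {
			hashes := atomic.SwapUint64(&hashesTried, 0)
			elapsed := time.Since(lastCheck)
			lastCheck = time.Now()
			kiloHashesPerSecond := float64(hashes) / 1000 / elapsed.Seconds()
			fmt.Printf("Current hash rate is %.2f kilohashes/s\n", kiloHashesPerSecond)
		}
	}()
}

func main() {
	logHashRate()
	for {
		// Stand-in for hashing candidate block headers.
		atomic.AddUint64(&hashesTried, 1)
	}
}
```
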
Ori Newman
de9aa39cc5 [NOD-721] Add defers (#638)
* [NOD-721] Defer unlocks

* [NOD-721] Add functions with locks to rpcmodel

* [NOD-721] Defer unlocks

* [NOD-721] Add filterDataWithLock function

* [NOD-721] Defer unlocks

* [NOD-721] Defer .Close()

* [NOD-721] Fix access to wsc.filterData without a lock

* [NOD-721] De-anonymize some anonymous functions

* [NOD-721] Remove redundant assignments

* [NOD-721] Remove redundant assignments

* [NOD-721] Remove redundant assignments

* [NOD-721] Get rid of submitOld, and break handleGetBlockTemplateLongPoll into smaller functions

* [NOD-721] Rename existsUnspentOutpoint->existsUnspentOutpointNoLock, existsUnspentOutpointWithLock->existsUnspentOutpoint

* [NOD-721] Rename filterDataWithLock->FilterData

* [NOD-721] Fixed comments
2020-02-24 09:19:44 +02:00
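
The [NOD-721] entry above is largely about locking hygiene: defer every unlock so an early return cannot leak a held mutex, and split accessors into an exported method that takes the lock and an unexported one that assumes it is already held (the filterDataWithLock to FilterData renames). A minimal sketch of that convention, with illustrative names:

```go
package main

import (
	"fmt"
	"sync"
)

type wsClient struct {
	mtx        sync.Mutex
	filterData map[string]struct{}
}

// FilterData takes the lock and returns a copy of the client's filter data.
func (c *wsClient) FilterData() map[string]struct{} {
	c.mtx.Lock()
	defer c.mtx.Unlock() // deferred, so any early return still unlocks
	return c.filterDataNoLock()
}

// filterDataNoLock must only be called with c.mtx already held.
func (c *wsClient) filterDataNoLock() map[string]struct{} {
	copied := make(map[string]struct{}, len(c.filterData))
	for key := range c.filterData {
		copied[key] = struct{}{}
	}
	return copied
}

func main() {
	c := &wsClient{filterData: map[string]struct{}{"addr": {}}}
	fmt.Println(len(c.FilterData())) // 1
}
```
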
Ori Newman
98987f4a8f [NOD-603] Update validateParents to use reachability (#640)
* [NOD-603] Update validateParents to use reachability

* [NOD-603] Break a long line

* [NOD-721] Remove redundant check if block parent is a tip
2020-02-24 08:59:12 +02:00
Ori Newman
9745f31b69 [NOD-693] Update link to license (#639) 2020-02-20 17:12:53 +02:00
Ori Newman
ee08531a52 [NOD-610] Rename newSet->newBlockSet and setFromSlice->blockSetFromSlice (#635) 2020-02-20 16:19:28 +02:00
stasatdaglabs
61baf7b260 [NOD-769] Add a log for when a reachability reindex occurs (#637)
*  [NOD-719] Added defers to unlocks  (#618)

* [NOD-719] Added defers to unlocks

* [NOD-719] Added another defer to another Unlock

* [NOD-719] Added yet another defer to yet another Unlock

* [NOD-747] Change FinalityInterval to be 24 hours, isCurrent to be true if the DAG's time is less than 12 hours behind the present, and change MaxInvPerMsg to be 1 << 17 (#625)

* [NOD-769] Add a log for when a reachability reindex occurs.

Co-authored-by: Svarog <feanorr@gmail.com>
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2020-02-19 13:39:45 +02:00
Ori Newman
650e4f735e [NOD-757] Readd addrmanager tests (#628) 2020-02-18 18:12:19 +02:00
Dan Aharoni
550b12b041 [NOD-772] Fix a bug where we ignore the return value of forAllOutboundPeers. (#636) 2020-02-18 18:02:15 +02:00
Ori Newman
a4bb070722 [NOD-754] Fix staticcheck errors (#627)
* [NOD-754] Fix staticcheck errors

* [NOD-754] Remove some unused exported functions

* [NOD-754] Fix staticcheck errors

* [NOD-754] Don't panic if out/in close fails

* [NOD-754] Wrap outside errors with custom message
2020-02-18 16:56:38 +02:00
Ori Newman
30fe0c279b [NOD-738] Move rpcmodel helper functions to pointers package (#629)
* [NOD-738] Move rpcmodel helper functions to copytopointer package

* [NOD-738] Rename copytopointer->pointers
2020-02-18 14:06:34 +02:00
stasatdaglabs
e405dd5981 [NOD-694] Fix requesting blocks that will surely be orphaned during netsync. (#630) 2020-02-18 12:12:34 +02:00
stasatdaglabs
243b4b8021 [NOD-765] Fix database corruption after restart in reachabilitystore and utxodiffstore. (#634) 2020-02-18 12:04:50 +02:00
Ori Newman
dd4c93e1ef [NOD-759] Merge v0.1.1-dev into v0.1.2-dev (#633)
*  [NOD-719] Added defers to unlocks  (#618)

* [NOD-719] Added defers to unlocks

* [NOD-719] Added another defer to another Unlock

* [NOD-719] Added yet another defer to yet another Unlock

* [NOD-747] Change FinalityInterval to be 24 hours, isCurrent to be true if the DAG's time is less than 12 hours behind the present, and change MaxInvPerMsg to be 1 << 17 (#625)
2020-02-18 11:02:25 +02:00
Ori Newman
a07335d74d [NOD-737] Remove btc prefix from util file names (#631) 2020-02-17 13:11:24 +02:00
Dan Aharoni
7567cd4cb9 [NOD-744] Wrap go routines with spawn (#626)
* [NOD-744] Wrap go routines with spawn

* [NOD-747] Wrap some more go routines with spawn

* [NOD-744] Some more missing go routines

* [NOD-744] Break lines so make code more readable

* [NOD-744] Declare a local scope variable so the func would use it.

* [NOD-744] Fix type and update comment.

* [NOD-744] Declare local var so go routine would use it

* [NOD-744] Rename variable, use normal assignment;

* [NOD-744] Rename variable.
2020-02-13 13:10:07 +02:00
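
The [NOD-744] entry above routes goroutines through a spawn wrapper and copies loop variables into local scope before capturing them in closures. A sketch of both ideas follows; the spawn signature is an assumption (kaspad's version presumably ties into its panic handling rather than just printing).

```go
package main

import (
	"fmt"
	"sync"
)

// spawn runs f in a goroutine with a recover guard attached.
func spawn(f func()) {
	go func() {
		defer func() {
			if err := recover(); err != nil {
				fmt.Println("goroutine panicked:", err)
			}
		}()
		f()
	}()
}

func main() {
	peers := []string{"1.2.3.4:16111", "5.6.7.8:16111"}
	var wg sync.WaitGroup
	for _, peer := range peers {
		peer := peer // local copy so the closure uses this iteration's value (pre-Go-1.22 semantics)
		wg.Add(1)
		spawn(func() {
			defer wg.Done()
			fmt.Println("connecting to", peer)
		})
	}
	wg.Wait()
}
```
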
stasatdaglabs
51ff9e2562 [NOD-571] Cover ghostdag in tests where possible (#613)
* [NOD-571] Cover reachabilityInterval split methods.

* [NOD-571] Cover reindexInterval.

* [NOD-571] Cover reachability String() methods.

* [NOD-571] Cover blueAnticoneSize.

* [NOD-571] Remove unnecessary error from setTreeNode.

* [NOD-571] Add TestGHOSTDAGErrors.

* [NOD-571] Use PrepareBlockForTest in TestBlueAnticoneSizeErrors.

* [NOD-571] Use PrepareBlockForTest in TestGHOSTDAGErrors.

* [NOD-571] Add substring checks to TestSplitFractionErrors.

* [NOD-571] Add substring checks to TestSplitExactErrors and TestSplitWithExponentialBiasErrors.

* [NOD-571] Add comments to TestReindexIntervalErrors.

* [NOD-571] Add additional info in some error messages.

* [NOD-571] Fix error messages.
2020-02-09 11:27:10 +02:00
stasatdaglabs
5b8ab63890 [NOD-717] Fix nodes getting stuck in an infinite loop in addrManager.getAddress (#624)
* [NOD-717] Fix nodes getting stuck in an infinite loop in addrManager.getAddress.

* [NOD-717] Rename ResetFailedAttempts -> NotifyConnectionRequestComplete.
2020-02-06 18:17:10 +02:00
Dan Aharoni
3dd7dc4496 [NOD-727] Do not allow delayed blocks from RPC. (#623)
* [NOD-727] Do not allow delayed blocks from RPC.

* [NOD-727] Refactor sentFromRPC -> DisallowDelay

* [NOD-727] Clarify comment; Clarify error message.

* [NOD-727] Change error message.
2020-02-05 11:14:26 +02:00
Ori Newman
d90a08ecfa [NOD-722] Fix processBlockMsg case in blockHandler to send only one response to msg.reply, and rename blockHandler->messageHandler (#622) 2020-02-04 18:10:15 +02:00
Ori Newman
45dc1a3e7b [NOD-545] Remove headers first related logic (#621)
* [NOD-545] Remove headers first related logic

* [NOD-545] Fix tests

* [NOD-545] Change getTopHeadersMaxHeaders to be equal to getHeadersMaxHeaders
2020-02-04 14:54:42 +02:00
Ori Newman
4ffb5daa37 [NOD-622] Fix populateTemplateFromCandidates to sort txsForBlockTemplate.txMasses and txsForBlockTemplate.txFees (#617)
* [NOD-622] Fix populateTemplateFromCandidates to sort txsForBlockTemplate.txMasses and txsForBlockTemplate.txFees

* [NOD-622] Sort transactions in PrepareBlockForTest

* [NOD-622] Remove duplicate append of selected transactions
2020-02-03 13:42:40 +02:00
Ori Newman
b9138b720d [NOD-597] Make BlockIndex clear its dirty entries only after it successfully written them to disk (#620) 2020-02-03 13:39:25 +02:00
Ori Newman
d8954f1339 [NOD-615] Make bluesAnticoneSizes a map with *blockNode as a key (#619) 2020-02-03 12:40:39 +02:00
Ori Newman
eb953286ec [NOD-641] Upgrade to github.com/pkg/errors v0.9.1 and use errors.As where needed (#614)
* [NOD-641] Upgrade to github.com/pkg/errors v0.9.1 and use errors.As where needed

* [NOD-641] Fix find and replace error

* [NOD-641] Use errors.As for error type checking

* [NOD-641] Fix errors.As for pointer types

* [NOD-641] Use errors.As where needed

* [NOD-641] Rename rErr->ruleErr

* [NOD-641] Rename derr->dbErr

* [NOD-641] e->flagsErr where necessary

* [NOD-641] change jerr to more appropriate name

* [NOD-641] Rename cerr->bdRuleErr

* [NOD-641] Rename serr->scriptErr

* [NOD-641] Use errors.Is instead of testutil.AreErrorsEqual in TestNewHashFromStr

* [NOD-641] Rename bdRuleErr->dagRuleErr

* [NOD-641] Rename mErr->msgErr

* [NOD-641] Rename dErr->deserializeErr
2020-02-03 12:38:33 +02:00
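
The [NOD-641] entry above standardizes error inspection on errors.As with typed targets (ruleErr, dbErr, scriptErr and so on), including a fix for pointer targets. A small sketch using the standard library errors package; github.com/pkg/errors v0.9+ exposes a compatible As, so the pattern is the same either way.

```go
package main

import (
	"errors"
	"fmt"
)

// RuleError identifies a consensus-rule violation.
type RuleError struct {
	Code        int
	Description string
}

func (e RuleError) Error() string {
	return fmt.Sprintf("rule error %d: %s", e.Code, e.Description)
}

func processBlock() error {
	err := RuleError{Code: 7, Description: "unknown parent"}
	return fmt.Errorf("processing block: %w", err) // wrap with context
}

func main() {
	err := processBlock()
	var ruleErr RuleError
	if errors.As(err, &ruleErr) { // note the pointer target
		fmt.Println("rejected by consensus rule:", ruleErr.Code)
	}
}
```
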
Ori Newman
41c8178ad3 [NOD-648] Add TestProcessDelayedBlocks (#612)
* [NOD-648] Add TestProcessDelayedBlocks

* [NOD-648] Add one second to secondsUntilDelayedBlockIsValid to make sure the delayedBlock timestamp will be valid, and add comments

* [NOD-648] Remove redundant import

* [NOD-648] Use fakeTimeSource instead of time.Sleep

* [NOD-648] Rename dag.HaveBlock->dag.IsKnownBlock,  dag.BlockExists->dag.IsInDAG

* [NOD-648] Add comment

* [NOD-641] Rename HaveBlock->IsKnownBlock, BlockExists->IsInDAG
2020-02-03 11:30:03 +02:00
Ori Newman
aa74b51e6f [NOD-687] Remove -gcflags='-l' from all tests (#616) 2020-02-02 15:26:26 +02:00
Mike Zak
f7800eb5c4 Merge remote-tracking branch 'origin/v0.1.1-dev' into v0.1.2-dev 2020-02-02 15:17:25 +02:00
Ori Newman
44c55900f8 [NOD-715] Replace testDbRoot with os.TempDir() (#611) 2020-01-30 12:54:15 +02:00
Ori Newman
4c0ea78026 [NOD-586] Remove subTreeSize from reachabilityTreeNode (#610)
* [NOD-586] Remove subTreeSize from reachabilityTreeNode

* [NOD-586] Convert else { if { ... } } to else if { ... }
2020-01-30 10:39:53 +02:00
stasatdaglabs
03a93fe51e [NOD-647] Create a default config file even if the sample default config file is missing (#609)
* [NOD-647] Create a default config file even if the sample default config file is missing.

* [NOD-647] Unfancify WriteString().
2020-01-29 17:40:59 +02:00
Mike Zak
eca0514465 Merge remote-tracking branch 'origin/v0.1.1-dev' into v0.1.2-dev 2020-01-28 14:39:22 +02:00
Mike Zak
5daab45947 Merge remote-tracking branch 'origin/v0.1.1-dev' into v0.1.2-dev 2020-01-28 12:25:52 +02:00
Mike Zak
25bdaeed31 Merge remote-tracking branch 'origin/v0.1.1-dev' into v0.1.2-dev 2020-01-28 11:35:21 +02:00
Mike Zak
a3dc2f7da7 Update version to v0.1.2 2020-01-28 10:53:10 +02:00
814 changed files with 36641 additions and 68058 deletions


@@ -4,7 +4,7 @@ Kaspad
 Warning: This is pre-alpha software. There's no guarantee anything works.
 ====
-[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
+[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
 [![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad)
 Kaspad is the reference full node Kaspa implementation written in Go (golang).
@@ -40,10 +40,8 @@ recommended that `GOPATH` is set to a directory in your home directory such as
 ```bash
 $ git clone https://github.com/kaspanet/kaspad $GOPATH/src/github.com/kaspanet/kaspad
 $ cd $GOPATH/src/github.com/kaspanet/kaspad
-$ ./test.sh
 $ go install . ./cmd/...
 ```
-`./test.sh` tests can be skipped, but some things might not run correctly on your system if tests fail.
 - Kaspad (and utilities) should now be installed in `$GOPATH/bin`. If you did
 not already add the bin directory to your system path during Go installation,
@@ -75,5 +73,5 @@ The documentation is a work-in-progress. It is located in the [docs](https://git
 ## License
-Kaspad is licensed under the [copyfree](http://copyfree.org) ISC License.
+Kaspad is licensed under the copyfree [ISC License](https://choosealicense.com/licenses/isc/).

File diff suppressed because it is too large.


@@ -1,123 +0,0 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package addrmgr

import (
	"net"
	"testing"

	"github.com/pkg/errors"

	"github.com/kaspanet/kaspad/wire"
)

// naTest is used to describe a test to be performed against the NetAddressKey
// method.
type naTest struct {
	in   wire.NetAddress
	want string
}

// naTests houses all of the tests to be performed against the NetAddressKey
// method.
var naTests = make([]naTest, 0)

// Put some IP in here for convenience. Points to google.
var someIP = "173.194.115.66"

// addNaTests
func addNaTests() {
	// IPv4
	// Localhost
	addNaTest("127.0.0.1", 16111, "127.0.0.1:16111")
	addNaTest("127.0.0.1", 16110, "127.0.0.1:16110")

	// Class A
	addNaTest("1.0.0.1", 16111, "1.0.0.1:16111")
	addNaTest("2.2.2.2", 16110, "2.2.2.2:16110")
	addNaTest("27.253.252.251", 8335, "27.253.252.251:8335")
	addNaTest("123.3.2.1", 8336, "123.3.2.1:8336")

	// Private Class A
	addNaTest("10.0.0.1", 16111, "10.0.0.1:16111")
	addNaTest("10.1.1.1", 16110, "10.1.1.1:16110")
	addNaTest("10.2.2.2", 8335, "10.2.2.2:8335")
	addNaTest("10.10.10.10", 8336, "10.10.10.10:8336")

	// Class B
	addNaTest("128.0.0.1", 16111, "128.0.0.1:16111")
	addNaTest("129.1.1.1", 16110, "129.1.1.1:16110")
	addNaTest("180.2.2.2", 8335, "180.2.2.2:8335")
	addNaTest("191.10.10.10", 8336, "191.10.10.10:8336")

	// Private Class B
	addNaTest("172.16.0.1", 16111, "172.16.0.1:16111")
	addNaTest("172.16.1.1", 16110, "172.16.1.1:16110")
	addNaTest("172.16.2.2", 8335, "172.16.2.2:8335")
	addNaTest("172.16.172.172", 8336, "172.16.172.172:8336")

	// Class C
	addNaTest("193.0.0.1", 16111, "193.0.0.1:16111")
	addNaTest("200.1.1.1", 16110, "200.1.1.1:16110")
	addNaTest("205.2.2.2", 8335, "205.2.2.2:8335")
	addNaTest("223.10.10.10", 8336, "223.10.10.10:8336")

	// Private Class C
	addNaTest("192.168.0.1", 16111, "192.168.0.1:16111")
	addNaTest("192.168.1.1", 16110, "192.168.1.1:16110")
	addNaTest("192.168.2.2", 8335, "192.168.2.2:8335")
	addNaTest("192.168.192.192", 8336, "192.168.192.192:8336")

	// IPv6
	// Localhost
	addNaTest("::1", 16111, "[::1]:16111")
	addNaTest("fe80::1", 16110, "[fe80::1]:16110")

	// Link-local
	addNaTest("fe80::1:1", 16111, "[fe80::1:1]:16111")
	addNaTest("fe91::2:2", 16110, "[fe91::2:2]:16110")
	addNaTest("fea2::3:3", 8335, "[fea2::3:3]:8335")
	addNaTest("feb3::4:4", 8336, "[feb3::4:4]:8336")

	// Site-local
	addNaTest("fec0::1:1", 16111, "[fec0::1:1]:16111")
	addNaTest("fed1::2:2", 16110, "[fed1::2:2]:16110")
	addNaTest("fee2::3:3", 8335, "[fee2::3:3]:8335")
	addNaTest("fef3::4:4", 8336, "[fef3::4:4]:8336")
}

func addNaTest(ip string, port uint16, want string) {
	nip := net.ParseIP(ip)
	na := *wire.NewNetAddressIPPort(nip, port, wire.SFNodeNetwork)
	test := naTest{na, want}
	naTests = append(naTests, test)
}

func lookupFunc(host string) ([]net.IP, error) {
	return nil, errors.New("not implemented")
}

func TestStartStop(t *testing.T) {
	n := New("teststartstop", lookupFunc, nil)
	n.Start()
	err := n.Stop()
	if err != nil {
		t.Fatalf("Address Manager failed to stop: %v", err)
	}
}

func TestNetAddressKey(t *testing.T) {
	addNaTests()

	t.Logf("Running %d tests", len(naTests))
	for i, test := range naTests {
		key := NetAddressKey(&test.in)
		if key != test.want {
			t.Errorf("NetAddressKey #%d\n got: %s want: %s", i, key, test.want)
			continue
		}
	}
}

View File

@@ -1,25 +0,0 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package addrmgr
import (
"time"
"github.com/kaspanet/kaspad/wire"
)
func TstKnownAddressIsBad(ka *KnownAddress) bool {
return ka.isBad()
}
func TstKnownAddressChance(ka *KnownAddress) float64 {
return ka.chance()
}
func TstNewKnownAddress(na *wire.NetAddress, attempts int,
lastattempt, lastsuccess time.Time, tried bool, refs int) *KnownAddress {
return &KnownAddress{na: na, attempts: attempts, lastattempt: lastattempt,
lastsuccess: lastsuccess, tried: tried, refs: refs}
}

View File

@@ -1,114 +0,0 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package addrmgr_test
import (
"math"
"testing"
"time"
"github.com/kaspanet/kaspad/addrmgr"
"github.com/kaspanet/kaspad/wire"
)
func TestChance(t *testing.T) {
now := time.Unix(time.Now().Unix(), 0)
var tests = []struct {
addr *addrmgr.KnownAddress
expected float64
}{
{
//Test normal case
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
0, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
1.0,
}, {
//Test case in which lastseen < 0
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(20 * time.Second)},
0, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
1.0,
}, {
//Test case in which lastattempt < 0
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
0, time.Now().Add(30*time.Minute), time.Now(), false, 0),
1.0 * .01,
}, {
//Test case in which lastattempt < ten minutes
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
0, time.Now().Add(-5*time.Minute), time.Now(), false, 0),
1.0 * .01,
}, {
//Test case with several failed attempts.
addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)},
2, time.Now().Add(-30*time.Minute), time.Now(), false, 0),
1 / 1.5 / 1.5,
},
}
err := .0001
for i, test := range tests {
chance := addrmgr.TstKnownAddressChance(test.addr)
if math.Abs(test.expected-chance) >= err {
t.Errorf("case %d: got %f, expected %f", i, chance, test.expected)
}
}
}
func TestIsBad(t *testing.T) {
now := time.Unix(time.Now().Unix(), 0)
future := now.Add(35 * time.Minute)
monthOld := now.Add(-43 * time.Hour * 24)
secondsOld := now.Add(-2 * time.Second)
minutesOld := now.Add(-27 * time.Minute)
hoursOld := now.Add(-5 * time.Hour)
zeroTime := time.Time{}
futureNa := &wire.NetAddress{Timestamp: future}
minutesOldNa := &wire.NetAddress{Timestamp: minutesOld}
monthOldNa := &wire.NetAddress{Timestamp: monthOld}
currentNa := &wire.NetAddress{Timestamp: secondsOld}
//Test addresses that have been tried in the last minute.
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(futureNa, 3, secondsOld, zeroTime, false, 0)) {
t.Errorf("test case 1: addresses that have been tried in the last minute are not bad.")
}
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(monthOldNa, 3, secondsOld, zeroTime, false, 0)) {
t.Errorf("test case 2: addresses that have been tried in the last minute are not bad.")
}
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 3, secondsOld, zeroTime, false, 0)) {
t.Errorf("test case 3: addresses that have been tried in the last minute are not bad.")
}
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 3, secondsOld, monthOld, true, 0)) {
t.Errorf("test case 4: addresses that have been tried in the last minute are not bad.")
}
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 2, secondsOld, secondsOld, true, 0)) {
t.Errorf("test case 5: addresses that have been tried in the last minute are not bad.")
}
//Test address that claims to be from the future.
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(futureNa, 0, minutesOld, hoursOld, true, 0)) {
t.Errorf("test case 6: addresses that claim to be from the future are bad.")
}
//Test address that has not been seen in over a month.
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(monthOldNa, 0, minutesOld, hoursOld, true, 0)) {
t.Errorf("test case 7: addresses more than a month old are bad.")
}
//It has failed at least three times and never succeeded.
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 3, minutesOld, zeroTime, true, 0)) {
t.Errorf("test case 8: addresses that have never succeeded are bad.")
}
//It has failed ten times in the last week
if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 10, minutesOld, monthOld, true, 0)) {
t.Errorf("test case 9: addresses that have not succeeded in too long are bad.")
}
//Test an address that should work.
if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 2, minutesOld, hoursOld, true, 0)) {
t.Errorf("test case 10: This should be a valid address.")
}
}

app/app.go (new file, 248 lines)
View File

@@ -0,0 +1,248 @@
package app
import (
"fmt"
"sync/atomic"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/domain/blockdag/indexers"
"github.com/kaspanet/kaspad/domain/mempool"
"github.com/kaspanet/kaspad/domain/mining"
"github.com/kaspanet/kaspad/domain/txscript"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/db/dbaccess"
"github.com/kaspanet/kaspad/infrastructure/network/connmanager"
"github.com/kaspanet/kaspad/infrastructure/network/dnsseed"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/kaspanet/kaspad/infrastructure/network/rpc"
"github.com/kaspanet/kaspad/infrastructure/os/signal"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/panics"
)
// App is a wrapper for all the kaspad services
type App struct {
cfg *config.Config
rpcServer *rpc.Server
addressManager *addressmanager.AddressManager
protocolManager *protocol.Manager
connectionManager *connmanager.ConnectionManager
netAdapter *netadapter.NetAdapter
started, shutdown int32
}
// Start launches all the kaspad services.
func (a *App) Start() {
// Already started?
if atomic.AddInt32(&a.started, 1) != 1 {
return
}
log.Trace("Starting kaspad")
err := a.protocolManager.Start()
if err != nil {
panics.Exit(log, fmt.Sprintf("Error starting the p2p protocol: %+v", err))
}
a.maybeSeedFromDNS()
a.connectionManager.Start()
if !a.cfg.DisableRPC {
a.rpcServer.Start()
}
}
// Stop gracefully shuts down all the kaspad services.
func (a *App) Stop() {
// Make sure this only happens once.
if atomic.AddInt32(&a.shutdown, 1) != 1 {
log.Infof("Kaspad is already in the process of shutting down")
return
}
log.Warnf("Kaspad shutting down")
a.connectionManager.Stop()
err := a.protocolManager.Stop()
if err != nil {
log.Errorf("Error stopping the p2p protocol: %+v", err)
}
// Shutdown the RPC server if it's not disabled.
if !a.cfg.DisableRPC {
err := a.rpcServer.Stop()
if err != nil {
log.Errorf("Error stopping rpcServer: %+v", err)
}
}
err = a.addressManager.Stop()
if err != nil {
log.Errorf("Error stopping address manager: %s", err)
}
return
}
// New returns a new App instance configured for the kaspa network specified
// in cfg. Use Start to begin accepting connections from peers.
func New(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, interrupt <-chan struct{}) (*App, error) {
indexManager, acceptanceIndex := setupIndexes(cfg)
sigCache := txscript.NewSigCache(cfg.SigCacheMaxSize)
// Create a new block DAG instance with the appropriate configuration.
dag, err := setupDAG(cfg, databaseContext, interrupt, sigCache, indexManager)
if err != nil {
return nil, err
}
txMempool := setupMempool(cfg, dag, sigCache)
netAdapter, err := netadapter.NewNetAdapter(cfg)
if err != nil {
return nil, err
}
addressManager, err := addressmanager.New(cfg, databaseContext)
if err != nil {
return nil, err
}
connectionManager, err := connmanager.New(cfg, netAdapter, addressManager)
if err != nil {
return nil, err
}
protocolManager, err := protocol.NewManager(cfg, dag, netAdapter, addressManager, txMempool, connectionManager)
if err != nil {
return nil, err
}
rpcServer, err := setupRPC(
cfg, dag, txMempool, sigCache, acceptanceIndex, connectionManager, addressManager, protocolManager)
if err != nil {
return nil, err
}
return &App{
cfg: cfg,
rpcServer: rpcServer,
protocolManager: protocolManager,
connectionManager: connectionManager,
netAdapter: netAdapter,
addressManager: addressManager,
}, nil
}
func (a *App) maybeSeedFromDNS() {
if !a.cfg.DisableDNSSeed {
dnsseed.SeedFromDNS(a.cfg.NetParams(), a.cfg.DNSSeed, appmessage.SFNodeNetwork, false, nil,
a.cfg.Lookup, func(addresses []*appmessage.NetAddress) {
// Kaspad uses a lookup of the DNS seeder here. Since the seeder returns
// IPs of nodes and not its own IP, we cannot know the real IP of the
// source, so we take the first returned address as the source.
a.addressManager.AddAddresses(addresses, addresses[0], nil)
})
}
}
func setupDAG(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, interrupt <-chan struct{},
sigCache *txscript.SigCache, indexManager blockdag.IndexManager) (*blockdag.BlockDAG, error) {
dag, err := blockdag.New(&blockdag.Config{
Interrupt: interrupt,
DatabaseContext: databaseContext,
DAGParams: cfg.NetParams(),
TimeSource: blockdag.NewTimeSource(),
SigCache: sigCache,
IndexManager: indexManager,
SubnetworkID: cfg.SubnetworkID,
})
return dag, err
}
func setupIndexes(cfg *config.Config) (blockdag.IndexManager, *indexers.AcceptanceIndex) {
// Create indexes if needed.
var indexes []indexers.Indexer
var acceptanceIndex *indexers.AcceptanceIndex
if cfg.AcceptanceIndex {
log.Info("acceptance index is enabled")
acceptanceIndex = indexers.NewAcceptanceIndex()
indexes = append(indexes, acceptanceIndex)
}
// Create an index manager if any of the optional indexes are enabled.
if len(indexes) == 0 {
return nil, nil
}
indexManager := indexers.NewManager(indexes)
return indexManager, acceptanceIndex
}
func setupMempool(cfg *config.Config, dag *blockdag.BlockDAG, sigCache *txscript.SigCache) *mempool.TxPool {
mempoolConfig := mempool.Config{
Policy: mempool.Policy{
AcceptNonStd: cfg.RelayNonStd,
MaxOrphanTxs: cfg.MaxOrphanTxs,
MaxOrphanTxSize: config.DefaultMaxOrphanTxSize,
MinRelayTxFee: cfg.MinRelayTxFee,
MaxTxVersion: 1,
},
CalcSequenceLockNoLock: func(tx *util.Tx, utxoSet blockdag.UTXOSet) (*blockdag.SequenceLock, error) {
return dag.CalcSequenceLockNoLock(tx, utxoSet)
},
SigCache: sigCache,
DAG: dag,
}
return mempool.New(&mempoolConfig)
}
func setupRPC(cfg *config.Config,
dag *blockdag.BlockDAG,
txMempool *mempool.TxPool,
sigCache *txscript.SigCache,
acceptanceIndex *indexers.AcceptanceIndex,
connectionManager *connmanager.ConnectionManager,
addressManager *addressmanager.AddressManager,
protocolManager *protocol.Manager) (*rpc.Server, error) {
if !cfg.DisableRPC {
policy := mining.Policy{
BlockMaxMass: cfg.BlockMaxMass,
}
blockTemplateGenerator := mining.NewBlkTmplGenerator(&policy, txMempool, dag, sigCache)
rpcServer, err := rpc.NewRPCServer(cfg, dag, txMempool, acceptanceIndex, blockTemplateGenerator,
connectionManager, addressManager, protocolManager)
if err != nil {
return nil, err
}
// Signal process shutdown when the RPC server requests it.
spawn("setupRPC-handleShutdownRequest", func() {
<-rpcServer.RequestedProcessShutdown()
signal.ShutdownRequestChannel <- struct{}{}
})
return rpcServer, nil
}
return nil, nil
}
// P2PNodeID returns the network ID associated with this App
func (a *App) P2PNodeID() *id.ID {
return a.netAdapter.ID()
}
// AddressManager returns the AddressManager associated with this App
func (a *App) AddressManager() *addressmanager.AddressManager {
return a.addressManager
}
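The new app.go above is the node's composition root: New wires the DAG, mempool, net adapter, address manager, connection manager and (optionally) the RPC server, while Start and Stop drive their lifecycles. As a rough usage sketch — assuming the caller has already loaded a config.Config, opened a dbaccess.DatabaseContext and set up an interrupt channel, none of which is shown in this diff — a main package might drive it like this (runApp is illustrative, not code from this changeset):

```go
package main

import (
	"github.com/kaspanet/kaspad/app"
	"github.com/kaspanet/kaspad/infrastructure/config"
	"github.com/kaspanet/kaspad/infrastructure/db/dbaccess"
)

// runApp is an illustrative sketch of wiring the App type shown above.
func runApp(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, interrupt <-chan struct{}) error {
	kaspadApp, err := app.New(cfg, databaseContext, interrupt)
	if err != nil {
		return err
	}

	// Start launches the p2p protocol, optional DNS seeding, the connection
	// manager and, unless RPC is disabled in the config, the RPC server.
	kaspadApp.Start()
	defer kaspadApp.Stop()

	// Block until the caller signals shutdown.
	<-interrupt
	return nil
}
```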

View File

@@ -1,7 +1,7 @@
wire
====
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad/wire)
=======
@@ -36,7 +36,7 @@ to a remote node running a kaspa peer. Example syntax is:
// Reads and validates the next kaspa message from conn using the
// protocol version pver and the kaspa network kaspanet. The returns
// are a wire.Message, a []byte which contains the unmarshalled
// are an appmessage.Message, a []byte which contains the unmarshalled
// raw payload, and a possible error.
msg, rawPayload, err := wire.ReadMessage(conn, pver, kaspanet)
if err != nil {

View File

@@ -0,0 +1,24 @@
package appmessage
import "time"
type baseMessage struct {
messageNumber uint64
receivedAt time.Time
}
func (b *baseMessage) MessageNumber() uint64 {
return b.messageNumber
}
func (b *baseMessage) SetMessageNumber(messageNumber uint64) {
b.messageNumber = messageNumber
}
func (b *baseMessage) ReceivedAt() time.Time {
return b.receivedAt
}
func (b *baseMessage) SetReceivedAt(receivedAt time.Time) {
b.receivedAt = receivedAt
}

View File

@@ -2,15 +2,13 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
package appmessage
import (
"bytes"
"compress/bzip2"
"fmt"
"io/ioutil"
"math"
"net"
"os"
"testing"
@@ -18,7 +16,7 @@ import (
)
// genesisCoinbaseTx is the coinbase transaction for the genesis blocks for
// the main network, regression test network, and test network.
// the main network and test network.
var genesisCoinbaseTxIns = []*TxIn{
{
PreviousOutpoint: Outpoint{
@@ -354,7 +352,7 @@ func BenchmarkReadBlockHeader(b *testing.B) {
0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, // MerkleRoot
0x29, 0xab, 0x5f, 0x49, 0x00, 0x00, 0x00, 0x00, // Timestamp
0xff, 0xff, 0x00, 0x1d, // Bits
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce. TODO: (Ori) Replace to a real nonce
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
0x00, // TxnCount Varint
}
r := bytes.NewReader(buf)
@@ -374,218 +372,6 @@ func BenchmarkWriteBlockHeader(b *testing.B) {
}
}
// BenchmarkDecodeGetHeaders performs a benchmark on how long it takes to
// decode a getheaders message.
func BenchmarkDecodeGetHeaders(b *testing.B) {
pver := ProtocolVersion
var m MsgGetHeaders
m.LowHash = &daghash.Hash{1}
m.HighHash = &daghash.Hash{1}
// Serialize it so the bytes are available to test the decode below.
var bb bytes.Buffer
if err := m.KaspaEncode(&bb, pver); err != nil {
b.Fatalf("MsgGetHeaders.KaspaEncode: unexpected error: %v", err)
}
buf := bb.Bytes()
r := bytes.NewReader(buf)
var msg MsgGetHeaders
b.ResetTimer()
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
msg.KaspaDecode(r, pver)
}
}
// BenchmarkDecodeHeaders performs a benchmark on how long it takes to
// decode a headers message with the maximum number of headers and maximum number of
// parent hashes per header.
func BenchmarkDecodeHeaders(b *testing.B) {
// Create a message with the maximum number of headers.
pver := ProtocolVersion
var m MsgHeaders
for i := 0; i < MaxBlockHeadersPerMsg; i++ {
hash, err := daghash.NewHashFromStr(fmt.Sprintf("%x", i))
if err != nil {
b.Fatalf("NewHashFromStr: unexpected error: %v", err)
}
parentHashes := make([]*daghash.Hash, MaxNumParentBlocks)
for j := byte(0); j < MaxNumParentBlocks; j++ {
hash, err := daghash.NewHashFromStr(fmt.Sprintf("%x%x", i, j))
if err != nil {
b.Fatalf("NewHashFromStr: unexpected error: %v", err)
}
parentHashes[i] = hash
}
m.AddBlockHeader(NewBlockHeader(1, parentHashes, hash, hash, hash, 0, uint64(i)))
}
// Serialize it so the bytes are available to test the decode below.
var bb bytes.Buffer
if err := m.KaspaEncode(&bb, pver); err != nil {
b.Fatalf("MsgHeaders.KaspaEncode: unexpected error: %v", err)
}
buf := bb.Bytes()
r := bytes.NewReader(buf)
var msg MsgHeaders
b.ResetTimer()
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
msg.KaspaDecode(r, pver)
}
}
// BenchmarkDecodeGetBlockInvs performs a benchmark on how long it takes to
// decode a getblockinvs message.
func BenchmarkDecodeGetBlockInvs(b *testing.B) {
pver := ProtocolVersion
var m MsgGetBlockInvs
m.LowHash = &daghash.Hash{1}
m.HighHash = &daghash.Hash{1}
// Serialize it so the bytes are available to test the decode below.
var bb bytes.Buffer
if err := m.KaspaEncode(&bb, pver); err != nil {
b.Fatalf("MsgGetBlockInvs.KaspaEncode: unexpected error: %v", err)
}
buf := bb.Bytes()
r := bytes.NewReader(buf)
var msg MsgGetBlockInvs
b.ResetTimer()
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
msg.KaspaDecode(r, pver)
}
}
// BenchmarkDecodeAddr performs a benchmark on how long it takes to decode an
// addr message with the maximum number of addresses.
func BenchmarkDecodeAddr(b *testing.B) {
// Create a message with the maximum number of addresses.
pver := ProtocolVersion
ip := net.ParseIP("127.0.0.1")
ma := NewMsgAddr(false, nil)
for port := uint16(0); port < MaxAddrPerMsg; port++ {
ma.AddAddress(NewNetAddressIPPort(ip, port, SFNodeNetwork))
}
// Serialize it so the bytes are available to test the decode below.
var bb bytes.Buffer
if err := ma.KaspaEncode(&bb, pver); err != nil {
b.Fatalf("MsgAddr.KaspaEncode: unexpected error: %v", err)
}
buf := bb.Bytes()
r := bytes.NewReader(buf)
var msg MsgAddr
b.ResetTimer()
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
msg.KaspaDecode(r, pver)
}
}
// BenchmarkDecodeInv performs a benchmark on how long it takes to decode an inv
// message with the maximum number of entries.
func BenchmarkDecodeInv(b *testing.B) {
// Create a message with the maximum number of entries.
pver := ProtocolVersion
var m MsgInv
for i := 0; i < MaxInvPerMsg; i++ {
hash, err := daghash.NewHashFromStr(fmt.Sprintf("%x", i))
if err != nil {
b.Fatalf("NewHashFromStr: unexpected error: %v", err)
}
m.AddInvVect(NewInvVect(InvTypeBlock, hash))
}
// Serialize it so the bytes are available to test the decode below.
var bb bytes.Buffer
if err := m.KaspaEncode(&bb, pver); err != nil {
b.Fatalf("MsgInv.KaspaEncode: unexpected error: %v", err)
}
buf := bb.Bytes()
r := bytes.NewReader(buf)
var msg MsgInv
b.ResetTimer()
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
msg.KaspaDecode(r, pver)
}
}
// BenchmarkDecodeNotFound performs a benchmark on how long it takes to decode
// a notfound message with the maximum number of entries.
func BenchmarkDecodeNotFound(b *testing.B) {
// Create a message with the maximum number of entries.
pver := ProtocolVersion
var m MsgNotFound
for i := 0; i < MaxInvPerMsg; i++ {
hash, err := daghash.NewHashFromStr(fmt.Sprintf("%x", i))
if err != nil {
b.Fatalf("NewHashFromStr: unexpected error: %v", err)
}
m.AddInvVect(NewInvVect(InvTypeBlock, hash))
}
// Serialize it so the bytes are available to test the decode below.
var bb bytes.Buffer
if err := m.KaspaEncode(&bb, pver); err != nil {
b.Fatalf("MsgNotFound.KaspaEncode: unexpected error: %v", err)
}
buf := bb.Bytes()
r := bytes.NewReader(buf)
var msg MsgNotFound
b.ResetTimer()
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
msg.KaspaDecode(r, pver)
}
}
// BenchmarkDecodeMerkleBlock performs a benchmark on how long it takes to
// decode a reasonably sized merkleblock message.
func BenchmarkDecodeMerkleBlock(b *testing.B) {
// Create a message with random data.
pver := ProtocolVersion
var m MsgMerkleBlock
hash, err := daghash.NewHashFromStr(fmt.Sprintf("%x", 10000))
if err != nil {
b.Fatalf("NewHashFromStr: unexpected error: %v", err)
}
m.Header = *NewBlockHeader(1, []*daghash.Hash{hash}, hash, hash, hash, 0, uint64(10000))
for i := 0; i < 105; i++ {
hash, err := daghash.NewHashFromStr(fmt.Sprintf("%x", i))
if err != nil {
b.Fatalf("NewHashFromStr: unexpected error: %v", err)
}
m.AddTxHash(hash)
if i%8 == 0 {
m.Flags = append(m.Flags, uint8(i))
}
}
// Serialize it so the bytes are available to test the decode below.
var bb bytes.Buffer
if err := m.KaspaEncode(&bb, pver); err != nil {
b.Fatalf("MsgMerkleBlock.KaspaEncode: unexpected error: %v", err)
}
buf := bb.Bytes()
r := bytes.NewReader(buf)
var msg MsgMerkleBlock
b.ResetTimer()
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
msg.KaspaDecode(r, pver)
}
}
// BenchmarkTxHash performs a benchmark on how long it takes to hash a
// transaction.
func BenchmarkTxHash(b *testing.B) {
@@ -625,3 +411,21 @@ func BenchmarkDoubleHashH(b *testing.B) {
_ = daghash.DoubleHashH(txBytes)
}
}
// BenchmarkDoubleHashWriter performs a benchmark on how long it takes to perform
// a double hash via the writer returning a daghash.Hash.
func BenchmarkDoubleHashWriter(b *testing.B) {
var buf bytes.Buffer
err := genesisCoinbaseTx.Serialize(&buf)
if err != nil {
b.Fatalf("Serialize: unexpected error: %+v", err)
}
txBytes := buf.Bytes()
b.ResetTimer()
for i := 0; i < b.N; i++ {
writer := daghash.NewDoubleHashWriter()
_, _ = writer.Write(txBytes)
writer.Finalize()
}
}

View File

@@ -2,14 +2,14 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
package appmessage
import (
"bytes"
"fmt"
"io"
"time"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/mstime"
)
// BaseBlockHeaderPayload is the base number of bytes a block header can be,
@@ -49,7 +49,7 @@ type BlockHeader struct {
UTXOCommitment *daghash.Hash
// Time the block was created.
Timestamp time.Time
Timestamp mstime.Time
// Difficulty target for the block.
Bits uint32
@@ -66,13 +66,18 @@ func (h *BlockHeader) NumParentBlocks() byte {
// BlockHash computes the block identifier hash for the given block header.
func (h *BlockHeader) BlockHash() *daghash.Hash {
// Encode the header and double sha256 everything prior to the number of
// transactions. Ignore the error returns since there is no way the
// encode could fail except being out of memory which would cause a
// run-time panic.
buf := bytes.NewBuffer(make([]byte, 0, BaseBlockHeaderPayload+h.NumParentBlocks()))
_ = writeBlockHeader(buf, 0, h)
// transactions.
writer := daghash.NewDoubleHashWriter()
err := writeBlockHeader(writer, 0, h)
if err != nil {
// This could only happen if the writer returned an error, and this writer
// should never return one (no allocations or possible failures); the only
// non-writer error path here is an unknown type in `WriteElement`.
panic(fmt.Sprintf("BlockHash() failed. this should never fail unless BlockHeader was changed. err: %+v", err))
}
return daghash.DoubleHashP(buf.Bytes())
res := writer.Finalize()
return &res
}
// IsGenesis returns true iff this block is a genesis block
@@ -83,7 +88,7 @@ func (h *BlockHeader) IsGenesis() bool {
// KaspaDecode decodes r using the kaspa protocol encoding into the receiver.
// This is part of the Message interface implementation.
// See Deserialize for decoding block headers stored to disk, such as in a
// database, as opposed to decoding block headers from the wire.
// database, as opposed to decoding block headers received from the network.
func (h *BlockHeader) KaspaDecode(r io.Reader, pver uint32) error {
return readBlockHeader(r, pver, h)
}
@@ -91,7 +96,7 @@ func (h *BlockHeader) KaspaDecode(r io.Reader, pver uint32) error {
// KaspaEncode encodes the receiver to w using the kaspa protocol encoding.
// This is part of the Message interface implementation.
// See Serialize for encoding block headers to be stored to disk, such as in a
// database, as opposed to encoding block headers for the wire.
// database, as opposed to encoding block headers to be sent over the network.
func (h *BlockHeader) KaspaEncode(w io.Writer, pver uint32) error {
return writeBlockHeader(w, pver, h)
}
@@ -100,7 +105,7 @@ func (h *BlockHeader) KaspaEncode(w io.Writer, pver uint32) error {
// that is suitable for long-term storage such as a database while respecting
// the Version field.
func (h *BlockHeader) Deserialize(r io.Reader) error {
// At the current time, there is no difference between the wire encoding
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of readBlockHeader.
return readBlockHeader(r, 0, h)
@@ -110,7 +115,7 @@ func (h *BlockHeader) Deserialize(r io.Reader) error {
// that is suitable for long-term storage such as a database while respecting
// the Version field.
func (h *BlockHeader) Serialize(w io.Writer) error {
// At the current time, there is no difference between the wire encoding
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of writeBlockHeader.
return writeBlockHeader(w, 0, h)
@@ -128,7 +133,7 @@ func (h *BlockHeader) SerializeSize() int {
func NewBlockHeader(version int32, parentHashes []*daghash.Hash, hashMerkleRoot *daghash.Hash,
acceptedIDMerkleRoot *daghash.Hash, utxoCommitment *daghash.Hash, bits uint32, nonce uint64) *BlockHeader {
// Limit the timestamp to one second precision since the protocol
// Limit the timestamp to one millisecond precision since the protocol
// doesn't support better.
return &BlockHeader{
Version: version,
@@ -136,7 +141,7 @@ func NewBlockHeader(version int32, parentHashes []*daghash.Hash, hashMerkleRoot
HashMerkleRoot: hashMerkleRoot,
AcceptedIDMerkleRoot: acceptedIDMerkleRoot,
UTXOCommitment: utxoCommitment,
Timestamp: time.Unix(time.Now().Unix(), 0),
Timestamp: mstime.Now(),
Bits: bits,
Nonce: nonce,
}
@@ -144,7 +149,7 @@ func NewBlockHeader(version int32, parentHashes []*daghash.Hash, hashMerkleRoot
// readBlockHeader reads a kaspa block header from r. See Deserialize for
// decoding block headers stored to disk, such as in a database, as opposed to
// decoding from the wire.
// decoding from the network.
func readBlockHeader(r io.Reader, pver uint32, bh *BlockHeader) error {
var numParentBlocks byte
err := readElements(r, &bh.Version, &numParentBlocks)
@@ -170,9 +175,9 @@ func readBlockHeader(r io.Reader, pver uint32, bh *BlockHeader) error {
// writeBlockHeader writes a kaspa block header to w. See Serialize for
// encoding block headers to be stored to disk, such as in a database, as
// opposed to encoding for the wire.
// opposed to encoding for the network.
func writeBlockHeader(w io.Writer, pver uint32, bh *BlockHeader) error {
sec := int64(bh.Timestamp.Unix())
timestamp := bh.Timestamp.UnixMilliseconds()
if err := writeElements(w, bh.Version, bh.NumParentBlocks()); err != nil {
return err
}
@@ -181,5 +186,5 @@ func writeBlockHeader(w io.Writer, pver uint32, bh *BlockHeader) error {
return err
}
}
return writeElements(w, bh.HashMerkleRoot, bh.AcceptedIDMerkleRoot, bh.UTXOCommitment, sec, bh.Bits, bh.Nonce)
return writeElements(w, bh.HashMerkleRoot, bh.AcceptedIDMerkleRoot, bh.UTXOCommitment, timestamp, bh.Bits, bh.Nonce)
}
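A side effect of the mstime change is visible in writeBlockHeader above: the Timestamp field is now serialized as a little-endian int64 of milliseconds since the Unix epoch instead of whole seconds. A stdlib-only sketch (not kaspad code) that reproduces the timestamp bytes used in the block header encoding tests that follow:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Timestamp from the test vectors below: 0x17315ed0f99 milliseconds
	// since the Unix epoch (mid-2020).
	const timestampMillis int64 = 0x17315ed0f99

	// writeElements emits it as a little-endian int64.
	var buf [8]byte
	binary.LittleEndian.PutUint64(buf[:], uint64(timestampMillis))
	fmt.Printf("% x\n", buf) // 99 0f ed 15 73 01 00 00 — matches the expected encoding
}
```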

View File

@@ -2,17 +2,16 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
package appmessage
import (
"bytes"
"reflect"
"testing"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/util/random"
"reflect"
"testing"
)
// TestBlockHeader tests the BlockHeader API.
@@ -48,9 +47,9 @@ func TestBlockHeader(t *testing.T) {
}
}
// TestBlockHeaderWire tests the BlockHeader wire encode and decode for various
// TestBlockHeaderEncoding tests the BlockHeader appmessage encode and decode for various
// protocol versions.
func TestBlockHeaderWire(t *testing.T) {
func TestBlockHeaderEncoding(t *testing.T) {
nonce := uint64(123123) // 0x000000000001e0f3
pver := ProtocolVersion
@@ -62,12 +61,12 @@ func TestBlockHeaderWire(t *testing.T) {
HashMerkleRoot: mainnetGenesisMerkleRoot,
AcceptedIDMerkleRoot: exampleAcceptedIDMerkleRoot,
UTXOCommitment: exampleUTXOCommitment,
Timestamp: time.Unix(0x495fab29, 0), // 2009-01-03 12:15:05 -0600 CST
Timestamp: mstime.UnixMilliseconds(0x17315ed0f99),
Bits: bits,
Nonce: nonce,
}
// baseBlockHdrEncoded is the wire encoded bytes of baseBlockHdr.
// baseBlockHdrEncoded is the appmessage encoded bytes of baseBlockHdr.
baseBlockHdrEncoded := []byte{
0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumParentBlocks
@@ -91,16 +90,16 @@ func TestBlockHeaderWire(t *testing.T) {
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x29, 0xab, 0x5f, 0x49, 0x00, 0x00, 0x00, 0x00, // Timestamp
0x99, 0x0f, 0xed, 0x15, 0x73, 0x01, 0x00, 0x00, // Timestamp
0xff, 0xff, 0x00, 0x1d, // Bits
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce. TODO: (Ori) Replace to a real nonce
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
}
tests := []struct {
in *BlockHeader // Data to encode
out *BlockHeader // Expected decoded data
buf []byte // Wire encoding
pver uint32 // Protocol version for wire encoding
buf []byte // Encoded data
pver uint32 // Protocol version for appmessage encoding
}{
// Latest protocol version.
{
@@ -113,7 +112,7 @@ func TestBlockHeaderWire(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to wire format.
// Encode to appmessage format.
var buf bytes.Buffer
err := writeBlockHeader(&buf, test.pver, test.in)
if err != nil {
@@ -138,7 +137,7 @@ func TestBlockHeaderWire(t *testing.T) {
continue
}
// Decode the block header from wire format.
// Decode the block header from appmessage format.
var bh BlockHeader
rbuf := bytes.NewReader(test.buf)
err = readBlockHeader(rbuf, test.pver, &bh)
@@ -178,12 +177,12 @@ func TestBlockHeaderSerialize(t *testing.T) {
HashMerkleRoot: mainnetGenesisMerkleRoot,
AcceptedIDMerkleRoot: exampleAcceptedIDMerkleRoot,
UTXOCommitment: exampleUTXOCommitment,
Timestamp: time.Unix(0x495fab29, 0), // 2009-01-03 12:15:05 -0600 CST
Timestamp: mstime.UnixMilliseconds(0x17315ed0f99),
Bits: bits,
Nonce: nonce,
}
// baseBlockHdrEncoded is the wire encoded bytes of baseBlockHdr.
// baseBlockHdrEncoded is the appmessage encoded bytes of baseBlockHdr.
baseBlockHdrEncoded := []byte{
0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumParentBlocks
@@ -207,9 +206,9 @@ func TestBlockHeaderSerialize(t *testing.T) {
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x29, 0xab, 0x5f, 0x49, 0x00, 0x00, 0x00, 0x00, // Timestamp
0x99, 0x0f, 0xed, 0x15, 0x73, 0x01, 0x00, 0x00, // Timestamp
0xff, 0xff, 0x00, 0x1d, // Bits
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce. TODO: (Ori) Replace to a real nonce
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
}
tests := []struct {
@@ -260,7 +259,7 @@ func TestBlockHeaderSerialize(t *testing.T) {
func TestBlockHeaderSerializeSize(t *testing.T) {
nonce := uint64(123123) // 0x1e0f3
bits := uint32(0x1d00ffff)
timestamp := time.Unix(0x495fab29, 0) // 2009-01-03 12:15:05 -0600 CST
timestamp := mstime.UnixMilliseconds(0x495fab29000)
baseBlockHdr := &BlockHeader{
Version: 1,
ParentHashes: []*daghash.Hash{mainnetGenesisHash, simnetGenesisHash},
@@ -308,7 +307,7 @@ func TestBlockHeaderSerializeSize(t *testing.T) {
func TestIsGenesis(t *testing.T) {
nonce := uint64(123123) // 0x1e0f3
bits := uint32(0x1d00ffff)
timestamp := time.Unix(0x495fab29, 0) // 2009-01-03 12:15:05 -0600 CST
timestamp := mstime.UnixMilliseconds(0x495fab29000)
baseBlockHdr := &BlockHeader{
Version: 1,

View File

@@ -2,23 +2,27 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
package appmessage
import (
"encoding/binary"
"fmt"
"io"
"math"
"time"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
"github.com/kaspanet/kaspad/util/binaryserializer"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/pkg/errors"
"io"
"math"
)
// MaxVarIntPayload is the maximum payload size for a variable length integer.
const MaxVarIntPayload = 9
// MaxInvPerMsg is the maximum number of inventory vectors that can be in any type of kaspa inv message.
const MaxInvPerMsg = 1 << 17
var (
// littleEndian is a convenience variable since binary.LittleEndian is
// quite long.
@@ -34,10 +38,13 @@ var (
var errNonCanonicalVarInt = "non-canonical varint %x - discriminant %x must " +
"encode a value greater than %x"
// int64Time represents a unix timestamp encoded with an int64. It is used as
// a way to signal the readElement function how to decode a timestamp into a Go
// time.Time since it is otherwise ambiguous.
type int64Time time.Time
// errNoEncodingForType signifies that there's no encoding for the given type.
var errNoEncodingForType = errors.New("there's no encoding for this type")
// int64Time represents a unix timestamp with millisecond precision encoded with
// an int64. It is used as a way to signal the readElement function how to decode
// a timestamp into a Go mstime.Time since it is otherwise ambiguous.
type int64Time mstime.Time
// ReadElement reads the next sequence of bytes from r using little endian
// depending on the concrete type of element pointed to.
@@ -77,6 +84,14 @@ func ReadElement(r io.Reader, element interface{}) error {
*e = rv
return nil
case *uint8:
rv, err := binaryserializer.Uint8(r)
if err != nil {
return err
}
*e = rv
return nil
case *bool:
rv, err := binaryserializer.Uint8(r)
if err != nil {
@@ -95,7 +110,7 @@ func ReadElement(r io.Reader, element interface{}) error {
if err != nil {
return err
}
*e = int64Time(time.Unix(int64(rv), 0))
*e = int64Time(mstime.UnixMilliseconds(int64(rv)))
return nil
// Message header checksum.
@@ -107,11 +122,12 @@ func ReadElement(r io.Reader, element interface{}) error {
return nil
// Message header command.
case *[CommandSize]uint8:
_, err := io.ReadFull(r, e[:])
case *MessageCommand:
rv, err := binaryserializer.Uint32(r, littleEndian)
if err != nil {
return err
}
*e = MessageCommand(rv)
return nil
// IP address.
@@ -129,6 +145,9 @@ func ReadElement(r io.Reader, element interface{}) error {
}
return nil
case *id.ID:
return e.Deserialize(r)
case *subnetworkid.SubnetworkID:
_, err := io.ReadFull(r, e[:])
if err != nil {
@@ -144,14 +163,6 @@ func ReadElement(r io.Reader, element interface{}) error {
*e = ServiceFlag(rv)
return nil
case *InvType:
rv, err := binaryserializer.Uint32(r, littleEndian)
if err != nil {
return err
}
*e = InvType(rv)
return nil
case *KaspaNet:
rv, err := binaryserializer.Uint32(r, littleEndian)
if err != nil {
@@ -159,27 +170,9 @@ func ReadElement(r io.Reader, element interface{}) error {
}
*e = KaspaNet(rv)
return nil
case *BloomUpdateType:
rv, err := binaryserializer.Uint8(r)
if err != nil {
return err
}
*e = BloomUpdateType(rv)
return nil
case *RejectCode:
rv, err := binaryserializer.Uint8(r)
if err != nil {
return err
}
*e = RejectCode(rv)
return nil
}
// Fall back to the slower binary.Read if a fast path was not available
// above.
return binary.Read(r, littleEndian, element)
return errors.Wrapf(errNoEncodingForType, "couldn't find a way to read type %T", element)
}
// readElements reads multiple items from r. It is equivalent to multiple
@@ -227,6 +220,13 @@ func WriteElement(w io.Writer, element interface{}) error {
}
return nil
case uint8:
err := binaryserializer.PutUint8(w, e)
if err != nil {
return err
}
return nil
case bool:
var err error
if e {
@@ -248,8 +248,8 @@ func WriteElement(w io.Writer, element interface{}) error {
return nil
// Message header command.
case [CommandSize]uint8:
_, err := w.Write(e[:])
case MessageCommand:
err := binaryserializer.PutUint32(w, littleEndian, uint32(e))
if err != nil {
return err
}
@@ -270,6 +270,9 @@ func WriteElement(w io.Writer, element interface{}) error {
}
return nil
case *id.ID:
return e.Serialize(w)
case *subnetworkid.SubnetworkID:
_, err := w.Write(e[:])
if err != nil {
@@ -284,38 +287,15 @@ func WriteElement(w io.Writer, element interface{}) error {
}
return nil
case InvType:
err := binaryserializer.PutUint32(w, littleEndian, uint32(e))
if err != nil {
return err
}
return nil
case KaspaNet:
err := binaryserializer.PutUint32(w, littleEndian, uint32(e))
if err != nil {
return err
}
return nil
case BloomUpdateType:
err := binaryserializer.PutUint8(w, uint8(e))
if err != nil {
return err
}
return nil
case RejectCode:
err := binaryserializer.PutUint8(w, uint8(e))
if err != nil {
return err
}
return nil
}
// Fall back to the slower binary.Write if a fast path was not available
// above.
return binary.Write(w, littleEndian, element)
return errors.Wrapf(errNoEncodingForType, "couldn't find a way to write type %T", element)
}
// writeElements writes multiple items to w. It is equivalent to multiple
@@ -350,7 +330,7 @@ func ReadVarInt(r io.Reader) (uint64, error) {
// encoded using fewer bytes.
min := uint64(0x100000000)
if rv < min {
return 0, messageError("ReadVarInt", fmt.Sprintf(
return 0, messageError("readVarInt", fmt.Sprintf(
errNonCanonicalVarInt, rv, discriminant, min))
}
@@ -365,7 +345,7 @@ func ReadVarInt(r io.Reader) (uint64, error) {
// encoded using fewer bytes.
min := uint64(0x10000)
if rv < min {
return 0, messageError("ReadVarInt", fmt.Sprintf(
return 0, messageError("readVarInt", fmt.Sprintf(
errNonCanonicalVarInt, rv, discriminant, min))
}
@@ -380,7 +360,7 @@ func ReadVarInt(r io.Reader) (uint64, error) {
// encoded using fewer bytes.
min := uint64(0xfd)
if rv < min {
return 0, messageError("ReadVarInt", fmt.Sprintf(
return 0, messageError("readVarInt", fmt.Sprintf(
errNonCanonicalVarInt, rv, discriminant, min))
}
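The canonical-encoding checks in ReadVarInt above (discriminants 0xff, 0xfe and 0xfd with their minimum allowed values) describe the compact variable-length integer layout that the TestVarIntEncoding vectors in the next file also exercise. A minimal stdlib-only writer for that layout — an illustrative stand-in, not the package's actual WriteVarInt:

```go
package varintsketch

import (
	"encoding/binary"
	"io"
)

// writeVarInt mirrors the canonical compact-integer layout enforced by
// ReadVarInt: values below 0xfd fit in a single byte, while the discriminants
// 0xfd, 0xfe and 0xff introduce little-endian uint16, uint32 and uint64
// payloads for the larger ranges.
func writeVarInt(w io.Writer, val uint64) error {
	switch {
	case val < 0xfd:
		_, err := w.Write([]byte{byte(val)})
		return err
	case val <= 0xffff:
		buf := make([]byte, 3)
		buf[0] = 0xfd
		binary.LittleEndian.PutUint16(buf[1:], uint16(val))
		_, err := w.Write(buf)
		return err
	case val <= 0xffffffff:
		buf := make([]byte, 5)
		buf[0] = 0xfe
		binary.LittleEndian.PutUint32(buf[1:], uint32(val))
		_, err := w.Write(buf)
		return err
	default:
		buf := make([]byte, 9)
		buf[0] = 0xff
		binary.LittleEndian.PutUint64(buf[1:], val)
		_, err := w.Write(buf)
		return err
	}
}
```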

View File

@@ -2,10 +2,11 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
package appmessage
import (
"bytes"
"github.com/pkg/errors"
"io"
"reflect"
"strings"
@@ -56,15 +57,13 @@ var exampleUTXOCommitment = &daghash.Hash{
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
}
// TestElementWire tests wire encode and decode for various element types. This
// TestElementEncoding tests appmessage encode and decode for various element types. This
// is mainly to test the "fast" paths in readElement and writeElement which use
// type assertions to avoid reflection when possible.
func TestElementWire(t *testing.T) {
type writeElementReflect int32
func TestElementEncoding(t *testing.T) {
tests := []struct {
in interface{} // Value to encode
buf []byte // Wire encoding
buf []byte // Encoded value
}{
{int32(1), []byte{0x01, 0x00, 0x00, 0x00}},
{uint32(256), []byte{0x00, 0x01, 0x00, 0x00}},
@@ -89,13 +88,9 @@ func TestElementWire(t *testing.T) {
[]byte{0x01, 0x02, 0x03, 0x04},
},
{
[CommandSize]byte{
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b, 0x0c,
},
MessageCommand(0x10),
[]byte{
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b, 0x0c,
0x10, 0x00, 0x00, 0x00,
},
},
{
@@ -126,24 +121,15 @@ func TestElementWire(t *testing.T) {
ServiceFlag(SFNodeNetwork),
[]byte{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
},
{
InvType(InvTypeTx),
[]byte{0x01, 0x00, 0x00, 0x00},
},
{
KaspaNet(Mainnet),
[]byte{0x1d, 0xf7, 0xdc, 0x3d},
},
// Type not supported by the "fast" path and requires reflection.
{
writeElementReflect(1),
[]byte{0x01, 0x00, 0x00, 0x00},
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Write to wire format.
// Write to appmessage format.
var buf bytes.Buffer
err := WriteElement(&buf, test.in)
if err != nil {
@@ -156,7 +142,7 @@ func TestElementWire(t *testing.T) {
continue
}
// Read from wire format.
// Read from appmessage format.
rbuf := bytes.NewReader(test.buf)
val := test.in
if reflect.ValueOf(test.in).Kind() != reflect.Ptr {
@@ -179,9 +165,11 @@ func TestElementWire(t *testing.T) {
}
}
// TestElementWireErrors performs negative tests against wire encode and decode
// TestElementEncodingErrors performs negative tests against appmessage encode and decode
// of various element types to confirm error paths work correctly.
func TestElementWireErrors(t *testing.T) {
func TestElementEncodingErrors(t *testing.T) {
type writeElementReflect int32
tests := []struct {
in interface{} // Value to encode
max int // Max size of fixed buffer to induce errors
@@ -194,10 +182,7 @@ func TestElementWireErrors(t *testing.T) {
{true, 0, io.ErrShortWrite, io.EOF},
{[4]byte{0x01, 0x02, 0x03, 0x04}, 0, io.ErrShortWrite, io.EOF},
{
[CommandSize]byte{
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b, 0x0c,
},
MessageCommand(10),
0, io.ErrShortWrite, io.EOF,
},
{
@@ -217,29 +202,30 @@ func TestElementWireErrors(t *testing.T) {
0, io.ErrShortWrite, io.EOF,
},
{ServiceFlag(SFNodeNetwork), 0, io.ErrShortWrite, io.EOF},
{InvType(InvTypeTx), 0, io.ErrShortWrite, io.EOF},
{KaspaNet(Mainnet), 0, io.ErrShortWrite, io.EOF},
// Type with no supported encoding.
{writeElementReflect(0), 0, errNoEncodingForType, errNoEncodingForType},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to wire format.
// Encode to appmessage format.
w := newFixedWriter(test.max)
err := WriteElement(w, test.in)
if err != test.writeErr {
if !errors.Is(err, test.writeErr) {
t.Errorf("writeElement #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Decode from wire format.
// Decode from appmessage format.
r := newFixedReader(test.max, nil)
val := test.in
if reflect.ValueOf(test.in).Kind() != reflect.Ptr {
val = reflect.New(reflect.TypeOf(test.in)).Interface()
}
err = ReadElement(r, val)
if err != test.readErr {
if !errors.Is(err, test.readErr) {
t.Errorf("readElement #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
@@ -247,43 +233,42 @@ func TestElementWireErrors(t *testing.T) {
}
}
// TestVarIntWire tests wire encode and decode for variable length integers.
func TestVarIntWire(t *testing.T) {
// TestVarIntEncoding tests appmessage encode and decode for variable length integers.
func TestVarIntEncoding(t *testing.T) {
tests := []struct {
in uint64 // Value to encode
out uint64 // Expected decoded value
buf []byte // Wire encoding
value uint64 // Value to encode
buf []byte // Encoded value
}{
// Latest protocol version.
// Single byte
{0, 0, []byte{0x00}},
{0, []byte{0x00}},
// Max single byte
{0xfc, 0xfc, []byte{0xfc}},
{0xfc, []byte{0xfc}},
// Min 2-byte
{0xfd, 0xfd, []byte{0xfd, 0x0fd, 0x00}},
{0xfd, []byte{0xfd, 0x0fd, 0x00}},
// Max 2-byte
{0xffff, 0xffff, []byte{0xfd, 0xff, 0xff}},
{0xffff, []byte{0xfd, 0xff, 0xff}},
// Min 4-byte
{0x10000, 0x10000, []byte{0xfe, 0x00, 0x00, 0x01, 0x00}},
{0x10000, []byte{0xfe, 0x00, 0x00, 0x01, 0x00}},
// Max 4-byte
{0xffffffff, 0xffffffff, []byte{0xfe, 0xff, 0xff, 0xff, 0xff}},
{0xffffffff, []byte{0xfe, 0xff, 0xff, 0xff, 0xff}},
// Min 8-byte
{
0x100000000, 0x100000000,
0x100000000,
[]byte{0xff, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00},
},
// Max 8-byte
{
0xffffffffffffffff, 0xffffffffffffffff,
0xffffffffffffffff,
[]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to wire format.
var buf bytes.Buffer
err := WriteVarInt(&buf, test.in)
// Encode to appmessage format.
buf := &bytes.Buffer{}
err := WriteVarInt(buf, test.value)
if err != nil {
t.Errorf("WriteVarInt #%d error %v", i, err)
continue
@@ -294,27 +279,27 @@ func TestVarIntWire(t *testing.T) {
continue
}
// Decode from wire format.
// Decode from appmessage format.
rbuf := bytes.NewReader(test.buf)
val, err := ReadVarInt(rbuf)
if err != nil {
t.Errorf("ReadVarInt #%d error %v", i, err)
continue
}
if val != test.out {
t.Errorf("ReadVarInt #%d\n got: %d want: %d", i,
val, test.out)
if val != test.value {
t.Errorf("ReadVarInt #%d\n got: %x want: %x", i,
val, test.value)
continue
}
}
}
// TestVarIntWireErrors performs negative tests against wire encode and decode
// TestVarIntEncodingErrors performs negative tests against appmessage encode and decode
// of variable length integers to confirm error paths work correctly.
func TestVarIntWireErrors(t *testing.T) {
func TestVarIntEncodingErrors(t *testing.T) {
tests := []struct {
in uint64 // Value to encode
buf []byte // Wire encoding
buf []byte // Encoded value
max int // Max size of fixed buffer to induce errors
writeErr error // Expected write error
readErr error // Expected read error
@@ -334,19 +319,19 @@ func TestVarIntWireErrors(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to wire format.
// Encode to appmessage format.
w := newFixedWriter(test.max)
err := WriteVarInt(w, test.in)
if err != test.writeErr {
if !errors.Is(err, test.writeErr) {
t.Errorf("WriteVarInt #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Decode from wire format.
// Decode from appmessage format.
r := newFixedReader(test.max, test.buf)
_, err = ReadVarInt(r)
if err != test.readErr {
if !errors.Is(err, test.readErr) {
t.Errorf("ReadVarInt #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
@@ -362,7 +347,7 @@ func TestVarIntNonCanonical(t *testing.T) {
tests := []struct {
name string // Test name for easier identification
in []byte // Value to decode
pver uint32 // Protocol version for wire encoding
pver uint32 // Protocol version for appmessage encoding
}{
{
"0 encoded with 3 bytes", []byte{0xfd, 0x00, 0x00},
@@ -394,10 +379,10 @@ func TestVarIntNonCanonical(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Decode from wire format.
// Decode from appmessage format.
rbuf := bytes.NewReader(test.in)
val, err := ReadVarInt(rbuf)
if _, ok := err.(*MessageError); !ok {
if msgErr := &(MessageError{}); !errors.As(err, &msgErr) {
t.Errorf("ReadVarInt #%d (%s) unexpected error %v", i,
test.name, err)
continue
@@ -410,7 +395,7 @@ func TestVarIntNonCanonical(t *testing.T) {
}
}
// TestVarIntWire tests the serialize size for variable length integers.
// TestVarIntSerializeSize tests the serialize size for variable length integers.
func TestVarIntSerializeSize(t *testing.T) {
tests := []struct {
val uint64 // Value to get the serialized size for
@@ -445,8 +430,8 @@ func TestVarIntSerializeSize(t *testing.T) {
}
}
// TestVarStringWire tests wire encode and decode for variable length strings.
func TestVarStringWire(t *testing.T) {
// TestVarStringEncoding tests appmessage encode and decode for variable length strings.
func TestVarStringEncoding(t *testing.T) {
pver := ProtocolVersion
// str256 is a string that takes a 2-byte varint to encode.
@@ -455,8 +440,8 @@ func TestVarStringWire(t *testing.T) {
tests := []struct {
in string // String to encode
out string // String to decoded value
buf []byte // Wire encoding
pver uint32 // Protocol version for wire encoding
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
}{
// Latest protocol version.
// Empty string
@@ -469,7 +454,7 @@ func TestVarStringWire(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to wire format.
// Encode to appmessage format.
var buf bytes.Buffer
err := WriteVarString(&buf, test.in)
if err != nil {
@@ -482,7 +467,7 @@ func TestVarStringWire(t *testing.T) {
continue
}
// Decode from wire format.
// Decode from appmessage format.
rbuf := bytes.NewReader(test.buf)
val, err := ReadVarString(rbuf, test.pver)
if err != nil {
@@ -497,9 +482,9 @@ func TestVarStringWire(t *testing.T) {
}
}
// TestVarStringWireErrors performs negative tests against wire encode and
// TestVarStringEncodingErrors performs negative tests against appmessage encode and
// decode of variable length strings to confirm error paths work correctly.
func TestVarStringWireErrors(t *testing.T) {
func TestVarStringEncodingErrors(t *testing.T) {
pver := ProtocolVersion
// str256 is a string that takes a 2-byte varint to encode.
@@ -507,8 +492,8 @@ func TestVarStringWireErrors(t *testing.T) {
tests := []struct {
in string // Value to encode
buf []byte // Wire encoding
pver uint32 // Protocol version for wire encoding
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
max int // Max size of fixed buffer to induce errors
writeErr error // Expected write error
readErr error // Expected read error
@@ -524,19 +509,19 @@ func TestVarStringWireErrors(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to wire format.
// Encode to appmessage format.
w := newFixedWriter(test.max)
err := WriteVarString(w, test.in)
if err != test.writeErr {
if !errors.Is(err, test.writeErr) {
t.Errorf("WriteVarString #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Decode from wire format.
// Decode from appmessage format.
r := newFixedReader(test.max, test.buf)
_, err = ReadVarString(r, test.pver)
if err != test.readErr {
if !errors.Is(err, test.readErr) {
t.Errorf("ReadVarString #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
@@ -552,8 +537,8 @@ func TestVarStringOverflowErrors(t *testing.T) {
pver := ProtocolVersion
tests := []struct {
buf []byte // Wire encoding
pver uint32 // Protocol version for wire encoding
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
err error // Expected error
}{
{[]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
@@ -564,7 +549,7 @@ func TestVarStringOverflowErrors(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Decode from wire format.
// Decode from appmessage format.
rbuf := bytes.NewReader(test.buf)
_, err := ReadVarString(rbuf, test.pver)
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
@@ -576,8 +561,8 @@ func TestVarStringOverflowErrors(t *testing.T) {
}
// TestVarBytesWire tests wire encode and decode for variable length byte array.
func TestVarBytesWire(t *testing.T) {
// TestVarBytesEncoding tests appmessage encode and decode for variable length byte array.
func TestVarBytesEncoding(t *testing.T) {
pver := ProtocolVersion
// bytes256 is a byte array that takes a 2-byte varint to encode.
@@ -585,8 +570,8 @@ func TestVarBytesWire(t *testing.T) {
tests := []struct {
in []byte // Byte Array to write
buf []byte // Wire encoding
pver uint32 // Protocol version for wire encoding
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
}{
// Latest protocol version.
// Empty byte array
@@ -599,7 +584,7 @@ func TestVarBytesWire(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to wire format.
// Encode to appmessage format.
var buf bytes.Buffer
err := WriteVarBytes(&buf, test.pver, test.in)
if err != nil {
@@ -612,7 +597,7 @@ func TestVarBytesWire(t *testing.T) {
continue
}
// Decode from wire format.
// Decode from appmessage format.
rbuf := bytes.NewReader(test.buf)
val, err := ReadVarBytes(rbuf, test.pver, MaxMessagePayload,
"test payload")
@@ -628,9 +613,9 @@ func TestVarBytesWire(t *testing.T) {
}
}
// TestVarBytesWireErrors performs negative tests against wire encode and
// TestVarBytesEncodingErrors performs negative tests against appmessage encode and
// decode of variable length byte arrays to confirm error paths work correctly.
func TestVarBytesWireErrors(t *testing.T) {
func TestVarBytesEncodingErrors(t *testing.T) {
pver := ProtocolVersion
// bytes256 is a byte array that takes a 2-byte varint to encode.
@@ -638,8 +623,8 @@ func TestVarBytesWireErrors(t *testing.T) {
tests := []struct {
in []byte // Byte Array to write
buf []byte // Wire encoding
pver uint32 // Protocol version for wire encoding
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
max int // Max size of fixed buffer to induce errors
writeErr error // Expected write error
readErr error // Expected read error
@@ -655,20 +640,20 @@ func TestVarBytesWireErrors(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to wire format.
// Encode to appmessage format.
w := newFixedWriter(test.max)
err := WriteVarBytes(w, test.pver, test.in)
if err != test.writeErr {
if !errors.Is(err, test.writeErr) {
t.Errorf("WriteVarBytes #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Decode from wire format.
// Decode from appmessage format.
r := newFixedReader(test.max, test.buf)
_, err = ReadVarBytes(r, test.pver, MaxMessagePayload,
"test payload")
if err != test.readErr {
if !errors.Is(err, test.readErr) {
t.Errorf("ReadVarBytes #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
@@ -684,8 +669,8 @@ func TestVarBytesOverflowErrors(t *testing.T) {
pver := ProtocolVersion
tests := []struct {
buf []byte // Wire encoding
pver uint32 // Protocol version for wire encoding
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
err error // Expected error
}{
{[]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
@@ -696,7 +681,7 @@ func TestVarBytesOverflowErrors(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Decode from wire format.
// Decode from appmessage format.
rbuf := bytes.NewReader(test.buf)
_, err := ReadVarBytes(rbuf, test.pver, MaxMessagePayload,
"test payload")

View File

@@ -1,8 +1,8 @@
/*
Package wire implements the kaspa wire protocol.
Package appmessage implements the kaspa network message protocol.
At a high level, this package provides support for marshalling and unmarshalling
supported kaspa messages to and from the wire. This package does not deal
supported kaspa messages to and from their network encoding. This package does not deal
with the specifics of message handling such as what to do when a message is
received. This provides the caller with a high level of flexibility.
@@ -19,7 +19,7 @@ Message which allows messages of any type to be read, written, or passed around
through channels, functions, etc. In addition, concrete implementations of most
of the currently supported kaspa messages are provided. For these supported
messages, all of the details of marshalling and unmarshalling to and from the
wire using kaspa encoding are handled so the caller doesn't have to concern
network using kaspa encoding are handled so the caller doesn't have to concern
themselves with the specifics.
Message Interaction
@@ -37,7 +37,7 @@ interactions in no particular order.
Peer A Sends Peer B Responds
----------------------------------------------------------------------------
getaddr message (MsgGetAddr) addr message (MsgAddr)
getaddr message (MsgRequestAddresses) addr message (MsgAddresses)
getblockinvs message (MsgGetBlockInvs) inv message (MsgInv)
inv message (MsgInv) getdata message (MsgGetData)
getdata message (MsgGetData) block message (MsgBlock) -or-
@@ -55,7 +55,7 @@ Protocol Version
The protocol version should be negotiated with the remote peer at a higher
level than this package via the version (MsgVersion) message exchange, however,
this package provides the wire.ProtocolVersion constant which indicates the
this package provides the appmessage.ProtocolVersion constant which indicates the
latest protocol version this package supports and is typically the value to use
for all outbound connections before a potentially lower protocol version is
negotiated.
@@ -66,11 +66,10 @@ The kaspa network is a magic number which is used to identify the start of a
message and which kaspa network the message applies to. This package provides
the following constants:
wire.Mainnet
wire.Testnet (Test network)
wire.Regtest (Regression test network)
wire.Simnet (Simulation test network)
wire.Devnet (Development network)
appmessage.Mainnet
appmessage.Testnet (Test network)
appmessage.Simnet (Simulation test network)
appmessage.Devnet (Development network)
Determining Message Type
@@ -82,43 +81,43 @@ switch or type assertion. An example of a type switch follows:
// Assumes msg is already a valid concrete message such as one created
// via NewMsgVersion or read via ReadMessage.
switch msg := msg.(type) {
case *wire.MsgVersion:
case *appmessage.MsgVersion:
// The message is a pointer to a MsgVersion struct.
fmt.Printf("Protocol version: %d", msg.ProtocolVersion)
case *wire.MsgBlock:
case *appmessage.MsgBlock:
// The message is a pointer to a MsgBlock struct.
fmt.Printf("Number of tx in block: %d", msg.Header.TxnCount)
}
Reading Messages
In order to unmarshall kaspa messages from the wire, use the ReadMessage
In order to unmarshall kaspa messages from the appmessage format, use the ReadMessage
function. It accepts any io.Reader, but typically this will be a net.Conn to
a remote node running a kaspa peer. Example syntax is:
// Reads and validates the next kaspa message from conn using the
// protocol version pver and the kaspa network kaspaNet. The returns
// are a wire.Message, a []byte which contains the unmarshalled
// are an appmessage.Message, a []byte which contains the unmarshalled
// raw payload, and a possible error.
msg, rawPayload, err := wire.ReadMessage(conn, pver, kaspaNet)
msg, rawPayload, err := appmessage.ReadMessage(conn, pver, kaspaNet)
if err != nil {
// Log and handle the error
}
Writing Messages
In order to marshall kaspa messages to the wire, use the WriteMessage
In order to marshall kaspa messages to the appmessage format, use the WriteMessage
function. It accepts any io.Writer, but typically this will be a net.Conn to
a remote node running a kaspa peer. Example syntax to request addresses
from a remote peer is:
// Create a new getaddr kaspa message.
msg := wire.NewMsgGetAddr()
msg := appmessage.NewMsgRequestAddresses()
// Writes a kaspa message msg to conn using the protocol version
// pver, and the kaspa network kaspaNet. The return is a possible
// error.
err := wire.WriteMessage(conn, msg, pver, kaspaNet)
err := appmessage.WriteMessage(conn, msg, pver, kaspaNet)
if err != nil {
// Log and handle the error
}
@@ -127,8 +126,8 @@ Errors
Errors returned by this package are either the raw errors provided by underlying
calls to read/write from streams such as io.EOF, io.ErrUnexpectedEOF, and
io.ErrShortWrite, or of type wire.MessageError. This allows the caller to
io.ErrShortWrite, or of type appmessage.MessageError. This allows the caller to
differentiate between general IO errors and malformed messages through type
assertions.
*/
package wire
package appmessage
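
A minimal sketch of the error-differentiation pattern the Errors paragraph above recommends, assuming the package import path is github.com/kaspanet/kaspad/app/appmessage (inferred from the file paths in this diff); the logging calls and function name are illustrative only:

package main

import (
	"io"
	"log"

	"github.com/kaspanet/kaspad/app/appmessage"
)

// classifyReadError distinguishes a malformed-message error from a general IO
// error after a failed appmessage.ReadMessage call, using the type assertion
// the package documentation recommends.
func classifyReadError(err error) {
	if msgErr, ok := err.(*appmessage.MessageError); ok {
		// The peer sent a message that failed to parse or validate.
		log.Printf("malformed message: %v", msgErr)
		return
	}
	// Anything else is a plain IO error (io.EOF, io.ErrUnexpectedEOF, ...).
	log.Printf("io error: %v", err)
}

func main() {
	classifyReadError(io.EOF)
}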

View File

@@ -2,7 +2,7 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
package appmessage
import (
"fmt"

View File

@@ -2,20 +2,20 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
package appmessage
import "io"
// fakeMessage implements the Message interface and is used to force encode
// errors in messages.
type fakeMessage struct {
command string
command MessageCommand
payload []byte
forceEncodeErr bool
forceLenErr bool
}
// KaspaDecode doesn't do anything. It just satisfies the wire.Message
// KaspaDecode doesn't do anything. It just satisfies the appmessage.Message
// interface.
func (msg *fakeMessage) KaspaDecode(r io.Reader, pver uint32) error {
return nil
@@ -23,7 +23,7 @@ func (msg *fakeMessage) KaspaDecode(r io.Reader, pver uint32) error {
// KaspaEncode writes the payload field of the fake message or forces an error
// if the forceEncodeErr flag of the fake message is set. It also satisfies the
// wire.Message interface.
// appmessage.Message interface.
func (msg *fakeMessage) KaspaEncode(w io.Writer, pver uint32) error {
if msg.forceEncodeErr {
err := &MessageError{
@@ -39,7 +39,7 @@ func (msg *fakeMessage) KaspaEncode(w io.Writer, pver uint32) error {
// Command returns the command field of the fake message and satisfies the
// Message interface.
func (msg *fakeMessage) Command() string {
func (msg *fakeMessage) Command() MessageCommand {
return msg.command
}

View File

@@ -2,7 +2,7 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
package appmessage
import (
"bytes"

app/appmessage/message.go
View File

@@ -0,0 +1,89 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"fmt"
"time"
)
// MaxMessagePayload is the maximum bytes a message can be regardless of other
// individual limits imposed by messages themselves.
const MaxMessagePayload = (1024 * 1024 * 32) // 32MB
// MessageCommand is a number in the header of a message that represents its type.
type MessageCommand uint32
func (cmd MessageCommand) String() string {
cmdString, ok := MessageCommandToString[cmd]
if !ok {
cmdString = "unknown command"
}
return fmt.Sprintf("%s [code %d]", cmdString, uint32(cmd))
}
// Commands used in kaspa message headers which describe the type of message.
const (
CmdVersion MessageCommand = iota
CmdVerAck
CmdRequestAddresses
CmdAddresses
CmdRequestIBDBlocks
CmdBlock
CmdTx
CmdPing
CmdPong
CmdRequestBlockLocator
CmdBlockLocator
CmdSelectedTip
CmdRequestSelectedTip
CmdInvRelayBlock
CmdRequestRelayBlocks
CmdInvTransaction
CmdRequestTransactions
CmdIBDBlock
CmdRequestNextIBDBlocks
CmdDoneIBDBlocks
CmdTransactionNotFound
CmdReject
)
// MessageCommandToString maps all MessageCommands to their string representation
var MessageCommandToString = map[MessageCommand]string{
CmdVersion: "Version",
CmdVerAck: "VerAck",
CmdRequestAddresses: "RequestAddresses",
CmdAddresses: "Addresses",
CmdRequestIBDBlocks: "RequestIBDBlocks",
CmdBlock: "Block",
CmdTx: "Tx",
CmdPing: "Ping",
CmdPong: "Pong",
CmdRequestBlockLocator: "RequestBlockLocator",
CmdBlockLocator: "BlockLocator",
CmdSelectedTip: "SelectedTip",
CmdRequestSelectedTip: "RequestSelectedTip",
CmdInvRelayBlock: "InvRelayBlock",
CmdRequestRelayBlocks: "RequestRelayBlocks",
CmdInvTransaction: "InvTransaction",
CmdRequestTransactions: "RequestTransactions",
CmdIBDBlock: "IBDBlock",
CmdRequestNextIBDBlocks: "RequestNextIBDBlocks",
CmdDoneIBDBlocks: "DoneIBDBlocks",
CmdTransactionNotFound: "TransactionNotFound",
CmdReject: "Reject",
}
// Message is an interface that describes a kaspa message. A type that
// implements Message has complete control over the representation of its data
// and may therefore contain additional or fewer fields than those which
// are used directly in the protocol encoded message.
type Message interface {
Command() MessageCommand
MessageNumber() uint64
SetMessageNumber(index uint64)
ReceivedAt() time.Time
SetReceivedAt(receivedAt time.Time)
}
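
To make the Message/MessageCommand contract concrete, here is a minimal sketch of a hypothetical message type written as if inside package appmessage; exampleCommand and msgExample are illustrative names only, and baseMessage is assumed (as elsewhere in this diff) to supply the MessageNumber/SetMessageNumber/ReceivedAt/SetReceivedAt plumbing:

// exampleCommand is a hypothetical command value used only in this sketch; a
// real message would add its command to the MessageCommand constants above and
// to MessageCommandToString.
const exampleCommand MessageCommand = 0xff

// msgExample follows the pattern every message in this package uses: embed
// baseMessage for the bookkeeping methods and implement Command.
type msgExample struct {
	baseMessage
	Data []byte
}

// Command returns the command value for the message. This is part of the
// Message interface implementation.
func (msg *msgExample) Command() MessageCommand {
	return exampleCommand
}

Since MessageCommandToString has no entry for exampleCommand, its String() output would be "unknown command [code 255]".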

View File

@@ -0,0 +1,75 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"fmt"
"github.com/kaspanet/kaspad/util/subnetworkid"
)
// MaxAddressesPerMsg is the maximum number of addresses that can be in a single
// kaspa Addresses message (MsgAddresses).
const MaxAddressesPerMsg = 1000
// MsgAddresses implements the Message interface and represents a kaspa
// Addresses message. It is used to provide a list of known active peers on the
// network. An active peer is considered one that has transmitted a message
// within the last 3 hours. Nodes which have not transmitted in that time
// frame should be forgotten. Each message is limited to a maximum number of
// addresses, which is currently 1000. As a result, multiple messages must
// be used to relay the full list.
//
// Use the AddAddress function to build up the list of known addresses when
// sending an Addresses message to another peer.
type MsgAddresses struct {
baseMessage
IncludeAllSubnetworks bool
SubnetworkID *subnetworkid.SubnetworkID
AddrList []*NetAddress
}
// AddAddress adds a known active peer to the message.
func (msg *MsgAddresses) AddAddress(na *NetAddress) error {
if len(msg.AddrList)+1 > MaxAddressesPerMsg {
str := fmt.Sprintf("too many addresses in message [max %d]",
MaxAddressesPerMsg)
return messageError("MsgAddresses.AddAddress", str)
}
msg.AddrList = append(msg.AddrList, na)
return nil
}
// AddAddresses adds multiple known active peers to the message.
func (msg *MsgAddresses) AddAddresses(netAddrs ...*NetAddress) error {
for _, na := range netAddrs {
err := msg.AddAddress(na)
if err != nil {
return err
}
}
return nil
}
// ClearAddresses removes all addresses from the message.
func (msg *MsgAddresses) ClearAddresses() {
msg.AddrList = []*NetAddress{}
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgAddresses) Command() MessageCommand {
return CmdAddresses
}
// NewMsgAddresses returns a new kaspa Addresses message that conforms to the
// Message interface. See MsgAddresses for details.
func NewMsgAddresses(includeAllSubnetworks bool, subnetworkID *subnetworkid.SubnetworkID) *MsgAddresses {
return &MsgAddresses{
IncludeAllSubnetworks: includeAllSubnetworks,
SubnetworkID: subnetworkID,
AddrList: make([]*NetAddress, 0, MaxAddressesPerMsg),
}
}
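
A brief usage sketch, written as if inside package appmessage and assuming the standard library net package is imported; NewNetAddress and SFNodeNetwork are taken from the test below, and AddAddress is what enforces the MaxAddressesPerMsg cap:

// buildAddressesReply assembles an Addresses message from a slice of TCP
// addresses, stopping once the per-message cap is reached.
func buildAddressesReply(tcpAddrs []*net.TCPAddr) *MsgAddresses {
	msg := NewMsgAddresses(false, nil)
	for _, tcpAddr := range tcpAddrs {
		na := NewNetAddress(tcpAddr, SFNodeNetwork)
		if err := msg.AddAddress(na); err != nil {
			// AddAddress only fails once MaxAddressesPerMsg (1000)
			// addresses are already in the message.
			break
		}
	}
	return msg
}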

View File

@@ -0,0 +1,58 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"net"
"testing"
"github.com/davecgh/go-spew/spew"
)
// TestAddresses tests the MsgAddresses API.
func TestAddresses(t *testing.T) {
// Ensure the command is expected value.
wantCmd := MessageCommand(3)
msg := NewMsgAddresses(false, nil)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgAddresses: wrong command - got %v want %v",
cmd, wantCmd)
}
// Ensure NetAddresses are added properly.
tcpAddr := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 16111}
na := NewNetAddress(tcpAddr, SFNodeNetwork)
err := msg.AddAddress(na)
if err != nil {
t.Errorf("AddAddress: %v", err)
}
if msg.AddrList[0] != na {
t.Errorf("AddAddress: wrong address added - got %v, want %v",
spew.Sprint(msg.AddrList[0]), spew.Sprint(na))
}
// Ensure the address list is cleared properly.
msg.ClearAddresses()
if len(msg.AddrList) != 0 {
t.Errorf("ClearAddresses: address list is not empty - "+
"got %v [%v], want %v", len(msg.AddrList),
spew.Sprint(msg.AddrList[0]), 0)
}
// Ensure adding more than the max allowed addresses per message returns
// error.
for i := 0; i < MaxAddressesPerMsg+1; i++ {
err = msg.AddAddress(na)
}
if err == nil {
t.Errorf("AddAddress: expected error on too many addresses " +
"not received")
}
err = msg.AddAddresses(na)
if err == nil {
t.Errorf("AddAddresses: expected error on too many addresses " +
"not received")
}
}

View File

@@ -2,7 +2,7 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
package appmessage
import (
"bytes"
@@ -27,9 +27,9 @@ const MaxMassPerBlock = 10000000
// MaxMassPerTx is the maximum total mass a transaction may have.
const MaxMassPerTx = MaxMassPerBlock / 2
// maxTxPerBlock is the maximum number of transactions that could
// MaxTxPerBlock is the maximum number of transactions that could
// possibly fit into a block.
const maxTxPerBlock = (MaxMassPerBlock / minTxPayload) + 1
const MaxTxPerBlock = (MaxMassPerBlock / minTxPayload) + 1
// TxLoc holds locator data for the offset and length of where a transaction is
// located within a MsgBlock data buffer.
@@ -42,6 +42,7 @@ type TxLoc struct {
// block message. It is used to deliver block and transaction information in
// response to a getdata message (MsgGetData) for a given block hash.
type MsgBlock struct {
baseMessage
Header BlockHeader
Transactions []*MsgTx
}
@@ -59,7 +60,7 @@ func (msg *MsgBlock) ClearTransactions() {
// KaspaDecode decodes r using the kaspa protocol encoding into the receiver.
// This is part of the Message interface implementation.
// See Deserialize for decoding blocks stored to disk, such as in a database, as
// opposed to decoding blocks from the wire.
// opposed to decoding blocks from the appmessage format.
func (msg *MsgBlock) KaspaDecode(r io.Reader, pver uint32) error {
err := readBlockHeader(r, pver, &msg.Header)
if err != nil {
@@ -74,9 +75,9 @@ func (msg *MsgBlock) KaspaDecode(r io.Reader, pver uint32) error {
// Prevent more transactions than could possibly fit into a block.
// It would be possible to cause memory exhaustion and panics without
// a sane upper bound on this count.
if txCount > maxTxPerBlock {
if txCount > MaxTxPerBlock {
str := fmt.Sprintf("too many transactions to fit into a block "+
"[count %d, max %d]", txCount, maxTxPerBlock)
"[count %d, max %d]", txCount, MaxTxPerBlock)
return messageError("MsgBlock.KaspaDecode", str)
}
@@ -96,14 +97,14 @@ func (msg *MsgBlock) KaspaDecode(r io.Reader, pver uint32) error {
// Deserialize decodes a block from r into the receiver using a format that is
// suitable for long-term storage such as a database while respecting the
// Version field in the block. This function differs from KaspaDecode in that
// KaspaDecode decodes from the kaspa wire protocol as it was sent across the
// network. The wire encoding can technically differ depending on the protocol
// KaspaDecode decodes from the kaspa appmessage protocol as it was sent across the
// network. The appmessage encoding can technically differ depending on the protocol
// version and doesn't even really need to match the format of a stored block at
// all. As of the time this comment was written, the encoded block is the same
// in both instances, but there is a distinct difference and separating the two
// allows the API to be flexible enough to deal with changes.
func (msg *MsgBlock) Deserialize(r io.Reader) error {
// At the current time, there is no difference between the wire encoding
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of KaspaDecode.
return msg.KaspaDecode(r, 0)
@@ -116,9 +117,9 @@ func (msg *MsgBlock) Deserialize(r io.Reader) error {
func (msg *MsgBlock) DeserializeTxLoc(r *bytes.Buffer) ([]TxLoc, error) {
fullLen := r.Len()
// At the current time, there is no difference between the wire encoding
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of existing wire protocol functions.
// a result, make use of existing appmessage protocol functions.
err := readBlockHeader(r, 0, &msg.Header)
if err != nil {
return nil, err
@@ -132,9 +133,9 @@ func (msg *MsgBlock) DeserializeTxLoc(r *bytes.Buffer) ([]TxLoc, error) {
// Prevent more transactions than could possibly fit into a block.
// It would be possible to cause memory exhaustion and panics without
// a sane upper bound on this count.
if txCount > maxTxPerBlock {
if txCount > MaxTxPerBlock {
str := fmt.Sprintf("too many transactions to fit into a block "+
"[count %d, max %d]", txCount, maxTxPerBlock)
"[count %d, max %d]", txCount, MaxTxPerBlock)
return nil, messageError("MsgBlock.DeserializeTxLoc", str)
}
@@ -159,7 +160,7 @@ func (msg *MsgBlock) DeserializeTxLoc(r *bytes.Buffer) ([]TxLoc, error) {
// KaspaEncode encodes the receiver to w using the kaspa protocol encoding.
// This is part of the Message interface implementation.
// See Serialize for encoding blocks to be stored to disk, such as in a
// database, as opposed to encoding blocks for the wire.
// database, as opposed to encoding blocks for the appmessage format.
func (msg *MsgBlock) KaspaEncode(w io.Writer, pver uint32) error {
err := writeBlockHeader(w, pver, &msg.Header)
if err != nil {
@@ -184,14 +185,14 @@ func (msg *MsgBlock) KaspaEncode(w io.Writer, pver uint32) error {
// Serialize encodes the block to w using a format that suitable for long-term
// storage such as a database while respecting the Version field in the block.
// This function differs from KaspaEncode in that KaspaEncode encodes the block to
// the kaspa wire protocol in order to be sent across the network. The wire
// the kaspa appmessage protocol in order to be sent across the network. The appmessage
// encoding can technically differ depending on the protocol version and doesn't
// even really need to match the format of a stored block at all. As of the
// time this comment was written, the encoded block is the same in both
// instances, but there is a distinct difference and separating the two allows
// the API to be flexible enough to deal with changes.
func (msg *MsgBlock) Serialize(w io.Writer) error {
// At the current time, there is no difference between the wire encoding
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of KaspaEncode.
return msg.KaspaEncode(w, 0)
@@ -213,7 +214,7 @@ func (msg *MsgBlock) SerializeSize() int {
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgBlock) Command() string {
func (msg *MsgBlock) Command() MessageCommand {
return CmdBlock
}
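
A small sketch of the Serialize/KaspaEncode split described in the comments above, written as if inside package appmessage and assuming the bytes package is imported: per those comments both paths currently produce the same encoding, but callers should still pick the one matching their intent so the two can diverge later.

// encodeForStorageAndNetwork encodes the same block once for long-term storage
// (Serialize) and once for the peer-to-peer protocol at the current protocol
// version (KaspaEncode).
func encodeForStorageAndNetwork(block *MsgBlock) ([]byte, []byte, error) {
	var storageBuf bytes.Buffer
	if err := block.Serialize(&storageBuf); err != nil {
		return nil, nil, err
	}
	var networkBuf bytes.Buffer
	if err := block.KaspaEncode(&networkBuf, ProtocolVersion); err != nil {
		return nil, nil, err
	}
	return storageBuf.Bytes(), networkBuf.Bytes(), nil
}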

View File

@@ -2,17 +2,17 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
package appmessage
import (
"bytes"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/pkg/errors"
"io"
"math"
"reflect"
"testing"
"time"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/util/daghash"
@@ -32,7 +32,7 @@ func TestBlock(t *testing.T) {
bh := NewBlockHeader(1, parentHashes, hashMerkleRoot, acceptedIDMerkleRoot, utxoCommitment, bits, nonce)
// Ensure the command is expected value.
wantCmd := "block"
wantCmd := MessageCommand(5)
msg := NewMsgBlock(bh)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgBlock: wrong command - got %v want %v",
@@ -74,7 +74,7 @@ func TestBlock(t *testing.T) {
// TestBlockHash tests the ability to generate the hash of a block accurately.
func TestBlockHash(t *testing.T) {
// Block 1 hash.
hashStr := "22e72b61a572bfb32b8739f4e5db3eebe9870e394288467958e93e4a3d3b749f"
hashStr := "55d71bd49a8233bc9f0edbcbd0ad5d3eaebffe1fc6a6443a1c1f310fd02c11a5"
wantHash, err := daghash.NewHashFromStr(hashStr)
if err != nil {
t.Errorf("NewHashFromStr: %v", err)
@@ -144,15 +144,15 @@ func TestConvertToPartial(t *testing.T) {
}
}
// TestBlockWire tests the MsgBlock wire encode and decode for various numbers
// TestBlockEncoding tests the MsgBlock appmessage encode and decode for various numbers
// of transaction inputs and outputs and protocol versions.
func TestBlockWire(t *testing.T) {
func TestBlockEncoding(t *testing.T) {
tests := []struct {
in *MsgBlock // Message to encode
out *MsgBlock // Expected decoded message
buf []byte // Wire encoding
buf []byte // Encoded value
txLocs []TxLoc // Expected transaction locations
pver uint32 // Protocol version for wire encoding
pver uint32 // Protocol version for appmessage encoding
}{
// Latest protocol version.
{
@@ -166,7 +166,7 @@ func TestBlockWire(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode the message to wire format.
// Encode the message to appmessage format.
var buf bytes.Buffer
err := test.in.KaspaEncode(&buf, test.pver)
if err != nil {
@@ -179,7 +179,7 @@ func TestBlockWire(t *testing.T) {
continue
}
// Decode the message from wire format.
// Decode the message from appmessage format.
var msg MsgBlock
rbuf := bytes.NewReader(test.buf)
err = msg.KaspaDecode(rbuf, test.pver)
@@ -195,15 +195,15 @@ func TestBlockWire(t *testing.T) {
}
}
// TestBlockWireErrors performs negative tests against wire encode and decode
// TestBlockEncodingErrors performs negative tests against appmessage encode and decode
// of MsgBlock to confirm error paths work correctly.
func TestBlockWireErrors(t *testing.T) {
func TestBlockEncodingErrors(t *testing.T) {
pver := ProtocolVersion
tests := []struct {
in *MsgBlock // Value to encode
buf []byte // Wire encoding
pver uint32 // Protocol version for wire encoding
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
max int // Max size of fixed buffer to induce errors
writeErr error // Expected write error
readErr error // Expected read error
@@ -236,20 +236,20 @@ func TestBlockWireErrors(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to wire format.
// Encode to appmessage format.
w := newFixedWriter(test.max)
err := test.in.KaspaEncode(w, test.pver)
if err != test.writeErr {
if !errors.Is(err, test.writeErr) {
t.Errorf("KaspaEncode #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Decode from wire format.
// Decode from appmessage format.
var msg MsgBlock
r := newFixedReader(test.max, test.buf)
err = msg.KaspaDecode(r, test.pver)
if err != test.readErr {
if !errors.Is(err, test.readErr) {
t.Errorf("KaspaDecode #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
@@ -324,7 +324,7 @@ func TestBlockSerialize(t *testing.T) {
}
}
// TestBlockSerializeErrors performs negative tests against wire encode and
// TestBlockSerializeErrors performs negative tests against appmessage encode and
// decode of MsgBlock to confirm error paths work correctly.
func TestBlockSerializeErrors(t *testing.T) {
tests := []struct {
@@ -365,7 +365,7 @@ func TestBlockSerializeErrors(t *testing.T) {
// Serialize the block.
w := newFixedWriter(test.max)
err := test.in.Serialize(w)
if err != test.writeErr {
if !errors.Is(err, test.writeErr) {
t.Errorf("Serialize #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
@@ -375,7 +375,7 @@ func TestBlockSerializeErrors(t *testing.T) {
var block MsgBlock
r := newFixedReader(test.max, test.buf)
err = block.Deserialize(r)
if err != test.readErr {
if !errors.Is(err, test.readErr) {
t.Errorf("Deserialize #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
@@ -384,7 +384,7 @@ func TestBlockSerializeErrors(t *testing.T) {
var txLocBlock MsgBlock
br := bytes.NewBuffer(test.buf[0:test.max])
_, err = txLocBlock.DeserializeTxLoc(br)
if err != test.readErr {
if !errors.Is(err, test.readErr) {
t.Errorf("DeserializeTxLoc #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
@@ -400,8 +400,8 @@ func TestBlockOverflowErrors(t *testing.T) {
pver := ProtocolVersion
tests := []struct {
buf []byte // Wire encoding
pver uint32 // Protocol version for wire encoding
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
err error // Expected error
}{
// Block that claims to have ~uint64(0) transactions.
@@ -431,7 +431,7 @@ func TestBlockOverflowErrors(t *testing.T) {
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x61, 0xbc, 0x66, 0x49, 0x00, 0x00, 0x00, 0x00, // Timestamp
0xff, 0xff, 0x00, 0x1d, // Bits
0x01, 0xe3, 0x62, 0x99, 0x00, 0x00, 0x00, 0x00, // Fake Nonce. TODO: (Ori) Replace to a real nonce
0x01, 0xe3, 0x62, 0x99, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, // TxnCount
}, pver, &MessageError{},
@@ -440,7 +440,7 @@ func TestBlockOverflowErrors(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Decode from wire format.
// Decode from appmessage format.
var msg MsgBlock
r := bytes.NewReader(test.buf)
err := msg.KaspaDecode(r, test.pver)
@@ -450,7 +450,7 @@ func TestBlockOverflowErrors(t *testing.T) {
continue
}
// Deserialize from wire format.
// Deserialize from appmessage format.
r = bytes.NewReader(test.buf)
err = msg.Deserialize(r)
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
@@ -459,7 +459,7 @@ func TestBlockOverflowErrors(t *testing.T) {
continue
}
// Deserialize with transaction location info from wire format.
// Deserialize with transaction location info from appmessage format.
br := bytes.NewBuffer(test.buf)
_, err = msg.DeserializeTxLoc(br)
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
@@ -507,9 +507,9 @@ var blockOne = MsgBlock{
HashMerkleRoot: mainnetGenesisMerkleRoot,
AcceptedIDMerkleRoot: exampleAcceptedIDMerkleRoot,
UTXOCommitment: exampleUTXOCommitment,
Timestamp: time.Unix(0x4966bc61, 0), // 2009-01-08 20:54:25 -0600 CST
Bits: 0x1d00ffff, // 486604799
Nonce: 0x9962e301, // 2573394689
Timestamp: mstime.UnixMilliseconds(0x17315ed0f99),
Bits: 0x1d00ffff, // 486604799
Nonce: 0x9962e301, // 2573394689
},
Transactions: []*MsgTx{
NewNativeMsgTx(1,
@@ -570,9 +570,9 @@ var blockOneBytes = []byte{
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x61, 0xbc, 0x66, 0x49, 0x00, 0x00, 0x00, 0x00, // Timestamp
0x99, 0x0f, 0xed, 0x15, 0x73, 0x01, 0x00, 0x00, // Timestamp
0xff, 0xff, 0x00, 0x1d, // Bits
0x01, 0xe3, 0x62, 0x99, 0x00, 0x00, 0x00, 0x00, // Fake Nonce. TODO: (Ori) Replace to a real nonce
0x01, 0xe3, 0x62, 0x99, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
0x01, // TxnCount
0x01, 0x00, 0x00, 0x00, // Version
0x01, // Varint for number of transaction inputs

View File

@@ -0,0 +1,31 @@
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
)
// MaxBlockLocatorsPerMsg is the maximum number of block locator hashes allowed
// per message.
const MaxBlockLocatorsPerMsg = 500
// MsgBlockLocator implements the Message interface and represents a kaspa
// locator message. It is used to find the blockLocator of a peer that is
// syncing with you.
type MsgBlockLocator struct {
baseMessage
BlockLocatorHashes []*daghash.Hash
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgBlockLocator) Command() MessageCommand {
return CmdBlockLocator
}
// NewMsgBlockLocator returns a new kaspa locator message that conforms to
// the Message interface. See MsgBlockLocator for details.
func NewMsgBlockLocator(locatorHashes []*daghash.Hash) *MsgBlockLocator {
return &MsgBlockLocator{
BlockLocatorHashes: locatorHashes,
}
}
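
A tiny sketch, written as if inside package appmessage, of how a caller might respect MaxBlockLocatorsPerMsg before building the message; the truncation policy is illustrative and not taken from this diff:

// newCappedBlockLocator drops any hashes beyond the per-message limit and
// wraps the rest in a MsgBlockLocator.
func newCappedBlockLocator(hashes []*daghash.Hash) *MsgBlockLocator {
	if len(hashes) > MaxBlockLocatorsPerMsg {
		hashes = hashes[:MaxBlockLocatorsPerMsg]
	}
	return NewMsgBlockLocator(hashes)
}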

View File

@@ -0,0 +1,34 @@
package appmessage
import (
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/util/daghash"
)
// TestBlockLocator tests the MsgBlockLocator API.
func TestBlockLocator(t *testing.T) {
hashStr := "000000000002e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0"
locatorHash, err := daghash.NewHashFromStr(hashStr)
if err != nil {
t.Errorf("NewHashFromStr: %v", err)
}
msg := NewMsgBlockLocator([]*daghash.Hash{locatorHash})
// Ensure the command is expected value.
wantCmd := MessageCommand(10)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgBlockLocator: wrong command - got %v want %v",
cmd, wantCmd)
}
// Ensure block locator hashes are added properly.
if msg.BlockLocatorHashes[0] != locatorHash {
t.Errorf("AddBlockLocatorHash: wrong block locator added - "+
"got %v, want %v",
spew.Sprint(msg.BlockLocatorHashes[0]),
spew.Sprint(locatorHash))
}
}

View File

@@ -0,0 +1,22 @@
package appmessage
// MsgDoneIBDBlocks implements the Message interface and represents a kaspa
// DoneIBDBlocks message. It is used to notify the IBD syncing peer that the
// syncer sent all the requested blocks.
//
// This message has no payload.
type MsgDoneIBDBlocks struct {
baseMessage
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgDoneIBDBlocks) Command() MessageCommand {
return CmdDoneIBDBlocks
}
// NewMsgDoneIBDBlocks returns a new kaspa DoneIBDBlocks message that conforms to the
// Message interface.
func NewMsgDoneIBDBlocks() *MsgDoneIBDBlocks {
return &MsgDoneIBDBlocks{}
}

View File

@@ -0,0 +1,31 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
// MsgIBDBlock implements the Message interface and represents a kaspa
// ibdblock message. It is used to deliver block and transaction information in
// response to a RequestIBDBlocks message (MsgRequestIBDBlocks).
type MsgIBDBlock struct {
baseMessage
*MsgBlock
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgIBDBlock) Command() MessageCommand {
return CmdIBDBlock
}
// MaxPayloadLength returns the maximum length the payload can be for the
// receiver. This is part of the Message interface implementation.
func (msg *MsgIBDBlock) MaxPayloadLength(pver uint32) uint32 {
return MaxMessagePayload
}
// NewMsgIBDBlock returns a new kaspa ibdblock message that conforms to the
// Message interface. See MsgIBDBlock for details.
func NewMsgIBDBlock(msgBlock *MsgBlock) *MsgIBDBlock {
return &MsgIBDBlock{MsgBlock: msgBlock}
}

View File

@@ -0,0 +1,118 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"bytes"
"reflect"
"testing"
"github.com/davecgh/go-spew/spew"
)
// TestIBDBlock tests the MsgIBDBlock API.
func TestIBDBlock(t *testing.T) {
pver := ProtocolVersion
// Block 1 header.
parentHashes := blockOne.Header.ParentHashes
hashMerkleRoot := blockOne.Header.HashMerkleRoot
acceptedIDMerkleRoot := blockOne.Header.AcceptedIDMerkleRoot
utxoCommitment := blockOne.Header.UTXOCommitment
bits := blockOne.Header.Bits
nonce := blockOne.Header.Nonce
bh := NewBlockHeader(1, parentHashes, hashMerkleRoot, acceptedIDMerkleRoot, utxoCommitment, bits, nonce)
// Ensure the command is expected value.
wantCmd := MessageCommand(17)
msg := NewMsgIBDBlock(NewMsgBlock(bh))
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgIBDBlock: wrong command - got %v want %v",
cmd, wantCmd)
}
// Ensure max payload is expected value for latest protocol version.
wantPayload := uint32(1024 * 1024 * 32)
maxPayload := msg.MaxPayloadLength(pver)
if maxPayload != wantPayload {
t.Errorf("MaxPayloadLength: wrong max payload length for "+
"protocol version %d - got %v, want %v", pver,
maxPayload, wantPayload)
}
// Ensure we get the same block header data back out.
if !reflect.DeepEqual(&msg.Header, bh) {
t.Errorf("NewMsgIBDBlock: wrong block header - got %v, want %v",
spew.Sdump(&msg.Header), spew.Sdump(bh))
}
// Ensure transactions are added properly.
tx := blockOne.Transactions[0].Copy()
msg.AddTransaction(tx)
if !reflect.DeepEqual(msg.Transactions, blockOne.Transactions) {
t.Errorf("AddTransaction: wrong transactions - got %v, want %v",
spew.Sdump(msg.Transactions),
spew.Sdump(blockOne.Transactions))
}
// Ensure transactions are properly cleared.
msg.ClearTransactions()
if len(msg.Transactions) != 0 {
t.Errorf("ClearTransactions: wrong transactions - got %v, want %v",
len(msg.Transactions), 0)
}
}
// TestIBDBlockEncoding tests the MsgIBDBlock appmessage encode and decode for various numbers
// of transaction inputs and outputs and protocol versions.
func TestIBDBlockEncoding(t *testing.T) {
tests := []struct {
in *MsgIBDBlock // Message to encode
out *MsgIBDBlock // Expected decoded message
buf []byte // Encoded value
txLocs []TxLoc // Expected transaction locations
pver uint32 // Protocol version for appmessage encoding
}{
// Latest protocol version.
{
&MsgIBDBlock{MsgBlock: &blockOne},
&MsgIBDBlock{MsgBlock: &blockOne},
blockOneBytes,
blockOneTxLocs,
ProtocolVersion,
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode the message to appmessage format.
var buf bytes.Buffer
err := test.in.KaspaEncode(&buf, test.pver)
if err != nil {
t.Errorf("KaspaEncode #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("KaspaEncode #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
// Decode the message from appmessage format.
var msg MsgIBDBlock
msg.MsgBlock = new(MsgBlock)
rbuf := bytes.NewReader(test.buf)
err = msg.KaspaDecode(rbuf, test.pver)
if err != nil {
t.Errorf("KaspaDecode #%d error %v", i, err)
continue
}
if !reflect.DeepEqual(&msg, test.out) {
t.Errorf("KaspaDecode #%d\n got: %s want: %s", i,
spew.Sdump(&msg), spew.Sdump(test.out))
continue
}
}
}

View File

@@ -0,0 +1,27 @@
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
)
// MsgInvRelayBlock implements the Message interface and represents a kaspa
// block inventory message. It is used to notify the network about a new block
// by sending its hash, and lets the receiving node decide whether it needs it.
type MsgInvRelayBlock struct {
baseMessage
Hash *daghash.Hash
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgInvRelayBlock) Command() MessageCommand {
return CmdInvRelayBlock
}
// NewMsgInvBlock returns a new kaspa invrelblk message that conforms to
// the Message interface. See MsgInvRelayBlock for details.
func NewMsgInvBlock(hash *daghash.Hash) *MsgInvRelayBlock {
return &MsgInvRelayBlock{
Hash: hash,
}
}

View File

@@ -0,0 +1,31 @@
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
)
// MaxInvPerTxInvMsg is the maximum number of hashes that can
// be in a single CmdInvTransaction message.
const MaxInvPerTxInvMsg = MaxInvPerMsg
// MsgInvTransaction implements the Message interface and represents a kaspa
// TxInv message. It is used to notify the network about new transactions
// by sending their IDs, and lets the receiving node decide whether it needs them.
type MsgInvTransaction struct {
baseMessage
TxIDs []*daghash.TxID
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgInvTransaction) Command() MessageCommand {
return CmdInvTransaction
}
// NewMsgInvTransaction returns a new kaspa TxInv message that conforms to
// the Message interface. See MsgInvTransaction for details.
func NewMsgInvTransaction(ids []*daghash.TxID) *MsgInvTransaction {
return &MsgInvTransaction{
TxIDs: ids,
}
}

View File

@@ -2,11 +2,7 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
import (
"io"
)
package appmessage
// MsgPing implements the Message interface and represents a kaspa ping
// message.
@@ -20,46 +16,18 @@ import (
// The payload for this message just consists of a nonce used for identifying
// it later.
type MsgPing struct {
baseMessage
// Unique value associated with message that is used to identify
// specific ping message.
Nonce uint64
}
// KaspaDecode decodes r using the kaspa protocol encoding into the receiver.
// This is part of the Message interface implementation.
func (msg *MsgPing) KaspaDecode(r io.Reader, pver uint32) error {
err := ReadElement(r, &msg.Nonce)
if err != nil {
return err
}
return nil
}
// KaspaEncode encodes the receiver to w using the kaspa protocol encoding.
// This is part of the Message interface implementation.
func (msg *MsgPing) KaspaEncode(w io.Writer, pver uint32) error {
err := WriteElement(w, msg.Nonce)
if err != nil {
return err
}
return nil
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgPing) Command() string {
func (msg *MsgPing) Command() MessageCommand {
return CmdPing
}
// MaxPayloadLength returns the maximum length the payload can be for the
// receiver. This is part of the Message interface implementation.
func (msg *MsgPing) MaxPayloadLength(pver uint32) uint32 {
// Nonce 8 bytes.
return uint32(8)
}
// NewMsgPing returns a new kaspa ping message that conforms to the Message
// interface. See MsgPing for details.
func NewMsgPing(nonce uint64) *MsgPing {

View File

@@ -0,0 +1,32 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"testing"
"github.com/kaspanet/kaspad/util/random"
)
// TestPing tests the MsgPing API against the latest protocol version.
func TestPing(t *testing.T) {
// Ensure we get the same nonce back out.
nonce, err := random.Uint64()
if err != nil {
t.Errorf("random.Uint64: Error generating nonce: %v", err)
}
msg := NewMsgPing(nonce)
if msg.Nonce != nonce {
t.Errorf("NewMsgPing: wrong nonce - got %v, want %v",
msg.Nonce, nonce)
}
// Ensure the command is expected value.
wantCmd := MessageCommand(7)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgPing: wrong command - got %v want %v",
cmd, wantCmd)
}
}

View File

@@ -2,11 +2,7 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
import (
"io"
)
package appmessage
// MsgPong implements the Message interface and represents a kaspa pong
// message which is used primarily to confirm that a connection is still valid
@@ -14,36 +10,18 @@ import (
//
// This message was not added until protocol versions AFTER BIP0031Version.
type MsgPong struct {
baseMessage
// Unique value associated with message that is used to identify
// specific ping message.
Nonce uint64
}
// KaspaDecode decodes r using the kaspa protocol encoding into the receiver.
// This is part of the Message interface implementation.
func (msg *MsgPong) KaspaDecode(r io.Reader, pver uint32) error {
return ReadElement(r, &msg.Nonce)
}
// KaspaEncode encodes the receiver to w using the kaspa protocol encoding.
// This is part of the Message interface implementation.
func (msg *MsgPong) KaspaEncode(w io.Writer, pver uint32) error {
return WriteElement(w, msg.Nonce)
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgPong) Command() string {
func (msg *MsgPong) Command() MessageCommand {
return CmdPong
}
// MaxPayloadLength returns the maximum length the payload can be for the
// receiver. This is part of the Message interface implementation.
func (msg *MsgPong) MaxPayloadLength(pver uint32) uint32 {
// Nonce 8 bytes.
return uint32(8)
}
// NewMsgPong returns a new kaspa pong message that conforms to the Message
// interface. See MsgPong for details.
func NewMsgPong(nonce uint64) *MsgPong {

View File

@@ -0,0 +1,31 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"testing"
"github.com/kaspanet/kaspad/util/random"
)
// TestPongLatest tests the MsgPong API against the latest protocol version.
func TestPongLatest(t *testing.T) {
nonce, err := random.Uint64()
if err != nil {
t.Errorf("random.Uint64: error generating nonce: %v", err)
}
msg := NewMsgPong(nonce)
if msg.Nonce != nonce {
t.Errorf("NewMsgPong: wrong nonce - got %v, want %v",
msg.Nonce, nonce)
}
// Ensure the command is expected value.
wantCmd := MessageCommand(8)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgPong: wrong command - got %v want %v",
cmd, wantCmd)
}
}

View File

@@ -0,0 +1,22 @@
package appmessage
// MsgReject implements the Message interface and represents a kaspa
// Reject message. It is used to notify peers why they are banned.
type MsgReject struct {
baseMessage
Reason string
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgReject) Command() MessageCommand {
return CmdReject
}
// NewMsgReject returns a new kaspa Reject message that conforms to the
// Message interface.
func NewMsgReject(reason string) *MsgReject {
return &MsgReject{
Reason: reason,
}
}

View File

@@ -0,0 +1,36 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"github.com/kaspanet/kaspad/util/subnetworkid"
)
// MsgRequestAddresses implements the Message interface and represents a kaspa
// RequestAddresses message. It is used to request a list of known active peers on the
// network from a peer to help identify potential nodes. The list is returned
// via one or more addr messages (MsgAddresses).
//
// This message has no payload.
type MsgRequestAddresses struct {
baseMessage
IncludeAllSubnetworks bool
SubnetworkID *subnetworkid.SubnetworkID
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestAddresses) Command() MessageCommand {
return CmdRequestAddresses
}
// NewMsgRequestAddresses returns a new kaspa RequestAddresses message that conforms to the
// Message interface. See MsgRequestAddresses for details.
func NewMsgRequestAddresses(includeAllSubnetworks bool, subnetworkID *subnetworkid.SubnetworkID) *MsgRequestAddresses {
return &MsgRequestAddresses{
IncludeAllSubnetworks: includeAllSubnetworks,
SubnetworkID: subnetworkID,
}
}

View File

@@ -0,0 +1,20 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"testing"
)
// TestRequestAddresses tests the MsgRequestAddresses API.
func TestRequestAddresses(t *testing.T) {
// Ensure the command is expected value.
wantCmd := MessageCommand(2)
msg := NewMsgRequestAddresses(false, nil)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgRequestAddresses: wrong command - got %v want %v",
cmd, wantCmd)
}
}

View File

@@ -0,0 +1,31 @@
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
)
// MsgRequestBlockLocator implements the Message interface and represents a kaspa
// RequestBlockLocator message. It is used to request a block locator between a
// high hash and a low hash.
// The locator is returned via a locator message (MsgBlockLocator).
type MsgRequestBlockLocator struct {
baseMessage
HighHash *daghash.Hash
LowHash *daghash.Hash
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestBlockLocator) Command() MessageCommand {
return CmdRequestBlockLocator
}
// NewMsgRequestBlockLocator returns a new RequestBlockLocator message that conforms to the
// Message interface using the passed parameters and defaults for the remaining
// fields.
func NewMsgRequestBlockLocator(highHash, lowHash *daghash.Hash) *MsgRequestBlockLocator {
return &MsgRequestBlockLocator{
HighHash: highHash,
LowHash: lowHash,
}
}
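
A one-line sketch, written as if inside package appmessage: passing daghash.ZeroHash as the low hash, as the test below does, asks for a locator that is not bounded from below.

// requestLocatorFrom builds a RequestBlockLocator message from highHash down,
// with no lower bound (daghash.ZeroHash as the low hash).
func requestLocatorFrom(highHash *daghash.Hash) *MsgRequestBlockLocator {
	return NewMsgRequestBlockLocator(highHash, &daghash.ZeroHash)
}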

View File

@@ -0,0 +1,24 @@
package appmessage
import (
"testing"
"github.com/kaspanet/kaspad/util/daghash"
)
// TestRequestBlockLocator tests the MsgRequestBlockLocator API.
func TestRequestBlockLocator(t *testing.T) {
hashStr := "000000000002e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0"
highHash, err := daghash.NewHashFromStr(hashStr)
if err != nil {
t.Errorf("NewHashFromStr: %v", err)
}
// Ensure the command is expected value.
wantCmd := MessageCommand(9)
msg := NewMsgRequestBlockLocator(highHash, &daghash.ZeroHash)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgRequestBlockLocator: wrong command - got %v want %v",
cmd, wantCmd)
}
}

View File

@@ -0,0 +1,34 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
)
// MsgRequestIBDBlocks implements the Message interface and represents a kaspa
// RequestIBDBlocks message. It is used to request a list of blocks starting after the
// low hash and up to the high hash.
type MsgRequestIBDBlocks struct {
baseMessage
LowHash *daghash.Hash
HighHash *daghash.Hash
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestIBDBlocks) Command() MessageCommand {
return CmdRequestIBDBlocks
}
// NewMsgRequstIBDBlocks returns a new kaspa RequestIBDBlocks message that conforms to the
// Message interface using the passed parameters and defaults for the remaining
// fields.
func NewMsgRequstIBDBlocks(lowHash, highHash *daghash.Hash) *MsgRequestIBDBlocks {
return &MsgRequestIBDBlocks{
LowHash: lowHash,
HighHash: highHash,
}
}

View File

@@ -0,0 +1,40 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"testing"
"github.com/kaspanet/kaspad/util/daghash"
)
// TestRequstIBDBlocks tests the MsgRequestIBDBlocks API.
func TestRequstIBDBlocks(t *testing.T) {
hashStr := "000000000002e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0"
lowHash, err := daghash.NewHashFromStr(hashStr)
if err != nil {
t.Errorf("NewHashFromStr: %v", err)
}
hashStr = "3ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506"
highHash, err := daghash.NewHashFromStr(hashStr)
if err != nil {
t.Errorf("NewHashFromStr: %v", err)
}
// Ensure we get the same data back out.
msg := NewMsgRequstIBDBlocks(lowHash, highHash)
if !msg.HighHash.IsEqual(highHash) {
t.Errorf("NewMsgRequstIBDBlocks: wrong high hash - got %v, want %v",
msg.HighHash, highHash)
}
// Ensure the command is expected value.
wantCmd := MessageCommand(4)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgRequstIBDBlocks: wrong command - got %v want %v",
cmd, wantCmd)
}
}

View File

@@ -0,0 +1,22 @@
package appmessage
// MsgRequestNextIBDBlocks implements the Message interface and represents a kaspa
// RequestNextIBDBlocks message. It is used to notify the IBD syncer peer to send
// more blocks.
//
// This message has no payload.
type MsgRequestNextIBDBlocks struct {
baseMessage
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestNextIBDBlocks) Command() MessageCommand {
return CmdRequestNextIBDBlocks
}
// NewMsgRequestNextIBDBlocks returns a new kaspa RequestNextIBDBlocks message that conforms to the
// Message interface.
func NewMsgRequestNextIBDBlocks() *MsgRequestNextIBDBlocks {
return &MsgRequestNextIBDBlocks{}
}
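
Since the IBD-related messages in this diff (MsgRequestIBDBlocks, MsgIBDBlock, MsgRequestNextIBDBlocks, MsgDoneIBDBlocks) only make sense together, here is a consolidating sketch of the syncer side of the exchange, written as if inside package appmessage; the send callback is a hypothetical stand-in for the real outgoing-message route:

// serveIBDBatch streams one batch of requested blocks as MsgIBDBlock messages
// and, once there is nothing left to send, tells the syncing peer so with
// MsgDoneIBDBlocks. The syncing peer asks for further batches with
// MsgRequestNextIBDBlocks.
func serveIBDBatch(blocks []*MsgBlock, isLastBatch bool, send func(Message)) {
	for _, block := range blocks {
		send(NewMsgIBDBlock(block))
	}
	if isLastBatch {
		send(NewMsgDoneIBDBlocks())
	}
}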

View File

@@ -0,0 +1,31 @@
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
)
// MsgRequestRelayBlocksHashes is the maximum number of hashes that can
// be in a single RequestRelayBlocks message.
const MsgRequestRelayBlocksHashes = MaxInvPerMsg
// MsgRequestRelayBlocks implements the Message interface and represents a kaspa
// RequestRelayBlocks message. It is used to request blocks as part of the block
// relay protocol.
type MsgRequestRelayBlocks struct {
baseMessage
Hashes []*daghash.Hash
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestRelayBlocks) Command() MessageCommand {
return CmdRequestRelayBlocks
}
// NewMsgRequestRelayBlocks returns a new kaspa RequestRelayBlocks message that conforms to
// the Message interface. See MsgRequestRelayBlocks for details.
func NewMsgRequestRelayBlocks(hashes []*daghash.Hash) *MsgRequestRelayBlocks {
return &MsgRequestRelayBlocks{
Hashes: hashes,
}
}

View File

@@ -0,0 +1,21 @@
package appmessage
// MsgRequestSelectedTip implements the Message interface and represents a kaspa
// RequestSelectedTip message. It is used to request the selected tip of another peer.
//
// This message has no payload.
type MsgRequestSelectedTip struct {
baseMessage
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestSelectedTip) Command() MessageCommand {
return CmdRequestSelectedTip
}
// NewMsgRequestSelectedTip returns a new kaspa RequestSelectedTip message that conforms to the
// Message interface.
func NewMsgRequestSelectedTip() *MsgRequestSelectedTip {
return &MsgRequestSelectedTip{}
}

View File

@@ -0,0 +1,20 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"testing"
)
// TestRequestSelectedTip tests the MsgRequestSelectedTip API.
func TestRequestSelectedTip(t *testing.T) {
// Ensure the command is expected value.
wantCmd := MessageCommand(12)
msg := NewMsgRequestSelectedTip()
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgRequestSelectedTip: wrong command - got %v want %v",
cmd, wantCmd)
}
}

View File

@@ -0,0 +1,31 @@
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
)
// MaxInvPerRequestTransactionsMsg is the maximum number of hashes that can
// be in a single CmdRequestTransactions message.
const MaxInvPerRequestTransactionsMsg = MaxInvPerMsg
// MsgRequestTransactions implements the Message interface and represents a kaspa
// RequestTransactions message. It is used to request transactions as part of the
// transactions relay protocol.
type MsgRequestTransactions struct {
baseMessage
IDs []*daghash.TxID
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestTransactions) Command() MessageCommand {
return CmdRequestTransactions
}
// NewMsgRequestTransactions returns a new kaspa RequestTransactions message that conforms to
// the Message interface. See MsgRequestTransactions for details.
func NewMsgRequestTransactions(ids []*daghash.TxID) *MsgRequestTransactions {
return &MsgRequestTransactions{
IDs: ids,
}
}

View File

@@ -0,0 +1,28 @@
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
)
// MsgSelectedTip implements the Message interface and represents a kaspa
// selectedtip message. It is used to answer RequestSelectedTip messages and tell
// the asking peer what the selected tip of this peer is.
type MsgSelectedTip struct {
baseMessage
// The selected tip hash of the generator of the message.
SelectedTipHash *daghash.Hash
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgSelectedTip) Command() MessageCommand {
return CmdSelectedTip
}
// NewMsgSelectedTip returns a new kaspa selectedtip message that conforms to the
// Message interface.
func NewMsgSelectedTip(selectedTipHash *daghash.Hash) *MsgSelectedTip {
return &MsgSelectedTip{
SelectedTipHash: selectedTipHash,
}
}

View File

@@ -0,0 +1,18 @@
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
"testing"
)
// TestSelectedTip tests the MsgSelectedTip API.
func TestSelectedTip(t *testing.T) {
// Ensure the command is expected value.
wantCmd := MessageCommand(11)
msg := NewMsgSelectedTip(&daghash.ZeroHash)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgSelectedTip: wrong command - got %v want %v",
cmd, wantCmd)
}
}

View File

@@ -0,0 +1,30 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
)
// MsgTransactionNotFound defines a kaspa TransactionNotFound message which is sent in response to
// a RequestTransactions message if any of the requested data is not available on the peer.
type MsgTransactionNotFound struct {
baseMessage
ID *daghash.TxID
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgTransactionNotFound) Command() MessageCommand {
return CmdTransactionNotFound
}
// NewMsgTransactionNotFound returns a new kaspa transactionsnotfound message that conforms to the
// Message interface. See MsgTransactionNotFound for details.
func NewMsgTransactionNotFound(id *daghash.TxID) *MsgTransactionNotFound {
return &MsgTransactionNotFound{
ID: id,
}
}
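
A consolidating sketch of the transaction-relay reply path these message types describe, written as if inside package appmessage: for each requested ID the responder sends either the transaction or a TransactionNotFound. The lookup callback is hypothetical, and MsgTx is assumed to satisfy the Message interface via CmdTx, as the other messages in this diff do:

// answerTransactionRequests maps a MsgRequestTransactions to the reply
// messages a peer would send back: the transaction itself when it is known,
// MsgTransactionNotFound otherwise.
func answerTransactionRequests(request *MsgRequestTransactions,
	lookup func(*daghash.TxID) (*MsgTx, bool)) []Message {

	replies := make([]Message, 0, len(request.IDs))
	for _, id := range request.IDs {
		if tx, ok := lookup(id); ok {
			replies = append(replies, tx)
			continue
		}
		replies = append(replies, NewMsgTransactionNotFound(id))
	}
	return replies
}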

View File

@@ -2,10 +2,9 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
package appmessage
import (
"bytes"
"encoding/binary"
"fmt"
"io"
@@ -44,11 +43,11 @@ const (
SequenceLockTimeMask = 0x0000ffff
// SequenceLockTimeGranularity is the defined time based granularity
// for seconds-based relative time locks. When converting from seconds
// for milliseconds-based relative time locks. When converting from milliseconds
// to a sequence number, the value is right shifted by this amount,
// therefore the granularity of relative time locks in 512 or 2^9
// seconds. Enforced relative lock times are multiples of 512 seconds.
SequenceLockTimeGranularity = 9
// therefore the granularity of relative time locks is 524288 or 2^19
// milliseconds. Enforced relative lock times are multiples of 524288 milliseconds.
SequenceLockTimeGranularity = 19
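// A worked example of the conversion described above (illustrative, not part
// of this diff): with a granularity of 2^19 = 524288 milliseconds, a relative
// lock of 600000 ms (10 minutes) maps to 600000 >> 19 == 1 sequence-lock unit,
// i.e. it is rounded down to a single 524288 ms (~8.7 minute) granule, while
// 2097152 ms (~35 minutes) maps to 2097152 >> 19 == 4 units.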
// defaultTxInOutAlloc is the default size used for the backing array for
// transaction inputs and outputs. The array will dynamically grow as needed,
@@ -84,7 +83,7 @@ const (
minTxPayload = 10
// freeListMaxScriptSize is the size of each buffer in the free list
// that is used for deserializing scripts from the wire before they are
// that is used for deserializing scripts from the appmessage before they are
// concatenated into a single contiguous buffers. This value was chosen
// because it is slightly more than twice the size of the vast majority
// of all "standard" scripts. Larger scripts are still deserialized
@@ -268,6 +267,7 @@ func NewTxOut(value uint64, scriptPubKey []byte) *TxOut {
// Use the AddTxIn and AddTxOut functions to build up the list of transaction
// inputs and outputs.
type MsgTx struct {
baseMessage
Version int32
TxIn []*TxIn
TxOut []*TxOut
@@ -301,29 +301,34 @@ func (msg *MsgTx) IsCoinBase() bool {
// TxHash generates the Hash for the transaction.
func (msg *MsgTx) TxHash() *daghash.Hash {
// Encode the transaction and calculate double sha256 on the result.
// Ignore the error returns since the only way the encode could fail
// is being out of memory or due to nil pointers, both of which would
// cause a run-time panic.
buf := bytes.NewBuffer(make([]byte, 0, msg.serializeSize(txEncodingExcludePayload)))
_ = msg.serialize(buf, txEncodingExcludePayload)
writer := daghash.NewDoubleHashWriter()
err := msg.serialize(writer, txEncodingExcludePayload)
if err != nil {
// this writer never returns errors (no allocations or possible failures), so errors can only come from validity checks,
// and we assume we never construct malformed transactions.
panic(fmt.Sprintf("TxHash() failed. this should never fail for structurally-valid transactions. err: %+v", err))
}
hash := daghash.Hash(daghash.DoubleHashH(buf.Bytes()))
hash := writer.Finalize()
return &hash
}
// TxID generates the Hash for the transaction without the signature script, gas and payload fields.
func (msg *MsgTx) TxID() *daghash.TxID {
// Encode the transaction, replace signature script with zeroes, cut off
// payload and calculate double sha256 on the result. Ignore the error
// returns since the only way the encode could fail is being out of memory or
// due to nil pointers, both of which would cause a run-time panic.
// payload and calculate double sha256 on the result.
var encodingFlags txEncoding
if !msg.IsCoinBase() {
encodingFlags = txEncodingExcludeSignatureScript | txEncodingExcludePayload
}
buf := bytes.NewBuffer(make([]byte, 0, msg.serializeSize(encodingFlags)))
_ = msg.serialize(buf, encodingFlags)
txID := daghash.TxID(daghash.DoubleHashH(buf.Bytes()))
writer := daghash.NewDoubleHashWriter()
err := msg.serialize(writer, encodingFlags)
if err != nil {
// this writer never returns errors (no allocations or possible failures), so errors can only come from validity checks,
// and we assume we never construct malformed transactions.
panic(fmt.Sprintf("TxID() failed. this should never fail for structurally-valid transactions. err: %+v", err))
}
txID := daghash.TxID(writer.Finalize())
return &txID
}
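
A short sketch, written as if inside package appmessage, to make the TxHash/TxID distinction above concrete: TxHash hashes the encoding without the payload, while TxID additionally replaces the signature scripts with zeroes for non-coinbase transactions, so malleating a signature script changes the TxHash but not the TxID.

// hashAndID returns both identifiers of a transaction; see the comment above
// for when the two differ.
func hashAndID(tx *MsgTx) (*daghash.Hash, *daghash.TxID) {
	return tx.TxHash(), tx.TxID()
}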
@@ -401,7 +406,7 @@ func (msg *MsgTx) Copy() *MsgTx {
// KaspaDecode decodes r using the kaspa protocol encoding into the receiver.
// This is part of the Message interface implementation.
// See Deserialize for decoding transactions stored to disk, such as in a
// database, as opposed to decoding transactions from the wire.
// database, as opposed to decoding transactions from the appmessage format.
func (msg *MsgTx) KaspaDecode(r io.Reader, pver uint32) error {
version, err := binaryserializer.Uint32(r, littleEndian)
if err != nil {
@@ -530,6 +535,10 @@ func (msg *MsgTx) KaspaDecode(r io.Reader, pver uint32) error {
msg.Payload = make([]byte, payloadLength)
_, err = io.ReadFull(r, msg.Payload)
if err != nil {
returnScriptBuffers()
return err
}
}
// Create a single allocation to house all of the scripts and set each
@@ -587,15 +596,15 @@ func (msg *MsgTx) KaspaDecode(r io.Reader, pver uint32) error {
// Deserialize decodes a transaction from r into the receiver using a format
// that is suitable for long-term storage such as a database while respecting
// the Version field in the transaction. This function differs from KaspaDecode
// in that KaspaDecode decodes from the kaspa wire protocol as it was sent
// across the network. The wire encoding can technically differ depending on
// in that KaspaDecode decodes from the kaspa appmessage protocol as it was sent
// across the network. The appmessage encoding can technically differ depending on
// the protocol version and doesn't even really need to match the format of a
// stored transaction at all. As of the time this comment was written, the
// encoded transaction is the same in both instances, but there is a distinct
// difference and separating the two allows the API to be flexible enough to
// deal with changes.
func (msg *MsgTx) Deserialize(r io.Reader) error {
// At the current time, there is no difference between the wire encoding
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of KaspaDecode.
return msg.KaspaDecode(r, 0)
@@ -604,7 +613,7 @@ func (msg *MsgTx) Deserialize(r io.Reader) error {
// KaspaEncode encodes the receiver to w using the kaspa protocol encoding.
// This is part of the Message interface implementation.
// See Serialize for encoding transactions to be stored to disk, such as in a
// database, as opposed to encoding transactions for the wire.
// database, as opposed to encoding transactions for the appmessage.
func (msg *MsgTx) KaspaEncode(w io.Writer, pver uint32) error {
return msg.encode(w, pver, txEncodingFull)
}
@@ -693,22 +702,22 @@ func (msg *MsgTx) encode(w io.Writer, pver uint32, encodingFlags txEncoding) err
// Serialize encodes the transaction to w using a format that suitable for
// long-term storage such as a database while respecting the Version field in
// the transaction. This function differs from KaspaEncode in that KaspaEncode
// encodes the transaction to the kaspa wire protocol in order to be sent
// across the network. The wire encoding can technically differ depending on
// encodes the transaction to the kaspa appmessage protocol in order to be sent
// across the network. The appmessage encoding can technically differ depending on
// the protocol version and doesn't even really need to match the format of a
// stored transaction at all. As of the time this comment was written, the
// encoded transaction is the same in both instances, but there is a distinct
// difference and separating the two allows the API to be flexible enough to
// deal with changes.
func (msg *MsgTx) Serialize(w io.Writer) error {
// At the current time, there is no difference between the wire encoding
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of KaspaEncode.
return msg.KaspaEncode(w, 0)
}
func (msg *MsgTx) serialize(w io.Writer, encodingFlags txEncoding) error {
// At the current time, there is no difference between the wire encoding
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of `encode`.
return msg.encode(w, 0, encodingFlags)
@@ -758,7 +767,7 @@ func (msg *MsgTx) serializeSize(encodingFlags txEncoding) int {
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgTx) Command() string {
func (msg *MsgTx) Command() MessageCommand {
return CmdTx
}

View File

@@ -2,11 +2,12 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
package appmessage
import (
"bytes"
"fmt"
"github.com/pkg/errors"
"io"
"math"
"reflect"
@@ -29,10 +30,10 @@ func TestTx(t *testing.T) {
}
// Ensure the command is expected value.
wantCmd := "tx"
wantCmd := MessageCommand(6)
msg := NewNativeMsgTx(1, nil, nil)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgAddr: wrong command - got %v want %v",
t.Errorf("NewMsgAddresses: wrong command - got %v want %v",
cmd, wantCmd)
}
@@ -191,7 +192,7 @@ func TestTxHashAndID(t *testing.T) {
return
}
payload := []byte{1, 2, 3}
txIns := []*TxIn{&TxIn{
txIns := []*TxIn{{
PreviousOutpoint: Outpoint{
Index: 0,
TxID: daghash.TxID{1, 2, 3},
@@ -252,9 +253,9 @@ func TestTxHashAndID(t *testing.T) {
}
}
// TestTxWire tests the MsgTx wire encode and decode for various numbers
// TestTxEncoding tests the MsgTx appmessage encode and decode for various numbers
// of transaction inputs and outputs and protocol versions.
func TestTxWire(t *testing.T) {
func TestTxEncoding(t *testing.T) {
// Empty tx message.
noTx := NewNativeMsgTx(1, nil, nil)
noTxEncoded := []byte{
@@ -270,8 +271,8 @@ func TestTxWire(t *testing.T) {
tests := []struct {
in *MsgTx // Message to encode
out *MsgTx // Expected decoded message
buf []byte // Wire encoding
pver uint32 // Protocol version for wire encoding
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
}{
// Latest protocol version with no transactions.
{
@@ -292,7 +293,7 @@ func TestTxWire(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode the message to wire format.
// Encode the message to appmessage format.
var buf bytes.Buffer
err := test.in.KaspaEncode(&buf, test.pver)
if err != nil {
@@ -305,7 +306,7 @@ func TestTxWire(t *testing.T) {
continue
}
// Decode the message from wire format.
// Decode the message from appmessage format.
var msg MsgTx
rbuf := bytes.NewReader(test.buf)
err = msg.KaspaDecode(rbuf, test.pver)
@@ -321,15 +322,15 @@ func TestTxWire(t *testing.T) {
}
}
// TestTxWireErrors performs negative tests against wire encode and decode
// TestTxEncodingErrors performs negative tests against appmessage encode and decode
// of MsgTx to confirm error paths work correctly.
func TestTxWireErrors(t *testing.T) {
func TestTxEncodingErrors(t *testing.T) {
pver := ProtocolVersion
tests := []struct {
in *MsgTx // Value to encode
buf []byte // Wire encoding
pver uint32 // Protocol version for wire encoding
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
max int // Max size of fixed buffer to induce errors
writeErr error // Expected write error
readErr error // Expected read error
@@ -362,20 +363,20 @@ func TestTxWireErrors(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to wire format.
// Encode to appmessage format.
w := newFixedWriter(test.max)
err := test.in.KaspaEncode(w, test.pver)
if err != test.writeErr {
if !errors.Is(err, test.writeErr) {
t.Errorf("KaspaEncode #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Decode from wire format.
// Decode from appmessage format.
var msg MsgTx
r := newFixedReader(test.max, test.buf)
err = msg.KaspaDecode(r, test.pver)
if err != test.readErr {
if !errors.Is(err, test.readErr) {
t.Errorf("KaspaDecode #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
@@ -527,7 +528,7 @@ func TestTxSerialize(t *testing.T) {
}
}
// TestTxSerializeErrors performs negative tests against wire encode and decode
// TestTxSerializeErrors performs negative tests against appmessage encode and decode
// of MsgTx to confirm error paths work correctly.
func TestTxSerializeErrors(t *testing.T) {
tests := []struct {
@@ -568,7 +569,7 @@ func TestTxSerializeErrors(t *testing.T) {
// Serialize the transaction.
w := newFixedWriter(test.max)
err := test.in.Serialize(w)
if err != test.writeErr {
if !errors.Is(err, test.writeErr) {
t.Errorf("Serialize #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
@@ -578,7 +579,7 @@ func TestTxSerializeErrors(t *testing.T) {
var tx MsgTx
r := newFixedReader(test.max, test.buf)
err = tx.Deserialize(r)
if err != test.readErr {
if !errors.Is(err, test.readErr) {
t.Errorf("Deserialize #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
@@ -627,8 +628,8 @@ func TestTxOverflowErrors(t *testing.T) {
txVer := uint32(1)
tests := []struct {
buf []byte // Wire encoding
pver uint32 // Protocol version for wire encoding
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
version uint32 // Transaction version
err error // Expected error
}{
@@ -690,7 +691,7 @@ func TestTxOverflowErrors(t *testing.T) {
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Decode from wire format.
// Decode from appmessage format.
var msg MsgTx
r := bytes.NewReader(test.buf)
err := msg.KaspaDecode(r, test.pver)
@@ -700,7 +701,7 @@ func TestTxOverflowErrors(t *testing.T) {
continue
}
// Decode from wire format.
// Decode from appmessage format.
r = bytes.NewReader(test.buf)
err = msg.Deserialize(r)
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
@@ -890,7 +891,7 @@ var multiTxOuts = []*TxOut{
}
var multiTx = NewNativeMsgTx(1, multiTxIns, multiTxOuts)
// multiTxEncoded is the wire encoded bytes for multiTx using protocol version
// multiTxEncoded is the appmessage encoded bytes for multiTx using protocol version
// 60002 and is used in the various tests.
var multiTxEncoded = []byte{
0x01, 0x00, 0x00, 0x00, // Version

View File

@@ -0,0 +1,26 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
// MsgVerAck defines a kaspa verack message which is used for a peer to
// acknowledge a version message (MsgVersion) after it has used the information
// to negotiate parameters. It implements the Message interface.
//
// This message has no payload.
type MsgVerAck struct {
baseMessage
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgVerAck) Command() MessageCommand {
return CmdVerAck
}
// NewMsgVerAck returns a new kaspa verack message that conforms to the
// Message interface.
func NewMsgVerAck() *MsgVerAck {
return &MsgVerAck{}
}

View File

@@ -0,0 +1,20 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"testing"
)
// TestVerAck tests the MsgVerAck API.
func TestVerAck(t *testing.T) {
// Ensure the command is expected value.
wantCmd := MessageCommand(1)
msg := NewMsgVerAck()
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgVerAck: wrong command - got %v want %v",
cmd, wantCmd)
}
}

View File

@@ -0,0 +1,130 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"fmt"
"github.com/kaspanet/kaspad/version"
"strings"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/subnetworkid"
)
// MaxUserAgentLen is the maximum allowed length for the user agent field in a
// version message (MsgVersion).
const MaxUserAgentLen = 256
// DefaultUserAgent for appmessage in the stack
var DefaultUserAgent = fmt.Sprintf("/kaspad:%s/", version.Version())
// MsgVersion implements the Message interface and represents a kaspa version
// message. It is used for a peer to advertise itself as soon as an outbound
// connection is made. The remote peer then uses this information along with
// its own to negotiate. The remote peer must then respond with a version
// message of its own containing the negotiated values followed by a verack
// message (MsgVerAck). This exchange must take place before any further
// communication is allowed to proceed.
type MsgVersion struct {
baseMessage
// Version of the protocol the node is using.
ProtocolVersion uint32
// The peer's network (mainnet, testnet, etc.)
Network string
// Bitfield which identifies the enabled services.
Services ServiceFlag
// Time the message was generated. This is encoded as an int64 on the appmessage.
Timestamp mstime.Time
// Address of the local peer.
Address *NetAddress
// The peer unique ID
ID *id.ID
// The user agent that generated the message. This is encoded as a varString
// on the appmessage. It has a max length of MaxUserAgentLen.
UserAgent string
// The selected tip hash of the generator of the version message.
SelectedTipHash *daghash.Hash
// Don't announce transactions to peer.
DisableRelayTx bool
// The subnetwork of the generator of the version message. Should be nil in full nodes
SubnetworkID *subnetworkid.SubnetworkID
}
// HasService returns whether the specified service is supported by the peer
// that generated the message.
func (msg *MsgVersion) HasService(service ServiceFlag) bool {
return msg.Services&service == service
}
// AddService adds service as a supported service by the peer generating the
// message.
func (msg *MsgVersion) AddService(service ServiceFlag) {
msg.Services |= service
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgVersion) Command() MessageCommand {
return CmdVersion
}
// NewMsgVersion returns a new kaspa version message that conforms to the
// Message interface using the passed parameters and defaults for the remaining
// fields.
func NewMsgVersion(addr *NetAddress, id *id.ID, network string,
selectedTipHash *daghash.Hash, subnetworkID *subnetworkid.SubnetworkID) *MsgVersion {
// Limit the timestamp to one millisecond precision since the protocol
// doesn't support better.
return &MsgVersion{
ProtocolVersion: ProtocolVersion,
Network: network,
Services: 0,
Timestamp: mstime.Now(),
Address: addr,
ID: id,
UserAgent: DefaultUserAgent,
SelectedTipHash: selectedTipHash,
DisableRelayTx: false,
SubnetworkID: subnetworkID,
}
}
// ValidateUserAgent checks userAgent length against MaxUserAgentLen
func ValidateUserAgent(userAgent string) error {
if len(userAgent) > MaxUserAgentLen {
str := fmt.Sprintf("user agent too long [len %d, max %d]",
len(userAgent), MaxUserAgentLen)
return messageError("MsgVersion", str)
}
return nil
}
// AddUserAgent adds a user agent to the user agent string for the version
// message. The version string is not defined to any strict format, although
// it is recommended to use the form "major.minor.revision" e.g. "2.6.41".
func (msg *MsgVersion) AddUserAgent(name string, version string,
comments ...string) {
newUserAgent := fmt.Sprintf("%s:%s", name, version)
if len(comments) != 0 {
newUserAgent = fmt.Sprintf("%s(%s)", newUserAgent,
strings.Join(comments, "; "))
}
newUserAgent = fmt.Sprintf("%s%s/", msg.UserAgent, newUserAgent)
msg.UserAgent = newUserAgent
}
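As an illustrative aside (hypothetical snippet, not part of the diff): AddUserAgent appends a "name:version(comments)/" segment to the existing user agent, and ValidateUserAgent can enforce MaxUserAgentLen before the message is sent:

package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/app/appmessage"
)

func main() {
	// nil is acceptable for the address, ID, selected tip and subnetwork here,
	// since NewMsgVersion only stores the values it is given.
	msg := appmessage.NewMsgVersion(nil, nil, "mainnet", nil, nil)
	msg.AddUserAgent("myclient", "1.2.3", "beta")

	// msg.UserAgent is now DefaultUserAgent + "myclient:1.2.3(beta)/"
	if err := appmessage.ValidateUserAgent(msg.UserAgent); err != nil {
		panic(err)
	}
	fmt.Println(msg.UserAgent)
}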

View File

@@ -0,0 +1,96 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
"github.com/kaspanet/kaspad/util/daghash"
"net"
"reflect"
"testing"
)
// TestVersion tests the MsgVersion API.
func TestVersion(t *testing.T) {
pver := ProtocolVersion
// Create version message data.
selectedTipHash := &daghash.Hash{12, 34}
tcpAddrMe := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 16111}
me := NewNetAddress(tcpAddrMe, SFNodeNetwork)
generatedID, err := id.GenerateID()
if err != nil {
t.Fatalf("id.GenerateID: %s", err)
}
// Ensure we get the correct data back out.
msg := NewMsgVersion(me, generatedID, "mainnet", selectedTipHash, nil)
if msg.ProtocolVersion != pver {
t.Errorf("NewMsgVersion: wrong protocol version - got %v, want %v",
msg.ProtocolVersion, pver)
}
if !reflect.DeepEqual(msg.Address, me) {
t.Errorf("NewMsgVersion: wrong me address - got %v, want %v",
spew.Sdump(&msg.Address), spew.Sdump(me))
}
if msg.ID.String() != generatedID.String() {
t.Errorf("NewMsgVersion: wrong nonce - got %s, want %s",
msg.ID, generatedID)
}
if msg.UserAgent != DefaultUserAgent {
t.Errorf("NewMsgVersion: wrong user agent - got %v, want %v",
msg.UserAgent, DefaultUserAgent)
}
if !msg.SelectedTipHash.IsEqual(selectedTipHash) {
t.Errorf("NewMsgVersion: wrong selected tip hash - got %s, want %s",
msg.SelectedTipHash, selectedTipHash)
}
if msg.DisableRelayTx {
t.Errorf("NewMsgVersion: disable relay tx is not false by "+
"default - got %v, want %v", msg.DisableRelayTx, false)
}
msg.AddUserAgent("myclient", "1.2.3", "optional", "comments")
customUserAgent := DefaultUserAgent + "myclient:1.2.3(optional; comments)/"
if msg.UserAgent != customUserAgent {
t.Errorf("AddUserAgent: wrong user agent - got %s, want %s",
msg.UserAgent, customUserAgent)
}
msg.AddUserAgent("mygui", "3.4.5")
customUserAgent += "mygui:3.4.5/"
if msg.UserAgent != customUserAgent {
t.Errorf("AddUserAgent: wrong user agent - got %s, want %s",
msg.UserAgent, customUserAgent)
}
// Version message should not have any services set by default.
if msg.Services != 0 {
t.Errorf("NewMsgVersion: wrong default services - got %v, want %v",
msg.Services, 0)
}
if msg.HasService(SFNodeNetwork) {
t.Errorf("HasService: SFNodeNetwork service is set")
}
// Ensure the command is expected value.
wantCmd := MessageCommand(0)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgVersion: wrong command - got %v want %v",
cmd, wantCmd)
}
// Ensure adding the full service node flag works.
msg.AddService(SFNodeNetwork)
if msg.Services != SFNodeNetwork {
t.Errorf("AddService: wrong services - got %v, want %v",
msg.Services, SFNodeNetwork)
}
if !msg.HasService(SFNodeNetwork) {
t.Errorf("HasService: SFNodeNetwork service not set")
}
}

View File

@@ -0,0 +1,74 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"github.com/kaspanet/kaspad/util/mstime"
"net"
)
// NetAddress defines information about a peer on the network including the time
// it was last seen, the services it supports, its IP address, and port.
type NetAddress struct {
// Last time the address was seen.
Timestamp mstime.Time
// Bitfield which identifies the services supported by the address.
Services ServiceFlag
// IP address of the peer.
IP net.IP
// Port the peer is using. This is encoded in big endian on the appmessage
// which differs from most everything else.
Port uint16
}
// HasService returns whether the specified service is supported by the address.
func (na *NetAddress) HasService(service ServiceFlag) bool {
return na.Services&service == service
}
// AddService adds service as a supported service by the peer generating the
// message.
func (na *NetAddress) AddService(service ServiceFlag) {
na.Services |= service
}
// TCPAddress converts the NetAddress to *net.TCPAddr
func (na *NetAddress) TCPAddress() *net.TCPAddr {
return &net.TCPAddr{
IP: na.IP,
Port: int(na.Port),
}
}
// NewNetAddressIPPort returns a new NetAddress using the provided IP, port, and
// supported services with defaults for the remaining fields.
func NewNetAddressIPPort(ip net.IP, port uint16, services ServiceFlag) *NetAddress {
return NewNetAddressTimestamp(mstime.Now(), services, ip, port)
}
// NewNetAddressTimestamp returns a new NetAddress using the provided
// timestamp, IP, port, and supported services. The timestamp is rounded to
// single millisecond precision.
func NewNetAddressTimestamp(
timestamp mstime.Time, services ServiceFlag, ip net.IP, port uint16) *NetAddress {
// Limit the timestamp to one millisecond precision since the protocol
// doesn't support better.
na := NetAddress{
Timestamp: timestamp,
Services: services,
IP: ip,
Port: port,
}
return &na
}
// NewNetAddress returns a new NetAddress using the provided TCP address and
// supported services with defaults for the remaining fields.
func NewNetAddress(addr *net.TCPAddr, services ServiceFlag) *NetAddress {
return NewNetAddressIPPort(addr.IP, uint16(addr.Port), services)
}
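A brief usage sketch (not part of the changeset) showing the round trip through the standard library type, which the test below does not cover:

package main

import (
	"fmt"
	"net"

	"github.com/kaspanet/kaspad/app/appmessage"
)

func main() {
	tcpAddr := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 16111}

	// NewNetAddress stamps the address with the current time and the given services.
	netAddress := appmessage.NewNetAddress(tcpAddr, appmessage.SFNodeNetwork)

	// TCPAddress converts back to *net.TCPAddr, e.g. for dialing.
	fmt.Println(netAddress.TCPAddress().String()) // 127.0.0.1:16111
}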

View File

@@ -0,0 +1,45 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"net"
"testing"
)
// TestNetAddress tests the NetAddress API.
func TestNetAddress(t *testing.T) {
ip := net.ParseIP("127.0.0.1")
port := 16111
// Test NewNetAddress.
na := NewNetAddress(&net.TCPAddr{IP: ip, Port: port}, 0)
// Ensure we get the same ip, port, and services back out.
if !na.IP.Equal(ip) {
t.Errorf("NetNetAddress: wrong ip - got %v, want %v", na.IP, ip)
}
if na.Port != uint16(port) {
t.Errorf("NetNetAddress: wrong port - got %v, want %v", na.Port,
port)
}
if na.Services != 0 {
t.Errorf("NetNetAddress: wrong services - got %v, want %v",
na.Services, 0)
}
if na.HasService(SFNodeNetwork) {
t.Errorf("HasService: SFNodeNetwork service is set")
}
// Ensure adding the full service node flag works.
na.AddService(SFNodeNetwork)
if na.Services != SFNodeNetwork {
t.Errorf("AddService: wrong services - got %v, want %v",
na.Services, SFNodeNetwork)
}
if !na.HasService(SFNodeNetwork) {
t.Errorf("HasService: SFNodeNetwork service not set")
}
}

View File

@@ -2,7 +2,7 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
package appmessage
import (
"fmt"
@@ -10,10 +10,13 @@ import (
"strings"
)
// XXX pedro: we will probably need to bump this.
const (
// ProtocolVersion is the latest protocol version this package supports.
ProtocolVersion uint32 = 1
// DefaultServices describes the default services that are supported by
// the server.
DefaultServices = SFNodeNetwork | SFNodeBloom | SFNodeCF
)
// ServiceFlag identifies services supported by a kaspa peer.
@@ -103,9 +106,6 @@ const (
// Testnet represents the test network.
Testnet KaspaNet = 0xddb8af8f
// Regtest represents the regression test network.
Regtest KaspaNet = 0xf396cdd6
// Simnet represents the simulation test network.
Simnet KaspaNet = 0x374dcf1c
@@ -118,7 +118,6 @@ const (
var bnStrings = map[KaspaNet]string{
Mainnet: "Mainnet",
Testnet: "Testnet",
Regtest: "Regtest",
Simnet: "Simnet",
Devnet: "Devnet",
}

View File

@@ -2,7 +2,7 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
package appmessage
import "testing"
@@ -40,7 +40,6 @@ func TestKaspaNetStringer(t *testing.T) {
want string
}{
{Mainnet, "Mainnet"},
{Regtest, "Regtest"},
{Testnet, "Testnet"},
{Simnet, "Simnet"},
{0xffffffff, "Unknown KaspaNet (4294967295)"},

14
app/log.go Normal file
View File

@@ -0,0 +1,14 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Copyright (c) 2017 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package app
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log, _ = logger.Get(logger.SubsystemTags.KASD)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -0,0 +1,64 @@
// Copyright (c) 2015-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blocklogger
import (
"github.com/kaspanet/kaspad/util/mstime"
"sync"
"time"
"github.com/kaspanet/kaspad/util"
)
var (
receivedLogBlocks int64
receivedLogTx int64
lastBlockLogTime = mstime.Now()
mtx sync.Mutex
)
// LogBlock logs a new block blue score as an information message
// to show progress to the user. In order to prevent spam, it limits logging to
// one message every 10 seconds with duration and totals included.
func LogBlock(block *util.Block) error {
mtx.Lock()
defer mtx.Unlock()
receivedLogBlocks++
receivedLogTx += int64(len(block.MsgBlock().Transactions))
now := mstime.Now()
duration := now.Sub(lastBlockLogTime)
if duration < time.Second*10 {
return nil
}
// Round the duration to the nearest 10 milliseconds.
tDuration := duration.Round(10 * time.Millisecond)
// Log information about new block blue score.
blockStr := "blocks"
if receivedLogBlocks == 1 {
blockStr = "block"
}
txStr := "transactions"
if receivedLogTx == 1 {
txStr = "transaction"
}
blueScore, err := block.BlueScore()
if err != nil {
return err
}
log.Infof("Processed %d %s in the last %s (%d %s, blue score %d, %s)",
receivedLogBlocks, blockStr, tDuration, receivedLogTx,
txStr, blueScore, block.MsgBlock().Header.Timestamp)
receivedLogBlocks = 0
receivedLogTx = 0
lastBlockLogTime = now
return nil
}

View File

@@ -0,0 +1,11 @@
// Copyright (c) 2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blocklogger
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
)
var log, _ = logger.Get(logger.SubsystemTags.PROT)

View File

@@ -0,0 +1,14 @@
package common
import (
"time"
"github.com/pkg/errors"
)
// DefaultTimeout is the default duration to wait for enqueuing/dequeuing
// to/from routes.
const DefaultTimeout = 30 * time.Second
// ErrPeerWithSameIDExists signifies that a peer with the same ID already exists.
var ErrPeerWithSameIDExists = errors.New("ready peer with the same ID already exists")

View File

@@ -0,0 +1,10 @@
package flowcontext
import (
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
)
// AddressManager returns the address manager associated to the flow context.
func (f *FlowContext) AddressManager() *addressmanager.AddressManager {
return f.addressManager
}

View File

@@ -0,0 +1,74 @@
package flowcontext
import (
"sync/atomic"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/flows/blockrelay"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
)
// OnNewBlock updates the mempool after a new block arrival, and
// relays newly unorphaned transactions and possibly rebroadcasts
// manually added transactions when not in IBD.
func (f *FlowContext) OnNewBlock(block *util.Block) error {
transactionsAcceptedToMempool, err := f.txPool.HandleNewBlock(block)
if err != nil {
return err
}
return f.broadcastTransactionsAfterBlockAdded(block, transactionsAcceptedToMempool)
}
func (f *FlowContext) broadcastTransactionsAfterBlockAdded(block *util.Block, transactionsAcceptedToMempool []*util.Tx) error {
f.updateTransactionsToRebroadcast(block)
// Don't relay transactions when in IBD.
if atomic.LoadUint32(&f.isInIBD) != 0 {
return nil
}
var txIDsToRebroadcast []*daghash.TxID
if f.shouldRebroadcastTransactions() {
txIDsToRebroadcast = f.txIDsToRebroadcast()
}
txIDsToBroadcast := make([]*daghash.TxID, len(transactionsAcceptedToMempool)+len(txIDsToRebroadcast))
for i, tx := range transactionsAcceptedToMempool {
txIDsToBroadcast[i] = tx.ID()
}
offset := len(transactionsAcceptedToMempool)
for i, txID := range txIDsToRebroadcast {
txIDsToBroadcast[offset+i] = txID
}
if len(txIDsToBroadcast) == 0 {
return nil
}
if len(txIDsToBroadcast) > appmessage.MaxInvPerTxInvMsg {
txIDsToBroadcast = txIDsToBroadcast[:appmessage.MaxInvPerTxInvMsg]
}
inv := appmessage.NewMsgInvTransaction(txIDsToBroadcast)
return f.Broadcast(inv)
}
// SharedRequestedBlocks returns a *blockrelay.SharedRequestedBlocks for sharing
// data about requested blocks between different peers.
func (f *FlowContext) SharedRequestedBlocks() *blockrelay.SharedRequestedBlocks {
return f.sharedRequestedBlocks
}
// AddBlock adds the given block to the DAG and propagates it.
func (f *FlowContext) AddBlock(block *util.Block, flags blockdag.BehaviorFlags) error {
_, _, err := f.DAG().ProcessBlock(block, flags)
if err != nil {
return err
}
err = f.OnNewBlock(block)
if err != nil {
return err
}
return f.Broadcast(appmessage.NewMsgInvBlock(block.Hash()))
}

View File

@@ -0,0 +1,8 @@
package flowcontext
import "github.com/kaspanet/kaspad/infrastructure/config"
// Config returns an instance of *config.Config associated to the flow context.
func (f *FlowContext) Config() *config.Config {
return f.cfg
}

View File

@@ -0,0 +1,8 @@
package flowcontext
import "github.com/kaspanet/kaspad/domain/blockdag"
// DAG returns the DAG associated to the flow context.
func (f *FlowContext) DAG() *blockdag.BlockDAG {
return f.dag
}

View File

@@ -0,0 +1,31 @@
package flowcontext
import (
"errors"
"sync/atomic"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
)
// HandleError handles an error from a flow.
// It sends the error to errChan only if isStopping == 0, and increments isStopping.
//
// If the error is ErrRouteClosed - forwards it to errChan
// If the error is a ProtocolError - logs the error and forwards it to errChan
// Otherwise - panics
func (*FlowContext) HandleError(err error, flowName string, isStopping *uint32, errChan chan<- error) {
isErrRouteClosed := errors.Is(err, router.ErrRouteClosed)
if !isErrRouteClosed {
if protocolErr := &(protocolerrors.ProtocolError{}); !errors.As(err, &protocolErr) {
panic(err)
}
log.Errorf("error from %s: %+v", flowName, err)
}
if atomic.AddUint32(isStopping, 1) == 1 {
errChan <- err
}
}
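A hedged sketch of the calling pattern HandleError is built for (the runner below is hypothetical and uses a local interface rather than *FlowContext): flows that should stop together share one isStopping counter and one error channel, so only the first failure is reported and later ones are swallowed.

package flowrunner

// errorHandler stands in for *flowcontext.FlowContext in this sketch.
type errorHandler interface {
	HandleError(err error, flowName string, isStopping *uint32, errChan chan<- error)
}

// runFlows starts every flow in its own goroutine and returns the channel on
// which the first error (if any) will be delivered.
func runFlows(handler errorHandler, flows map[string]func() error) <-chan error {
	errChan := make(chan error, 1)
	isStopping := uint32(0)
	for name, flow := range flows {
		name, flow := name, flow
		go func() {
			if err := flow(); err != nil {
				// ErrRouteClosed is forwarded silently, ProtocolErrors are
				// logged and forwarded, and anything else panics inside HandleError.
				handler.HandleError(err, name, &isStopping, errChan)
			}
		}()
	}
	return errChan
}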

View File

@@ -0,0 +1,63 @@
package flowcontext
import (
"sync"
"time"
"github.com/kaspanet/kaspad/app/protocol/flows/blockrelay"
"github.com/kaspanet/kaspad/app/protocol/flows/relaytransactions"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/domain/mempool"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/connmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
)
// FlowContext holds state that is relevant to more than one flow or one peer, and allows communication between
// different flows that can be associated to different peers.
type FlowContext struct {
cfg *config.Config
netAdapter *netadapter.NetAdapter
txPool *mempool.TxPool
dag *blockdag.BlockDAG
addressManager *addressmanager.AddressManager
connectionManager *connmanager.ConnectionManager
transactionsToRebroadcastLock sync.Mutex
transactionsToRebroadcast map[daghash.TxID]*util.Tx
lastRebroadcastTime time.Time
sharedRequestedTransactions *relaytransactions.SharedRequestedTransactions
sharedRequestedBlocks *blockrelay.SharedRequestedBlocks
isInIBD uint32
startIBDMutex sync.Mutex
ibdPeer *peerpkg.Peer
peers map[id.ID]*peerpkg.Peer
peersMutex sync.RWMutex
}
// New returns a new instance of FlowContext.
func New(cfg *config.Config, dag *blockdag.BlockDAG, addressManager *addressmanager.AddressManager,
txPool *mempool.TxPool, netAdapter *netadapter.NetAdapter,
connectionManager *connmanager.ConnectionManager) *FlowContext {
return &FlowContext{
cfg: cfg,
netAdapter: netAdapter,
dag: dag,
addressManager: addressManager,
connectionManager: connectionManager,
txPool: txPool,
sharedRequestedTransactions: relaytransactions.NewSharedRequestedTransactions(),
sharedRequestedBlocks: blockrelay.NewSharedRequestedBlocks(),
peers: make(map[id.ID]*peerpkg.Peer),
transactionsToRebroadcast: make(map[daghash.TxID]*util.Tx),
}
}

View File

@@ -0,0 +1,89 @@
package flowcontext
import (
"sync/atomic"
"time"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain/blockdag"
)
// StartIBDIfRequired selects a peer and starts IBD against it
// if required
func (f *FlowContext) StartIBDIfRequired() {
f.startIBDMutex.Lock()
defer f.startIBDMutex.Unlock()
if f.IsInIBD() {
return
}
peer := f.selectPeerForIBD(f.dag)
if peer == nil {
spawn("StartIBDIfRequired-requestSelectedTipsIfRequired", f.requestSelectedTipsIfRequired)
return
}
atomic.StoreUint32(&f.isInIBD, 1)
f.ibdPeer = peer
spawn("StartIBDIfRequired-peer.StartIBD", peer.StartIBD)
}
// IsInIBD is true if IBD is currently running
func (f *FlowContext) IsInIBD() bool {
return atomic.LoadUint32(&f.isInIBD) != 0
}
// selectPeerForIBD returns the first peer whose selected tip
// hash is not in our DAG
func (f *FlowContext) selectPeerForIBD(dag *blockdag.BlockDAG) *peerpkg.Peer {
f.peersMutex.RLock()
defer f.peersMutex.RUnlock()
for _, peer := range f.peers {
peerSelectedTipHash := peer.SelectedTipHash()
if !dag.IsInDAG(peerSelectedTipHash) {
return peer
}
}
return nil
}
func (f *FlowContext) requestSelectedTipsIfRequired() {
if f.isDAGTimeCurrent() {
return
}
f.requestSelectedTips()
}
func (f *FlowContext) isDAGTimeCurrent() bool {
const minDurationToRequestSelectedTips = time.Minute
return f.dag.Now().Sub(f.dag.SelectedTipHeader().Timestamp) > minDurationToRequestSelectedTips
}
func (f *FlowContext) requestSelectedTips() {
f.peersMutex.RLock()
defer f.peersMutex.RUnlock()
for _, peer := range f.peers {
peer.RequestSelectedTipIfRequired()
}
}
// FinishIBD finishes the current IBD flow and starts a new one if required.
func (f *FlowContext) FinishIBD() {
f.ibdPeer = nil
atomic.StoreUint32(&f.isInIBD, 0)
f.StartIBDIfRequired()
}
// IBDPeer returns the currently active IBD peer.
// Returns nil if we aren't currently in IBD
func (f *FlowContext) IBDPeer() *peerpkg.Peer {
if !f.IsInIBD() {
return nil
}
return f.ibdPeer
}
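A hedged sketch (hypothetical in-package helper, not in this changeset) of the IBD lifecycle these methods imply: a flow that finishes syncing calls FinishIBD, which clears the in-IBD flag and immediately re-evaluates whether another peer should be synced from.

// onIBDDone is a hypothetical in-package helper illustrating the lifecycle.
func (f *FlowContext) onIBDDone() {
	// FinishIBD resets the in-IBD flag and calls StartIBDIfRequired again,
	// so a new IBD peer may be selected right away.
	f.FinishIBD()

	if f.IsInIBD() {
		// Another round of IBD has already started; IBDPeer reports against whom.
		_ = f.IBDPeer()
	}
}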

View File

@@ -0,0 +1,9 @@
package flowcontext
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log, _ = logger.Get(logger.SubsystemTags.PROT)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -0,0 +1,74 @@
package flowcontext
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/infrastructure/network/connmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/pkg/errors"
)
// NetAdapter returns the net adapter that is associated to the flow context.
func (f *FlowContext) NetAdapter() *netadapter.NetAdapter {
return f.netAdapter
}
// ConnectionManager returns the connection manager that is associated to the flow context.
func (f *FlowContext) ConnectionManager() *connmanager.ConnectionManager {
return f.connectionManager
}
// AddToPeers marks this peer as ready and adds it to the ready peers list.
func (f *FlowContext) AddToPeers(peer *peerpkg.Peer) error {
f.peersMutex.Lock()
defer f.peersMutex.Unlock()
if _, ok := f.peers[*peer.ID()]; ok {
return errors.Wrapf(common.ErrPeerWithSameIDExists, "peer with ID %s already exists", peer.ID())
}
f.peers[*peer.ID()] = peer
return nil
}
// RemoveFromPeers removes this peer from the peers list.
func (f *FlowContext) RemoveFromPeers(peer *peerpkg.Peer) {
f.peersMutex.Lock()
defer f.peersMutex.Unlock()
delete(f.peers, *peer.ID())
}
// readyPeerConnections returns the NetConnections of all the ready peers.
func (f *FlowContext) readyPeerConnections() []*netadapter.NetConnection {
f.peersMutex.RLock()
defer f.peersMutex.RUnlock()
peerConnections := make([]*netadapter.NetConnection, len(f.peers))
i := 0
for _, peer := range f.peers {
peerConnections[i] = peer.Connection()
i++
}
return peerConnections
}
// Broadcast broadcasts the given message to all the ready peers.
func (f *FlowContext) Broadcast(message appmessage.Message) error {
return f.netAdapter.Broadcast(f.readyPeerConnections(), message)
}
// Peers returns the currently active peers
func (f *FlowContext) Peers() []*peerpkg.Peer {
f.peersMutex.RLock()
defer f.peersMutex.RUnlock()
peers := make([]*peerpkg.Peer, len(f.peers))
i := 0
for _, peer := range f.peers {
peers[i] = peer
i++
}
return peers
}

View File

@@ -0,0 +1,70 @@
package flowcontext
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/flows/relaytransactions"
"github.com/kaspanet/kaspad/domain/mempool"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/pkg/errors"
"time"
)
// AddTransaction adds transaction to the mempool and propagates it.
func (f *FlowContext) AddTransaction(tx *util.Tx) error {
f.transactionsToRebroadcastLock.Lock()
defer f.transactionsToRebroadcastLock.Unlock()
transactionsAcceptedToMempool, err := f.txPool.ProcessTransaction(tx, false)
if err != nil {
return err
}
if len(transactionsAcceptedToMempool) > 1 {
return errors.New("got more than one accepted transactions when no orphans were allowed")
}
f.transactionsToRebroadcast[*tx.ID()] = tx
inv := appmessage.NewMsgInvTransaction([]*daghash.TxID{tx.ID()})
return f.Broadcast(inv)
}
func (f *FlowContext) updateTransactionsToRebroadcast(block *util.Block) {
f.transactionsToRebroadcastLock.Lock()
defer f.transactionsToRebroadcastLock.Unlock()
// Note: if the block is red, its transactions won't be rebroadcast
// anymore, even though they are not included in the UTXO set.
// This is probably OK, since red blocks are quite rare.
for _, tx := range block.Transactions() {
delete(f.transactionsToRebroadcast, *tx.ID())
}
}
func (f *FlowContext) shouldRebroadcastTransactions() bool {
const rebroadcastInterval = 30 * time.Second
return time.Since(f.lastRebroadcastTime) > rebroadcastInterval
}
func (f *FlowContext) txIDsToRebroadcast() []*daghash.TxID {
f.transactionsToRebroadcastLock.Lock()
defer f.transactionsToRebroadcastLock.Unlock()
txIDs := make([]*daghash.TxID, len(f.transactionsToRebroadcast))
i := 0
for _, tx := range f.transactionsToRebroadcast {
txIDs[i] = tx.ID()
i++
}
return txIDs
}
// SharedRequestedTransactions returns a *relaytransactions.SharedRequestedTransactions for sharing
// data about requested transactions between different peers.
func (f *FlowContext) SharedRequestedTransactions() *relaytransactions.SharedRequestedTransactions {
return f.sharedRequestedTransactions
}
// TxPool returns the transaction pool associated to the flow context.
func (f *FlowContext) TxPool() *mempool.TxPool {
return f.txPool
}

View File

@@ -0,0 +1,57 @@
package addressexchange
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// ReceiveAddressesContext is the interface for the context needed for the ReceiveAddresses flow.
type ReceiveAddressesContext interface {
Config() *config.Config
AddressManager() *addressmanager.AddressManager
}
// ReceiveAddresses asks a peer for more addresses if needed.
func ReceiveAddresses(context ReceiveAddressesContext, incomingRoute *router.Route, outgoingRoute *router.Route,
peer *peerpkg.Peer) error {
if !context.AddressManager().NeedMoreAddresses() {
return nil
}
subnetworkID := peer.SubnetworkID()
msgGetAddresses := appmessage.NewMsgRequestAddresses(false, subnetworkID)
err := outgoingRoute.Enqueue(msgGetAddresses)
if err != nil {
return err
}
message, err := incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return err
}
msgAddresses := message.(*appmessage.MsgAddresses)
if len(msgAddresses.AddrList) > addressmanager.GetAddressesMax {
return protocolerrors.Errorf(true, "address count exceeded %d", addressmanager.GetAddressesMax)
}
if msgAddresses.IncludeAllSubnetworks {
return protocolerrors.Errorf(true, "got unexpected "+
"IncludeAllSubnetworks=true in [%s] command", msgAddresses.Command())
}
if !msgAddresses.SubnetworkID.IsEqual(context.Config().SubnetworkID) && msgAddresses.SubnetworkID != nil {
return protocolerrors.Errorf(false, "only full nodes and %s subnetwork IDs "+
"are allowed in [%s] command, but got subnetwork ID %s",
context.Config().SubnetworkID, msgAddresses.Command(), msgAddresses.SubnetworkID)
}
sourceAddress := peer.Connection().NetAddress()
context.AddressManager().AddAddresses(msgAddresses.AddrList, sourceAddress, msgAddresses.SubnetworkID)
return nil
}

View File

@@ -0,0 +1,52 @@
package addressexchange
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"math/rand"
)
// SendAddressesContext is the interface for the context needed for the SendAddresses flow.
type SendAddressesContext interface {
AddressManager() *addressmanager.AddressManager
}
// SendAddresses sends addresses to a peer that requests it.
func SendAddresses(context SendAddressesContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
message, err := incomingRoute.Dequeue()
if err != nil {
return err
}
msgGetAddresses := message.(*appmessage.MsgRequestAddresses)
addresses := context.AddressManager().AddressCache(msgGetAddresses.IncludeAllSubnetworks,
msgGetAddresses.SubnetworkID)
msgAddresses := appmessage.NewMsgAddresses(msgGetAddresses.IncludeAllSubnetworks, msgGetAddresses.SubnetworkID)
err = msgAddresses.AddAddresses(shuffleAddresses(addresses)...)
if err != nil {
return err
}
return outgoingRoute.Enqueue(msgAddresses)
}
// shuffleAddresses randomizes the given addresses and truncates them if there are more than the maximum allowed in one message.
func shuffleAddresses(addresses []*appmessage.NetAddress) []*appmessage.NetAddress {
addressCount := len(addresses)
if addressCount < appmessage.MaxAddressesPerMsg {
return addresses
}
shuffleAddresses := make([]*appmessage.NetAddress, addressCount)
copy(shuffleAddresses, addresses)
rand.Shuffle(addressCount, func(i, j int) {
shuffleAddresses[i], shuffleAddresses[j] = shuffleAddresses[j], shuffleAddresses[i]
})
// Truncate it to the maximum size.
shuffleAddresses = shuffleAddresses[:appmessage.MaxAddressesPerMsg]
return shuffleAddresses
}

View File

@@ -0,0 +1,55 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)
// RelayBlockRequestsContext is the interface for the context needed for the HandleRelayBlockRequests flow.
type RelayBlockRequestsContext interface {
DAG() *blockdag.BlockDAG
}
// HandleRelayBlockRequests listens to appmessage.MsgRequestRelayBlocks messages and sends
// their corresponding blocks to the requesting peer.
func HandleRelayBlockRequests(context RelayBlockRequestsContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
for {
message, err := incomingRoute.Dequeue()
if err != nil {
return err
}
getRelayBlocksMessage := message.(*appmessage.MsgRequestRelayBlocks)
for _, hash := range getRelayBlocksMessage.Hashes {
// Fetch the block from the database.
block, err := context.DAG().BlockByHash(hash)
if blockdag.IsNotInDAGErr(err) {
return protocolerrors.Errorf(true, "block %s not found", hash)
} else if err != nil {
return errors.Wrapf(err, "unable to fetch requested block hash %s", hash)
}
msgBlock := block.MsgBlock()
// If we are a full node and the peer is a partial node, we must convert
// the block to a partial block.
nodeSubnetworkID := context.DAG().SubnetworkID()
peerSubnetworkID := peer.SubnetworkID()
isNodeFull := nodeSubnetworkID == nil
isPeerFull := peerSubnetworkID == nil
if isNodeFull && !isPeerFull {
msgBlock.ConvertToPartial(peerSubnetworkID)
}
err = outgoingRoute.Enqueue(msgBlock)
if err != nil {
return err
}
}
}
}

View File

@@ -0,0 +1,245 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/blocklogger"
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
mathUtil "github.com/kaspanet/kaspad/util/math"
"github.com/pkg/errors"
)
// RelayInvsContext is the interface for the context needed for the HandleRelayInvs flow.
type RelayInvsContext interface {
NetAdapter() *netadapter.NetAdapter
DAG() *blockdag.BlockDAG
OnNewBlock(block *util.Block) error
SharedRequestedBlocks() *SharedRequestedBlocks
StartIBDIfRequired()
IsInIBD() bool
Broadcast(message appmessage.Message) error
}
type handleRelayInvsFlow struct {
RelayInvsContext
incomingRoute, outgoingRoute *router.Route
peer *peerpkg.Peer
invsQueue []*appmessage.MsgInvRelayBlock
}
// HandleRelayInvs listens to appmessage.MsgInvRelayBlock messages, requests their corresponding blocks if they
// are missing, adds them to the DAG and propagates them to the rest of the network.
func HandleRelayInvs(context RelayInvsContext, incomingRoute *router.Route, outgoingRoute *router.Route,
peer *peerpkg.Peer) error {
flow := &handleRelayInvsFlow{
RelayInvsContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
invsQueue: make([]*appmessage.MsgInvRelayBlock, 0),
}
return flow.start()
}
func (flow *handleRelayInvsFlow) start() error {
for {
inv, err := flow.readInv()
if err != nil {
return err
}
log.Debugf("Got relay inv for block %s", inv.Hash)
if flow.DAG().IsKnownBlock(inv.Hash) {
if flow.DAG().IsKnownInvalid(inv.Hash) {
return protocolerrors.Errorf(true, "sent inv of an invalid block %s",
inv.Hash)
}
continue
}
flow.StartIBDIfRequired()
if flow.IsInIBD() {
// Block relay is disabled during IBD
continue
}
requestQueue := newHashesQueueSet()
requestQueue.enqueueIfNotExists(inv.Hash)
for requestQueue.len() > 0 {
err := flow.requestBlocks(requestQueue)
if err != nil {
return err
}
}
}
}
func (flow *handleRelayInvsFlow) readInv() (*appmessage.MsgInvRelayBlock, error) {
if len(flow.invsQueue) > 0 {
var inv *appmessage.MsgInvRelayBlock
inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:]
return inv, nil
}
msg, err := flow.incomingRoute.Dequeue()
if err != nil {
return nil, err
}
inv, ok := msg.(*appmessage.MsgInvRelayBlock)
if !ok {
return nil, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+
"expecting an inv message", msg.Command())
}
return inv, nil
}
func (flow *handleRelayInvsFlow) requestBlocks(requestQueue *hashesQueueSet) error {
numHashesToRequest := mathUtil.MinInt(appmessage.MsgRequestRelayBlocksHashes, requestQueue.len())
hashesToRequest := requestQueue.dequeue(numHashesToRequest)
pendingBlocks := map[daghash.Hash]struct{}{}
var filteredHashesToRequest []*daghash.Hash
for _, hash := range hashesToRequest {
exists := flow.SharedRequestedBlocks().addIfNotExists(hash)
if exists {
continue
}
// The block can become known from another peer in the process of orphan resolution
if flow.DAG().IsKnownBlock(hash) {
continue
}
pendingBlocks[*hash] = struct{}{}
filteredHashesToRequest = append(filteredHashesToRequest, hash)
}
// Exit early if we've filtered out all the hashes
if len(filteredHashesToRequest) == 0 {
return nil
}
// In case the function returns earlier than expected, we want to make sure requestedBlocks is
// clean from any pending blocks.
defer flow.SharedRequestedBlocks().removeSet(pendingBlocks)
getRelayBlocksMsg := appmessage.NewMsgRequestRelayBlocks(filteredHashesToRequest)
err := flow.outgoingRoute.Enqueue(getRelayBlocksMsg)
if err != nil {
return err
}
for len(pendingBlocks) > 0 {
msgBlock, err := flow.readMsgBlock()
if err != nil {
return err
}
block := util.NewBlock(msgBlock)
blockHash := block.Hash()
if _, ok := pendingBlocks[*blockHash]; !ok {
return protocolerrors.Errorf(true, "got unrequested block %s", block.Hash())
}
err = flow.processAndRelayBlock(requestQueue, block)
if err != nil {
return err
}
delete(pendingBlocks, *blockHash)
flow.SharedRequestedBlocks().remove(blockHash)
}
return nil
}
// readMsgBlock returns the next MsgBlock dequeued from incomingRoute, and populates invsQueue with any inv messages that arrive in the meantime.
//
// Note: this function assumes incomingRoute can contain only appmessage.MsgInvRelayBlock and appmessage.MsgBlock messages.
func (flow *handleRelayInvsFlow) readMsgBlock() (
msgBlock *appmessage.MsgBlock, err error) {
for {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, err
}
switch message := message.(type) {
case *appmessage.MsgInvRelayBlock:
flow.invsQueue = append(flow.invsQueue, message)
case *appmessage.MsgBlock:
return message, nil
default:
return nil, errors.Errorf("unexpected message %s", message.Command())
}
}
}
func (flow *handleRelayInvsFlow) processAndRelayBlock(requestQueue *hashesQueueSet, block *util.Block) error {
blockHash := block.Hash()
isOrphan, isDelayed, err := flow.DAG().ProcessBlock(block, blockdag.BFNone)
if err != nil {
if !errors.As(err, &blockdag.RuleError{}) {
return errors.Wrapf(err, "failed to process block %s", blockHash)
}
log.Infof("Rejected block %s from %s: %s", blockHash, flow.peer, err)
return protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
}
if isDelayed {
return nil
}
if isOrphan {
blueScore, err := block.BlueScore()
if err != nil {
return protocolerrors.Errorf(true, "received an orphan "+
"block %s with malformed blue score", blockHash)
}
const maxOrphanBlueScoreDiff = 10000
selectedTipBlueScore := flow.DAG().SelectedTipBlueScore()
if blueScore > selectedTipBlueScore+maxOrphanBlueScoreDiff {
log.Infof("Orphan block %s has blue score %d and the selected tip blue score is "+
"%d. Ignoring orphans with a blue score difference from the selected tip greater than %d",
blockHash, blueScore, selectedTipBlueScore, maxOrphanBlueScoreDiff)
return nil
}
// Request the parents for the orphan block from the peer that sent it.
missingAncestors := flow.DAG().GetOrphanMissingAncestorHashes(blockHash)
for _, missingAncestor := range missingAncestors {
requestQueue.enqueueIfNotExists(missingAncestor)
}
return nil
}
err = blocklogger.LogBlock(block)
if err != nil {
return err
}
err = flow.Broadcast(appmessage.NewMsgInvBlock(blockHash))
if err != nil {
return err
}
flow.StartIBDIfRequired()
err = flow.OnNewBlock(block)
if err != nil {
return err
}
return nil
}

View File

@@ -0,0 +1,35 @@
package blockrelay
import "github.com/kaspanet/kaspad/util/daghash"
type hashesQueueSet struct {
queue []*daghash.Hash
set map[daghash.Hash]struct{}
}
func (r *hashesQueueSet) enqueueIfNotExists(hash *daghash.Hash) {
if _, ok := r.set[*hash]; ok {
return
}
r.queue = append(r.queue, hash)
r.set[*hash] = struct{}{}
}
func (r *hashesQueueSet) dequeue(numItems int) []*daghash.Hash {
var hashes []*daghash.Hash
hashes, r.queue = r.queue[:numItems], r.queue[numItems:]
for _, hash := range hashes {
delete(r.set, *hash)
}
return hashes
}
func (r *hashesQueueSet) len() int {
return len(r.queue)
}
func newHashesQueueSet() *hashesQueueSet {
return &hashesQueueSet{
set: make(map[daghash.Hash]struct{}),
}
}
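A short in-package sketch (hypothetical, mirroring how HandleRelayInvs drives the type above): enqueueIfNotExists deduplicates, and dequeue removes hashes from both the queue and the set so they may be queued again later.

// exampleQueueSetUsage is a hypothetical snippet inside package blockrelay.
func exampleQueueSetUsage() {
	queue := newHashesQueueSet()
	hash := &daghash.Hash{1, 2, 3}

	queue.enqueueIfNotExists(hash)
	queue.enqueueIfNotExists(hash) // no-op: the hash is already queued

	// len() is 1, so this dequeues the single hash and clears it from the set.
	dequeued := queue.dequeue(queue.len())
	_ = dequeued
}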

View File

@@ -0,0 +1,9 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log, _ = logger.Get(logger.SubsystemTags.PROT)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -0,0 +1,46 @@
package blockrelay
import (
"sync"
"github.com/kaspanet/kaspad/util/daghash"
)
// SharedRequestedBlocks is a data structure that is shared between peer flows and
// holds the hashes of all requested blocks, to prevent redundant requests.
type SharedRequestedBlocks struct {
blocks map[daghash.Hash]struct{}
sync.Mutex
}
func (s *SharedRequestedBlocks) remove(hash *daghash.Hash) {
s.Lock()
defer s.Unlock()
delete(s.blocks, *hash)
}
func (s *SharedRequestedBlocks) removeSet(blockHashes map[daghash.Hash]struct{}) {
s.Lock()
defer s.Unlock()
for hash := range blockHashes {
delete(s.blocks, hash)
}
}
func (s *SharedRequestedBlocks) addIfNotExists(hash *daghash.Hash) (exists bool) {
s.Lock()
defer s.Unlock()
_, ok := s.blocks[*hash]
if ok {
return true
}
s.blocks[*hash] = struct{}{}
return false
}
// NewSharedRequestedBlocks returns a new instance of SharedRequestedBlocks.
func NewSharedRequestedBlocks() *SharedRequestedBlocks {
return &SharedRequestedBlocks{
blocks: make(map[daghash.Hash]struct{}),
}
}
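A hedged usage sketch (hypothetical in-package helper, mirroring the pattern requestBlocks uses above): mark hashes before requesting them and have the caller defer removeSet, so that an early return never leaves a block permanently marked as requested.

// markForRequest is a hypothetical helper illustrating the intended pattern.
func markForRequest(shared *SharedRequestedBlocks, hashes []*daghash.Hash) (pending map[daghash.Hash]struct{}) {
	pending = make(map[daghash.Hash]struct{})
	for _, hash := range hashes {
		if exists := shared.addIfNotExists(hash); exists {
			continue // another peer flow is already requesting this block
		}
		pending[*hash] = struct{}{}
	}
	// The caller is expected to `defer shared.removeSet(pending)` so the
	// hashes become requestable again on any exit path.
	return pending
}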

View File

@@ -0,0 +1,116 @@
package handshake
import (
"sync"
"sync/atomic"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
routerpkg "github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util/locks"
"github.com/pkg/errors"
)
// HandleHandshakeContext is the interface for the context needed for the HandleHandshake flow.
type HandleHandshakeContext interface {
Config() *config.Config
NetAdapter() *netadapter.NetAdapter
DAG() *blockdag.BlockDAG
AddressManager() *addressmanager.AddressManager
StartIBDIfRequired()
AddToPeers(peer *peerpkg.Peer) error
HandleError(err error, flowName string, isStopping *uint32, errChan chan<- error)
}
// HandleHandshake sets up the handshake protocol: it sends a version message and waits for both an incoming
// version message and a verack for the version it sent.
func HandleHandshake(context HandleHandshakeContext, netConnection *netadapter.NetConnection,
receiveVersionRoute *routerpkg.Route, sendVersionRoute *routerpkg.Route, outgoingRoute *routerpkg.Route,
) (*peerpkg.Peer, error) {
// For HandleHandshake to finish, we need to receive both version and verack
// messages from the other node, so we increase the wait group by 2
// and block HandleHandshake with wg.Wait().
wg := sync.WaitGroup{}
wg.Add(2)
isStopping := uint32(0)
errChan := make(chan error)
peer := peerpkg.New(netConnection)
var peerAddress *appmessage.NetAddress
spawn("HandleHandshake-ReceiveVersion", func() {
address, err := ReceiveVersion(context, receiveVersionRoute, outgoingRoute, peer)
if err != nil {
handleError(err, "ReceiveVersion", &isStopping, errChan)
return
}
peerAddress = address
wg.Done()
})
spawn("HandleHandshake-SendVersion", func() {
err := SendVersion(context, sendVersionRoute, outgoingRoute, peer)
if err != nil {
handleError(err, "SendVersion", &isStopping, errChan)
return
}
wg.Done()
})
select {
case err := <-errChan:
if err != nil {
return nil, err
}
return nil, nil
case <-locks.ReceiveFromChanWhenDone(func() { wg.Wait() }):
}
err := context.AddToPeers(peer)
if err != nil {
if errors.As(err, &common.ErrPeerWithSameIDExists) {
return nil, protocolerrors.Wrap(false, err, "peer already exists")
}
return nil, err
}
if peerAddress != nil {
subnetworkID := peer.SubnetworkID()
context.AddressManager().AddAddress(peerAddress, peerAddress, subnetworkID)
context.AddressManager().Good(peerAddress, subnetworkID)
}
context.StartIBDIfRequired()
return peer, nil
}
// Handshake is different from other flows, since it should forward router.ErrRouteClosed to errChan.
// Therefore we implement a separate handleError for handshake.
func handleError(err error, flowName string, isStopping *uint32, errChan chan error) {
if errors.Is(err, routerpkg.ErrRouteClosed) {
if atomic.AddUint32(isStopping, 1) == 1 {
errChan <- err
}
return
}
if protocolErr := &(protocolerrors.ProtocolError{}); errors.As(err, &protocolErr) {
log.Errorf("Handshake protocol error from %s: %s", flowName, err)
if atomic.AddUint32(isStopping, 1) == 1 {
errChan <- err
}
return
}
panic(err)
}

View File

@@ -0,0 +1,9 @@
package handshake
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log, _ = logger.Get(logger.SubsystemTags.PROT)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -0,0 +1,101 @@
package handshake
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
var (
// allowSelfConnections is only used to allow the tests to bypass the self-connection
// detection and disconnect logic, since they intentionally connect to themselves
// for testing purposes.
allowSelfConnections bool
// minAcceptableProtocolVersion is the lowest protocol version that a
// connected peer may support.
minAcceptableProtocolVersion = appmessage.ProtocolVersion
)
type receiveVersionFlow struct {
HandleHandshakeContext
incomingRoute, outgoingRoute *router.Route
peer *peerpkg.Peer
}
// ReceiveVersion waits for the peer to send a version message, sends a
// verack in response, and updates its info accordingly.
func ReceiveVersion(context HandleHandshakeContext, incomingRoute *router.Route, outgoingRoute *router.Route,
peer *peerpkg.Peer) (*appmessage.NetAddress, error) {
flow := &receiveVersionFlow{
HandleHandshakeContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
}
return flow.start()
}
func (flow *receiveVersionFlow) start() (*appmessage.NetAddress, error) {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, err
}
msgVersion, ok := message.(*appmessage.MsgVersion)
if !ok {
return nil, protocolerrors.New(true, "a version message must precede all others")
}
if !allowSelfConnections && flow.NetAdapter().ID().IsEqual(msgVersion.ID) {
return nil, protocolerrors.New(true, "connected to self")
}
// Disconnect and ban peers from a different network
if msgVersion.Network != flow.Config().ActiveNetParams.Name {
return nil, protocolerrors.Errorf(true, "wrong network")
}
// Notify and disconnect clients that have a protocol version that is
// too old.
//
// NOTE: If minAcceptableProtocolVersion is raised to be higher than
// appmessage.RejectVersion, this should send a reject packet before
// disconnecting.
if msgVersion.ProtocolVersion < minAcceptableProtocolVersion {
return nil, protocolerrors.Errorf(false, "protocol version must be %d or greater",
minAcceptableProtocolVersion)
}
// Disconnect from partial nodes in networks that don't allow them
if !flow.DAG().Params.EnableNonNativeSubnetworks && msgVersion.SubnetworkID != nil {
return nil, protocolerrors.New(true, "partial nodes are not allowed")
}
// Disconnect if:
// - we are a full node and the outbound connection we've initiated is a partial node
// - the remote node is partial and our subnetwork doesn't match their subnetwork
localSubnetworkID := flow.Config().SubnetworkID
isLocalNodeFull := localSubnetworkID == nil
isRemoteNodeFull := msgVersion.SubnetworkID == nil
isOutbound := flow.peer.Connection().IsOutbound()
if (isLocalNodeFull && !isRemoteNodeFull && isOutbound) ||
(!isLocalNodeFull && !isRemoteNodeFull && !msgVersion.SubnetworkID.IsEqual(localSubnetworkID)) {
return nil, protocolerrors.New(false, "incompatible subnetworks")
}
flow.peer.UpdateFieldsFromMsgVersion(msgVersion)
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgVerAck())
if err != nil {
return nil, err
}
flow.peer.Connection().SetID(msgVersion.ID)
return msgVersion.Address, nil
}
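
The subnetwork disconnect condition above reduces to a small predicate over three booleans plus an ID comparison. The following standalone sketch restates that predicate so the two reject cases are easier to read; the string stand-in for the subnetwork ID and the function name are inventions for the example, not kaspad types.

package main

import "fmt"

// shouldRejectSubnetworks mirrors the check in receiveVersionFlow.start:
// reject when a full node initiated an outbound connection to a partial node,
// or when two partial nodes are on different subnetworks.
func shouldRejectSubnetworks(localSubnetworkID, remoteSubnetworkID *string, isOutbound bool) bool {
	isLocalNodeFull := localSubnetworkID == nil
	isRemoteNodeFull := remoteSubnetworkID == nil
	switch {
	case isLocalNodeFull && !isRemoteNodeFull && isOutbound:
		return true
	case !isLocalNodeFull && !isRemoteNodeFull && *localSubnetworkID != *remoteSubnetworkID:
		return true
	default:
		return false
	}
}

func main() {
	a, b := "subnetwork-a", "subnetwork-b"
	fmt.Println(shouldRejectSubnetworks(nil, &a, true))  // full -> partial, outbound: reject
	fmt.Println(shouldRejectSubnetworks(&a, &b, false))  // two partials, different subnetworks: reject
	fmt.Println(shouldRejectSubnetworks(&a, &a, false))  // same subnetwork: allow
	fmt.Println(shouldRejectSubnetworks(nil, nil, true)) // full vs. full: allow
}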

View File

@@ -0,0 +1,78 @@
package handshake
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/version"
)
var (
// userAgentName is the user agent name and is used to help identify
// ourselves to other kaspa peers.
userAgentName = "kaspad"
// userAgentVersion is the user agent version and is used to help
// identify ourselves to other kaspa peers.
userAgentVersion = version.Version()
// defaultServices describes the default services that are supported by
// the server.
defaultServices = appmessage.DefaultServices
// defaultRequiredServices describes the default services that are
// required to be supported by outbound peers.
defaultRequiredServices = appmessage.SFNodeNetwork
)
type sendVersionFlow struct {
HandleHandshakeContext
incomingRoute, outgoingRoute *router.Route
peer *peerpkg.Peer
}
// SendVersion sends a version to a peer and waits for verack.
func SendVersion(context HandleHandshakeContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
flow := &sendVersionFlow{
HandleHandshakeContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
}
return flow.start()
}
func (flow *sendVersionFlow) start() error {
selectedTipHash := flow.DAG().SelectedTipHash()
subnetworkID := flow.Config().SubnetworkID
// Version message.
localAddress := flow.AddressManager().GetBestLocalAddress(flow.peer.Connection().NetAddress())
msg := appmessage.NewMsgVersion(localAddress, flow.NetAdapter().ID(),
flow.Config().ActiveNetParams.Name, selectedTipHash, subnetworkID)
msg.AddUserAgent(userAgentName, userAgentVersion, flow.Config().UserAgentComments...)
// Advertise the services flag
msg.Services = defaultServices
// Advertise our max supported protocol version.
msg.ProtocolVersion = appmessage.ProtocolVersion
// Advertise if inv messages for transactions are desired.
msg.DisableRelayTx = flow.Config().BlocksOnly
err := flow.outgoingRoute.Enqueue(msg)
if err != nil {
return err
}
// Wait for verack
_, err = flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return err
}
return nil
}
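
SendVersion enqueues the version message and then blocks on DequeueWithTimeout until a reply (the verack) arrives or the timeout fires. A minimal stand-in for that send-then-wait-with-deadline pattern using plain channels follows; dequeueWithTimeout and the simulated remote peer are assumptions made for this sketch, not the router API.

package main

import (
	"errors"
	"fmt"
	"time"
)

var errTimeout = errors.New("timed out waiting for message")

// dequeueWithTimeout waits for one message on route or gives up after timeout,
// roughly the contract that router.Route.DequeueWithTimeout provides.
func dequeueWithTimeout(route <-chan string, timeout time.Duration) (string, error) {
	select {
	case msg := <-route:
		return msg, nil
	case <-time.After(timeout):
		return "", errTimeout
	}
}

func main() {
	incoming := make(chan string, 1)
	outgoing := make(chan string, 1)

	outgoing <- "version" // enqueue our version message

	// Simulated remote peer: reply with a verack shortly after.
	go func() {
		<-outgoing
		time.Sleep(10 * time.Millisecond)
		incoming <- "verack"
	}()

	msg, err := dequeueWithTimeout(incoming, time.Second)
	if err != nil {
		fmt.Println("handshake failed:", err)
		return
	}
	fmt.Println("received:", msg)
}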

View File

@@ -0,0 +1,72 @@
package ibd
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util/daghash"
)
// RequestBlockLocatorContext is the interface for the context needed for the HandleRequestBlockLocator flow.
type RequestBlockLocatorContext interface {
DAG() *blockdag.BlockDAG
}
type handleRequestBlockLocatorFlow struct {
RequestBlockLocatorContext
incomingRoute, outgoingRoute *router.Route
}
// HandleRequestBlockLocator handles getBlockLocator messages
func HandleRequestBlockLocator(context RequestBlockLocatorContext, incomingRoute *router.Route,
outgoingRoute *router.Route) error {
flow := &handleRequestBlockLocatorFlow{
RequestBlockLocatorContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *handleRequestBlockLocatorFlow) start() error {
for {
lowHash, highHash, err := flow.receiveGetBlockLocator()
if err != nil {
return err
}
locator, err := flow.DAG().BlockLocatorFromHashes(highHash, lowHash)
if err != nil || len(locator) == 0 {
return protocolerrors.Errorf(true, "couldn't build a block "+
"locator between blocks %s and %s", lowHash, highHash)
}
err = flow.sendBlockLocator(locator)
if err != nil {
return err
}
}
}
func (flow *handleRequestBlockLocatorFlow) receiveGetBlockLocator() (lowHash *daghash.Hash,
highHash *daghash.Hash, err error) {
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return nil, nil, err
}
msgGetBlockLocator := message.(*appmessage.MsgRequestBlockLocator)
return msgGetBlockLocator.LowHash, msgGetBlockLocator.HighHash, nil
}
func (flow *handleRequestBlockLocatorFlow) sendBlockLocator(locator blockdag.BlockLocator) error {
msgBlockLocator := appmessage.NewMsgBlockLocator(locator)
err := flow.outgoingRoute.Enqueue(msgBlockLocator)
if err != nil {
return err
}
return nil
}
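
A block locator is typically a list of hashes that starts at the high block and steps back toward the low block with exponentially growing gaps, so the responder can locate a shared ancestor from logarithmically many hashes. The toy sketch below illustrates that spacing over integer "heights" instead of real hashes; it is purely illustrative and not how BlockLocatorFromHashes walks the DAG internally.

package main

import "fmt"

// buildLocator returns heights from high down toward low with
// exponentially increasing steps, always ending at low.
func buildLocator(low, high int) []int {
	locator := []int{}
	step := 1
	for current := high; current > low; current -= step {
		locator = append(locator, current)
		step *= 2
	}
	return append(locator, low)
}

func main() {
	// Recent heights are sampled densely, older heights sparsely.
	fmt.Println(buildLocator(0, 100)) // [100 99 97 93 85 69 37 0]
}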

View File

@@ -0,0 +1,126 @@
package ibd
import (
"errors"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util/daghash"
)
const ibdBatchSize = router.DefaultMaxMessages
// RequestIBDBlocksContext is the interface for the context needed for the HandleRequestIBDBlocks flow.
type RequestIBDBlocksContext interface {
DAG() *blockdag.BlockDAG
}
type handleRequestBlocksFlow struct {
RequestIBDBlocksContext
incomingRoute, outgoingRoute *router.Route
}
// HandleRequestIBDBlocks handles getBlocks messages
func HandleRequestIBDBlocks(context RequestIBDBlocksContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
flow := &handleRequestBlocksFlow{
RequestIBDBlocksContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *handleRequestBlocksFlow) start() error {
for {
lowHash, highHash, err := receiveRequestIBDBlocks(flow.incomingRoute)
if err != nil {
return err
}
msgIBDBlocks, err := flow.buildMsgIBDBlocks(lowHash, highHash)
if err != nil {
return err
}
for offset := 0; offset < len(msgIBDBlocks); offset += ibdBatchSize {
end := offset + ibdBatchSize
if end > len(msgIBDBlocks) {
end = len(msgIBDBlocks)
}
blocksToSend := msgIBDBlocks[offset:end]
err = flow.sendMsgIBDBlocks(blocksToSend)
if err != nil {
return err
}
// Exit the loop and don't wait for the GetNextIBDBlocks message if the last batch was
// less than ibdBatchSize.
if len(blocksToSend) < ibdBatchSize {
break
}
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return err
}
if _, ok := message.(*appmessage.MsgRequestNextIBDBlocks); !ok {
return protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestNextIBDBlocks, message.Command())
}
}
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgDoneIBDBlocks())
if err != nil {
return err
}
}
}
func receiveRequestIBDBlocks(incomingRoute *router.Route) (lowHash *daghash.Hash,
highHash *daghash.Hash, err error) {
message, err := incomingRoute.Dequeue()
if err != nil {
return nil, nil, err
}
msgRequestIBDBlocks := message.(*appmessage.MsgRequestIBDBlocks)
return msgRequestIBDBlocks.LowHash, msgRequestIBDBlocks.HighHash, nil
}
func (flow *handleRequestBlocksFlow) buildMsgIBDBlocks(lowHash *daghash.Hash,
highHash *daghash.Hash) ([]*appmessage.MsgIBDBlock, error) {
const maxHashesInMsgIBDBlocks = appmessage.MaxInvPerMsg
blockHashes, err := flow.DAG().AntiPastHashesBetween(lowHash, highHash, maxHashesInMsgIBDBlocks)
if err != nil {
if errors.Is(err, blockdag.ErrInvalidParameter) {
return nil, protocolerrors.Wrapf(true, err, "could not get antiPast between "+
"%s and %s", lowHash, highHash)
}
return nil, err
}
msgIBDBlocks := make([]*appmessage.MsgIBDBlock, len(blockHashes))
for i, blockHash := range blockHashes {
block, err := flow.DAG().BlockByHash(blockHash)
if err != nil {
return nil, err
}
msgIBDBlocks[i] = appmessage.NewMsgIBDBlock(block.MsgBlock())
}
return msgIBDBlocks, nil
}
func (flow *handleRequestBlocksFlow) sendMsgIBDBlocks(msgIBDBlocks []*appmessage.MsgIBDBlock) error {
for _, msgIBDBlock := range msgIBDBlocks {
err := flow.outgoingRoute.Enqueue(msgIBDBlock)
if err != nil {
return err
}
}
return nil
}
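
The send loop above slices msgIBDBlocks into batches of ibdBatchSize and waits for a MsgRequestNextIBDBlocks between full batches. The chunking arithmetic in isolation looks like the sketch below; the string payloads and the chunk helper are placeholders for this example only.

package main

import "fmt"

// chunk splits items into consecutive batches of at most batchSize,
// the same offset/end arithmetic used when sending the IBD blocks.
func chunk(items []string, batchSize int) [][]string {
	var batches [][]string
	for offset := 0; offset < len(items); offset += batchSize {
		end := offset + batchSize
		if end > len(items) {
			end = len(items)
		}
		batches = append(batches, items[offset:end])
	}
	return batches
}

func main() {
	blocks := []string{"b1", "b2", "b3", "b4", "b5", "b6", "b7"}
	for i, batch := range chunk(blocks, 3) {
		fmt.Printf("batch %d: %v\n", i, batch)
		// In the real flow, a batch of exactly batchSize is followed by
		// waiting for the next-batch request before sending more.
	}
}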

View File

@@ -0,0 +1,212 @@
package ibd
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/blocklogger"
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/pkg/errors"
)
// HandleIBDContext is the interface for the context needed for the HandleIBD flow.
type HandleIBDContext interface {
DAG() *blockdag.BlockDAG
OnNewBlock(block *util.Block) error
StartIBDIfRequired()
FinishIBD()
}
type handleIBDFlow struct {
HandleIBDContext
incomingRoute, outgoingRoute *router.Route
peer *peerpkg.Peer
}
// HandleIBD waits for IBD start and handles it when IBD is triggered for this peer
func HandleIBD(context HandleIBDContext, incomingRoute *router.Route, outgoingRoute *router.Route,
peer *peerpkg.Peer) error {
flow := &handleIBDFlow{
HandleIBDContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
}
return flow.start()
}
func (flow *handleIBDFlow) start() error {
for {
err := flow.runIBD()
if err != nil {
return err
}
}
}
func (flow *handleIBDFlow) runIBD() error {
flow.peer.WaitForIBDStart()
defer flow.FinishIBD()
peerSelectedTipHash := flow.peer.SelectedTipHash()
log.Debugf("Trying to find highest shared chain block with peer %s with selected tip %s", flow.peer, peerSelectedTipHash)
highestSharedBlockHash, err := flow.findHighestSharedBlockHash(peerSelectedTipHash)
if err != nil {
return err
}
log.Debugf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)
if flow.DAG().IsKnownFinalizedBlock(highestSharedBlockHash) {
return protocolerrors.Errorf(false, "cannot initiate "+
"IBD with peer %s because the highest shared chain block (%s) is "+
"below the finality point", flow.peer, highestSharedBlockHash)
}
return flow.downloadBlocks(highestSharedBlockHash, peerSelectedTipHash)
}
func (flow *handleIBDFlow) findHighestSharedBlockHash(peerSelectedTipHash *daghash.Hash) (lowHash *daghash.Hash,
err error) {
lowHash = flow.DAG().Params.GenesisHash
highHash := peerSelectedTipHash
for {
err := flow.sendGetBlockLocator(lowHash, highHash)
if err != nil {
return nil, err
}
blockLocatorHashes, err := flow.receiveBlockLocator()
if err != nil {
return nil, err
}
// We check whether the locator's highest hash is in the local DAG.
// If it is, return it. If it isn't, we need to narrow our
// getBlockLocator request and try again.
locatorHighHash := blockLocatorHashes[0]
if flow.DAG().IsInDAG(locatorHighHash) {
return locatorHighHash, nil
}
highHash, lowHash = flow.DAG().FindNextLocatorBoundaries(blockLocatorHashes)
}
}
func (flow *handleIBDFlow) sendGetBlockLocator(lowHash *daghash.Hash, highHash *daghash.Hash) error {
msgGetBlockLocator := appmessage.NewMsgRequestBlockLocator(highHash, lowHash)
return flow.outgoingRoute.Enqueue(msgGetBlockLocator)
}
func (flow *handleIBDFlow) receiveBlockLocator() (blockLocatorHashes []*daghash.Hash, err error) {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, err
}
msgBlockLocator, ok := message.(*appmessage.MsgBlockLocator)
if !ok {
return nil,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdBlockLocator, message.Command())
}
return msgBlockLocator.BlockLocatorHashes, nil
}
func (flow *handleIBDFlow) downloadBlocks(highestSharedBlockHash *daghash.Hash,
peerSelectedTipHash *daghash.Hash) error {
err := flow.sendGetBlocks(highestSharedBlockHash, peerSelectedTipHash)
if err != nil {
return err
}
blocksReceived := 0
for {
msgIBDBlock, doneIBD, err := flow.receiveIBDBlock()
if err != nil {
return err
}
if doneIBD {
return nil
}
err = flow.processIBDBlock(msgIBDBlock)
if err != nil {
return err
}
blocksReceived++
if blocksReceived%ibdBatchSize == 0 {
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextIBDBlocks())
if err != nil {
return err
}
}
}
}
func (flow *handleIBDFlow) sendGetBlocks(highestSharedBlockHash *daghash.Hash,
peerSelectedTipHash *daghash.Hash) error {
msgGetBlockInvs := appmessage.NewMsgRequstIBDBlocks(highestSharedBlockHash, peerSelectedTipHash)
return flow.outgoingRoute.Enqueue(msgGetBlockInvs)
}
func (flow *handleIBDFlow) receiveIBDBlock() (msgIBDBlock *appmessage.MsgIBDBlock, doneIBD bool, err error) {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, false, err
}
switch message := message.(type) {
case *appmessage.MsgIBDBlock:
return message, false, nil
case *appmessage.MsgDoneIBDBlocks:
return nil, true, nil
default:
return nil, false,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdIBDBlock, message.Command())
}
}
func (flow *handleIBDFlow) processIBDBlock(msgIBDBlock *appmessage.MsgIBDBlock) error {
block := util.NewBlock(msgIBDBlock.MsgBlock)
if flow.DAG().IsInDAG(block.Hash()) {
log.Debugf("IBD block %s is already in the DAG. Skipping...", block.Hash())
return nil
}
isOrphan, isDelayed, err := flow.DAG().ProcessBlock(block, blockdag.BFNone)
if err != nil {
if !errors.As(err, &blockdag.RuleError{}) {
return errors.Wrapf(err, "failed to process block %s during IBD", block.Hash())
}
log.Infof("Rejected block %s from %s during IBD: %s", block.Hash(), flow.peer, err)
return protocolerrors.Wrapf(true, err, "got invalid block %s during IBD", block.Hash())
}
if isOrphan {
return protocolerrors.Errorf(true, "received orphan block %s "+
"during IBD", block.Hash())
}
if isDelayed {
return protocolerrors.Errorf(false, "received delayed block %s "+
"during IBD", block.Hash())
}
err = flow.OnNewBlock(block)
if err != nil {
return err
}
err = blocklogger.LogBlock(block)
if err != nil {
return err
}
return nil
}
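
processIBDBlock above classifies failures: a blockdag rule error means the peer sent an invalid block and becomes a protocol error, while anything else is treated as an internal failure and wrapped. A small, self-contained sketch of that errors.As classification follows; it uses the standard library's errors package and %w wrapping instead of github.com/pkg/errors, and ruleError/processBlock are invented stand-ins.

package main

import (
	"errors"
	"fmt"
)

// ruleError stands in for blockdag.RuleError: a block that could be
// processed but violates consensus rules.
type ruleError struct{ reason string }

func (e ruleError) Error() string { return "rule error: " + e.reason }

// processBlock is a placeholder for DAG().ProcessBlock.
func processBlock(hash string) error {
	switch hash {
	case "bad":
		return ruleError{reason: "invalid proof of work"}
	case "broken-db":
		return errors.New("database unavailable")
	}
	return nil
}

// handleIBDBlock mirrors the error classification in processIBDBlock:
// rule errors blame the peer, anything else is an internal failure.
func handleIBDBlock(hash string) error {
	err := processBlock(hash)
	if err == nil {
		return nil
	}
	var rErr ruleError
	if !errors.As(err, &rErr) {
		return fmt.Errorf("failed to process block %s during IBD: %w", hash, err)
	}
	return fmt.Errorf("got invalid block %s during IBD (peer is misbehaving): %w", hash, err)
}

func main() {
	fmt.Println(handleIBDBlock("good"))      // <nil>
	fmt.Println(handleIBDBlock("bad"))       // peer is misbehaving
	fmt.Println(handleIBDBlock("broken-db")) // internal failure
}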

View File

@@ -0,0 +1,9 @@
package ibd
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log, _ = logger.Get(logger.SubsystemTags.IBDS)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -0,0 +1,61 @@
package selectedtip
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)
// HandleRequestSelectedTipContext is the interface for the context needed for the HandleRequestSelectedTip flow.
type HandleRequestSelectedTipContext interface {
DAG() *blockdag.BlockDAG
}
type handleRequestSelectedTipFlow struct {
HandleRequestSelectedTipContext
incomingRoute, outgoingRoute *router.Route
}
// HandleRequestSelectedTip handles getSelectedTip messages
func HandleRequestSelectedTip(context HandleRequestSelectedTipContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
flow := &handleRequestSelectedTipFlow{
HandleRequestSelectedTipContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *handleRequestSelectedTipFlow) start() error {
for {
err := flow.receiveGetSelectedTip()
if err != nil {
return err
}
err = flow.sendSelectedTipHash()
if err != nil {
return err
}
}
}
func (flow *handleRequestSelectedTipFlow) receiveGetSelectedTip() error {
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return err
}
_, ok := message.(*appmessage.MsgRequestSelectedTip)
if !ok {
return errors.Errorf("received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestSelectedTip, message.Command())
}
return nil
}
func (flow *handleRequestSelectedTipFlow) sendSelectedTipHash() error {
msgSelectedTip := appmessage.NewMsgSelectedTip(flow.DAG().SelectedTipHash())
return flow.outgoingRoute.Enqueue(msgSelectedTip)
}

View File

@@ -0,0 +1,79 @@
package selectedtip
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util/daghash"
)
// RequestSelectedTipContext is the interface for the context needed for the RequestSelectedTip flow.
type RequestSelectedTipContext interface {
DAG() *blockdag.BlockDAG
StartIBDIfRequired()
}
type requestSelectedTipFlow struct {
RequestSelectedTipContext
incomingRoute, outgoingRoute *router.Route
peer *peerpkg.Peer
}
// RequestSelectedTip waits for selected tip requests and handles them
func RequestSelectedTip(context RequestSelectedTipContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
flow := &requestSelectedTipFlow{
RequestSelectedTipContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
}
return flow.start()
}
func (flow *requestSelectedTipFlow) start() error {
for {
err := flow.runSelectedTipRequest()
if err != nil {
return err
}
}
}
func (flow *requestSelectedTipFlow) runSelectedTipRequest() error {
flow.peer.WaitForSelectedTipRequests()
defer flow.peer.FinishRequestingSelectedTip()
err := flow.requestSelectedTip()
if err != nil {
return err
}
peerSelectedTipHash, err := flow.receiveSelectedTip()
if err != nil {
return err
}
flow.peer.SetSelectedTipHash(peerSelectedTipHash)
flow.StartIBDIfRequired()
return nil
}
func (flow *requestSelectedTipFlow) requestSelectedTip() error {
msgGetSelectedTip := appmessage.NewMsgRequestSelectedTip()
return flow.outgoingRoute.Enqueue(msgGetSelectedTip)
}
func (flow *requestSelectedTipFlow) receiveSelectedTip() (selectedTipHash *daghash.Hash, err error) {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, err
}
msgSelectedTip := message.(*appmessage.MsgSelectedTip)
return msgSelectedTip.SelectedTipHash, nil
}

View File

@@ -0,0 +1,42 @@
package ping
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// ReceivePingsContext is the interface for the context needed for the ReceivePings flow.
type ReceivePingsContext interface {
}
type receivePingsFlow struct {
ReceivePingsContext
incomingRoute, outgoingRoute *router.Route
}
// ReceivePings handles all ping messages coming through incomingRoute.
// This function assumes that incomingRoute will only return MsgPing.
func ReceivePings(context ReceivePingsContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
flow := &receivePingsFlow{
ReceivePingsContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *receivePingsFlow) start() error {
for {
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return err
}
pingMessage := message.(*appmessage.MsgPing)
pongMessage := appmessage.NewMsgPong(pingMessage.Nonce)
err = flow.outgoingRoute.Enqueue(pongMessage)
if err != nil {
return err
}
}
}

View File

@@ -0,0 +1,66 @@
package ping
import (
"github.com/kaspanet/kaspad/app/protocol/common"
"time"
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util/random"
)
// SendPingsContext is the interface for the context needed for the SendPings flow.
type SendPingsContext interface {
}
type sendPingsFlow struct {
SendPingsContext
incomingRoute, outgoingRoute *router.Route
peer *peerpkg.Peer
}
// SendPings starts sending MsgPings to the given peer once every pingInterval.
// This function assumes that incomingRoute will only return MsgPong.
func SendPings(context SendPingsContext, incomingRoute *router.Route, outgoingRoute *router.Route, peer *peerpkg.Peer) error {
flow := &sendPingsFlow{
SendPingsContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
peer: peer,
}
return flow.start()
}
func (flow *sendPingsFlow) start() error {
const pingInterval = 2 * time.Minute
ticker := time.NewTicker(pingInterval)
defer ticker.Stop()
for range ticker.C {
nonce, err := random.Uint64()
if err != nil {
return err
}
flow.peer.SetPingPending(nonce)
pingMessage := appmessage.NewMsgPing(nonce)
err = flow.outgoingRoute.Enqueue(pingMessage)
if err != nil {
return err
}
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return err
}
pongMessage := message.(*appmessage.MsgPong)
if pongMessage.Nonce != pingMessage.Nonce {
return protocolerrors.New(true, "nonce mismatch between ping and pong")
}
flow.peer.SetPingIdle()
}
return nil
}
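
The flow above ticks every pingInterval, sends a ping carrying a random nonce, and requires the next pong to echo that nonce before the timeout. A compact stand-in with channels in place of the routes is sketched below; the route channels, pingOnce helper and simulated remote peer are assumptions for the example.

package main

import (
	"errors"
	"fmt"
	"math/rand"
	"time"
)

// pingOnce sends one nonce and waits for the matching pong or a timeout.
func pingOnce(outgoing chan<- uint64, incoming <-chan uint64, timeout time.Duration) error {
	nonce := rand.Uint64()
	outgoing <- nonce // send ping
	select {
	case pongNonce := <-incoming: // wait for pong
		if pongNonce != nonce {
			return errors.New("nonce mismatch between ping and pong")
		}
		return nil
	case <-time.After(timeout):
		return errors.New("timed out waiting for pong")
	}
}

func main() {
	outgoing := make(chan uint64)
	incoming := make(chan uint64)

	// Simulated remote peer that echoes every ping nonce back.
	go func() {
		for nonce := range outgoing {
			incoming <- nonce
		}
	}()

	const pingInterval = 100 * time.Millisecond // two minutes in the real flow
	ticker := time.NewTicker(pingInterval)
	defer ticker.Stop()

	for i := 0; i < 3; i++ {
		<-ticker.C
		if err := pingOnce(outgoing, incoming, time.Second); err != nil {
			fmt.Println("ping failed:", err)
			return
		}
		fmt.Println("pong received in time")
	}
}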

View File

@@ -0,0 +1,42 @@
package rejects
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// HandleRejectsContext is the interface for the context needed for the HandleRejects flow.
type HandleRejectsContext interface {
}
type handleRejectsFlow struct {
HandleRejectsContext
incomingRoute, outgoingRoute *router.Route
}
// HandleRejects handles all reject messages coming through incomingRoute.
// This function assumes that incomingRoute will only return MsgReject.
func HandleRejects(context HandleRejectsContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
flow := &handleRejectsFlow{
HandleRejectsContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *handleRejectsFlow) start() error {
message, err := flow.incomingRoute.Dequeue()
if err != nil {
return err
}
rejectMessage := message.(*appmessage.MsgReject)
const maxReasonLength = 255
if len(rejectMessage.Reason) > maxReasonLength {
return protocolerrors.Errorf(false, "got reject message with a reason longer than %d characters", maxReasonLength)
}
return protocolerrors.Errorf(false, "got reject message: `%s`", rejectMessage.Reason)
}

View File

@@ -0,0 +1,223 @@
package relaytransactions
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/domain/mempool"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/pkg/errors"
)
// TransactionsRelayContext is the interface for the context needed for the
// HandleRelayedTransactions and HandleRequestedTransactions flows.
type TransactionsRelayContext interface {
NetAdapter() *netadapter.NetAdapter
DAG() *blockdag.BlockDAG
SharedRequestedTransactions() *SharedRequestedTransactions
TxPool() *mempool.TxPool
Broadcast(message appmessage.Message) error
}
type handleRelayedTransactionsFlow struct {
TransactionsRelayContext
incomingRoute, outgoingRoute *router.Route
invsQueue []*appmessage.MsgInvTransaction
}
// HandleRelayedTransactions listens to appmessage.MsgInvTransaction messages, requests their corresponding transactions if they
// are missing, adds them to the mempool and propagates them to the rest of the network.
func HandleRelayedTransactions(context TransactionsRelayContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
flow := &handleRelayedTransactionsFlow{
TransactionsRelayContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
invsQueue: make([]*appmessage.MsgInvTransaction, 0),
}
return flow.start()
}
func (flow *handleRelayedTransactionsFlow) start() error {
for {
inv, err := flow.readInv()
if err != nil {
return err
}
requestedIDs, err := flow.requestInvTransactions(inv)
if err != nil {
return err
}
err = flow.receiveTransactions(requestedIDs)
if err != nil {
return err
}
}
}
func (flow *handleRelayedTransactionsFlow) requestInvTransactions(
inv *appmessage.MsgInvTransaction) (requestedIDs []*daghash.TxID, err error) {
idsToRequest := make([]*daghash.TxID, 0, len(inv.TxIDs))
for _, txID := range inv.TxIDs {
if flow.isKnownTransaction(txID) {
continue
}
exists := flow.SharedRequestedTransactions().addIfNotExists(txID)
if exists {
continue
}
idsToRequest = append(idsToRequest, txID)
}
if len(idsToRequest) == 0 {
return idsToRequest, nil
}
msgGetTransactions := appmessage.NewMsgRequestTransactions(idsToRequest)
err = flow.outgoingRoute.Enqueue(msgGetTransactions)
if err != nil {
flow.SharedRequestedTransactions().removeMany(idsToRequest)
return nil, err
}
return idsToRequest, nil
}
func (flow *handleRelayedTransactionsFlow) isKnownTransaction(txID *daghash.TxID) bool {
// Ask the transaction memory pool if the transaction is known
// to it in any form (main pool or orphan).
if flow.TxPool().HaveTransaction(txID) {
return true
}
// Check if the transaction exists from the point of view of the
// DAG's virtual block. Note that this is only a best effort
// since it is expensive to check existence of every output and
// the only purpose of this check is to avoid downloading
// already known transactions. Only the first two outputs are
// checked because the vast majority of transactions consist of
// two outputs where one is some form of "pay-to-somebody-else"
// and the other is a change output.
prevOut := appmessage.Outpoint{TxID: *txID}
for i := uint32(0); i < 2; i++ {
prevOut.Index = i
_, ok := flow.DAG().GetUTXOEntry(prevOut)
if ok {
return true
}
}
return false
}
func (flow *handleRelayedTransactionsFlow) readInv() (*appmessage.MsgInvTransaction, error) {
if len(flow.invsQueue) > 0 {
var inv *appmessage.MsgInvTransaction
inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:]
return inv, nil
}
msg, err := flow.incomingRoute.Dequeue()
if err != nil {
return nil, err
}
inv, ok := msg.(*appmessage.MsgInvTransaction)
if !ok {
return nil, protocolerrors.Errorf(true, "unexpected %s message in the transaction relay flow while "+
"expecting an inv message", msg.Command())
}
return inv, nil
}
func (flow *handleRelayedTransactionsFlow) broadcastAcceptedTransactions(acceptedTxs []*mempool.TxDesc) error {
idsToBroadcast := make([]*daghash.TxID, len(acceptedTxs))
for i, tx := range acceptedTxs {
idsToBroadcast[i] = tx.Tx.ID()
}
inv := appmessage.NewMsgInvTransaction(idsToBroadcast)
return flow.Broadcast(inv)
}
// readMsgTxOrNotFound returns the next msgTx or msgTransactionNotFound in incomingRoute,
// returning only one of the message types at a time, and populates invsQueue with
// any inv messages that arrive in the meantime.
func (flow *handleRelayedTransactionsFlow) readMsgTxOrNotFound() (
msgTx *appmessage.MsgTx, msgNotFound *appmessage.MsgTransactionNotFound, err error) {
for {
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
if err != nil {
return nil, nil, err
}
switch message := message.(type) {
case *appmessage.MsgInvTransaction:
flow.invsQueue = append(flow.invsQueue, message)
case *appmessage.MsgTx:
return message, nil, nil
case *appmessage.MsgTransactionNotFound:
return nil, message, nil
default:
return nil, nil, errors.Errorf("unexpected message %s", message.Command())
}
}
}
func (flow *handleRelayedTransactionsFlow) receiveTransactions(requestedTransactions []*daghash.TxID) error {
// In case the function returns earlier than expected, we want to make sure sharedRequestedTransactions is
// clean from any pending transactions.
defer flow.SharedRequestedTransactions().removeMany(requestedTransactions)
for _, expectedID := range requestedTransactions {
msgTx, msgTxNotFound, err := flow.readMsgTxOrNotFound()
if err != nil {
return err
}
if msgTxNotFound != nil {
if !msgTxNotFound.ID.IsEqual(expectedID) {
return protocolerrors.Errorf(true, "expected transaction %s, but got %s",
expectedID, msgTxNotFound.ID)
}
continue
}
tx := util.NewTx(msgTx)
if !tx.ID().IsEqual(expectedID) {
return protocolerrors.Errorf(true, "expected transaction %s, but got %s",
expectedID, tx.ID())
}
acceptedTxs, err := flow.TxPool().ProcessTransaction(tx, true)
if err != nil {
ruleErr := &mempool.RuleError{}
if !errors.As(err, ruleErr) {
return errors.Wrapf(err, "failed to process transaction %s", tx.ID())
}
shouldBan := false
if txRuleErr := (&mempool.TxRuleError{}); errors.As(ruleErr.Err, txRuleErr) {
if txRuleErr.RejectCode == mempool.RejectInvalid {
shouldBan = true
}
} else if dagRuleErr := (&blockdag.RuleError{}); errors.As(ruleErr.Err, dagRuleErr) {
shouldBan = true
}
if !shouldBan {
continue
}
return protocolerrors.Errorf(true, "rejected transaction %s", tx.ID())
}
err = flow.broadcastAcceptedTransactions(acceptedTxs)
if err != nil {
return err
}
}
return nil
}
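
While waiting for a specific transaction reply, readMsgTxOrNotFound keeps draining the shared incoming route and stashes unrelated inv messages in invsQueue instead of dropping them. The generic sketch below shows that "set aside what you are not waiting for" pattern; the message types and relayState struct are invented for the example.

package main

import "fmt"

type message interface{ command() string }

type msgInv struct{ ids []string }
type msgTx struct{ id string }

func (msgInv) command() string { return "invtx" }
func (msgTx) command() string  { return "tx" }

// relayState keeps the queue of invs that arrived while we were
// waiting for a transaction reply, like invsQueue in the flow.
type relayState struct {
	incoming  chan message
	invsQueue []msgInv
}

// readTx dequeues messages until a transaction arrives, queuing any
// inv messages that show up in the meantime.
func (s *relayState) readTx() msgTx {
	for {
		switch m := (<-s.incoming).(type) {
		case msgInv:
			s.invsQueue = append(s.invsQueue, m)
		case msgTx:
			return m
		}
	}
}

func main() {
	s := &relayState{incoming: make(chan message, 3)}
	s.incoming <- msgInv{ids: []string{"tx2", "tx3"}} // arrives early
	s.incoming <- msgTx{id: "tx1"}                    // the reply we wanted

	tx := s.readTx()
	fmt.Println("got transaction:", tx.id)
	fmt.Println("queued invs for later:", len(s.invsQueue))
}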

View File

@@ -0,0 +1,59 @@
package relaytransactions
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
type handleRequestedTransactionsFlow struct {
TransactionsRelayContext
incomingRoute, outgoingRoute *router.Route
}
// HandleRequestedTransactions listens to appmessage.MsgRequestTransactions messages, responding with the requested
// transactions if they are in the mempool.
// Transactions missing from the mempool are answered with a MsgTransactionNotFound message.
func HandleRequestedTransactions(context TransactionsRelayContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
flow := &handleRequestedTransactionsFlow{
TransactionsRelayContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
}
return flow.start()
}
func (flow *handleRequestedTransactionsFlow) start() error {
for {
msgRequestTransactions, err := flow.readRequestTransactions()
if err != nil {
return err
}
for _, transactionID := range msgRequestTransactions.IDs {
tx, ok := flow.TxPool().FetchTransaction(transactionID)
if !ok {
msgTransactionNotFound := appmessage.NewMsgTransactionNotFound(transactionID)
err := flow.outgoingRoute.Enqueue(msgTransactionNotFound)
if err != nil {
return err
}
continue
}
err := flow.outgoingRoute.Enqueue(tx.MsgTx())
if err != nil {
return err
}
}
}
}
func (flow *handleRequestedTransactionsFlow) readRequestTransactions() (*appmessage.MsgRequestTransactions, error) {
msg, err := flow.incomingRoute.Dequeue()
if err != nil {
return nil, err
}
return msg.(*appmessage.MsgRequestTransactions), nil
}

View File

@@ -0,0 +1,45 @@
package relaytransactions
import (
"github.com/kaspanet/kaspad/util/daghash"
"sync"
)
// SharedRequestedTransactions is a data structure shared between peer flows that
// holds the IDs of all requested transactions, to prevent redundant requests.
type SharedRequestedTransactions struct {
transactions map[daghash.TxID]struct{}
sync.Mutex
}
func (s *SharedRequestedTransactions) remove(txID *daghash.TxID) {
s.Lock()
defer s.Unlock()
delete(s.transactions, *txID)
}
func (s *SharedRequestedTransactions) removeMany(txIDs []*daghash.TxID) {
s.Lock()
defer s.Unlock()
for _, txID := range txIDs {
delete(s.transactions, *txID)
}
}
func (s *SharedRequestedTransactions) addIfNotExists(txID *daghash.TxID) (exists bool) {
s.Lock()
defer s.Unlock()
_, ok := s.transactions[*txID]
if ok {
return true
}
s.transactions[*txID] = struct{}{}
return false
}
// NewSharedRequestedTransactions returns a new instance of SharedRequestedTransactions.
func NewSharedRequestedTransactions() *SharedRequestedTransactions {
return &SharedRequestedTransactions{
transactions: make(map[daghash.TxID]struct{}),
}
}
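
addIfNotExists is what makes concurrent relay flows cooperate: whichever flow marks a transaction ID first is the one that requests it, and every other flow skips it. The demo below reproduces that dedup behaviour with a string-keyed version of the same mutex-protected set; requestedSet and the counters are inventions for the example.

package main

import (
	"fmt"
	"sync"
)

// requestedSet is a string-keyed analogue of SharedRequestedTransactions.
type requestedSet struct {
	sync.Mutex
	items map[string]struct{}
}

// addIfNotExists reports whether id was already present, adding it if not.
func (s *requestedSet) addIfNotExists(id string) (exists bool) {
	s.Lock()
	defer s.Unlock()
	if _, ok := s.items[id]; ok {
		return true
	}
	s.items[id] = struct{}{}
	return false
}

func main() {
	shared := &requestedSet{items: make(map[string]struct{})}
	var wg sync.WaitGroup
	var mu sync.Mutex
	requestsSent := 0

	// Two peer flows see the same inv; only one ends up requesting it.
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if !shared.addIfNotExists("tx-abc") {
				mu.Lock()
				requestsSent++
				mu.Unlock()
			}
		}()
	}
	wg.Wait()
	fmt.Println("requests sent:", requestsSent) // always 1
}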

9
app/protocol/log.go Normal file
View File

@@ -0,0 +1,9 @@
package protocol
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log, _ = logger.Get(logger.SubsystemTags.PROT)
var spawn = panics.GoroutineWrapperFunc(log)

74
app/protocol/manager.go Normal file
View File

@@ -0,0 +1,74 @@
package protocol
import (
"fmt"
"github.com/kaspanet/kaspad/app/protocol/flowcontext"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/domain/mempool"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/connmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/kaspanet/kaspad/util"
)
// Manager manages the p2p protocol
type Manager struct {
context *flowcontext.FlowContext
}
// NewManager creates a new instance of the p2p protocol manager
func NewManager(cfg *config.Config, dag *blockdag.BlockDAG, netAdapter *netadapter.NetAdapter,
addressManager *addressmanager.AddressManager, txPool *mempool.TxPool,
connectionManager *connmanager.ConnectionManager) (*Manager, error) {
manager := Manager{
context: flowcontext.New(cfg, dag, addressManager, txPool, netAdapter, connectionManager),
}
netAdapter.SetRouterInitializer(manager.routerInitializer)
return &manager, nil
}
// Start starts the p2p protocol
func (m *Manager) Start() error {
return m.context.NetAdapter().Start()
}
// Stop stops the p2p protocol
func (m *Manager) Stop() error {
return m.context.NetAdapter().Stop()
}
// Peers returns the currently active peers
func (m *Manager) Peers() []*peerpkg.Peer {
return m.context.Peers()
}
// IBDPeer returns the currently active IBD peer.
// Returns nil if we aren't currently in IBD
func (m *Manager) IBDPeer() *peerpkg.Peer {
return m.context.IBDPeer()
}
// AddTransaction adds transaction to the mempool and propagates it.
func (m *Manager) AddTransaction(tx *util.Tx) error {
return m.context.AddTransaction(tx)
}
// AddBlock adds the given block to the DAG and propagates it.
func (m *Manager) AddBlock(block *util.Block, flags blockdag.BehaviorFlags) error {
return m.context.AddBlock(block, flags)
}
func (m *Manager) runFlows(flows []*flow, peer *peerpkg.Peer, errChan <-chan error) error {
for _, flow := range flows {
executeFunc := flow.executeFunc // extract to new variable so that it's not overwritten
spawn(fmt.Sprintf("flow-%s", flow.name), func() {
executeFunc(peer)
})
}
return <-errChan
}
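
The comment in runFlows ("extract to new variable so that it's not overwritten") guards against the classic Go gotcha of capturing a range variable inside a goroutine closure. The short illustration below shows why the copy matters on Go versions before 1.22, where the loop variable is reused across iterations; the flow names are placeholders.

package main

import (
	"fmt"
	"sync"
)

func main() {
	flows := []string{"handshake", "ibd", "ping"}
	var wg sync.WaitGroup

	// Buggy variant: every goroutine may observe the last value of `name`,
	// because the range variable is shared across iterations (pre-Go 1.22).
	for _, name := range flows {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println("buggy:", name)
		}()
	}
	wg.Wait()

	// Fixed variant, mirroring runFlows: copy into a new variable first.
	for _, name := range flows {
		name := name // extract to a new variable so that it's not overwritten
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println("fixed:", name)
		}()
	}
	wg.Wait()
}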

9
app/protocol/peer/log.go Normal file
View File

@@ -0,0 +1,9 @@
package peer
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log, _ = logger.Get(logger.SubsystemTags.PROT)
var spawn = panics.GoroutineWrapperFunc(log)

207
app/protocol/peer/peer.go Normal file
View File

@@ -0,0 +1,207 @@
package peer
import (
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"sync"
"sync/atomic"
"time"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
"github.com/kaspanet/kaspad/util/daghash"
mathUtil "github.com/kaspanet/kaspad/util/math"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/util/subnetworkid"
)
// Peer holds data about a peer.
type Peer struct {
connection *netadapter.NetConnection
selectedTipHashMtx sync.RWMutex
selectedTipHash *daghash.Hash
userAgent string
services appmessage.ServiceFlag
advertisedProtocolVerion uint32 // protocol version advertised by remote
protocolVersion uint32 // negotiated protocol version
disableRelayTx bool
subnetworkID *subnetworkid.SubnetworkID
timeOffset time.Duration
connectionStarted time.Time
pingLock sync.RWMutex
lastPingNonce uint64 // The nonce of the last ping we sent
lastPingTime time.Time // Time we sent last ping
lastPingDuration time.Duration // Time for last ping to return
isSelectedTipRequested uint32
selectedTipRequestChan chan struct{}
lastSelectedTipRequest mstime.Time
ibdStartChan chan struct{}
}
// New returns a new Peer
func New(connection *netadapter.NetConnection) *Peer {
return &Peer{
connection: connection,
selectedTipRequestChan: make(chan struct{}),
ibdStartChan: make(chan struct{}),
connectionStarted: time.Now(),
}
}
// Connection returns the NetConnection associated with this peer
func (p *Peer) Connection() *netadapter.NetConnection {
return p.connection
}
// SelectedTipHash returns the selected tip of the peer.
func (p *Peer) SelectedTipHash() *daghash.Hash {
p.selectedTipHashMtx.RLock()
defer p.selectedTipHashMtx.RUnlock()
return p.selectedTipHash
}
// SetSelectedTipHash sets the selected tip of the peer.
func (p *Peer) SetSelectedTipHash(hash *daghash.Hash) {
p.selectedTipHashMtx.Lock()
defer p.selectedTipHashMtx.Unlock()
p.selectedTipHash = hash
}
// SubnetworkID returns the subnetwork the peer is associated with.
// It is nil in full nodes.
func (p *Peer) SubnetworkID() *subnetworkid.SubnetworkID {
return p.subnetworkID
}
// ID returns the peer ID.
func (p *Peer) ID() *id.ID {
return p.connection.ID()
}
// TimeOffset returns the peer's time offset.
func (p *Peer) TimeOffset() time.Duration {
return p.timeOffset
}
// UserAgent returns the peer's user agent.
func (p *Peer) UserAgent() string {
return p.userAgent
}
// AdvertisedProtocolVersion returns the peer's advertised protocol version.
func (p *Peer) AdvertisedProtocolVersion() uint32 {
return p.advertisedProtocolVerion
}
// TimeConnected returns the time since the connection to this peer was started.
func (p *Peer) TimeConnected() time.Duration {
return time.Since(p.connectionStarted)
}
// IsOutbound returns whether the peer is an outbound connection.
func (p *Peer) IsOutbound() bool {
return p.connection.IsOutbound()
}
// UpdateFieldsFromMsgVersion updates the peer with the data from the version message.
func (p *Peer) UpdateFieldsFromMsgVersion(msg *appmessage.MsgVersion) {
// Negotiate the protocol version.
p.advertisedProtocolVerion = msg.ProtocolVersion
p.protocolVersion = mathUtil.MinUint32(p.protocolVersion, p.advertisedProtocolVerion)
log.Debugf("Negotiated protocol version %d for peer %s",
p.protocolVersion, p.ID())
// Set the supported services for the peer to what the remote peer
// advertised.
p.services = msg.Services
// Set the remote peer's user agent.
p.userAgent = msg.UserAgent
p.disableRelayTx = msg.DisableRelayTx
p.selectedTipHash = msg.SelectedTipHash
p.subnetworkID = msg.SubnetworkID
p.timeOffset = mstime.Since(msg.Timestamp)
}
// SetPingPending sets the ping state of the peer to 'pending'
func (p *Peer) SetPingPending(nonce uint64) {
p.pingLock.Lock()
defer p.pingLock.Unlock()
p.lastPingNonce = nonce
p.lastPingTime = time.Now()
}
// SetPingIdle sets the ping state of the peer to 'idle'
func (p *Peer) SetPingIdle() {
p.pingLock.Lock()
defer p.pingLock.Unlock()
p.lastPingNonce = 0
p.lastPingDuration = time.Since(p.lastPingTime)
}
func (p *Peer) String() string {
return p.connection.String()
}
// RequestSelectedTipIfRequired notifies the peer that requesting
// a selected tip is required. This triggers the selected tip
// request flow.
func (p *Peer) RequestSelectedTipIfRequired() {
if atomic.SwapUint32(&p.isSelectedTipRequested, 1) != 0 {
return
}
const minGetSelectedTipInterval = time.Minute
if mstime.Since(p.lastSelectedTipRequest) < minGetSelectedTipInterval {
return
}
p.lastSelectedTipRequest = mstime.Now()
p.selectedTipRequestChan <- struct{}{}
}
// WaitForSelectedTipRequests blocks the current thread until
// a selected tip is requested from this peer
func (p *Peer) WaitForSelectedTipRequests() {
<-p.selectedTipRequestChan
}
// FinishRequestingSelectedTip finishes requesting the selected
// tip from this peer
func (p *Peer) FinishRequestingSelectedTip() {
atomic.StoreUint32(&p.isSelectedTipRequested, 0)
}
// StartIBD starts the IBD process for this peer
func (p *Peer) StartIBD() {
p.ibdStartChan <- struct{}{}
}
// WaitForIBDStart blocks the current thread until
// IBD start is requested from this peer
func (p *Peer) WaitForIBDStart() {
<-p.ibdStartChan
}
// Address returns the address associated with this connection
func (p *Peer) Address() string {
return p.connection.Address()
}
// LastPingDuration returns the duration of the last ping to
// this peer
func (p *Peer) LastPingDuration() time.Duration {
p.pingLock.Lock()
defer p.pingLock.Unlock()
return p.lastPingDuration
}
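
RequestSelectedTipIfRequired combines an atomic "already requested" latch with a minimum interval so repeated triggers collapse into a single request, which FinishRequestingSelectedTip later resets. A standalone sketch of that latch-and-debounce pattern follows; the requester struct, buffered channel and shortened interval are assumptions made for the demonstration.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type requester struct {
	isRequested uint32
	lastRequest time.Time
	requests    chan struct{}
}

// requestIfRequired mirrors RequestSelectedTipIfRequired: only the first
// caller flips the latch, and requests within minInterval are dropped.
func (r *requester) requestIfRequired(minInterval time.Duration) {
	if atomic.SwapUint32(&r.isRequested, 1) != 0 {
		return // a request is already pending
	}
	if time.Since(r.lastRequest) < minInterval {
		return
	}
	r.lastRequest = time.Now()
	r.requests <- struct{}{}
}

// finish resets the latch, like FinishRequestingSelectedTip.
func (r *requester) finish() {
	atomic.StoreUint32(&r.isRequested, 0)
}

func main() {
	r := &requester{requests: make(chan struct{}, 1)}

	r.requestIfRequired(time.Millisecond) // goes through
	r.requestIfRequired(time.Millisecond) // dropped: latch already set
	fmt.Println("pending requests:", len(r.requests))

	<-r.requests // the waiting flow handles the request...
	r.finish()   // ...and resets the latch when done
}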

Some files were not shown because too many files have changed in this diff.