Compare commits

...

218 Commits

Author SHA1 Message Date
Ori Newman
cba346d753 [NOD-422] Separate request queue and request queue set by inv type (#471)
* [NOD-422] Separate request queue and request queue set by inv type

* [NOD-422] Make one-liner pop and remove redundant nil assignment
2019-11-18 15:34:01 +02:00
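A minimal sketch of the NOD-422 split above, with illustrative names (InvType and requestQueues are not the actual kaspad types): one FIFO queue plus one membership set per inventory type, so block requests and transaction requests no longer share a single queue.

```go
package main

import "fmt"

// InvType distinguishes the kinds of inventory a peer can announce.
type InvType int

const (
	InvTypeBlock InvType = iota
	InvTypeTx
)

// requestQueues keeps one FIFO of hashes per inv type, each with a
// membership set for O(1) duplicate checks — the queue/set split by type.
type requestQueues struct {
	queues map[InvType][]string
	sets   map[InvType]map[string]struct{}
}

func newRequestQueues() *requestQueues {
	return &requestQueues{
		queues: make(map[InvType][]string),
		sets:   make(map[InvType]map[string]struct{}),
	}
}

func (rq *requestQueues) enqueue(t InvType, hash string) {
	if rq.sets[t] == nil {
		rq.sets[t] = make(map[string]struct{})
	}
	if _, ok := rq.sets[t][hash]; ok {
		return // already queued for this inv type
	}
	rq.sets[t][hash] = struct{}{}
	rq.queues[t] = append(rq.queues[t], hash)
}

// pop removes and returns the next hash of the given type. The slice
// re-slicing here is the "one-liner pop" the follow-up bullet mentions.
func (rq *requestQueues) pop(t InvType) (string, bool) {
	q := rq.queues[t]
	if len(q) == 0 {
		return "", false
	}
	hash := q[0]
	rq.queues[t] = q[1:]
	delete(rq.sets[t], hash)
	return hash, true
}

func main() {
	rq := newRequestQueues()
	rq.enqueue(InvTypeBlock, "blockhash1")
	rq.enqueue(InvTypeTx, "txid1")
	hash, _ := rq.pop(InvTypeBlock)
	fmt.Println(hash) // blockhash1 — the tx queue is untouched
}
```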
Ori Newman
0f34cfb1a2 [NOD-433] Make buildGetBlockVerboseResult use BlockConfirmationsByHashNoLock (#479) 2019-11-18 15:17:51 +02:00
stasatdaglabs
ea846a3284 [NOD-436] Remove unnecessary check before sending a chainChanged notification. (#478) 2019-11-18 14:49:27 +02:00
stasatdaglabs
63bfac9740 [NOD-436] Fix sending empty chainChanged messages. (#477) 2019-11-18 14:33:07 +02:00
Svarog
7284815c21 [NOD-435] Rollback transactions if they were not committed (#475) 2019-11-18 11:14:29 +02:00
Svarog
80307d108b [NOD-430] Print hashes of missing parents when a block can't be inserted into the API-Server DB (#473)
* [NOD-430] Print hashes of missing parents when a block can't be inserted into the API-Server DB

* [NOD-430] Use continue OUTER_LOOP instead of break

* [NOD-430] Use lowerCamelCase for label
2019-11-18 10:03:17 +02:00
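The label bullets in NOD-430 above refer to Go's labeled continue. A toy illustration of the pattern with a lowerCamelCase label (the block/parent data is made up):

```go
package main

import "fmt"

func main() {
	// Made-up data: each block lists its parent hashes.
	blocks := [][]string{{"p1", "p2"}, {"p3"}}

	// A lowerCamelCase label with `continue`, as in the bullets above:
	// skip the whole block when one of its parents is missing.
outerLoop:
	for _, parents := range blocks {
		for _, parent := range parents {
			if parent == "p3" { // stand-in for "parent missing from the DB"
				fmt.Println("missing parent:", parent)
				continue outerLoop
			}
		}
		fmt.Println("inserted block with parents:", parents)
	}
}
```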
stasatdaglabs
722437afe9 [NOD-424] Fix API Server not syncing new blocks (#470)
* [NOD-424] Fix typo in SQL query.

* [NOD-424] Rewrite handling chainChangedMsgs.

* [NOD-424] Separated enqueueChainChangedMsg and processChainChangedMsgs.

* [NOD-424] Fix a typo.
2019-11-17 16:52:45 +02:00
Ori Newman
684cf4b5fa [NOD-406] Don't do ECMH operations on mempool (#467)
* [NOD-406] Don't do ECMH operations on mempool

* [NOD-406] Change NewUTXODiff(false) to NewUTXODiffWithoutMultiset

* [NOD-406] Rename dClone -> clone

* [NOD-406] Remove redundant assignment

* [NOD-406] Remove dag.UTXOToECMHCacheLock and make NewBlockTemplate use dag's write lock

* [NOD-406] Add tests to UTXO diffs without multiset
2019-11-14 17:40:05 +02:00
Ori Newman
c95a7b13a6 [NOD-379] Make a separate limit for block invs in getdata message (#465) 2019-11-14 13:45:24 +02:00
stasatdaglabs
1ce7f21026 [NOD-380] Implement MQTT client in api-server (#468)
* [NOD-380] Add MQTT to the project.

* [NOD-380] Add MQTT params to config.

* [NOD-380] Implement connecting to an mqtt broker.

* [NOD-380] Fix a comment.

* [NOD-380] Removed unnecessary option.

* [NOD-380] Added comments to MQTT functions.

* [NOD-380] Fix copy+paste error.

* [NOD-380] Make it so that all the mqtt flags must be passed together.

* [NOD-380] Use activeConfig instead of passing it everywhere.
2019-11-14 10:44:45 +02:00
Svarog
7d7df10493 [NOD-418] Added IsSpendable field to /utxos/ queries in API server (#466) 2019-11-13 16:25:10 +02:00
Dan Aharoni
8179862e0b [NOD-421] Fix DNSSeeder parsing error caused by NOD-386 (#464) 2019-11-13 12:49:58 +02:00
stasatdaglabs
6828f623b4 [NOD-395] Fix a crash in diffFromAcceptanceData caused by wrong order of iteration over blocks (#463)
* [NOD-395] Write a test for the diffFromAcceptanceData crash.

* [NOD-395] Converted MultiBlockTxsAcceptanceData into a slice.

* [NOD-395] Fix failing test.

* [NOD-395] Populate multiBlockTxsAcceptanceData bottom-to-top.

* [NOD-395] Add comment to FindAcceptanceData.

* [NOD-395] Remove no-longer relevant note about probability in TestOrderInDiffFromAcceptanceData.
2019-11-13 12:28:52 +02:00
stasatdaglabs
2c88a5b2fe [NOD-413] Make "Max failed connection attempts reached" logs less frequent (#458)
* [NOD-413] Make "Max failed connection attempts reached" less frequent

* [NOD-413] Throttle only certain types of logs.

* [NOD-413] Add a comment for shouldWriteConnFailedLog.

* [NOD-413] Fix lint error.

* [NOD-413] Make ErrNoAddress a special type to support error wrapping.

* [NOD-413] Make throttledConnFailedLogInterval 10 minutes.

* [NOD-413] Move p2p errors into variables.

* [NOD-413] Reorganize throttled stuff to be next to each other.
2019-11-13 11:25:39 +02:00
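A hedged sketch of the NOD-413 throttling above; shouldWriteConnFailedLog and the 10-minute throttledConnFailedLogInterval are named in the bullets, the surrounding state is assumed:

```go
package main

import (
	"fmt"
	"time"
)

// throttledConnFailedLogInterval matches the 10-minute interval from the
// commits: at most one conn-failed log line is written per interval.
const throttledConnFailedLogInterval = 10 * time.Minute

var lastConnFailedLogTime time.Time

// shouldWriteConnFailedLog reports whether enough time has passed since
// the last throttled log line was written, and records the new write time.
func shouldWriteConnFailedLog(now time.Time) bool {
	if now.Sub(lastConnFailedLogTime) < throttledConnFailedLogInterval {
		return false
	}
	lastConnFailedLogTime = now
	return true
}

func main() {
	now := time.Now()
	fmt.Println(shouldWriteConnFailedLog(now))                       // true
	fmt.Println(shouldWriteConnFailedLog(now.Add(time.Minute)))      // false — throttled
	fmt.Println(shouldWriteConnFailedLog(now.Add(11 * time.Minute))) // true
}
```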
Ori Newman
a7f08598f3 [NOD-416] Use errors.Is and add goroutine stack trace to HandlePanic (#459)
* [NOD-416] Use errors.Is and add goroutine stack trace to HandlePanic

* [NOD-416] Don't print goroutineStackTrace if it's nil
2019-11-13 11:20:20 +02:00
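A sketch of the NOD-416 pattern above, written against the Go 1.13 standard library (the repo upgrades to Go 1.13 elsewhere in this range); the sentinel error and messages are illustrative:

```go
package main

import (
	"errors"
	"fmt"
	"runtime/debug"
)

var errExpectedShutdown = errors.New("expected shutdown") // hypothetical sentinel

// handlePanic sketches the pattern from the commits: recover, classify the
// recovered error with errors.Is rather than string comparison, and print
// the spawning goroutine's stack trace when one was captured (a nil trace
// is skipped, per the follow-up bullet).
func handlePanic(goroutineStackTrace []byte) {
	r := recover()
	if r == nil {
		return
	}
	if err, ok := r.(error); ok && errors.Is(err, errExpectedShutdown) {
		fmt.Println("shutting down:", err)
	} else {
		fmt.Println("unexpected panic:", r)
	}
	if goroutineStackTrace != nil {
		fmt.Printf("goroutine stack trace:\n%s", goroutineStackTrace)
	}
}

func main() {
	stackTrace := debug.Stack() // in the real pattern, captured where the goroutine is spawned
	defer handlePanic(stackTrace)
	panic(fmt.Errorf("wrapped: %w", errExpectedShutdown))
}
```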
Dan Aharoni
83bad65d3a [NOD-419] Btcctl parsing error (introduced by NOD-386) (#462) 2019-11-12 14:25:28 +02:00
Dan Aharoni
1f35378a4d [NOD-386] Extract net parsing functionality to a shared place. (#453)
* [NOD-386] Extract net parsing functionality to a shared place.

* [NOD-386] Add extract ActiveNetParams to cmdconfig

* [NOD-386] Adding comments so go-vet won't shout at me

* [NOD-386] Rename package name to config

* [NOD-386] Rename commandConfig to configFlags

* [NOD-386] Rename function to ResolveNetwork

* [NOD-386] Fix renaming errors

* [NOD-386] Refactor network config to btcd level so APIserver and btcd could use it

* [NOD-386] Refactor network config to config package

* [NOD-386] Move ActiveNetParams to network section

* [NOD-386] Explicitly return nil

* [NOD-386] Reuse activeNetParams from network config

* [NOD-386] Set ActiveNetworkFlags instance to be global

* [NOD-386] Remove redundant newline

* [NOD-386] Init ActiveNetParams in address manager test

* [NOD-386] Add dnsseeder network config

* [NOD-386] Use ActiveConfig() method to access configuration
2019-11-12 10:51:36 +02:00
Dan Aharoni
39eab7a6d5 [NOD-373] Schnorr signature scheme (#451)
* [NOD-373] Implement Schnorr digital signatures and remove ECDSA (based on code from gcash/bchd)

* [NOD-374] Add new error to list; Update comments.

* [NOD-373] Remove leftovers of verifyMessage RPC command (which was deleted)

* [NOD-373] Remove redundant test, add Schnorr tests, and fix tests where needed

* [NOD-373] Fix tests and remove redundant ones

* [NOD-373] Refactor functions names

* [NOD-373] Remove empty line

* [NOD-373] Fix comments, rename functions to more meaningful names

* [NOD-373] Additional data in nonceRFC6979 should not be nil

* [NOD-373] Refactor function name

* [NOD-373] Add permalinks for links to bchd code
2019-11-12 10:09:38 +02:00
Dan Aharoni
9dd025d4da [NOD-408] Remove unimplemented redundant RPC command GetTxOutProof (#461) 2019-11-11 12:53:56 +02:00
Dan Aharoni
bb75ea5020 [NOD-414] Remove AES encryption/decryption from btcd (#460) 2019-11-11 11:01:02 +02:00
stasatdaglabs
8dbd4a2bed [NOD-411] Fix underflowing check for resending transactions. (#457) 2019-11-07 16:08:40 +02:00
Ori Newman
24305cda68 [NOD-385] Change confirmation calculation to be relative to the selected tip (#455)
* [NOD-385] Make confirmations be calculated as dag.selectedTip().blueScore - acceptingBlock.blueScore + 2

* [NOD-385] Fix comments

* [NOD-385] Make more explicit check in accepting block for selected tip

* [NOD-385] Put only non-accepted transactions in areTxsInBlock

* [NOD-385] fetchSelectedTip only if needed
2019-11-07 13:42:25 +02:00
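The first bullet of NOD-385 above states the confirmation formula outright. A tiny runnable restatement, with the field accesses flattened into plain parameters:

```go
package main

import "fmt"

// confirmations restates the NOD-385 rule: confirmation count is measured
// against the selected tip's blue score. The +2 offset is taken verbatim
// from the commit message.
func confirmations(selectedTipBlueScore, acceptingBlockBlueScore uint64) uint64 {
	return selectedTipBlueScore - acceptingBlockBlueScore + 2
}

func main() {
	// Accepting block at blue score 100, selected tip at blue score 105:
	fmt.Println(confirmations(105, 100)) // 7
}
```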
Ori Newman
770dfd147d [NOD-404] Calculate mass in API server (#452)
* [NOD-404] Calculate mass in API server

* [NOD-404] Fix uninitialized maps

* [NOD-404] Use txID instead of prevDBTransactionsOutput.Transaction.TransactionID
2019-11-07 10:27:12 +02:00
Ori Newman
a9ff9b0e70 [NOD-398] Change API server type HandlerError to work with errors instead of error strings (#454)
* [NOD-398] Change API server type HandlerError to work with errors instead of error strings

* [NOD-398] Rename OriginalError -> Cause and isHandleError -> ok
2019-11-06 16:58:58 +02:00
Ori Newman
3cc6f2d648 [NOD-384] Remove mass from rpc results (#449)
* [NOD-384] Remove mass from rpc results

* [NOD-384] Fix tests
2019-11-04 18:06:01 +02:00
stasatdaglabs
a8f0d7b05b [NOD-400] Fix ECHM cache crashing on concurrent access. (#450) 2019-11-04 17:38:01 +02:00
stasatdaglabs
13f06ca293 [NOD-399] Fix TxGen resending coinbase transactions. (#448) 2019-11-04 13:04:06 +02:00
Ori Newman
c88fa1492e [NOD-375] Move to pkg/errors (#447)
* [NOD-375] Move to pkg/errors

* [NOD-375] Fix tests

* [NOD-375] Make AreErrorsEqual a shared function
2019-11-04 11:24:12 +02:00
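NOD-375 above moves the codebase from the standard errors package to github.com/pkg/errors. The core idiom motivating the move — wrapping with context while preserving the cause and a stack trace — sketched with made-up names:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var errNotFound = errors.New("not found") // carries a stack trace from its creation site

// fetchBlock is a hypothetical helper showing the wrapping idiom.
func fetchBlock(hash string) error {
	// Wrapf adds context plus the call site while preserving the cause.
	return errors.Wrapf(errNotFound, "block %s", hash)
}

func main() {
	err := fetchBlock("deadbeef")
	fmt.Println(err)                              // block deadbeef: not found
	fmt.Println(errors.Cause(err) == errNotFound) // true
	fmt.Printf("%+v\n", err)                      // "%+v" also prints the recorded stack trace
}
```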
Ori Newman
40657a83f5 [NOD-344] Cache ECMH (#445)
* [NOD-134] Change newConnMtx to newConnReqMtx

* [NOD-344] Change ECMH cache size to 4e6

* [NOD-344] Refactor

* [NOD-344] Fix go.mod
2019-11-03 12:29:55 +02:00
Dan Aharoni
44dd58b461 [NOD-396] Fix updateAddedChainBlocks query to select all transactions and not the first one (#444) 2019-11-03 11:29:51 +02:00
Svarog
47891b17ab [NOD-392] If transaction is in mempool - don't try to get number of confirmations (#443) 2019-11-03 11:27:46 +02:00
Ori Newman
f7fbfbf5c4 [NOD-383] Fix updateAddedChainBlocks and updateRemovedChainHashes to update IsChainBlock and AcceptingBlockID appropriately 2019-10-31 15:47:30 +02:00
Ori Newman
0e278ca22b [NOD-350] Implement testnet faucet (#438)
* [NOD-350] Implement testnet faucet

* [NOD-350] Add JSON annotations to api server response types

* [NOD-350] Fix IP check query, update IP usage with upsert, and make IP a primary key

* [NOD-377] Remove redundant float conversion

* [NOD-377] Change not current database error message

* [NOD-377] change API route from /money_request to /request_money

* [NOD-377] Add a constant for 24 hours

* [NOD-377] Remove redundant call for getWalletUTXOSet()

* [NOD-377] Condition refactoring

* [NOD-377] Fix POST request to API server content type

* [NOD-350] Rename day -> timeBetweenRequests

* [NOD-377] Rename timeBetweenRequests -> minRequestInterval, timeBefore24Hours -> minRequestInterval

* [NOD-350] Rename file responsetypes -> response_types

* [NOD-350] Rename convertTxModelToTxResponse -> convertTxDBModelToTxResponse

* [NOD-350] Explicitly select blue_score in fetchSelectedTipBlueScore

* [NOD-350] Refactor and add comments

* [NOD-350] Make calcFee use MassPerTxByte

* [NOD-350] Convert IP column to varchar(39) to allow ipv6 addresses

* [NOD-350] Add comments to isFundedAndIsChangeOutputRequired

* [NOD-350] Remove approximateConfirmationsForCoinbaseMaturity

* [NOD-350] Fix comments
2019-10-31 11:59:56 +02:00
Ori Newman
c66fb294c8 [NOD-377] Don't disconnect from peers with finalized rendezvous point. Instead remove them from sync candidates. (#439) 2019-10-30 17:18:46 +02:00
stasatdaglabs
88b7e7ca03 [NOD-394] Add --cleanup to ./run-dev.sh (#441)
* [NOD-394] Rename --only-build to --no-run.

* [NOD-394] Allow --rm and --no-build to be run together with --no-run.

* [NOD-394] Make --cleanup an alias for --rm --no-run --no-build.

* [NOD-394] Fix typo in usage string.

* [NOD-394] Set docker/docker-compose.yaml to use devnet instead of testnet.
2019-10-30 17:12:48 +02:00
stasatdaglabs
a9b659a36f [NOD-393] Force docker-compose to actually cleanup before it runs when running ./run-dev.sh --rm. (#440) 2019-10-30 14:43:23 +02:00
stasatdaglabs
90fc6ba3e7 [NOD-376] Fix invalid orphan blocks causing their valid parents to be rejected (#436)
* [NOD-376] Made bad unorphaned blocks not reject the original block.

* [NOD-376] Fix wording in a comment.

* [NOD-376] Add a test to make sure that bad child blocks don't invalidate valid parent blocks.

* [NOD-376] Clarify comments and don't check PoW for child block (it's irrelevant for this test case).
2019-10-29 15:54:59 +02:00
stasatdaglabs
8ea97aa3fd [NOD-356] Add indication in getBlock that a block is an orphan. (#437) 2019-10-29 15:53:56 +02:00
Dan Aharoni
7c9f5a65d8 [NOD-374] Create transaction signer tool (#435)
* [NOD-374] Create transaction signer tool

* [NOD-374] Rename variables

* [NOD-374] Add network selection; Move error handling to control-flow function

* [NOD-374] Rename variables

* [NOD-374] Add new line after if blocks

* [NOD-374] Fix formatting error (gofmt)
2019-10-29 15:00:03 +02:00
stasatdaglabs
e2d3c4c821 [NOD-378] Add --no-build and --only-build to run-dev.sh. (#434) 2019-10-28 14:57:14 +02:00
Dan Aharoni
92578e2853 [NOD-368] correct text output of address generator (#433)
* [NOD-368] correct text output of address generator (address instead of public key)

* [NOD-368] add hash160 to output of genaddr

* [NOD-368] change encoding to hexadecimal

* [NOD-368] fix formatting
2019-10-24 18:00:06 +03:00
Dan Aharoni
3018c18616 [NOD-362] Calculate txgen fees by mass instead of size (#431)
* [NOD-362] Calculate txgen fee by mass

* formatting

* [NOD-362] add varint size to calculation

* [NOD-362] rename variables

* [NOD-362] formatting
2019-10-22 18:08:19 +03:00
Ori Newman
3ac9fa83c1 [NOD-367] Propagate transactions every 100 ms (#432) 2019-10-22 15:17:18 +03:00
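NOD-367 above batches transaction relay onto a fixed 100 ms tick instead of announcing each transaction immediately. A hedged sketch of such a loop (channel names and the printed "inv" are stand-ins):

```go
package main

import (
	"fmt"
	"time"
)

// relayLoop sketches the batching: queued transaction IDs are flushed
// at most once every 100ms rather than one inv message per transaction.
func relayLoop(txQueue <-chan string, quit <-chan struct{}) {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()

	var pending []string
	for {
		select {
		case txID := <-txQueue:
			pending = append(pending, txID)
		case <-ticker.C:
			if len(pending) > 0 {
				fmt.Printf("relaying inv with %d txs: %v\n", len(pending), pending)
				pending = nil
			}
		case <-quit:
			return
		}
	}
}

func main() {
	txQueue := make(chan string, 8)
	quit := make(chan struct{})
	go relayLoop(txQueue, quit)

	txQueue <- "tx1"
	txQueue <- "tx2"
	time.Sleep(250 * time.Millisecond) // let at least one tick fire
	close(quit)
}
```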
Dan Aharoni
c5b0398dac [NOD-357] change finality interval (#430)
* change finality interval to 1000 (~16.6 minute interval)

* [NOD-357] define finality interval in dagParams instead of using a constant.

* use dagParams for FinalityInterval instead of constant

* override parameter so test would pass on CI (Jenkins machine runs out of memory if we use 1000)

* formatting the code
2019-10-16 15:32:10 +03:00
Ori Newman
76f23d8a9b [NOD-359] Calculate Transaction mass from previous scriptPubKeys (#429)
* [NOD-359] Calculate Transaction mass from previous scriptPubKeys

* [NOD-359] Add missing block errors
2019-10-15 13:03:16 +03:00
stasatdaglabs
089cee0e1d [NOD-352] Create a Dockerfile for APIServer (#428)
* [NOD-352] Created a Dockerfile for APIServer.

* [NOD-352] Removed unnecessary testing stuff from the APIServer and DNSSeeder Dockerfiles.
2019-10-13 14:07:44 +03:00
stasatdaglabs
982340456d [NOD-361] Fixed Nil dereference in connmgr/seed.go. (#427) 2019-10-08 13:04:07 +03:00
stasatdaglabs
13cf1f7715 [NOD-360] Renamed TestNet3 to TestNet. (#426) 2019-10-08 12:59:54 +03:00
stasatdaglabs
d99af7424c [NOD-353] Fixed passing wrong argument in handleGetRawTransaction. (#424)
* [NOD-353] Fixed passing wrong argument in handleGetRawTransaction.

* [NOD-353] Renamed mtx to msgTx.
2019-10-07 16:14:53 +03:00
Dan Aharoni
40ad9c5d2b [NOD-355] remove 'flushDbCache' and 'getBlockHash' rpc commands (#425)
* [NOD-355] remove 'flushDbCache' and 'getBlockHash' rpc commands

* [NOD-355] remove 'flushDbCache' and 'getBlockHash' from rpc server help
2019-10-07 14:07:20 +03:00
Ori Newman
9dfc3091b4 [NOD-134] Don't connect to an address from the same 16 CIDR (#423)
* [NOD-134] Don't connect to an address from the same 16 CIDR

* [NOD-134] Rename outboundPeerConnected variables

* [NOD-134] Change newConnMtx to newConnReqMtx
2019-10-06 15:53:58 +03:00
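The /16 rule from NOD-134 above boils down to two IPv4 addresses sharing their first two octets. A small sketch of the check (the function name is illustrative):

```go
package main

import (
	"fmt"
	"net"
)

// sameSixteenCIDR reports whether two IPv4 addresses fall in the same
// /16 network, i.e. share their first two octets.
func sameSixteenCIDR(a, b net.IP) bool {
	a4, b4 := a.To4(), b.To4()
	if a4 == nil || b4 == nil {
		return false // this sketch handles IPv4 only
	}
	return a4[0] == b4[0] && a4[1] == b4[1]
}

func main() {
	fmt.Println(sameSixteenCIDR(net.ParseIP("203.0.113.5"), net.ParseIP("203.0.200.9")))  // true — both 203.0.x.x
	fmt.Println(sameSixteenCIDR(net.ParseIP("203.0.113.5"), net.ParseIP("198.51.100.9"))) // false
}
```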
Ori Newman
e6a4ed04f3 [NOD-338] recover indexer if it didn't work for a while (#422)
* [NOD-338] Recover indexer if it didn't work for a while

* [NOD-338] Recover indexer if it didn't work for a while

* [NOD-338] Recover indexer if it didn't work for a while

* [NOD-338] Add tests and move blockidhash.go to blockdag package

* [NOD-338] Delete index current block id when dropping index, and do some refactoring

* [NOD-338] Change comments

* [NOD-338] Change recover error messages

* [NOD-338] Fix comments

* [NOD-338] Fix comments and fix test name
2019-09-26 18:19:58 +03:00
stasatdaglabs
e3aa8d65dc [NOD-337] In CheckTransactionSanity make max mass of transaction to be half of block max mass (#421)
* [NOD-337] In CheckTransactionSanity, made max mass of transaction to be half of block max mass.

* [NOD-337] Added a comment for MaxMassPerTx.

* [NOD-337] Fixed a couple of comments.
2019-09-24 14:27:04 +03:00
stasatdaglabs
ece0fb83e8 [NOD-342] Renamed CHAN to BDAG. (#420) 2019-09-24 11:42:52 +03:00
stasatdaglabs
683830d574 [NOD-341] Made ChainChanged not fire if the acceptance index is off. (#419) 2019-09-22 17:59:20 +03:00
stasatdaglabs
c5108a4abd [NOD-276] Extracted p2p onXXX methods to separate files. (#418) 2019-09-22 17:40:10 +03:00
stasatdaglabs
40342eb45a [NOD-275] Split rpcserver.go to separate files (#417)
* [NOD-275] Moved getBlockTemplate and related functionality to a separate file.

* [NOD-275] Started moving handlers to separate files.

* [NOD-275] Fixed merge errors.

* [NOD-275] Moved all handlers out of rpcserver.go.

* [NOD-275] Moved non-shared functions out of rpcserver.go.

* [NOD-275] Moved handleGetAllManualNodesInfo to a separate file.

* [NOD-275] Moved handlers out of rpcwebsocket.go to separate files.

* [NOD-275] Fixed import error.

* [NOD-275] Renamed all handler files to include underscores.

* [NOD-275] Moved common rpc helper functions to common.go.
2019-09-22 16:41:37 +03:00
stasatdaglabs
adf4b4380e [NOD-289] Implement API-Server bootstrapping and booting after downtime (#408)
* [NOD-289] Implemented database isCurrent checking and connection.

* [NOD-289] Added GetChainFromBlock to RPCClient.

* [NOD-289] Limited the amount of blocks in GetChainFromBlockResponse.

* [NOD-289] Fixed various issues that were keeping GetChainFromBlocks from working properly.

* [NOD-289] Created blockloop.go.

* [NOD-289] Updated go.mod after merge.

* [NOD-289] Implemented collection of current selected parent chain.

* [NOD-289] Fixed test. Reverted not deleting utxoDiffData from the DB.

* [NOD-289] Implemented GetBlocks.

* [NOD-289] Added comment to BlockHashesFrom.

* [NOD-289] Added GetBlocks to rpcclient.

* [NOD-289] Added verboseBlocks to GetBlocks.

* [NOD-289] Implemented block insertion.

* [NOD-289] Added AUTO_INCREMENT to tables that were missing it.

* [NOD-289] Made gasLimit in subnetwork nullable.

* [NOD-289] Renamed transactions_outputs to transaction_outputs.

* [NOD-289] Fixed weird coinbase behavior in vin.

* [NOD-289] Made collectCurrentBlocks start from the most recent startHash.

* [NOD-289] Added IsChainBlock to GetBlockVerboseResult.

* [NOD-289] Implemented adding a block from onBlockAdded.

* [NOD-289] Added removedParentChainHashes to getChainFromBlock.

* [NOD-289] Implemented updating the selected parent chain from onChainChanged.

* [NOD-289] Implemented some initial logic for updating the UTXO.

* [NOD-289] Fixed merge errors.

* [NOD-326] Fixed some more merge errors.

* [NOD-289] Added error handling for missing required records.

* [NOD-289] Implemented handling removedChainHashes.

* [NOD-289] Implemented handling addedChainBlocks.

* [NOD-289] Fixed incorrect coinbase check.

* [NOD-289] Implemented inserting the transaction output address.

* [NOD-289] Added updating block.IsChainBlock.

* [NOD-289] Split insertBlock into many small functions.

* [NOD-289] Split updateSelectedParentChain into smaller functions.

* [NOD-289] Fixed pointer errors.

* [NOD-289] Fixed a bad exists check.

* [NOD-289] Fixed a couple of small bugs.

* [NOD-289] Fixed a TxID/Hash mixup.

* [NOD-289] Added block/tx mass to getBlockVerboseResponse.

* [NOD-289] Renamed blockLoop.go to sync.go. Added comments.

* [NOD-289] Deleted apiserver README.

* [NOD-289] Fixed golint errors.

* [NOD-289] Renamed findMostRecentBlockHash to findHashOfBluestBlock.

* [NOD-289] Fixed style in syncBlocks and fixed a comment.

* [NOD-289] Copied NewErrorFromDBErrors over from NOD-324.

* [NOD-289] Created a couple of utils to make error handling with gorm slightly less painful.

* [NOD-289] Added error handling for database calls.

* [NOD-289] Fixed some more style/comments.

* [NOD-289] Fixed comments.

* [NOD-289] Renamed TransactionInput.TransactionOutput to TransactionInput.PreviousTransactionOutput.

* [NOD-289] Added a comment about pagination in getBlocks and getChainFromBlock.

* [NOD-289] Removed the coinbase field from Vin.

* [NOD-289] Deferred handling chainChangedMsgs until we have the appropriate data.

* [NOD-289] Optimized queries in updateRemovedChainHashes and updateAddedChainBlocks.

* [NOD-289] Optimized queries in insertBlockParents.

* [NOD-289] Optimized queries in insertTransactionInput.

* [NOD-289] Split Where calls to separate lines.

* [NOD-289] Fixed merge errors.

* [NOD-289] Exited early from insertBlockParents if we're the genesis block.

* [NOD-289] Improved nextChainChangedChan mechanism.

* [NOD-289] Fixed the above sync mechanism a bit.

* [NOD-289] Renamed IsDBRecordNotFoundError to HasDBRecordNotFoundError and IsDBError to HasDBError.

* [NOD-289] Replaced old error handling for db errors with the lovely new stuff.

* [NOD-289] Exited early if we already inserted a block. This saves us checking if a record already exists for some record types.

* [NOD-289] Decoupled syncBlocks from syncSelectedParentChain.

* [NOD-289] Made a comment more explicit.

* [NOD-289] Extracted net resolution to a separate function.

* [NOD-289] Extracted syncing to a separate function.

* [NOD-289] Fixed a comment.

* [NOD-289] Fixed merge errors.

* [NOD-289] Fixed a couple of bugs.

* [NOD-289] Fixed another bug.

* [NOD-289] Extracted ChainChangedMsg conversion to a separate function.

* [NOD-289] Optimized queries in canHandleChainChangedMsg.

* [NOD-289] Moved the sync function closer to its call site.

* [NOD-289] Renamed HasDBRecordNotFoundError to IsDBRecordNotFoundError.

* [NOD-289] Used count instead of first.

* [NOD-289] Renamed address to hexAddress.
2019-09-22 13:14:51 +03:00
Ori Newman
7371120481 [NOD-333] Make ExtractScriptPubKeyAddrs return single address (#415)
* [NOD-333] Make ExtractScriptPubKeyAddrs return single address

* [NOD-333] Remove reference to required signatures from ExtractScriptPubKeyAddrs
2019-09-19 11:19:51 +03:00
stasatdaglabs
1064b5009d [NOD-315] Implement acceptance index (#413)
* [NOD-315] Created acceptanceindex.go including boilerplate.

* [NOD-315] Disallowed calls to notifyChainChanges and getChainFromBlock if the acceptance index is not on.

* [NOD-315] Implemented the acceptance index.

* [NOD-315] Fixed serialization/deserialization. Added test.

* [NOD-315] Fixed/added comments.

* [NOD-315] Fixed copy/paste errors.

* [NOD-315] Added an empty line for readability.
2019-09-19 10:38:33 +03:00
stasatdaglabs
850876e6a7 [NOD-335] Don't print stack-trace when cli flags are invalid (#416)
* [NOD-335] Made it not write a stack trace if the command line flags are wrong.

* [NOD-335] Fixed panic not printing the right error.

* [NOD-335] Removed code duplication.
2019-09-18 17:22:32 +03:00
Svarog
d4083cbdbe [NOD-309] post transaction (#403)
* [NOD-319] Add query params to api server route handler

* Temp commit

* [NOD-322] Make database.DB a function

* [NOD-322] Move context to be the first parameter in all functions

* [NOD-322] Set db to nil on database.Close()

* [NOD-322] Tidy go.mod/go.sum

* [NOD-323] Move rpc-client to separate package

* [NOD-309] Add controller for POST /transaction

* [NOD-309] Added route for POST /transaction

* [NOD-309] in POST /transaction: Forward reject errors to client

* [NOD-309] Added custom client messages to errors in POST /transaction

* [NOD-309] Use utils.NewInternalServerHandlerError where appropriate
2019-09-18 16:09:48 +03:00
Ori Newman
47c5eddf38 [NOD-329] Separate connect timeout and request timeout to JSON-RPC server (#411) 2019-09-18 15:01:31 +03:00
Ori Newman
f6a6508eff [NOD-328] Make API server mainHandler return an object (#412) 2019-09-18 14:47:59 +03:00
Ori Newman
a036618b44 [NOD-324] Properly handle GORM errors in API server (#409)
* [NOD-324] Properly handle GORM errors in API server

* [NOD-324] Handle RecordNotFound error in GetBlockByHashHandler

* [NOD-324] Make a separate function for NewErrorFromDBErrors
2019-09-18 14:09:07 +03:00
Ori Newman
2429b623fc [NOD-327] Add --migrate cli flag to API server (#407)
* [NOD-327] Add --migrate cli flag to API server

* [NOD-327] Change log messages

* [NOD-327] Remove `required` flag from API server RPC CLI arguments

* [NOD-327] Add database version in migrations logs
2019-09-18 13:51:20 +03:00
Ori Newman
f4850b9e7a [NOD-330] Use BTCD logs for gorm (#410) 2019-09-18 11:47:54 +03:00
Ori Newman
e81ac5f19e [NOD-307] Implement get blocks for api server (#405)
* [NOD-307] Implement API-Server GET /blocks

* [NOD-307] Implement API-Server GET /blocks

* [NOD-307] Add comments to exported constants

* [NOD-307] Flatten GET query values and check that 'order' value is valid

* [NOD-307] Validate order values in GetBlocksHandler

* [NOD-307] Add convertQueryParamToInt function
2019-09-16 16:53:57 +03:00
Ori Newman
31ccedf136 [NOD-325] Enable separate error messages for logging and client (#406)
* [NOD-325] Enable separate error messages for logging and client

* [NOD-325] Add json annotation to clientError
2019-09-16 13:26:05 +03:00
Svarog
502b510ccd [NOD-322] Minor api-server refactoring. (#401)
* [NOD-322] Make database.DB a function

* [NOD-322] Move context to be the first parameter in all functions

* [NOD-322] Set db to nil on database.Close()

* [NOD-322] Tidy go.mod/go.sum

* [NOD-322] Use http package const + message for StatusInternalServerError
2019-09-15 12:32:12 +03:00
stasatdaglabs
369031f963 [NOD-326] Replaced the UTXOs table with TransactionOutputs.isSpent (#404)
* [NOD-326] Replaced UTXO table with TransactionOutput.IsSpent.

* [NOD-326] Fixed merge errors.
2019-09-15 12:04:51 +03:00
Ori Newman
a789680db1 [NOD-314] change pkscript to scriptpubkey (#400)
* [NOD-314] Change everywhere PkScript to ScriptPubKey

* [NOD-314] Change everywhere PkScript to ScriptPubKey

* [NOD-314] Rename pkPops -> scriptPubKeyPops
2019-09-15 11:09:36 +03:00
Svarog
90bda69931 [NOD-323] Move rpc-client to separate package (#402) 2019-09-15 10:21:42 +03:00
Svarog
9647cb3e08 [NOD-318] Upgrade everything to Go1.13 (#393) 2019-09-09 15:57:31 +03:00
Ori Newman
79c9060909 [NOD-308] Implement API-Server GET /fee-estimates (#399) 2019-09-09 11:21:56 +03:00
Svarog
20206789e0 [NOD-299] Add waitgroup to wait for all spawns to complete before calling teardown (#385)
* [NOD-299] Add waitgroup to wait for all `spawn`s to complete before calling teardown

* [NOD-299] Restore spawn on teardown + mark spawn done in the correct thread
2019-09-09 11:02:31 +03:00
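NOD-299 above wraps the project's spawn helper so a WaitGroup tracks every spawned goroutine and teardown can wait for all of them. A minimal sketch (the real helper likely does more, e.g. panic handling):

```go
package main

import (
	"fmt"
	"sync"
)

var spawnWaitGroup sync.WaitGroup

// spawn registers every goroutine started through it in a package-level
// WaitGroup, so teardown can block until all spawned goroutines finish.
func spawn(f func()) {
	spawnWaitGroup.Add(1)
	go func() {
		// Done is marked inside the spawned goroutine itself
		// ("the correct thread", per the follow-up bullet).
		defer spawnWaitGroup.Done()
		f()
	}()
}

func main() {
	for i := 0; i < 3; i++ {
		i := i
		spawn(func() { fmt.Println("worker", i) })
	}
	spawnWaitGroup.Wait() // teardown-safe: all spawns have completed
	fmt.Println("teardown")
}
```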
Ori Newman
1ddae35277 [NOD-305] Implement API-Server GET /utxos/{address} (#398)
* [NOD-305] Implement API-Server GET /utxos/{address}

* [NOD-305] Add accepting block blue score to the resulted utxo
2019-09-08 16:58:28 +03:00
Ori Newman
75a8c6459a [NOD-306] Implement API-Server GET /block/{hash} (#397)
* [NOD-306] Implement API-Server GET /block/{hash}

* [NOD-306] Validate that hash string is a valid hex

* [NOD-306] Unite invalid hash errors
2019-09-08 16:09:09 +03:00
Ori Newman
7fc2430ab1 [NOD-304] Implement get transactions by address for api server (#395)
* [NOD-303] Implement get transactions by address for API server

* [NOD-304] Implement get transactions by address

* [NOD-304] Implement get transactions by address

* [NOD-304] Use structs for where if possible

* [NOD-304] Auto increment IDs

* [NOD-304] Make defaultGetTransactionsLimit constant

* [NOD-304] Delete db directory

* [NOD-304] change db var to query

* [NOD-304] Extract route handle function from addRoutes

* [NOD-304] Order transactions by ID

* [NOD-304] Add error for passing arrays to GET
2019-09-08 10:52:03 +03:00
Ori Newman
cf9af0fb5d [NOD-320] Make txgen explicitly skip mempool transactions (#396) 2019-09-05 16:14:55 +03:00
Ori Newman
db6d6293c7 [NOD-319] Add query params to api server route handler (#394) 2019-09-04 17:34:36 +03:00
Ori Newman
ae25ec2e6b [NOD-303] Implement get transaction by id for api server (#391)
* [NOD-303] Implement get transaction by id for api server

* [NOD-303] Make routeParamTxID a constant

* [NOD-303] Change database is not current error.

* [NOD-303] Add ID to TransactionInput and TransactionOutput models

* [NOD-303] Change transactions_outputs table name to transaction_outputs and transactions_inputs to transaction_inputs

* [NOD-303] Add json annotations to transaction response types

* [NOD-303] Split server package

* [NOD-303] Add GetTransactionByHashHandler

* [NOD-303] Add comments to exported functions and variables

* [NOD-303] Put response types in a separate file

* [NOD-303] Rename functions
2019-09-03 15:54:59 +03:00
Svarog
7521545682 [NOD-311] Removed JSONRPCfyer (#392) 2019-09-03 11:24:25 +03:00
Ori Newman
169e96e851 [NOD-310] Implement REST server in API server (#389)
* [NOD-310] Implement REST server in API server

* [NOD-310] MetaData -> Metadata

* [NOD-310] Make custom context methods instead of custom request functions

* [NOD-310] change "Request ID" prefix to "RID" and convert to apiServerContext with newAPIServerContext everywhere
2019-09-01 17:03:43 +03:00
Ori Newman
893b8a88c8 [NOD-312] Change defaultMinRelayTxFee to 1 satoshi per byte (#390) 2019-09-01 15:21:24 +03:00
Svarog
c60711ab15 [NOD-302] Accept ConnectionEstablished when Pending is expected in TestRetryPermanent (#388) 2019-08-29 15:27:03 +03:00
Ori Newman
1b00e01030 [NOD-301] Don't sync with peer if the rendezvous point is below finality (#387)
* [NOD-301] Don't sync with peer if the rendezvous point is below finality

* [NOD-301] Add block hash and peer address for the warn message

* [NOD-301] Fix peerLog.Warnf arguments order
2019-08-29 10:47:05 +03:00
Ori Newman
f0c80905eb [NOD-300] If node has invalid ancestor set the corresponding status in blockindex (#386)
* [NOD-300] If node has invalid ancestor set the corresponding status in blockindex

* [NOD-300] Test that status is also updated for grandchild of an invalid block

* [NOD-300] change make(blockSet) to newSet()
2019-08-28 17:44:33 +03:00
stasatdaglabs
b07a118431 [NOD-292] When writing block to database - also create record in block index (#376)
* [NOD-292] In accept.go, made dbStoreBlock and flushToDB occur within the same transaction.

* [NOD-292] Implemented processing blocks that were not validated on BTCD start.

* [NOD-292] Fixed processing logic on init. Added a test for it.

* [NOD-292] Fixed some comments.

* [NOD-292] Made unlocks deferred in a couple of places.

* [NOD-292] Made unprocessed block reprocess via ProcessBlock rather than maybeAcceptBlock.

* [NOD-292] Fixed grammar in comment. Added an explanation to TestAcceptingInInit.

* [NOD-292] Split flushToDB into two versions.

* [NOD-292] Fixed a bad assignment.

* [NOD-292] Fixed bad spacing.
2019-08-28 14:52:57 +03:00
Ori Newman
0ae06cd277 [NOD-297] fix onchainchanged on rpcclient (#384)
* [NOD-297] Fix onChainChanged on rpcclient

* [NOD-285] create gorm models for db (#378)

* [NOD-285] Map API-Server database using GORM

* [NOD-285] Add accepting block to transactions and blocks models, and remove accepting block model

* [NOD-285] Define model relations

* [NOD-285] Fix many to many for Transaction and Block models

* [NOD-285] Remove redundant main file

* [NOD-296] Send SyncMgr.SubmitBlock errors as rpc errors (#381)

* [NOD-296] Send SyncMgr.SubmitBlock errors as rpc errors

* [NOD-296] Add error message prefix

* [NOD-298] Add comments to gorm models (#382)

* [NOD-294] Fix golint in deploy.sh and fix all lint warnings (#380)

* [NOD-294] Fix golint in deploy.sh and fixed all lint errors

* [NOD-294] Fix typos in comments

* [NOD-294] Convert VirtualForTest into alias of *virtualBlock

* [NOD-294] Fixed some more typos in comments

* [NOD-295] Limit the length of GetData to 50 (#383)

* [NOD-295] Fixed bad break condition in addInvsToGetDataMessageFromQueue.

* [NOD-295] Fixed the fix for bad break condition in addInvsToGetDataMessageFromQueue.

* [NOD-295] Made the check for max invs refer to invsNum instead of MaxInvPerGetDataMsg.

* [NOD-297] Fix onChainChanged on rpcclient

* [NOD-286] Implement API-Server base structure (#379)

* [NOD-286] Implement API-Server base structure

* [NOD-286] Add rpc user and password as command line arguments

* [NOD-286] Make log directory a CLI argument

* [NOD-286] Add db login details as CLI arguments

* [NOD-297] Fix onChainChanged on rpcclient and server

* [NOD-297] Fix variables and functions names

* [NOD-297] Fix AcceptedTxIds -> AcceptedTxIDs
2019-08-28 12:52:07 +03:00
Ori Newman
ed9165f533 [NOD-286] Implement API-Server base structure (#379)
* [NOD-286] Implement API-Server base structure

* [NOD-286] Add rpc user and password as command line arguments

* [NOD-286] Make log directory a CLI argument

* [NOD-286] Add db login details as CLI arguments
2019-08-27 16:19:01 +03:00
stasatdaglabs
c73113a12e [NOD-295] Limit the length of GetData to 50 (#383)
* [NOD-295] Fixed bad break condition in addInvsToGetDataMessageFromQueue.

* [NOD-295] Fixed the fix for bad break condition in addInvsToGetDataMessageFromQueue.

* [NOD-295] Made the check for max invs refer to invsNum instead of MaxInvPerGetDataMsg.
2019-08-27 13:09:36 +03:00
Svarog
480b2ca07c [NOD-294] Fix golint in deploy.sh and fix all lint warnings (#380)
* [NOD-294] Fix golint in deploy.sh and fixed all lint errors

* [NOD-294] Fix typos in comments

* [NOD-294] Convert VirtualForTest into alias of *virtualBlock

* [NOD-294] Fixed some more typos in comments
2019-08-27 12:00:23 +03:00
Ori Newman
c72b914050 [NOD-298] Add comments to gorm models (#382) 2019-08-27 11:48:43 +03:00
Ori Newman
5cf7f01d3f [NOD-296] Send SyncMgr.SubmitBlock errors as rpc errors (#381)
* [NOD-296] Send SyncMgr.SubmitBlock errors as rpc errors

* [NOD-296] Add error message prefix
2019-08-27 11:25:45 +03:00
Ori Newman
552a5917c2 [NOD-285] create gorm models for db (#378)
* [NOD-285] Map API-Server database using GORM

* [NOD-285] Add accepting block to transactions and blocks models, and remove accepting block model

* [NOD-285] Define model relations

* [NOD-285] Fix many to many for Transaction and Block models

* [NOD-285] Remove redundant main file
2019-08-27 11:25:07 +03:00
stasatdaglabs
5c14719f14 [NOD-295] Capped amount of invs in a getData message. (#377) 2019-08-26 15:12:55 +03:00
Svarog
d2353a189a [NOD-291] Remove database check from dag.BlockExists (#375) 2019-08-25 15:00:16 +03:00
Ori Newman
4fcd705ae3 [NOD-290] Remove block_id from transactions table (#374)
* [NOD-290] Remove block_id from transactions table

* [NOD-290] Remove block_id foreign key from transactions table
2019-08-25 10:23:03 +03:00
Ori Newman
744c17b4c8 [NOD-280] Create database for API server (#373)
* [NOD-280] Create database for API server

* [NOD-280] Rename public_key_script to pk_script

* [NOD-280] Change indexes names

* [NOD-280] Add accepting block to blocks and transactions table and remove accepting_blocks table

* [NOD-280] Add readme

* [NOD-280] Change VARCHAR(32) to CHAR(64)

* [NOD-280] Rename location_in_block to index
2019-08-22 17:21:09 +03:00
stasatdaglabs
e2eca24b33 [NOD-282] Fixed Telegram messages not being sent. (#371) 2019-08-22 13:41:48 +03:00
stasatdaglabs
36d5ac189f [NOD-283] Fixed a crash in notifyChainChanged. (#372) 2019-08-22 13:23:30 +03:00
stasatdaglabs
1a569c7bd7 [NOD-270] Implement NotifyChainUpdates api call (#368)
* [NOD-270] Added notifyChainChanges and related commands.

* [NOD-270] Added NTChainChanged to blockdag.

* [NOD-270] Implemented collection and sending of ChainChanged notifications.

* [NOD-270] Fixed an improperly named test.

* [NOD-270] Added a test: TestChainChangedNotification.

* [NOD-270] Fixed a couple copy+paste errors.

* [NOD-270] Added a couple of comments for TestChainChangedNotification.

* [NOD-270] Fixed formatting error.

* [NOD-270] Fixed a comment.

* [NOD-270] Uncoupled chain updates inside blockdag from the concept of a notification.

* [NOD-270] Removed intermediary ChainUpdates object from ChainChangedNotificationData.
2019-08-21 12:58:32 +03:00
Ori Newman
6bb53eaae3 [NOD-256] add error log (#369)
* [NOD-256] Add error log

* [NOD-256] Add error log

* [NOD-256] Fix typo and comment

* [NOD-256] Remove btclog dir

* [NOD-256] Format project

* [NOD-256] Add error log files

* [NOD-256] Add an option to add a log file to an existing backend logger

* [NOD-256] Get rid of redundant logs initialization

* [NOD-256] rename initLogRotators to initLog

* [NOD-256] Get rid of ExampleSignTxOutput and convert ExampleBlockDAG_ProcessBlock to a regular test

* [NOD-256] Show error message if os.Exiting from initLog
2019-08-21 11:26:21 +03:00
Svarog
747a9bb944 [NOD-278] Added default for MinRelayTxFee (#367) 2019-08-19 17:52:58 +03:00
Ori Newman
d2daf334a5 [NOD-241] Implement lower resolution peer rendezvous point discovery (#353)
* [NOD-241] Implement lower resolution peer rendezvous point discovery

* [NOD-241] Implement lower resolution peer rendezvous point discovery

* [NOD-241] Find exact rendezvous point

* [NOD-241] Find exact rendezvous point

* [NOD-241] Fix tests

* [NOD-241] Remove hash stop from MsgBlockLocator and add tests to MsgBlockLocator and MsgGetBlockLocator

* [NOD-241] Change everywhere startHash to hashStart and change comments

* [NOD-241] Fix locateBlockNodes to stop at hashStop

* [NOD-241] Formatted locatorSummary.

* [NOD-241] Fix node reversal

* [NOD-241] Fix hash start and hash stop order, and don't include startNode in dag.blockLocator

* [NOD-241] rename locateBlockNodes -> getBlueBlocksBetween and add a comment to it

* [NOD-241] change hash start to start hash and hash stop to stop hash

* [NOD-241] Move block locator stuff to a different file

* [NOD-241] Rename msggetblocks.go to msggetblockinvs.go

* [NOD-241] Format project

* [NOD-241] Rename rpcserverSyncManager.LocateHeaders to GetBlueBlocksHeadersBetween

* [NOD-241] Move the logic of finding the highest shared block to OnBlockLocator

* [NOD-241] Rename chainHeight -> nextChainHeight

* [NOD-241] Fix typo in comment
2019-08-19 15:35:13 +03:00
stasatdaglabs
70737e4e94 [NOD-264] Implement tx-selection algorithm (#358)
* [NOD-264] Implemented calcTxSelectionValue.

* [NOD-264] Fixed bad subnetworkID in calcTxSelectionValue.

* [NOD-264] Implemented sorting the txDescs by value.

* [NOD-264] Got rid of txPrioItem.

* [NOD-264] Moved transaction selection to a separate file.

* [NOD-264] Renamed the result object to txsForBlockTemplate.

* [NOD-264] Implemented tx selection.

* [NOD-264] Fixed trying to get the gas limit for built-in subnetworks.

* [NOD-264] Wrote comments where appropriate.

* [NOD-264] Moved calcTxSelectionValue to the mining package. (Non-mining nodes shouldn't be forced to calc selection value for every transaction)

* [NOD-264] Wrote a test for selectTxs.

* [NOD-264] Fixed a comment.

* [NOD-264] Fixed misunderstood test.

* [NOD-264] Added zero fee check. Added a couple more tests.

* [NOD-264] Added probabilistic tests. Fixed a couple of bugs in tx selection.

* [NOD-264] Fixed tests with missing fees.

* [NOD-264] Added a test over a range of txs with different gas/mass.

* [NOD-264] Added expected probability to the rest of the test cases.

* [NOD-264] Tightened bounds in probability test.

* [NOD-264] Fixed values in probability test.

* [NOD-264] Added a comments for alpha and rebalanceThreshold.

* [NOD-264] Fixed a couple of comments, renamed result to txsForBlockTemplate.

* [NOD-264] Removed an irrelevant comment. Changed Tracef to Warnf in some logs.

* [NOD-264] Renamed selectionValue -> txValue.

* [NOD-264] Moved rebalancing to the start of the tx selection loop.

* [NOD-264] Added overflow check for gasUsage.

* [NOD-264] Renamed blockSigOps and blockMass to totalSigOps and totalMass.

* [NOD-264] Removed the need to pass usedCount to rebalanceCandidates. Also relaxed bounds in a test.

* [NOD-264] Split selectTxs into smaller functions. Also relaxed bounds in a test some more.

* [NOD-264] Added a comment for findTx.

* [NOD-264] Ordered candidateTxs by subnetwork instead of txValue.

* [NOD-264] Disallowed zero tx fees in mempool and config. Renamed iterateCandidateTxs to populateTemplateFromCandidates.

* [NOD-264] Changed isFinalizedTransaction log level from Warn to Debug.

* [NOD-264] Removed references to SigOps in txSelection.

* [NOD-264] Removed SigOps validation. Validating mass should suffice.

* [NOD-264] Renamed wasUsed to isMarkedForDeletion.

* [NOD-264] Renamed markCandidateTxUsed to markCandidateTxForDeletion.

* [NOD-264] Made some probabilistic tests less likely to fail when they shouldn't.

* [NOD-264] Added a message warning people about probabilistic tests.

* [NOD-264] Rephrased a comment about rebalanceThreshold.

* [NOD-264] Removed IsCoinBase, CheckTransactionInputsAndCalulateFee, and ValidateTransactionScripts from txSelection.

* [NOD-264] Removed a condition that is no longer relevant.

* [NOD-264] "which's" -> "whose"

* [NOD-264] Removed wasteful preallocations.

* [NOD-264] Fixed a comment referring to "used" transactions.
2019-08-19 12:08:48 +03:00
stasatdaglabs
5f49115cac [NOD-269] Implement GetChainFromBlock api-call (#364)
* [NOD-269] Added a skeleton for getChainFromBlock.

* [NOD-269] Made startHash and includeBlocks optional.

* [NOD-269] Implemented chainBlock collection.

* [NOD-269] Extracted GetBlockVerboseResult building to its own method.

* [NOD-269] Implemented the IncludeBlocks part of GetChainFromBlock.

* [NOD-269] Added a comment for NewGetChainFromBlockCmd.

* [NOD-269] Made IsInSelectedPathChain return an error.

* [NOD-269] Fixed a very wrong comment.

* [NOD-269] Made SelectedPathChain allocate only the required amount of space.

* [NOD-269] Renamed pathChain to parentChain.

* [NOD-269] Split handleGetChainFromBlock to separate functions.

* [NOD-269] Fixed some grammar.
2019-08-18 13:31:54 +03:00
stasatdaglabs
534cb2bf5b [NOD-272] In CheckTransactionSanity, multiply mass limit check by massPerTxByte. (#365) 2019-08-15 15:05:26 +03:00
stasatdaglabs
187c525667 [NOD-234] In getBlockTemplate check if node is current (#362)
* [NOD-234] Added an IsCurrent check to handleGetBlockTemplate.

* [NOD-234] Removed IsCurrent check from handleGetBlockTemplateRequest. Added an explanation for why we're checking the chainHeight.

* [NOD-234] Added ShouldMineOnGenesis to the IsCurrent check.

* [NOD-234] Flipped && operands to fail fast.
2019-08-14 15:09:35 +03:00
stasatdaglabs
6032727965 [NOD-268] Implement selectedParentChain-related structures in btcjson. (#363) 2019-08-13 11:15:51 +03:00
stasatdaglabs
bb3f23b6dc [NOD-240] Get rid of all references for wallet related RPC commands (#361)
* [NOD-240] Removed references to the wallet in rpcserver.go.

* [NOD-240] Began removing btcwalletxxx.go.

* [NOD-240] Got rid of rpcclient/wallet.go and walletsvrcmds.go.

* [NOD-240] Moved GetBestBlockResult to dagsvrresults.go.

* [NOD-240] Finished removing walletsvrXXX.go.

* [NOD-240] Removed wallet stuff from btcctl.

* [NOD-240] Removed a few last things that I've missed.
2019-08-12 09:54:07 +03:00
stasatdaglabs
e5485ac5e6 [NOD-262] Renamed all instances of GetBlocks to GetBlockInvs. (#359) 2019-08-11 12:25:29 +03:00
stasatdaglabs
594a209f83 [NOD-263] Rename all instances of hashStop to stopHash. (#360) 2019-08-11 11:20:21 +03:00
Svarog
9981ce7adb [NOD-255] When adding orphans during netsync process - report only to debug log, unless number of orphans > k*2 (#357)
* [NOD-255] When orphan blocks arrive from netsync - don't write log unless we are in Debug

* [NOD-255] If there are more than K*2 orphans in pool - report as a potential problem anyway

* [NOD-255] Update comment to explain the K*2 figure
2019-08-07 11:30:32 +03:00
Svarog
49ac97c7db [NOD-265] Return an empty array for searchRawTransactions when no txs were found (#356) 2019-08-06 17:54:56 +03:00
stasatdaglabs
bfdf7a2cf2 [NOD-237] Implement transaction mass (#355)
* [NOD-237] Implemented transaction mass.

* [NOD-237] Added transaction mass validation to the mempool.

* [NOD-237] Made blockMaxMassMax not rely on MaxBlockPayload.

* [NOD-237] Added comments describing the new constants in validate.go.

* [NOD-237] Changed the default blockmaxmass to 10,000,000.

* [NOD-237] Fixed a comment that erroneously didn't refer to mass.

* [NOD-237] Added comments to ValidateTxMass and CalcTxMass.

* [NOD-237] Renamed "size" to "byte". Made validateBlockMass exit early if validation fails. Fixed unit names in comments. In CalcTxMass, moved summing of mass to the bottom of the function.

* [NOD-237] Instead of ErrMassTooHigh, renamed ErrBlockTooBig and ErrTxTooBig. Replaced wire.MaxBlockPayload with MaxMassPerBlock.

* [NOD-237] Fixed sanity checks related to block size in commands.

* [NOD-237] To use up less memory during testing, made the mass in the "too big" test come from pkScripts rather than input bytes.

* [NOD-237] Added an overflow check to validateBlockMass.
2019-08-05 16:04:24 +03:00
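NOD-237 above introduces transaction mass as the size-like limit (with NOD-359 later deriving part of it from previous scriptPubKeys). A sketch of the general shape; the weight constants here are illustrative assumptions, not the real values:

```go
package main

import "fmt"

// Illustrative weights only; the real constants live in the validation
// code and may well differ.
const (
	massPerTxByte           = 1
	massPerScriptPubKeyByte = 10
	massPerSigOp            = 10000
)

// calcTxMass sketches the mass idea from the commits: plain serialized
// bytes, scriptPubKey bytes (costlier, since they persist in the UTXO
// set), and signature operations each contribute with their own weight.
func calcTxMass(serializedSize, scriptPubKeyBytes, sigOpsCount uint64) uint64 {
	return serializedSize*massPerTxByte +
		scriptPubKeyBytes*massPerScriptPubKeyByte +
		sigOpsCount*massPerSigOp
}

func main() {
	fmt.Println(calcTxMass(250, 50, 2)) // 250 + 500 + 20000 = 20750
}
```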
Ori Newman
54b681460d [NOD-244] Don't validate subnetwork registry transactions when they are being registered (#354) 2019-08-01 12:40:01 +03:00
stasatdaglabs
2147d16c1f [NOD-220] When handling an INV message, made it skip tx invs that are currently being requested. (#352) 2019-08-01 12:31:40 +03:00
Ori Newman
7c1cb47bd0 [NOD-249] Change WaitGroup to use channels (#350)
* [NOD-248] Implement waitgroup to enable waiting while adding

* [NOD-248] fix waitGroup.done() error message

* [NOD-248] atomically read wg.counter

* [NOD-248] return lowPriorityMutex

* [NOD-249] Add tests to waitgroup

* [NOD-249] Change waitgroup to use channels

* [NOD-249] Format project

* [NOD-249] Add comments and logs to waitGroup, and remove timeouts from prioritymutex_test.go

* [NOD-249] Fix comments
2019-07-28 18:23:26 +03:00
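NOD-249 above rebuilds the custom waitgroup on channels. A hedged sketch of one channel-based design satisfying the same contract — wait releases when the counter hits zero, and add stays safe to call while others wait:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// waitGroup blocks waiters on a channel that is closed whenever the
// counter drops to zero; a fresh channel then serves future waiters.
type waitGroup struct {
	mtx     sync.Mutex
	counter int64
	release chan struct{}
}

func newWaitGroup() *waitGroup {
	return &waitGroup{release: make(chan struct{})}
}

func (wg *waitGroup) add(delta int64) {
	wg.mtx.Lock()
	defer wg.mtx.Unlock()
	wg.counter += delta
	if wg.counter < 0 {
		panic("negative waitGroup counter")
	}
	if wg.counter == 0 {
		close(wg.release)                // release current waiters
		wg.release = make(chan struct{}) // future waiters get a fresh channel
	}
}

func (wg *waitGroup) done() { wg.add(-1) }

func (wg *waitGroup) wait() {
	wg.mtx.Lock()
	if wg.counter == 0 {
		wg.mtx.Unlock()
		return
	}
	release := wg.release
	wg.mtx.Unlock()
	<-release
}

func main() {
	wg := newWaitGroup()
	wg.add(1)
	go func() {
		time.Sleep(50 * time.Millisecond)
		wg.done()
	}()
	wg.wait()
	fmt.Println("all done")
}
```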
stasatdaglabs
6acfa18d7c [NOD-220] Fixed handleSearchRawTransactions trying to get confirmations of mempool transactions, which made txgen crash on start. (#351) 2019-07-28 13:02:16 +03:00
Svarog
f0a675162c [NOD-253] Use spawn instead of go (#349) 2019-07-22 12:06:13 +03:00
Svarog
7a4deb6f18 [NOD-251] Use bluest parent anywhere validating difficulty, instead of selectedTip (#348) 2019-07-17 14:46:58 +03:00
Svarog
96842353de [NOD-250] Calculate UTXOCommitment when loading DAG + add UTXOCommitment to getBlockDagInfo (#347) 2019-07-16 16:53:28 +03:00
Svarog
5ce8875ce0 [NOD-243] Optimize validateGasLimit (#346)
* [NOD-243] Optimize validateGasLimit to only remember the current subnetwork gasUsage

* [NOD-243] Fix a typo

* [NOD-243] Rephrased comment
2019-07-16 11:07:58 +03:00
Ori Newman
812819e92f [NOD-248] Implement waitgroup to enable waiting while adding (#345)
* [NOD-248] Implement waitgroup to enable waiting while adding

* [NOD-248] fix waitGroup.done() error message

* [NOD-248] atomically read wg.counter

* [NOD-248] return lowPriorityMutex
2019-07-14 18:50:09 +03:00
Ori Newman
5cb536643e [NOD-236] Add secondary address to txgen (#341)
* [NOD-236] Add secondary address to txgen

* [NOD-236] Add description to secondary address cli argument

* [NOD-236] Remove unnecessary empty line
2019-07-14 11:26:32 +03:00
Ori Newman
4c6b8969d3 [NOD-245] Increase MaxInvPerMsg and MaxBlocksPerMsg to 65536 (#343)
* [NOD-245] Increase MaxInvPerMsg and MaxBlocksPerMsg to 65536

* [NOD-245] Fix MaxInvPerMsg to 1 << 16
2019-07-14 10:47:41 +03:00
Svarog
8ccc63752c [NOD-246] Increase RequestTimeout in mining simulator (#344) 2019-07-14 10:36:55 +03:00
Ori Newman
1088b69616 [NOD-239] Use custom priority mutex for utxo diff store (#340)
* [NOD-239] Use custom priority mutex for utxo diff store

* [NOD-239] Add shared slice to TestMutex

* [NOD-239] Add TestHighPriorityReadLock

* [NOD-239] Change comments

* [NOD-239] Rename LowPriorityLock -> LowPriorityWriteLock

* [NOD-239] Rename lock functions to write lock

* [NOD-239] Make TestHighPriorityReadLock use channels
2019-07-14 10:17:26 +03:00
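NOD-239 above guards the UTXO-diff store with a priority mutex: high-priority read locks get precedence over queued low-priority write locks. The method names below mirror the commits; the internals are one assumed way to implement that behavior:

```go
package main

import (
	"fmt"
	"sync"
)

// priorityMutex lets high-priority readers cut ahead of queued
// low-priority writers.
type priorityMutex struct {
	cond         *sync.Cond
	highPending  int  // high-priority readers, active or waiting
	readers      int  // active high-priority readers
	writerActive bool // an active low-priority writer
}

func newPriorityMutex() *priorityMutex {
	return &priorityMutex{cond: sync.NewCond(&sync.Mutex{})}
}

func (m *priorityMutex) highPriorityReadLock() {
	m.cond.L.Lock()
	m.highPending++
	for m.writerActive {
		m.cond.Wait()
	}
	m.readers++
	m.cond.L.Unlock()
}

func (m *priorityMutex) highPriorityReadUnlock() {
	m.cond.L.Lock()
	m.readers--
	m.highPending--
	m.cond.Broadcast()
	m.cond.L.Unlock()
}

// lowPriorityWriteLock waits not only for active readers but for *pending*
// high-priority readers — which is exactly what gives them priority.
func (m *priorityMutex) lowPriorityWriteLock() {
	m.cond.L.Lock()
	for m.writerActive || m.readers > 0 || m.highPending > 0 {
		m.cond.Wait()
	}
	m.writerActive = true
	m.cond.L.Unlock()
}

func (m *priorityMutex) lowPriorityWriteUnlock() {
	m.cond.L.Lock()
	m.writerActive = false
	m.cond.Broadcast()
	m.cond.L.Unlock()
}

func main() {
	m := newPriorityMutex()
	m.highPriorityReadLock()
	fmt.Println("high-priority read holds the lock")
	m.highPriorityReadUnlock()

	m.lowPriorityWriteLock()
	fmt.Println("low-priority write holds the lock")
	m.lowPriorityWriteUnlock()
}
```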
Ori Newman
541119dda2 [NOD-238] Check if the incoming block is the newest orphan (#339) 2019-07-08 10:31:00 +03:00
stasatdaglabs
7400eabc6d [NOD-233] Fixed iteration order when iterating over the blockIndex bucket. (#338) 2019-07-04 18:29:56 +03:00
stasatdaglabs
c3c429494f [NOD-228] Added JSONRPCifyer to the project and created a Dockerfile for it. (#337) 2019-07-04 11:16:05 +03:00
stasatdaglabs
6d20202354 [NOD-222] Use accepting block blue score instead of containing block blue score for sequence lock and block maturity (#333)
* [NOD-222] Added constant: UnacceptedBlueScore.

* [NOD-222] Made it so that block transactions always have UnacceptedBlueScore.

* [NOD-222] Implemented updating unaccepted UTXO entries with accepted ones in the virtual.

* [NOD-222] Fixed an unclear comment.

* [NOD-222] Fixed diffFromAcceptanceData not receiving the right blue score.

* [NOD-222] Fixed various issues with the implementation. It appears to work now.

* [NOD-222] Removed debug logs.

* [NOD-222] Fixed tests that relied on utxoCollection.String().

* [NOD-222] Fixed TestChainedTransactions.

* [NOD-222] Fixed tests that relied on GetVirtualFromParentsForTest.

* [NOD-222] Fixed having identical entries in toAdd and toRemove.

* [NOD-222] Fixed logic in diffFrom that I previously broke.

* [NOD-222] Fixed a wrong check.

* [NOD-222] Figured out the magical invocation to make everything work.

* [NOD-222] Fixed blockDB tests.

* [NOD-222] Removed debug method.

* [NOD-222] Fixed comments related to setting coinbase maturity to 0.

* [NOD-222] Fixed a typo in a comment.

* [NOD-222] Added a comment that explains the new addition in GetVirtualFromParentsForTest.

* [NOD-222] Added a comment to DiffUTXOSet.Get().

* [NOD-222] Fixed a nuance in DiffUTXOSet.containsInputs.

* [NOD-222] Replaced nonsense in GetVirtualFromParentsForTest with diffFromAcceptanceData.

* [NOD-222] Renamed newVirtualUTXO -> newVirtualPastUTXO.

* [NOD-222] Fixed a comment.

* [NOD-222] Extracted checking utxoCollection with blueScore to a method.

* [NOD-222] Added tests where the same entry is in both toAdd and toRemove.

* [NOD-222] Used Add/RemoveEntry inside diffFromAcceptedTx.

* [NOD-222] Removed superfluous test for UnacceptedBlueScore.

* [NOD-222] Added/Updated comments.

* [NOD-222] Added tests to TestUTXODiffRules.

* [NOD-222] Added appropriate protection against impossible "from"s in diffFrom.

* [NOD-222] Added a comment explaining why we diffFrom acceptanceData in verifyAndBuildUTXO.

* [NOD-222] Fixed comments and equal() in utxoset.
2019-07-02 16:28:54 +03:00
Ori Newman
d6297a3192 [NOD-225] Finalize nodes below finality point (#335)
* [NOD-225] Finalize nodes below finality point

* [NOD-225] finalizeNodesBelowFinalityPoint only if dag.lastFinalityPoint is changed

* [NOD-225] change comment in validateParents

* [NOD-225] add string to ErrInvalidParentsRelation error

* [NOD-225] Change comment in validateParents

* [NOD-225] Change comment in validateParents

* [NOD-225] change comment in validateParents

* [NOD-225] Delete diff data from db directly from finalizeNodesBelowFinalityPoint

* [NOD-225] Refactor updateFinalityPoint
2019-07-02 16:10:33 +03:00
Ori Newman
e2f8d4e0aa [NOD-232] Remove diff and diffChild from blockNode (#336) 2019-07-02 11:01:41 +03:00
stasatdaglabs
589763e8ec [NOD-226] Fix comments around BlockLocator (#334)
* [NOD-226] Corrected blockLocator-related comments.

* [NOD-226] Fixed "current tips" -> "selected tip".
2019-06-30 12:34:53 +03:00
Ori Newman
c14c64d534 [NOD-224] Make P2PK and raw Multisig non-standard (#332) 2019-06-27 12:44:22 +03:00
Ori Newman
f7f44995d6 [NOD-215] implement difficulty adjustment algorithm (#331)
* [NOD-215] Implement difficulty adjustment algorithm

* [NOD-215] Handle blocks with genesis parent, and fix adjustment factor calculation

* [NOD-215] Fix tests

* [NOD-215] fix calcNextRequiredDifficulty

* [NOD-215] Add TestDifficulty

* [NOD-215] Fix delay to be positive, and add tests for delayed blocks

* [NOD-215] Split calcBlockWindowMinMaxAndMedianTimestamps to two functions

* [NOD-215] Make explicit loop for padding blue block window with genesis

* [NOD-215] Name return values

* [NOD-215] Fix delay != 0 error messages

* [NOD-215] Fix comments

* [NOD-215] Fix blueBlockWindow

* [NOD-215] Add TestBlueBlockWindow

* [NOD-215] Rename PowLimit -> PowMax

* [NOD-215] Fix delay != 0 error messages

* [NOD-215] Move PowMaxBits to BlockDAG

* [NOD-215] Make blockWindow type

* [NOD-215] Make blueBlockWindow always pad with genesis

* [NOD-215] Remove redundant line in checkWindowIDs

* [NOD-215] Make medianTimestamp return error for empty window
2019-06-26 15:47:39 +03:00
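Among the NOD-215 bullets above, medianTimestamp operates on a window of blue blocks and errors on an empty window. A sketch of that piece, with timestamps as plain int64 unix seconds:

```go
package main

import (
	"fmt"
	"sort"
)

// medianTimestamp sketches the windowed median from the commits: the
// difficulty algorithm takes a window of blue blocks (padded with the
// genesis block when the DAG is short) and derives the median timestamp.
func medianTimestamp(window []int64) (int64, error) {
	if len(window) == 0 {
		return 0, fmt.Errorf("empty block window")
	}
	timestamps := append([]int64(nil), window...)
	sort.Slice(timestamps, func(i, j int) bool { return timestamps[i] < timestamps[j] })
	return timestamps[len(timestamps)/2], nil
}

func main() {
	window := []int64{1000, 1030, 1010, 1050, 1020}
	median, err := medianTimestamp(window)
	if err != nil {
		panic(err)
	}
	fmt.Println(median) // 1020
}
```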
Ori Newman
263737b3fb [NOD-196] move coinbase scriptPubKey to payload (#330)
* [NOD-196] move coinbase scriptPubKey to payload (no tests) (#311)

* [NOD-196] Move coinbase scriptPubKey to payload

* [NOD-196] Rename SubnetworkID.IsFull to SubnetworkID.IsBuiltIn

* [NOD-196] Fix comments

* [NOD-196] Add block subsidy to fee transaction

* [NOD-196] Fix comments

* [NOD-217] Merge coinbase and fee transaction (#328)

* [NOD-196] Fix tests

* [NOD-196] Fix tests

* [NOD-217] Add error to getBluesFeeData

* [NOD-217] Merge Coinbase and fee transaction

* [NOD-217] Format project

* [NOD-217] Remove OpTrue default for mining.NewBlockTemplate

* [NOD-196] Format project

* [NOD-217] Add missing space before comment

* [NOD-196] Change MaxCoinbasePayloadLen to 150
2019-06-17 17:43:13 +03:00
Svarog
0c5f3d72bd [NOD-223] Removed Nulldata as standard tx type (#329)
* [NOD-223] Removed Nulldata as standard tx type

* [NOD-223] Removed redundant space
2019-06-16 16:31:38 +03:00
stasatdaglabs
ffd886498a [NOD-208] Make block reward maturity use the same mechanism as confirmations (#327)
* [NOD-208] Added blockBlueScore to UTXOEntry.

* [NOD-208] Added blueBlockScore to NewUTXOEntry.

* [NOD-208] Fixed compilation errors in policy, utxoset, and dag tests.

* [NOD-208] Changed validateBlockRewardMaturity and CheckTransactionInputsAndCalulateFee to use blueScore.

* [NOD-208] Changed CalcBlockSubsidy to use blueScore.

* [NOD-208] Changed SequenceLockActive to use blueScore.

* [NOD-208] Removed ExtractCoinbaseHeight.

* [NOD-208] Removed reference to block height in ensureNoDuplicateTx.

* [NOD-208] Changed IsFinalizedTransaction to use blueScore.

* [NOD-208] Fixed merge errors.

* [NOD-208] Made UTXOEntry serialization use blueScore.

* [NOD-208] Changed CalcPriority and calcInputValueAge to use blueScore.

* [NOD-208] Changed calcSequenceLock to use blueScore.

* [NOD-208] Removed blockChainHeight from UTXOEntry.

* [NOD-208] Fixed compilation errors in feeEstimator. Fixed a bug in the test pool harness.

* [NOD-208] Fixed oldestChainBlockWithBlueScoreGreaterThan not handling an extreme case.

* [NOD-208] Fixed TestDiffFromTx.

* [NOD-208] Got rid of priority and support of free transactions.

* [NOD-208] Fixed TestProcessTransaction.

* [NOD-208] Fixed TestTxFeePrioHeap.

* [NOD-208] Fixed TestAddrIndex and TestFeeEstimatorCfg.

* [NOD-208] Removed unused rateLimit parameter from ProcessTransaction.

* [NOD-208] Fixed tests that rely on CreateTxChain.

* [NOD-208] Fixed tests that rely on CreateSignedTxForSubnetwork.

* [NOD-208] Fixed TestFetchTransaction.

* [NOD-208] Fixed TestHandleNewBlock. Fixed HandleNewBlock erroneously processing fee transactions.

* [NOD-208] Fixed TestTxIndexConnectBlock.

* [NOD-208] Removed the use of Height() from the fee estimator.

* [NOD-208] Removed unused methods from rpcwebsocket.go.

* [NOD-208] Removed Height from util.Block.

* [NOD-208] Removed ErrForkTooOld. It doesn't make sense in a DAG.

* [NOD-208] Made blockHeap use blueScore instead of height.

* [NOD-208] Removed fee estimator.

* [NOD-208] Removed DAG.Height.

* [NOD-208] Made TestAncestorErrors test chainHeight instead of height.

* [NOD-208] Fixed a couple of comments that were still speaking about block height.

* [NOD-208] Replaced all uses of HighestTipHash with SelectedTipHash.

* [NOD-208] Remove blockNode highest and some remaining erroneous uses of height.

* [NOD-208] Fixed a couple of comments. Fixed outPoint -> outpoint merge error.

* [NOD-208] Fixed a couple more comments.

* [NOD-208] Used calcMinRequiredTxRelayFee instead of DefaultMinRelayTxFee for mempool tests.

* [NOD-208] Renamed mempool Config BestHeight to DAGChainHeight.

* [NOD-208] Fixed a bug in oldestChainBlockWithBlueScoreGreaterThan. Made calcSequenceLock use the node's selected parent chain rather than the virtual block's.

* [NOD-208] Removed chainHeight from blockNode String().
Renamed checkpointsByHeight to checkpointsByChainHeight and prevCheckpointHeight to prevCheckpointChainHeight.
Removed reference to chainHeight in blockIndexKey.
Fixed comments in dagio.go.

* [NOD-208] Removed indexers/blocklogger.go, as no one was using it.

* [NOD-208] Made blocklogger.go log blueScore instead of height.

* [NOD-208] Fixed typo.

* [NOD-208] Fixed comments, did minor renaming.

* [NOD-208] Made a "common sense" wrapper around sort.Search.

* [NOD-208] Fixed comment in SearchSlice.
2019-06-16 14:12:02 +03:00
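The last two NOD-208 bullets above mention a "common sense" wrapper around sort.Search, named SearchSlice. A sketch of what such a wrapper plausibly looks like:

```go
package main

import (
	"fmt"
	"sort"
)

// searchSlice wraps sort.Search with "common sense": it returns the index
// of the first element >= target plus a bool saying whether such an element
// exists, instead of a raw index callers must range-check themselves.
func searchSlice(sortedSlice []uint64, target uint64) (index int, ok bool) {
	index = sort.Search(len(sortedSlice), func(i int) bool {
		return sortedSlice[i] >= target
	})
	return index, index != len(sortedSlice)
}

func main() {
	blueScores := []uint64{10, 20, 30, 40}
	i, ok := searchSlice(blueScores, 25)
	fmt.Println(i, ok) // 2 true
	_, ok = searchSlice(blueScores, 50)
	fmt.Println(ok) // false — no element >= 50
}
```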
Svarog
76f5619de7 [NOD-211] Add concurrent-safe version of BlockConfirmationsByHash (#326) 2019-06-10 13:30:16 +03:00
Svarog
35703e7956 [NOD-218] Fix the order of txgen log message arguments (#325) 2019-06-06 16:10:21 +03:00
Ori Newman
29231d8d14 [NOD-213] add customization to txgen (#324)
* [NOD-213] Add customization to txgen

* [NOD-213] Add fee rate as an argument

* [NOD-213] Don't delay transaction emission if there's no need

* [NOD-213] enqueueTransactions -> queueTransactions

* [NOD-213] reuse delay variable

* [NOD-213] Add ExtractGasLimit function

* [NOD-213] Use time.Ticker in sendTransactionLoop
2019-06-06 14:01:28 +03:00
Svarog
396842ae40 [NOD-207] Rename any place that says 'OutPoint' to 'Outpoint' (#323)
* [NOD-207] Rename any place that says 'OutPoint' to 'Outpoint'

* [NOD-207] Fix any place that says output point
2019-06-05 16:23:57 +03:00
Svarog
072c753323 [NOD-216] Revert implicit fee transaction (#322)
* Revert "[NOD-214] Remove Fee transaction from addrindex (#321)"

This reverts commit e4b2d869d4.

* Revert "[NOD-195] Make fee tx implicit (#315)"

This reverts commit ccca580a4b.
2019-06-05 12:54:51 +03:00
Ori Newman
6250342b86 [NOD-205] Reimplement txgen (#320)
* [NOD-205] Reimplement txgen

* [NOD-205] remove prev outpoints of all initial transactions

* [NOD-205] break txloop to smaller functions

* [NOD-205] Limit collectTransactions iterations

* [NOD-205] Use requiredConfirmations constant instead of inline number

* [NOD-205] Rename wTx -> walletTx

* [NOD-205] Remove handleNewBlock

* [NOD-205] Fix search and replace error
2019-06-04 18:06:35 +03:00
Ori Newman
e4b2d869d4 [NOD-214] Remove Fee transaction from addrindex (#321) 2019-06-04 16:12:00 +03:00
Svarog
ccca580a4b [NOD-195] Make fee tx implicit (#315)
* [NOD-195] Made fee tx implicit

* [NOD-195] Removed redundant checks for fee transactions

* [NOD-195] Add fee tx data into acceptance data and fee data

* [NOD-195] Fix some tests

* [NOD-195] Update Block100000 with new data

* [NOD-195] Fixed remaining tests

* [NOD-195] Save and load feeTx to/from database

* [NOD-195] Remove DisconnectBlock methods from indexers, since they are not used anywhere

* [NOD-195] Add fee tx to addrindex

* [NOD-195] Don't populate inputs for fee transactions

* [NOD-195] Delete feeTxBucket in removeDAGState

* [NOD-195] Got rid of util.FeeTransactionIndex
2019-06-03 17:30:57 +03:00
stasatdaglabs
84970a8378 [NOD-201] Create AddSubnetwork cli tool (#319)
* [NOD-201] Implemented the AddSubnetwork CLI tool.

* [NOD-201] Fixed various bugs in AddSubnetwork.

* [NOD-201] Fixed mempool maybeAcceptTransaction verifying gasLimit for a subnetwork registry transaction.

* [NOD-201] Fixed serialization/deserialization bugs in addrIndex.

* [NOD-201] Fixed BlockConfirmationsByHash not handling the zeroHash.

* [NOD-201] Used btclog instead of go log.

* [NOD-201] Made gasLimit a command-line flag. Made waitForSubnetworkToBecomeAccepted only return an error.

* [NOD-201] Filtered out mempool transactions.

* [NOD-201] Fixed embarrassing typos.

* [NOD-201] Added subnetwork registry tx fee + appropriate cli flag.

* [NOD-201] Skipped TXOs that can't pay for registration.
2019-06-03 15:44:43 +03:00
Ori Newman
901bde1fd4 [NOD-202] undo createDAGState if blockdag new fails (#318)
* [NOD-204] Add UTXOCommitment to GetBlockTemplateResult

* [NOD-204] Add UTXOCommitment to GetBlockTemplateResult

* [NOD-206] Avoid leaking blocks from previous miner when switching miners

* [NOD-202] Undo createDAGState if blockdag.New fails

* [NOD-202] Fix gofmt errors
2019-05-30 18:14:27 +03:00
Ori Newman
33a4183bfa [NOD-206] Avoid leaking blocks from previous miner when switching miners (#317)
* [NOD-204] Add UTXOCommitment to GetBlockTemplateResult

* [NOD-204] Add UTXOCommitment to GetBlockTemplateResult

* [NOD-206] Avoid leaking blocks from previous miner when switching miners
2019-05-30 17:25:53 +03:00
Ori Newman
0bc6e5bc92 [NOD-204] Add utxo commitment to get block template result (#316)
* [NOD-204] Add UTXOCommitment to GetBlockTemplateResult

* [NOD-204] Add UTXOCommitment to GetBlockTemplateResult
2019-05-30 16:59:26 +03:00
stasatdaglabs
8323e468da [NOD-200] Add GetSubNetwork command to JSON-RPC (#314)
* [NOD-200] Implemented the GetSubnetwork JSON-RPC command.

* [NOD-200] Fixed a copy+paste error in a comment.
2019-05-29 17:59:18 +03:00
Ori Newman
7912fe4c35 [NOD-203] Add UTXO commitment to devnet genesis (#313) 2019-05-29 15:06:10 +03:00
stasatdaglabs
266e471941 [NOD-190] Implement Confirmations counting algorithm (#312)
* [NOD-192] Add method to compute confirmations of a single transaction (#306)

* [NOD-192] Implemented txConfirmations (a blue-score sketch follows this commit).

* [NOD-192] Renamed acceptedBy -> acceptingBlock and ConfirmationsByHash -> BlockConfirmationsByHash.

* [NOD-194 + NOD-199] Update all JSON-RPC methods to use new methods for computing confirmations + Remove the x1.5 factor when counting confirmations in txgen (#309)

* [NOD-194] Connected JSON-RPC commands with new confirmations logic.

* [NOD-194] Fixed failing tests.

* [NOD-194] Removed x1.5 from isTxMatured.

* [NOD-194] Made isTxMatured panic if it receives nil confirmations.

* [NOD-194] Added isInMempool to RPC methods that require it.

* [NOD-194] Fixed a typo.

* [NOD-194] Made the declaration of isInMempool more clear.

* [NOD-194] Removed some unnecessary complexity from isTxMatured.

* [NOD-193] Update Tx-Index to accommodate correct Confirmations structure (#308)

* [NOD-193] Updated BlockID to be uint64 in txIndex and addrIndex.

* [NOD-193] Removed the inclusion of current block transactions to txsAcceptanceData.

* [NOD-193] Implemented writing to the tx index txs with the virtual as the accepting block.

* [NOD-193] Added test for txs accepted by the virtual block.

* [NOD-193] Removed the requirement for subnetwork registry transactions to be accepted.

* [NOD-194] Made in-memory the txsAcceptedByVirtual part of txIndex.

* [NOD-193] Optimized txsAcceptedByVirtual initialization.

* [NOD-193] Fixed weird loop in txsAcceptedByVirtual initialization.

* [NOD-190] Fixed merge errors.
2019-05-29 13:09:16 +03:00
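
The confirmations algorithm introduced here counts how deep a transaction's accepting block sits relative to the selected tip. A sketch of the core arithmetic, assuming blue scores drive the count as the surrounding commits suggest; the real code additionally handles the virtual block, mempool transactions, and red blocks.

```go
package main

import "fmt"

// confirmations is a simplified model: a transaction accepted by a block with
// blue score s has (tip blue score - s + 1) confirmations. The field names
// and exact formula are illustrative, not the repository's implementation.
func confirmations(selectedTipBlueScore, acceptingBlockBlueScore uint64) uint64 {
	return selectedTipBlueScore - acceptingBlockBlueScore + 1
}

func main() {
	fmt.Println(confirmations(105, 100)) // 6: accepted 5 blue blocks ago, plus itself
}
```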
stasatdaglabs
4e6edd4ffd [NOD-189] Optimize UTXOCollection operations (#307)
* [NOD-189] Made UTXODiff WithDiff and DiffFrom allocate collections with appropriate sizes (see the sketch after this commit).
In mempool HandleNewBlock, replaced the removeTransaction loop with removeTransactions.

* [NOD-189] Removed code duplication between removeTransaction and removeTransactions.

* [NOD-189] Fixed a merge error.

* [NOD-189] Fixed another merge error.

* [NOD-189] Renamed removeRedeemers to removeDependants.

* [NOD-189] Removed superfluous check inside removeTransactionWithDiff.

* [NOD-189] Added a comment to removeTransactions detailing what it optimizes.

* [NOD-189] Added documentation to removeTransactionWithDiff and split it into smaller methods.
2019-05-29 11:46:55 +03:00
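
A minimal sketch of the allocation-size optimization from the first bullet, assuming a map-backed UTXO collection; the types and the merge rule are simplified stand-ins for the real UTXODiff logic.

```go
package main

import "fmt"

type outpoint struct {
	txID  string
	index uint32
}

// utxoCollection maps outpoints to values; the real entry type carries more.
type utxoCollection map[outpoint]uint64

// withDiff keeps the entries of toAdd not cancelled by toRemove. Preallocating
// the result with a size hint avoids repeated map growth, which is the
// optimization the commit describes.
func withDiff(toAdd, toRemove utxoCollection) utxoCollection {
	result := make(utxoCollection, len(toAdd))
	for op, v := range toAdd {
		if _, ok := toRemove[op]; !ok {
			result[op] = v
		}
	}
	return result
}

func main() {
	add := utxoCollection{{"tx1", 0}: 50, {"tx2", 1}: 30}
	remove := utxoCollection{{"tx2", 1}: 30}
	fmt.Println(withDiff(add, remove)) // map[{tx1 0}:50]
}
```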
Ori Newman
7069d173c6 [NOD-180] Add validation of utxo commitments (#310)
* [NOD-172] Port ECMH from bchd

* [NOD-172] Fix hdkeychain.TestErrors and add btcec.TestRecoverCompact

* [NOD-172] Make ECMH immutable

* [NOD-172] Fix gofmt errors

* [NOD-172] Add TestMultiset_NewMultisetFromDataSlice and fix Point to be immutable

* [NOD-172] Fix gofmt errors

* [NOD-172] Add test for checking that the Union of a multiset and its inverse is zero

* [NOD-179] Add ECMH Point to all UTXO-structs

* [NOD-179] Fix utxo set tests

* [NOD-179] Fix mempool tests

* [NOD-179] Remove RemoveTxOuts

* [NOD-179] Move serializeBlockUTXODiffData to the top of the file

* [NOD-179] Fix serializeBlockUTXODiffData comment format

* [NOD-179] Fix AddTx comment and name return values

* [NOD-180] Validate utxo commitments

* [NOD-179] Fix TestAcceptingBlock and TestConfirmations to not use the block hash as the phantom tie-breaker

* [NOD-180] Fix typo

* [NOD-180] move most of the logic in calcUTXOCommitment to UTXOSet.WithTransactions

* [NOD-180] Optionally return error when a transaction in WithTransactions is double spent

* [NOD-180] Rename allowDoubleSpends to ignoreDoubleSpends
2019-05-28 11:33:11 +03:00
Ori Newman
aa51b5f071 [NOD-179] Added ECMH-Multiset to all UTXO structs (#304)
* [NOD-172] Port ECMH from bchd

* [NOD-172] Fix hdkeychain.TestErrors and add btcec.TestRecoverCompact

* [NOD-172] Make ECMH immutable

* [NOD-172] Fix gofmt errors

* [NOD-172] Add TestMultiset_NewMultisetFromDataSlice and fix Point to be immutable

* [NOD-172] Fix gofmt errors

* [NOD-172] Add test for checking that the Union of a multiset and its inverse is zero

* [NOD-179] Add ECMH Point to all UTXO-structs

* [NOD-179] Fix utxo set tests

* [NOD-179] Fix mempool tests

* [NOD-179] Remove RemoveTxOuts

* [NOD-179] Move serializeBlockUTXODiffData to the top of the file

* [NOD-179] Fix serializeBlockUTXODiffData comment format

* [NOD-179] Fix AddTx comment and name return values
2019-05-23 15:11:42 +03:00
stasatdaglabs
da7c9c7dfb [NOD-191] Added .acceptingBlock and .confirmations methods to blockNode (#305)
* [NOD-191] Added selectedPathChainSlice to virtualBlock.

* [NOD-191] Implemented acceptingBlock().

* [NOD-191] Implemented confirmations().

* [NOD-191] Added selectedPathChainSlice tests to TestSelectedPath.

* [NOD-191] Fixed a bug in acceptingBlock(). Written tests for confirmations().

* [NOD-191] Written tests for acceptingBlock().

* [NOD-191] Added test to make sure that acceptingBlock(tip) returns the virtual block.

* [NOD-191] Added a panic if we somehow feed a childless block that isn't the virtual to acceptingBlock.

* [NOD-191] Fixed comments.

* [NOD-191] Fixed a bug in acceptingBlock. Added red block tests for acceptingBlock.

* [NOD-191] Added red block tests for confirmations.

* [NOD-191] Fixed misleading comment and error message.
2019-05-23 10:57:03 +03:00
Svarog
ec10346e79 [NOD-184] Use lock-less NextAcceptedIDMerkleRoot in NewBlockTemplate (#302) 2019-05-16 17:33:04 +03:00
stasatdaglabs
2481871c10 [NOD-175] When resolving orphans - don't send inv (#300)
* [NOD-175] Added BlockAddedNotificationData and sent it instead of just a block on BlockAdded.

* [NOD-175] Added BFWasUnorphaned and raised it when an unorphaned block was to be accepted.

* [NOD-175] Fixed a typo.

* [NOD-175] Made it so that only the mempool gets updated if we're not current or the block was just now unorphaned.
2019-05-16 13:05:30 +03:00
Svarog
ac1fd11a42 [NOD-182] Added AcceptedIDMerkleRoot to GetBlockTemplateResult (#301) 2019-05-16 12:44:25 +03:00
stasatdaglabs
b1d3ca0206 [NOD-177] Remove idMerkleRoot (#299)
* [NOD-177] Removed references to idMerkleRoot.

* [NOD-177] Generated new genesis hashes.

* [NOD-177] Generated new blk_ blocks.

* [NOD-177] Fixed TestHaveBlock.

* [NOD-177] Fixed The rest of the tests.

* [NOD-177] Fixed a couple of comments and a duplicate test.

* [NOD-177] Fixed blocks1-256.bz2.
2019-05-15 16:16:57 +03:00
Ori Newman
5c5491e1e4 [NOD-172] Port ECMH from bchd and fix Remove to preserve commutativity (#292)
* [NOD-172] Port ECMH from bchd

* [NOD-172] Fix hdkeychain.TestErrors and add btcec.TestRecoverCompact

* [NOD-172] Make ECMH immutable

* [NOD-172] Fix gofmt errors

* [NOD-172] Add TestMultiset_NewMultisetFromDataSlice and fix Point to be immutable

* [NOD-172] Fix gofmt errors

* [NOD-172] Add test for checking that the union of a multiset and its inverse is zero (an illustrative sketch follows this commit)
2019-05-15 16:07:37 +03:00
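
The property tested in the last bullet, that the union of a multiset and its inverse is zero, can be illustrated with a toy XOR-based multiset hash. ECMH combines elliptic-curve points rather than XORing digests, so treat this strictly as an analogy.

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

type xorMultiset [sha256.Size]byte

// add folds an element's hash into the multiset. XOR makes the operation
// commutative and self-inverting, loosely mirroring ECMH's point addition.
func (m xorMultiset) add(data []byte) xorMultiset {
	h := sha256.Sum256(data)
	for i := range m {
		m[i] ^= h[i]
	}
	return m
}

// remove equals add here because XOR is its own inverse; ECMH instead
// subtracts the element's curve point.
func (m xorMultiset) remove(data []byte) xorMultiset { return m.add(data) }

func main() {
	var empty xorMultiset
	a := empty.add([]byte("utxo-1")).add([]byte("utxo-2"))
	b := empty.add([]byte("utxo-2")).add([]byte("utxo-1"))
	fmt.Println(a == b) // true: insertion order doesn't matter
	fmt.Println(a.remove([]byte("utxo-1")).remove([]byte("utxo-2")) == empty) // true
}
```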
Evgeny Khirin
8dedca693e [NOD-164 + NOD-167] AcceptedIDMerkleRoot validation and newBlockTemplate (#295)
* [NOD-164] Added validation routine

* [NOD-167] Extracted acceptedIDMerkleRoot calculation to its own method and implemented NextAcceptedIDMerkleRoot.

* [NOD-164] Fixed TestValidateFeeTransaction.

* [NOD-164] Fixed TestFinality.

* [NOD-164] Fixed blk_ tests.

* [NOD-164] Fixed if -> iff in a comment.

* [NOD-164] Minor style changes in comments.

* [NOD-164] Moved validateAcceptedIDMerkleRoot to before its population with the block's own transactions.
Replaced heavy call to verifyAndBuildUTXO in NextBlockFeeTransaction and NextAcceptedIDMerkleRoot with a call to pastUTXO on the virtual.

* [NOD-164] Fixed erroneous comment.

* [NOD-164] Inserted the logic from buildAndSortAcceptedTxs into calculateAcceptedIDMerkleRoot, since the former was meaningless on its own (a merkle-root sketch follows this commit).

* [NOD-164] Changed looping over txsAcceptanceData instead of over node.blues.
2019-05-14 15:31:23 +03:00
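
A sketch of the general shape of such a calculation: sort the accepted transaction IDs, then fold them pairwise into a merkle root. The hash function and the duplicate-last-on-odd-levels rule here follow the generic Bitcoin-style convention, not necessarily what blockdag's merkle code does.

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"sort"
)

func merkleRoot(hashes [][32]byte) [32]byte {
	if len(hashes) == 0 {
		return [32]byte{}
	}
	for len(hashes) > 1 {
		if len(hashes)%2 == 1 {
			hashes = append(hashes, hashes[len(hashes)-1]) // duplicate last on odd levels
		}
		next := make([][32]byte, 0, len(hashes)/2)
		for i := 0; i < len(hashes); i += 2 {
			next = append(next, sha256.Sum256(append(hashes[i][:], hashes[i+1][:]...)))
		}
		hashes = next
	}
	return hashes[0]
}

func main() {
	// Accepted transaction IDs are sorted first, as buildAndSortAcceptedTxs implies.
	txIDs := [][32]byte{sha256.Sum256([]byte("tx-b")), sha256.Sum256([]byte("tx-a"))}
	sort.Slice(txIDs, func(i, j int) bool { return string(txIDs[i][:]) < string(txIDs[j][:]) })
	fmt.Printf("%x\n", merkleRoot(txIDs))
}
```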
Svarog
ca0619bbcf [NOD-176] Moved daghash from dagconfig to util (#298)
* [NOD-176] Moved daghash from dagconfig to util

* [NOD-176] Fixed order of includes with gofmt
2019-05-14 14:05:36 +03:00
Ori Newman
d7a2ab52a1 [NOD-173] Add UTXO commitment to block header (#297) 2019-05-13 16:23:28 +03:00
Ori Newman
3b72aafbc6 [NOD-174] Measure difficulty window with chain height instead of height (#296) 2019-05-12 18:14:35 +03:00
stasatdaglabs
dfd12cdaac [NOD-165] In getBlockVerboseResult and getBlockHeaderVerboseResult, renamed merkleRoot to HashMerkleRoot and added idMerkleRoot and acceptedIdMerkleRoot. (#294) 2019-05-12 15:05:33 +03:00
Evgeny Khirin
08d94c7a47 [NOD-163] Added BlockHeader.AcceptedIDMerkleRoot (#293) 2019-05-12 11:53:47 +03:00
Ori Newman
b7b41f1a94 [NOD-159] Wrap all goroutines to handle panics (#290)
* [NOD-159] Wrap all goroutines to handle panics (see the sketch after this commit)

* [NOD-159] Fix gofmt errors

* [NOD-159] Add comment to HandlePanic

* [NOD-159] Merge panics and gowrapper packages

* [NOD-159] Added missing initialization
2019-05-07 16:13:06 +03:00
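
A minimal sketch of the goroutine panic-wrapper idea; the name spawn and the logging follow the commit message's description, but the body is illustrative rather than the repository's panics package.

```go
package main

import (
	"log"
	"sync"
)

// spawn starts f on a new goroutine and converts any panic into a log line,
// so a single misbehaving goroutine can't take the process down silently.
func spawn(f func()) {
	go func() {
		defer func() {
			if err := recover(); err != nil {
				log.Printf("goroutine panicked: %v", err)
			}
		}()
		f()
	}()
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	spawn(func() {
		defer wg.Done()
		panic("boom") // recovered and logged; the process keeps running
	})
	wg.Wait()
}
```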
stasatdaglabs
42109ec4d5 [NOD-162] Fixed calculateNodeHeight not handling the genesis block. (#291) 2019-05-07 14:36:27 +03:00
Ori Newman
39ccc4b225 [NOD-166] Remove dag.RLock() from addTransaction (#289) 2019-05-06 17:42:39 +03:00
Ori Newman
8acc738b27 [NOD-146] Remove unnecessary dag notifications (#288) 2019-05-06 11:19:19 +03:00
stasatdaglabs
945b3f8fbf [NOD-13] Notify failing builds in Telegram (#287)
* [NOD-13] Added notify_telegram to deploy.sh.

* [NOD-13] Made a test fail for testing.

* [NOD-13] Added some temporary logging.

* [NOD-13] Wrote a nice message for the bot to send.

* [NOD-13] Made the message nicer.

* [NOD-13] Made the message nicer still.

* [NOD-13] Added the build log as an attachment.

* [NOD-13] Actually added the build log as an attachment.

* [NOD-13] Added a delay to allow the build log to properly flush.

* [NOD-13] Disowned notify_telegram.

* [NOD-13] Disowning doesn't work. Using the "at" command instead.

* [NOD-13] Properly using the at command.

* [NOD-13] Actually properly using the at command.

* [NOD-13] Added a couple of prints to see whether the script is even being called.

* [NOD-13] More printouts...

* [NOD-13] Added a command to start atd if it stopped for some reason.

* [NOD-13] Added slashes in multiline echo command.

* [NOD-13] Added quotes where required and removed debug comments.

* [NOD-13] Revert "[NOD-13] Made a test fail for testing."

This reverts commit 9701e30e

* [NOD-13] Added some comments.
2019-05-05 17:12:55 +03:00
Evgeny Khirin
a73f218402 [NOD-150] Removed blockNode.workSum (#286)
* [NOD-150] Removed blockNode.workSum

* [NOD-150] Fixed comment
2019-05-05 16:02:07 +03:00
Ori Newman
eded4c2285 [NOD-161] Add flushDbCache rpc command (#285)
* [NOD-161] Add flushDbCache rpc command

* [NOD-161] Fix tests
2019-05-02 17:08:00 +03:00
Svarog
33036278ac [NOD-144] Use chainHeight in SelectedAncestor, and update all logic that uses it (#281)
* [NOD-144] Use chainHeight in SelectedAncestor, and update all logic that uses it

* [NOD-144] Moved UnminedHeight to blockdag, and updated all references
2019-05-02 16:50:54 +03:00
Ori Newman
6163d3b4ec [NOD-87] Rename hash to txID when necessary (#283)
* [NOD-87] Rename hash to txID when necessary

* [NOD-87] Fix NewTxIDFromStr error messages in msgtx_test.go
2019-05-02 14:59:59 +03:00
Evgeny Khirin
22046bebc5 [NOD-93] Removed "make go vet happy" comments (#282) 2019-05-02 13:45:40 +03:00
Evgeny Khirin
c67d4507b6 [NOD-132] Fixed getaddr command summary (#278) 2019-05-02 12:06:23 +03:00
Ori Newman
ea5e18ea11 [NOD-96] Convert txid to pointer where possible (#279)
* [NOD-96] Convert txid to pointer where possible

* [NOD-96] Make msgTx.TxID return a pointer

* [NOD-96] observedTransaction.id -> observedTransaction.txID
2019-05-02 10:54:18 +03:00
stasatdaglabs
1cc479dbf8 [NOD-152] Netsync correctly syncs only the first 500 blocks (#276)
* [NOD-152] Stopped pushBlockMsg from sending tip inv to syncing nodes.

* [NOD-152] Fixed restartSyncIfNeeded not restarting if sync is needed.

* [NOD-152] Removed continueHash, as it is no longer required.
2019-05-01 18:01:58 +03:00
Ori Newman
b4e7b59e7b [NOD-155] Increase timeout in test.sh (#280) 2019-05-01 17:36:19 +03:00
Evgeny Khirin
8592ae9641 [NOD-126] Removed BlockHeader.SelectedParentHash() method (#274)
* [NOD-126] Removed BlockHeader.SelectedParentHash() method

* [NOD-126] Added TODO comments
2019-05-01 14:51:16 +03:00
Ori Newman
1362fc45e0 [NOD-148] Delete block index (#275) 2019-05-01 13:10:32 +03:00
stasatdaglabs
b34894e4da [NOD-140] Convert all logging to use btclog (#273)
* [NOD-140] Converted DNSSeeder to use btclog.

* [NOD-140] Converted MiningSimulator to use btclog.

* [NOD-140] Converted TxGen to use btclog.

* [NOD-140] Fixed log level in handlePanic in txgen.

* [NOD-140] Renamed logger to log everywhere. Removed superfluous flag-setting to go-log.
2019-05-01 11:15:45 +03:00
stasatdaglabs
30f5ebd6d1 [NOD-139] Fix the unorphaning mechanism (#271)
* [NOD-139] Made processOrphans not return an error if one of the parents is still missing.

* [NOD-139] Made addOrphanBlock and removeOrphanBlock process all parents instead of only the selected parent.

* [NOD-139] Made addOrphanBlock remove excess orphans by their timestamp rather than their discovery time. Fixed orphans being added more than once.

* [NOD-139] Simplified removal from slice in removeOrphanBlock.

* [NOD-139] Made check for no-orphans-left come before assignment to prevOrphans.

* [NOD-139] Added Timestamp() to util.Block.

* [NOD-139] Fixed merge errors.
2019-04-30 18:53:03 +03:00
Ori Newman
4292bcac72 [NOD-149] Store phantom values in the database (#270)
* [NOD-149] Store phantom values in the database

* [NOD-149] Explain when zero hash is used when serializing blockNode

* [NOD-149] make deserializeBlockNode return a blockNode

* [NOD-149] use blockNode initializer instead of lots of assignments
2019-04-30 17:42:26 +03:00
Evgeny Khirin
8683258e4a [NOD-151] Removed VerifyDAG RPC API (#272) 2019-04-30 13:22:29 +03:00
Svarog
e9ec8cd39c [NOD-142] Convert Height and ChainHeight to uint64 (#269)
* [NOD-142] Updated util.FastLog2Floor to work on uint64 (a sketch follows this commit)

* [NOD-142] Convert height and chainHeight to uint64

* [NOD-142] A couple fixes in comments of TestFastLog2Floor

* [NOD-142] Make spendableOutOffset uint64 too
2019-04-30 12:50:46 +03:00
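
A hedged sketch of a uint64 FastLog2Floor; using math/bits is one straightforward way to get floor(log2(n)), though the original may use a different loop- or table-based approach.

```go
package main

import (
	"fmt"
	"math/bits"
)

// fastLog2Floor returns floor(log2(n)). log2(0) is mathematically undefined;
// returning 0 here is just a convention for the sketch.
func fastLog2Floor(n uint64) uint8 {
	if n == 0 {
		return 0
	}
	return uint8(bits.Len64(n) - 1)
}

func main() {
	fmt.Println(fastLog2Floor(1), fastLog2Floor(8), fastLog2Floor(9)) // 0 3 3
}
```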
Evgeny Khirin
068a8d117d [NOD-145] Fixed deadlock in mempool (#268) 2019-04-29 11:57:50 +03:00
Ori Newman
83a012de12 [NOD-69] Delete BlockAddedNtfnMethod (#267) 2019-04-29 11:50:56 +03:00
Evgeny Khirin
f36ae25baf [NOD-121] Fix empty output script in fundTx (#266)
* [NOD-121] Fix empty output script in fundTx

* [NOD-121] Remove debugging stuff

* [NOD-121] Refactored minRelayTxFee
2019-04-29 11:19:45 +03:00
stasatdaglabs
298cda0617 Merge pull request #265 from daglabs/nod-143-get-block-template-bug
[NOD-143] Multiple fixes for GetBlockTemplate
2019-04-28 12:32:12 +03:00
Mike Zak
b9e3fff5d1 [NOD-143] Move check for sm.syncPeer==nil to syncManager.current() 2019-04-28 12:23:42 +03:00
Mike Zak
ed76e2c962 [NOD-143] SelectedAncestor now returns the first chain block below the given height if there's no chain block with the exact height 2019-04-28 11:53:48 +03:00
Svarog
77fae7b522 [NOD-138] Request relayed blocks if not recent but no syncPeer available (which usually means everybody is on genesis) (#264) 2019-04-24 15:17:50 +03:00
stasatdaglabs
cd71e80eb3 [NOD-133] BTCD node connects multiple times to the same address (#263)
* [NOD-133] Added addrTrying.

* [NOD-133] Fixed an infinite loop inside getAddress.

* [NOD-133] Reverted log level to trace.

* [NOD-133] Fixed failing test.

* [NOD-133] Added an explanation as to why devnet is exempt from same-CIDR checking.

* [NOD-133] Changed config.DevNet with activeNetParams.AcceptUnroutable for same-CIDR checking.
2019-04-24 14:27:53 +03:00
Svarog
3f7c73f331 [NOD-135] Even if net params allow unroutable - don't allow local IPs (#262) 2019-04-24 11:52:19 +03:00
Svarog
4845a7f16c [NOD-131] Allow override of dnsseed by command line or config (#261)
* [NOD-131] Allow override of dnsseed by command line or config

* [NOD-131] Moved tor.go from connmgr to util/network, to prevent a dependency loop

* [NOD-131] Typo fix

* [NOD-131] Clarify description for --dnsseed cli flag

* [NOD-131] Removed redundant line that somehow got into go.sum
2019-04-23 14:22:46 +03:00
stasatdaglabs
77fb901706 [NOD-129] Revert NOD-114 (#260) 2019-04-22 15:26:10 +03:00
Evgeny Khirin
d3e70810af [NOD-121] Do not handle transaction inputs for reward transactions in SearchRawTransactions RPC call (#258)
* [NOD-121] Do not handle transaction inputs for reward transactions in SearchRawTransactions RPC call

* [NOD-121] Do not get transaction inputs for fee transactions in SearchRawTransactions RPC call
2019-04-22 12:57:50 +03:00
Evgeny Khirin
daa4481282 [NOD-66] Transaction generator (#247)
* [NOD-66] Created TX generator

* [NOD-66] Created transaction generator

* [NOD-66] Improved the TX generator's protection against double spends. Created the genaddr utility. Refactored.

* [NOD-66] Save changes before branch switch

* [NOD-66] Use log package instead of fmt

* [NOD-66] Fixed/restored docker files

* [NOD-66] Changed according to new WithLock/NoLock convention
2019-04-21 15:05:03 +03:00
Ori Newman
a3735da12a [NOD-122] Fix timeout for get block template requests (#254)
* [NOD-122] Handle each message in rpcclient with a separate goroutine

* [NOD-122] Stop listening to new blocks when not mining

* [NOD-122] Made RPC logging in the mining simulator more explicit + some styling enhancements
2019-04-21 10:11:36 +03:00
stasatdaglabs
311c96122e [NOD-119] Add a call to "go mod download" before copying all files in Dockerfiles. (#255) 2019-04-18 17:20:58 +03:00
Ori Newman
b612426ead [NOD-67] Unexport blockheap (#257) 2019-04-18 17:19:10 +03:00
Ori Newman
e99af346bf [NOD-120] Invert WithLock standard (#256) 2019-04-18 17:06:34 +03:00
Ori Newman
e22bc9af8f [NOD-115] add timeout to rpcclient requests (#252)
* [NOD-115] Add timeout to rpcclient requests (see the sketch after this commit)

* [NOD-115] Add timeout of half a second to mining simulator requests

* [NOD-115] Remove redundant allocation of responseChan
2019-04-17 17:51:50 +03:00
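
A sketch of the request-timeout pattern the commit describes: the caller waits on a response channel but gives up after a deadline. The channel type and the error are hypothetical stand-ins for the real rpcclient internals.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

var errTimeout = errors.New("rpc request timed out")

// awaitResponse returns the response or errTimeout, whichever comes first.
func awaitResponse(responseChan <-chan string, timeout time.Duration) (string, error) {
	select {
	case resp := <-responseChan:
		return resp, nil
	case <-time.After(timeout):
		return "", errTimeout
	}
}

func main() {
	responseChan := make(chan string) // nothing ever sends: simulates a stuck server
	_, err := awaitResponse(responseChan, 500*time.Millisecond)
	fmt.Println(err) // rpc request timed out
}
```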
stasatdaglabs
89ca293dc1 [NOD-113] Added graceful shutdown to mining simulator. (#253) 2019-04-17 17:00:23 +03:00
stasatdaglabs
194ceace6f [NOD-71] Add support for Go modules (#251)
* [NOD-71] Replaced Gopkg.lock and Gopkg.toml with go.mod and go.sum.

* [NOD-71] Updated Dockerfiles to use go-modules instead of dep.
2019-04-17 13:45:29 +03:00
Ori Newman
a79c6cecdb [NOD-90] Update mining simulator to pull latest block template (#248)
* [NOD-90] Update mining simulator to pull latest block template

* [NOD-90] Refactor to reduce global state

* [NOD-90] Split onMinerSwitch func

* [NOD-90] Replace chooseClient with getRandomClient

* [NOD-90] Stop ranging over foundBlock to avoid code repetition
2019-04-17 12:19:14 +03:00
stasatdaglabs
c5827febf7 [NOD-114] On start, DNSSeeder should update its addresses more frequently (#250)
* [NOD-114] Added a minimum address amount to GetAddrs (a clamping sketch follows this commit).

* [NOD-114] Added smallNetwork intervals for when the network is small.

* [NOD-114] Fixed bad minimum address calculation.
2019-04-15 15:57:43 +03:00
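
A sketch of the address-count clamping these bullets describe; the same getAddrMin logic appears in the AddressCache diff further down this page. getAddrMin = 50 matches that diff, while getAddrMax and the function's name and shape are illustrative.

```go
package main

import "fmt"

const (
	getAddrMin = 50   // matches the AddressCache diff below
	getAddrMax = 2500 // illustrative; the real constant isn't shown on this page
)

// numAddressesToReturn clamps how many addresses to serve: never more than
// getAddrMax, at least getAddrMin when the pool allows it, and everything
// we have when the pool is smaller than the minimum.
func numAddressesToReturn(poolSize, requested int) int {
	n := requested
	if n > getAddrMax {
		n = getAddrMax
	}
	if n < getAddrMin {
		n = getAddrMin
	}
	if n > poolSize {
		n = poolSize
	}
	return n
}

func main() {
	fmt.Println(numAddressesToReturn(30, 100))    // 30: small pool, send everything
	fmt.Println(numAddressesToReturn(400, 10))    // 50: serve at least getAddrMin
	fmt.Println(numAddressesToReturn(5000, 9000)) // 2500: capped at getAddrMax
}
```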
Ori Newman
7353a49469 [NOD-109] Reverse the order of .PastUTXO() and .RestoreUTXO() parameters (#249) 2019-04-15 10:37:06 +03:00
Ori Newman
1a2166cddf [NOD-101] Create a bucket for utxo diffs (#245)
* [NOD-101] Create a bucket for utxo diffs

* [NOD-101] Add error when diff data is not found

* [NOD-101] Fix serialization comment
2019-04-11 16:09:44 +03:00
Ori Newman
9276494820 [NOD-85] Fix dag.NewFinalityPoint (#246)
* [NOD-85] Fix dag.NewFinalityPoint

* [NOD-85] change newFinalityPoint to updateFinalityPoint

* [NOD-85] Fix comment in updateFinalityPoint
2019-04-11 16:05:55 +03:00
540 changed files with 37521 additions and 30053 deletions

.gitignore (vendored) · 1 change · View File

@@ -38,6 +38,7 @@ _testmain.go
.vscode
debug
debug.test
__debug_bin
# CI
version.txt

View File

@@ -513,7 +513,7 @@ Changes in 0.8.0-beta (Sun May 25 2014)
- Reduce max bytes allowed for a standard nulldata transaction to 40 for
compatibility with the reference client
- Introduce a new btcnet package which houses all of the network params
- for each network (mainnet, testnet3, regtest) to ultimately enable
+ for each network (mainnet, testnet, regtest) to ultimately enable
easier addition and tweaking of networks without needing to change
several packages
- Fix several script discrepancies found by reference client test data
@@ -530,7 +530,7 @@ Changes in 0.8.0-beta (Sun May 25 2014)
- Provide options to control block template creation settings
- Support the getwork RPC
- Allow address identifiers to apply to more than one network since both
- testnet3 and the regression test network unfortunately use the same
+ testnet and the regression test network unfortunately use the same
identifier
- RPC changes:
- Set the content type for HTTP POST RPC connections to application/json

Gopkg.lock (generated) · 105 changes · View File

@@ -1,105 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
name = "bou.ke/monkey"
packages = ["."]
revision = "bdf6dea004c6fd1cdf4b25da8ad45a606c09409a"
version = "v1.0.1"
[[projects]]
name = "github.com/aead/siphash"
packages = ["."]
revision = "83563a290f60225eb120d724600b9690c3fb536f"
version = "v1.0.1"
[[projects]]
branch = "master"
name = "github.com/btcsuite/btclog"
packages = ["."]
revision = "84c8d2346e9fc8c7b947e243b9c24e6df9fd206a"
[[projects]]
branch = "master"
name = "github.com/btcsuite/go-socks"
packages = ["socks"]
revision = "4720035b7bfd2a9bb130b1c184f8bbe41b6f0d0f"
[[projects]]
name = "github.com/btcsuite/goleveldb"
packages = ["leveldb","leveldb/cache","leveldb/comparer","leveldb/errors","leveldb/filter","leveldb/iterator","leveldb/journal","leveldb/memdb","leveldb/opt","leveldb/storage","leveldb/table","leveldb/util"]
revision = "3fd0373267b6461dbefe91cef614278064d05465"
version = "v1.0.0"
[[projects]]
name = "github.com/btcsuite/snappy-go"
packages = ["."]
revision = "b3db38edf0a9a11a115eb6b022d8c946024a9ac0"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "github.com/btcsuite/websocket"
packages = ["."]
revision = "31079b6807923eb23992c421b114992b95131b55"
[[projects]]
name = "github.com/btcsuite/winsvc"
packages = ["eventlog","mgr","registry","svc","winapi"]
revision = "f8fb11f83f7e860e3769a08e6811d1b399a43722"
version = "v1.0.0"
[[projects]]
name = "github.com/davecgh/go-spew"
packages = ["spew"]
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
version = "v1.1.1"
[[projects]]
name = "github.com/jessevdk/go-flags"
packages = ["."]
revision = "c6ca198ec95c841fdb89fc0de7496fed11ab854e"
version = "v1.4.0"
[[projects]]
name = "github.com/jrick/logrotate"
packages = ["rotator"]
revision = "a93b200c26cbae3bb09dd0dc2c7c7fe1468a034a"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "github.com/kkdai/bstream"
packages = ["."]
revision = "b3251f7901ec4dd4ec66b3210e8f4bd5c0f1c5a3"
[[projects]]
name = "github.com/miekg/dns"
packages = ["."]
revision = "cc8cd02140663157ce797c6650488d6c8563f31f"
version = "v1.1.6"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = ["ed25519","ed25519/internal/edwards25519","ripemd160"]
revision = "c2843e01d9a2bc60bb26ad24e09734fdc2d9ec58"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = ["bpf","internal/iana","internal/socket","ipv4","ipv6"]
revision = "d8887717615a059821345a5c23649351b52a1c0b"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = ["unix"]
revision = "fead79001313d15903fb4605b4a1b781532cd93e"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "00392a00928f96fc94e2c8c65ce3a98cc6f5e2f93dda64d3c4502f2f38026e96"
solver-name = "gps-cdcl"
solver-version = 1

View File

@@ -1,78 +0,0 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
[[constraint]]
name = "bou.ke/monkey"
version = "1.0.1"
[[constraint]]
name = "github.com/aead/siphash"
version = "1.0.1"
[[constraint]]
branch = "master"
name = "github.com/btcsuite/btclog"
[[constraint]]
branch = "master"
name = "github.com/btcsuite/go-socks"
[[constraint]]
name = "github.com/btcsuite/goleveldb"
version = "1.0.0"
[[constraint]]
branch = "master"
name = "github.com/btcsuite/websocket"
[[constraint]]
name = "github.com/btcsuite/winsvc"
version = "1.0.0"
[[constraint]]
name = "github.com/davecgh/go-spew"
version = "1.1.1"
[[constraint]]
name = "github.com/jessevdk/go-flags"
version = "1.4.0"
[[constraint]]
name = "github.com/jrick/logrotate"
version = "1.0.0"
[[constraint]]
branch = "master"
name = "github.com/kkdai/bstream"
[[constraint]]
name = "github.com/miekg/dns"
version = "1.1.6"
[[constraint]]
branch = "master"
name = "golang.org/x/crypto"
[prune]
go-tests = true
unused-packages = true

View File

@@ -10,7 +10,7 @@ import (
"encoding/base32"
"encoding/binary"
"encoding/json"
"fmt"
"github.com/pkg/errors"
"io"
"math/rand"
"net"
@@ -24,7 +24,7 @@ import (
"github.com/daglabs/btcd/util/subnetworkid"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
)
@@ -44,6 +44,7 @@ type AddrManager struct {
addrNewFullNodes newBucket
addrTried map[subnetworkid.SubnetworkID]*triedBucket
addrTriedFullNodes triedBucket
addrTrying map[*KnownAddress]bool
started int32
shutdown int32
wg sync.WaitGroup
@@ -160,6 +161,10 @@ const (
// will consider evicting an address.
minBadDays = 7
// getAddrMin is the least addresses that we will send in response
// to a getAddr. If we have less than this amount, we send everything.
getAddrMin = 50
// getAddrMax is the most addresses that we will send in response
// to a getAddr (in practise the most addresses we will return from a
// call to AddressCache()).
@@ -560,7 +565,7 @@ func (a *AddrManager) deserializePeers(filePath string) error {
}
r, err := os.Open(filePath)
if err != nil {
return fmt.Errorf("%s error opening file: %s", filePath, err)
return errors.Errorf("%s error opening file: %s", filePath, err)
}
defer r.Close()
@@ -568,11 +573,11 @@ func (a *AddrManager) deserializePeers(filePath string) error {
dec := json.NewDecoder(r)
err = dec.Decode(&sam)
if err != nil {
return fmt.Errorf("error reading %s: %s", filePath, err)
return errors.Errorf("error reading %s: %s", filePath, err)
}
if sam.Version != serialisationVersion {
return fmt.Errorf("unknown version %d in serialized "+
return errors.Errorf("unknown version %d in serialized "+
"addrmanager", sam.Version)
}
copy(a.key[:], sam.Key[:])
@@ -581,18 +586,18 @@ func (a *AddrManager) deserializePeers(filePath string) error {
ka := new(KnownAddress)
ka.na, err = a.DeserializeNetAddress(v.Addr)
if err != nil {
return fmt.Errorf("failed to deserialize netaddress "+
return errors.Errorf("failed to deserialize netaddress "+
"%s: %s", v.Addr, err)
}
ka.srcAddr, err = a.DeserializeNetAddress(v.Src)
if err != nil {
return fmt.Errorf("failed to deserialize netaddress "+
return errors.Errorf("failed to deserialize netaddress "+
"%s: %s", v.Src, err)
}
if v.SubnetworkID != "" {
ka.subnetworkID, err = subnetworkid.NewFromStr(v.SubnetworkID)
if err != nil {
return fmt.Errorf("failed to deserialize subnetwork id "+
return errors.Errorf("failed to deserialize subnetwork id "+
"%s: %s", v.SubnetworkID, err)
}
}
@@ -611,7 +616,7 @@ func (a *AddrManager) deserializePeers(filePath string) error {
for _, val := range subnetworkNewBucket {
ka, ok := a.addrIndex[val]
if !ok {
return fmt.Errorf("newbucket contains %s but "+
return errors.Errorf("newbucket contains %s but "+
"none in address list", val)
}
@@ -628,7 +633,7 @@ func (a *AddrManager) deserializePeers(filePath string) error {
for _, val := range newBucket {
ka, ok := a.addrIndex[val]
if !ok {
return fmt.Errorf("full nodes newbucket contains %s but "+
return errors.Errorf("full nodes newbucket contains %s but "+
"none in address list", val)
}
@@ -649,7 +654,7 @@ func (a *AddrManager) deserializePeers(filePath string) error {
for _, val := range subnetworkTriedBucket {
ka, ok := a.addrIndex[val]
if !ok {
return fmt.Errorf("Tried bucket contains %s but "+
return errors.Errorf("Tried bucket contains %s but "+
"none in address list", val)
}
@@ -664,7 +669,7 @@ func (a *AddrManager) deserializePeers(filePath string) error {
for _, val := range triedBucket {
ka, ok := a.addrIndex[val]
if !ok {
return fmt.Errorf("Full nodes tried bucket contains %s but "+
return errors.Errorf("Full nodes tried bucket contains %s but "+
"none in address list", val)
}
@@ -677,12 +682,12 @@ func (a *AddrManager) deserializePeers(filePath string) error {
// Sanity checking.
for k, v := range a.addrIndex {
if v.refs == 0 && !v.tried {
return fmt.Errorf("address %s after serialisation "+
return errors.Errorf("address %s after serialisation "+
"with no references", k)
}
if v.refs > 0 && v.tried {
return fmt.Errorf("address %s after serialisation "+
return errors.Errorf("address %s after serialisation "+
"which is both new and tried!", k)
}
}
@@ -719,7 +724,7 @@ func (a *AddrManager) Start() {
// Start the address ticker to save addresses periodically.
a.wg.Add(1)
- go a.addressHandler()
+ spawn(a.addressHandler)
}
// Stop gracefully shuts down the address manager by stopping the main handler.
@@ -769,11 +774,11 @@ func (a *AddrManager) AddAddressByIP(addrIP string, subnetworkID *subnetworkid.S
// Put it in wire.Netaddress
ip := net.ParseIP(addr)
if ip == nil {
return fmt.Errorf("invalid ip address %s", addr)
return errors.Errorf("invalid ip address %s", addr)
}
port, err := strconv.ParseUint(portStr, 10, 0)
if err != nil {
return fmt.Errorf("invalid port %s: %s", portStr, err)
return errors.Errorf("invalid port %s: %s", portStr, err)
}
na := wire.NewNetAddressIPPort(ip, uint16(port), 0)
a.AddAddress(na, na, subnetworkID) // XXX use correct src address
@@ -844,6 +849,12 @@ func (a *AddrManager) AddressCache(includeAllSubnetworks bool, subnetworkID *sub
if numAddresses > getAddrMax {
numAddresses = getAddrMax
}
if len(allAddr) < getAddrMin {
numAddresses = len(allAddr)
}
if len(allAddr) > getAddrMin && numAddresses < getAddrMin {
numAddresses = getAddrMin
}
// Fisher-Yates shuffle the array. We only need to do the first
// `numAddresses' since we are throwing the rest.
@@ -879,6 +890,8 @@ func (a *AddrManager) reset() {
}
a.nNewFullNodes = 0
a.nTriedFullNodes = 0
a.addrTrying = make(map[*KnownAddress]bool)
}
// HostToNetAddress returns a netaddress given a host address. If the address
@@ -904,7 +917,7 @@ func (a *AddrManager) HostToNetAddress(host string, port uint16, services wire.S
return nil, err
}
if len(ips) == 0 {
return nil, fmt.Errorf("no addresses found for %s", host)
return nil, errors.Errorf("no addresses found for %s", host)
}
ip = ips[0]
}
@@ -942,15 +955,25 @@ func (a *AddrManager) GetAddress() *KnownAddress {
a.mtx.Lock()
defer a.mtx.Unlock()
+ var knownAddress *KnownAddress
if a.localSubnetworkID == nil {
- return a.getAddress(&a.addrTriedFullNodes, a.nTriedFullNodes,
+ knownAddress = a.getAddress(&a.addrTriedFullNodes, a.nTriedFullNodes,
&a.addrNewFullNodes, a.nNewFullNodes)
- }
- subnetworkID := *a.localSubnetworkID
- return a.getAddress(a.addrTried[subnetworkID], a.nTried[subnetworkID],
- a.addrNew[subnetworkID], a.nNew[subnetworkID])
+ } else {
+ subnetworkID := *a.localSubnetworkID
+ knownAddress = a.getAddress(a.addrTried[subnetworkID], a.nTried[subnetworkID],
+ a.addrNew[subnetworkID], a.nNew[subnetworkID])
+ }
+ if knownAddress != nil {
+ if a.addrTrying[knownAddress] {
+ return nil
+ }
+ a.addrTrying[knownAddress] = true
+ }
+ return knownAddress
}
// see GetAddress for details
@@ -1033,6 +1056,8 @@ func (a *AddrManager) Attempt(addr *wire.NetAddress) {
// set last tried time to now
ka.attempts++
ka.lastattempt = time.Now()
delete(a.addrTrying, ka)
}
// Connected Marks the given address as currently connected and working at the
@@ -1224,7 +1249,7 @@ func (a *AddrManager) Good(addr *wire.NetAddress, subnetworkID *subnetworkid.Sub
// with the given priority.
func (a *AddrManager) AddLocalAddress(na *wire.NetAddress, priority AddressPriority) error {
if !IsRoutable(na) {
return fmt.Errorf("address %s is not routable", na.IP)
return errors.Errorf("address %s is not routable", na.IP)
}
a.lamtx.Lock()

View File

@@ -5,8 +5,11 @@
package addrmgr
import (
"errors"
"bou.ke/monkey"
"fmt"
"github.com/daglabs/btcd/config"
"github.com/daglabs/btcd/dagconfig"
"github.com/pkg/errors"
"net"
"reflect"
"testing"
@@ -113,7 +116,17 @@ func TestStartStop(t *testing.T) {
}
func TestAddAddressByIP(t *testing.T) {
fmtErr := fmt.Errorf("")
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
})
defer activeConfigPatch.Unpatch()
fmtErr := errors.Errorf("")
addrErr := &net.AddrError{}
var tests = []struct {
addrIP string
@@ -157,6 +170,16 @@ func TestAddAddressByIP(t *testing.T) {
}
func TestAddLocalAddress(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
})
defer activeConfigPatch.Unpatch()
var tests = []struct {
address wire.NetAddress
priority AddressPriority
@@ -210,6 +233,16 @@ func TestAddLocalAddress(t *testing.T) {
}
func TestAttempt(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
})
defer activeConfigPatch.Unpatch()
n := New("testattempt", lookupFunc, nil)
// Add a new address and get it
@@ -232,6 +265,16 @@ func TestAttempt(t *testing.T) {
}
func TestConnected(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
})
defer activeConfigPatch.Unpatch()
n := New("testconnected", lookupFunc, nil)
// Add a new address and get it
@@ -252,6 +295,16 @@ func TestConnected(t *testing.T) {
}
func TestNeedMoreAddresses(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
})
defer activeConfigPatch.Unpatch()
n := New("testneedmoreaddresses", lookupFunc, nil)
addrsToAdd := 1500
b := n.NeedMoreAddresses()
@@ -284,6 +337,16 @@ func TestNeedMoreAddresses(t *testing.T) {
}
func TestGood(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
})
defer activeConfigPatch.Unpatch()
n := New("testgood", lookupFunc, nil)
addrsToAdd := 64 * 64
addrs := make([]*wire.NetAddress, addrsToAdd)
@@ -331,6 +394,16 @@ func TestGood(t *testing.T) {
}
func TestGoodChangeSubnetworkID(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
})
defer activeConfigPatch.Unpatch()
n := New("test_good_change_subnetwork_id", lookupFunc, nil)
addr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)
addrKey := NetAddressKey(addr)
@@ -400,6 +473,16 @@ func TestGoodChangeSubnetworkID(t *testing.T) {
}
func TestGetAddress(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
})
defer activeConfigPatch.Unpatch()
localSubnetworkID := &subnetworkid.SubnetworkID{0xff}
n := New("testgetaddress", lookupFunc, localSubnetworkID)
@@ -417,6 +500,7 @@ func TestGetAddress(t *testing.T) {
if ka == nil {
t.Fatalf("Did not get an address where there is one in the pool")
}
n.Attempt(ka.NetAddress())
// Checks that we don't get it if we find that it has other subnetwork ID than expected.
actualSubnetworkID := &subnetworkid.SubnetworkID{0xfe}
@@ -449,6 +533,7 @@ func TestGetAddress(t *testing.T) {
if !ka.SubnetworkID().IsEqual(localSubnetworkID) {
t.Errorf("Wrong Subnetwork ID: got %v, want %v", *ka.SubnetworkID(), localSubnetworkID)
}
n.Attempt(ka.NetAddress())
// Mark this as a good address and get it
n.Good(ka.NetAddress(), localSubnetworkID)
@@ -470,6 +555,16 @@ func TestGetAddress(t *testing.T) {
}
func TestGetBestLocalAddress(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
})
defer activeConfigPatch.Unpatch()
localAddrs := []wire.NetAddress{
{IP: net.ParseIP("192.168.0.100")},
{IP: net.ParseIP("::1")},

View File

@@ -5,15 +5,9 @@
package addrmgr
import (
- "github.com/btcsuite/btclog"
"github.com/daglabs/btcd/logger"
+ "github.com/daglabs/btcd/util/panics"
)
- // log is a logger that is initialized with no output filters. This
- // means the package will not perform any logging by default until the caller
- // requests it.
- var log btclog.Logger
- func init() {
- log, _ = logger.Get(logger.SubsystemTags.ADXR)
- }
+ var log, _ = logger.Get(logger.SubsystemTags.ADXR)
+ var spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)

View File

@@ -6,9 +6,10 @@ package addrmgr
import (
"fmt"
"github.com/daglabs/btcd/config"
"net"
"github.com/daglabs/btcd/config"
"github.com/daglabs/btcd/wire"
)
@@ -224,8 +225,8 @@ func IsValid(na *wire.NetAddress) bool {
// the public internet. This is true as long as the address is valid and is not
// in any reserved ranges.
func IsRoutable(na *wire.NetAddress) bool {
- if config.ActiveNetParams().AcceptUnroutable {
- return true
+ if config.ActiveConfig().NetParams().AcceptUnroutable {
+ return !IsLocal(na)
}
return IsValid(na) && !(IsRFC1918(na) || IsRFC2544(na) ||

View File

@@ -5,6 +5,9 @@
package addrmgr_test
import (
"bou.ke/monkey"
"github.com/daglabs/btcd/config"
"github.com/daglabs/btcd/dagconfig"
"net"
"testing"
@@ -15,6 +18,16 @@ import (
// TestIPTypes ensures the various functions which determine the type of an IP
// address based on RFCs work as intended.
func TestIPTypes(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
})
defer activeConfigPatch.Unpatch()
type ipTest struct {
in wire.NetAddress
rfc1918 bool
@@ -145,6 +158,16 @@ func TestIPTypes(t *testing.T) {
// TestGroupKey tests the GroupKey function to ensure it properly groups various
// IP addresses.
func TestGroupKey(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
})
defer activeConfigPatch.Unpatch()
tests := []struct {
name string
ip string

View File

@@ -0,0 +1,6 @@
package apimodels
// RawTransaction represents a raw transaction posted to the API server
type RawTransaction struct {
RawTransaction string `json:"rawTransaction"`
}

View File

@@ -0,0 +1,64 @@
package apimodels
// TransactionResponse is a json representation of a transaction
type TransactionResponse struct {
TransactionHash string `json:"transactionHash"`
TransactionID string `json:"transactionId"`
AcceptingBlockHash string `json:"acceptingBlockHash,omitempty"`
AcceptingBlockBlueScore uint64 `json:"acceptingBlockBlueScore,omitempty"`
SubnetworkID string `json:"subnetworkId"`
LockTime uint64 `json:"lockTime"`
Gas uint64 `json:"gas,omitempty"`
PayloadHash string `json:"payloadHash,omitempty"`
Payload string `json:"payload,omitempty"`
Inputs []*TransactionInputResponse `json:"inputs"`
Outputs []*TransactionOutputResponse `json:"outputs"`
Mass uint64 `json:"mass"`
}
// TransactionOutputResponse is a json representation of a transaction output
type TransactionOutputResponse struct {
TransactionID string `json:"transactionId,omitempty"`
Value uint64 `json:"value"`
ScriptPubKey string `json:"scriptPubKey"`
Address string `json:"address,omitempty"`
AcceptingBlockHash *string `json:"acceptingBlockHash,omitempty"`
AcceptingBlockBlueScore uint64 `json:"acceptingBlockBlueScore,omitempty"`
Index uint32 `json:"index"`
IsCoinbase *bool `json:"isCoinbase,omitempty"`
IsSpendable *bool `json:"isSpendable,omitempty"`
Confirmations *uint64 `json:"confirmations,omitempty"`
}
// TransactionInputResponse is a json representation of a transaction input
type TransactionInputResponse struct {
TransactionID string `json:"transactionId,omitempty"`
PreviousTransactionID string `json:"previousTransactionId"`
PreviousTransactionOutputIndex uint32 `json:"previousTransactionOutputIndex"`
SignatureScript string `json:"signatureScript"`
Sequence uint64 `json:"sequence"`
Address string `json:"address"`
}
// BlockResponse is a json representation of a block
type BlockResponse struct {
BlockHash string `json:"blockHash"`
Version int32 `json:"version"`
HashMerkleRoot string `json:"hashMerkleRoot"`
AcceptedIDMerkleRoot string `json:"acceptedIDMerkleRoot"`
UTXOCommitment string `json:"utxoCommitment"`
Timestamp uint64 `json:"timestamp"`
Bits uint32 `json:"bits"`
Nonce uint64 `json:"nonce"`
AcceptingBlockHash *string `json:"acceptingBlockHash"`
BlueScore uint64 `json:"blueScore"`
IsChainBlock bool `json:"isChainBlock"`
Mass uint64 `json:"mass"`
}
// FeeEstimateResponse is a json representation of a fee estimate
type FeeEstimateResponse struct {
HighPriority float64 `json:"highPriority"`
NormalPriority float64 `json:"normalPriority"`
LowPriority float64 `json:"lowPriority"`
}

View File

@@ -0,0 +1,98 @@
package config
import (
"github.com/daglabs/btcd/apiserver/logger"
"github.com/daglabs/btcd/config"
"github.com/daglabs/btcd/util"
"github.com/jessevdk/go-flags"
"github.com/pkg/errors"
"path/filepath"
)
const (
defaultLogFilename = "apiserver.log"
defaultErrLogFilename = "apiserver_err.log"
)
var (
// Default configuration options
defaultLogDir = util.AppDataDir("apiserver", false)
defaultDBAddress = "localhost:3306"
defaultHTTPListen = "0.0.0.0:8080"
activeConfig *Config
)
// ActiveConfig returns the active configuration struct
func ActiveConfig() *Config {
return activeConfig
}
// Config defines the configuration options for the API server.
type Config struct {
LogDir string `long:"logdir" description:"Directory to log output."`
RPCUser string `short:"u" long:"rpcuser" description:"RPC username"`
RPCPassword string `short:"P" long:"rpcpass" default-mask:"-" description:"RPC password"`
RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
RPCCert string `short:"c" long:"rpccert" description:"RPC server certificate chain for validation"`
DisableTLS bool `long:"notls" description:"Disable TLS"`
DBAddress string `long:"dbaddress" description:"Database address"`
DBUser string `long:"dbuser" description:"Database user" required:"true"`
DBPassword string `long:"dbpass" description:"Database password" required:"true"`
DBName string `long:"dbname" description:"Database name" required:"true"`
HTTPListen string `long:"listen" description:"HTTP address to listen on (default: 0.0.0.0:8080)"`
Migrate bool `long:"migrate" description:"Migrate the database to the latest version. The server will not start when using this flag."`
MQTTBrokerAddress string `long:"mqttaddress" description:"MQTT broker address" required:"false"`
MQTTUser string `long:"mqttuser" description:"MQTT server user" required:"false"`
MQTTPassword string `long:"mqttpass" description:"MQTT server password" required:"false"`
config.NetworkFlags
}
// Parse parses the CLI arguments and returns a config struct.
func Parse() (*Config, error) {
activeConfig = &Config{
LogDir: defaultLogDir,
DBAddress: defaultDBAddress,
HTTPListen: defaultHTTPListen,
}
parser := flags.NewParser(activeConfig, flags.PrintErrors|flags.HelpFlag)
_, err := parser.Parse()
if err != nil {
return nil, err
}
if !activeConfig.Migrate {
if activeConfig.RPCUser == "" {
return nil, errors.New("--rpcuser is required if --migrate flag is not used")
}
if activeConfig.RPCPassword == "" {
return nil, errors.New("--rpcpass is required if --migrate flag is not used")
}
if activeConfig.RPCServer == "" {
return nil, errors.New("--rpcserver is required if --migrate flag is not used")
}
}
if activeConfig.RPCCert == "" && !activeConfig.DisableTLS {
return nil, errors.New("--notls has to be disabled if --cert is used")
}
if activeConfig.RPCCert != "" && activeConfig.DisableTLS {
return nil, errors.New("--cert should be omitted if --notls is used")
}
if (activeConfig.MQTTBrokerAddress != "" || activeConfig.MQTTUser != "" || activeConfig.MQTTPassword != "") &&
(activeConfig.MQTTBrokerAddress == "" || activeConfig.MQTTUser == "" || activeConfig.MQTTPassword == "") {
return nil, errors.New("--mqttaddress, --mqttuser, and --mqttpass must be passed all together")
}
err = activeConfig.ResolveNetwork(parser)
if err != nil {
return nil, err
}
logFile := filepath.Join(activeConfig.LogDir, defaultLogFilename)
errLogFile := filepath.Join(activeConfig.LogDir, defaultErrLogFilename)
logger.InitLog(logFile, errLogFile)
return activeConfig, nil
}

View File

@@ -0,0 +1,82 @@
package controllers
import (
"encoding/hex"
"net/http"
"github.com/daglabs/btcd/apiserver/apimodels"
"github.com/daglabs/btcd/apiserver/dbmodels"
"github.com/daglabs/btcd/httpserverutils"
"github.com/pkg/errors"
"github.com/daglabs/btcd/apiserver/database"
"github.com/daglabs/btcd/util/daghash"
)
const (
// OrderAscending is a parameter that can be used
// in a get list handler to get a list ordered
// in ascending order.
OrderAscending = "asc"
// OrderDescending is a parameter that can be used
// in a get list handler to get a list ordered
// in descending order.
OrderDescending = "desc"
)
const maxGetBlocksLimit = 100
// GetBlockByHashHandler returns a block by a given hash.
func GetBlockByHashHandler(blockHash string) (interface{}, error) {
if bytes, err := hex.DecodeString(blockHash); err != nil || len(bytes) != daghash.HashSize {
return nil, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity,
errors.Errorf("The given block hash is not a hex-encoded %d-byte hash.", daghash.HashSize))
}
db, err := database.DB()
if err != nil {
return nil, err
}
block := &dbmodels.Block{}
dbResult := db.Where(&dbmodels.Block{BlockHash: blockHash}).Preload("AcceptingBlock").First(block)
dbErrors := dbResult.GetErrors()
if httpserverutils.IsDBRecordNotFoundError(dbErrors) {
return nil, httpserverutils.NewHandlerError(http.StatusNotFound, errors.New("No block with the given block hash was found"))
}
if httpserverutils.HasDBError(dbErrors) {
return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading transactions from the database:",
dbResult.GetErrors())
}
return convertBlockModelToBlockResponse(block), nil
}
// GetBlocksHandler searches for all blocks
func GetBlocksHandler(order string, skip uint64, limit uint64) (interface{}, error) {
if limit > maxGetBlocksLimit {
return nil, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity, errors.Errorf("The maximum allowed value for the limit is %d", maxGetBlocksLimit))
}
blocks := []*dbmodels.Block{}
db, err := database.DB()
if err != nil {
return nil, err
}
query := db.
Limit(limit).
Offset(skip).
Preload("AcceptingBlock")
if order == OrderAscending {
query = query.Order("`id` ASC")
} else if order == OrderDescending {
query = query.Order("`id` DESC")
} else {
return nil, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity, errors.Errorf("'%s' is not a valid order", order))
}
query.Find(&blocks)
blockResponses := make([]*apimodels.BlockResponse, len(blocks))
for i, block := range blocks {
blockResponses[i] = convertBlockModelToBlockResponse(block)
}
return blockResponses, nil
}

View File

@@ -0,0 +1,63 @@
package controllers
import (
"encoding/hex"
"github.com/daglabs/btcd/apiserver/apimodels"
"github.com/daglabs/btcd/apiserver/dbmodels"
"github.com/daglabs/btcd/btcjson"
)
func convertTxDBModelToTxResponse(tx *dbmodels.Transaction) *apimodels.TransactionResponse {
txRes := &apimodels.TransactionResponse{
TransactionHash: tx.TransactionHash,
TransactionID: tx.TransactionID,
AcceptingBlockHash: tx.AcceptingBlock.BlockHash,
AcceptingBlockBlueScore: tx.AcceptingBlock.BlueScore,
SubnetworkID: tx.Subnetwork.SubnetworkID,
LockTime: tx.LockTime,
Gas: tx.Gas,
PayloadHash: tx.PayloadHash,
Payload: hex.EncodeToString(tx.Payload),
Inputs: make([]*apimodels.TransactionInputResponse, len(tx.TransactionInputs)),
Outputs: make([]*apimodels.TransactionOutputResponse, len(tx.TransactionOutputs)),
Mass: tx.Mass,
}
for i, txOut := range tx.TransactionOutputs {
txRes.Outputs[i] = &apimodels.TransactionOutputResponse{
Value: txOut.Value,
ScriptPubKey: hex.EncodeToString(txOut.ScriptPubKey),
Address: txOut.Address.Address,
Index: txOut.Index,
}
}
for i, txIn := range tx.TransactionInputs {
txRes.Inputs[i] = &apimodels.TransactionInputResponse{
PreviousTransactionID: txIn.PreviousTransactionOutput.Transaction.TransactionID,
PreviousTransactionOutputIndex: txIn.PreviousTransactionOutput.Index,
SignatureScript: hex.EncodeToString(txIn.SignatureScript),
Sequence: txIn.Sequence,
Address: txIn.PreviousTransactionOutput.Address.Address,
}
}
return txRes
}
func convertBlockModelToBlockResponse(block *dbmodels.Block) *apimodels.BlockResponse {
blockRes := &apimodels.BlockResponse{
BlockHash: block.BlockHash,
Version: block.Version,
HashMerkleRoot: block.HashMerkleRoot,
AcceptedIDMerkleRoot: block.AcceptedIDMerkleRoot,
UTXOCommitment: block.UTXOCommitment,
Timestamp: uint64(block.Timestamp.Unix()),
Bits: block.Bits,
Nonce: block.Nonce,
BlueScore: block.BlueScore,
IsChainBlock: block.IsChainBlock,
Mass: block.Mass,
}
if block.AcceptingBlock != nil {
blockRes.AcceptingBlockHash = btcjson.String(block.AcceptingBlock.BlockHash)
}
return blockRes
}

View File

@@ -0,0 +1,16 @@
package controllers
import (
"github.com/daglabs/btcd/apiserver/apimodels"
"github.com/daglabs/btcd/httpserverutils"
)
// GetFeeEstimatesHandler returns the fee estimates for different priorities
// for accepting a transaction in the DAG.
func GetFeeEstimatesHandler() (interface{}, *httpserverutils.HandlerError) {
return &apimodels.FeeEstimateResponse{
HighPriority: 3,
NormalPriority: 2,
LowPriority: 1,
}, nil
}

View File

@@ -0,0 +1,276 @@
package controllers
import (
"bytes"
"encoding/hex"
"encoding/json"
"fmt"
"net/http"
"github.com/daglabs/btcd/apiserver/apimodels"
"github.com/daglabs/btcd/apiserver/config"
"github.com/daglabs/btcd/apiserver/dbmodels"
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/httpserverutils"
"github.com/daglabs/btcd/util/subnetworkid"
"github.com/pkg/errors"
"github.com/daglabs/btcd/apiserver/database"
"github.com/daglabs/btcd/apiserver/jsonrpc"
"github.com/daglabs/btcd/btcjson"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
"github.com/jinzhu/gorm"
)
const maxGetTransactionsLimit = 1000
// GetTransactionByIDHandler returns a transaction by a given transaction ID.
func GetTransactionByIDHandler(txID string) (interface{}, error) {
if bytes, err := hex.DecodeString(txID); err != nil || len(bytes) != daghash.TxIDSize {
return nil, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity,
errors.Errorf("The given txid is not a hex-encoded %d-byte hash.", daghash.TxIDSize))
}
db, err := database.DB()
if err != nil {
return nil, err
}
tx := &dbmodels.Transaction{}
query := db.Where(&dbmodels.Transaction{TransactionID: txID})
dbResult := addTxPreloadedFields(query).First(&tx)
dbErrors := dbResult.GetErrors()
if httpserverutils.IsDBRecordNotFoundError(dbErrors) {
return nil, httpserverutils.NewHandlerError(http.StatusNotFound, errors.New("No transaction with the given txid was found"))
}
if httpserverutils.HasDBError(dbErrors) {
return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading transaction from the database:", dbErrors)
}
return convertTxDBModelToTxResponse(tx), nil
}
// GetTransactionByHashHandler returns a transaction by a given transaction hash.
func GetTransactionByHashHandler(txHash string) (interface{}, error) {
if bytes, err := hex.DecodeString(txHash); err != nil || len(bytes) != daghash.HashSize {
return nil, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity,
errors.Errorf("The given txhash is not a hex-encoded %d-byte hash.", daghash.HashSize))
}
db, err := database.DB()
if err != nil {
return nil, err
}
tx := &dbmodels.Transaction{}
query := db.Where(&dbmodels.Transaction{TransactionHash: txHash})
dbResult := addTxPreloadedFields(query).First(&tx)
dbErrors := dbResult.GetErrors()
if httpserverutils.IsDBRecordNotFoundError(dbErrors) {
return nil, httpserverutils.NewHandlerError(http.StatusNotFound, errors.Errorf("No transaction with the given txhash was found."))
}
if httpserverutils.HasDBError(dbErrors) {
return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading transaction from the database:", dbErrors)
}
return convertTxDBModelToTxResponse(tx), nil
}
// GetTransactionsByAddressHandler searches for all transactions
// where the given address appears in either an input or an output.
func GetTransactionsByAddressHandler(address string, skip uint64, limit uint64) (interface{}, error) {
if limit > maxGetTransactionsLimit {
return nil, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity,
errors.Errorf("The maximum allowed value for the limit is %d", maxGetTransactionsLimit))
}
db, err := database.DB()
if err != nil {
return nil, err
}
txs := []*dbmodels.Transaction{}
query := db.
Joins("LEFT JOIN `transaction_outputs` ON `transaction_outputs`.`transaction_id` = `transactions`.`id`").
Joins("LEFT JOIN `addresses` AS `out_addresses` ON `out_addresses`.`id` = `transaction_outputs`.`address_id`").
Joins("LEFT JOIN `transaction_inputs` ON `transaction_inputs`.`transaction_id` = `transactions`.`id`").
Joins("LEFT JOIN `transaction_outputs` AS `inputs_outs` ON `inputs_outs`.`id` = `transaction_inputs`.`previous_transaction_output_id`").
Joins("LEFT JOIN `addresses` AS `in_addresses` ON `in_addresses`.`id` = `inputs_outs`.`address_id`").
Where("`out_addresses`.`address` = ?", address).
Or("`in_addresses`.`address` = ?", address).
Limit(limit).
Offset(skip).
Order("`transactions`.`id` ASC")
dbResult := addTxPreloadedFields(query).Find(&txs)
dbErrors := dbResult.GetErrors()
if httpserverutils.HasDBError(dbErrors) {
return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading transactions from the database:", dbErrors)
}
txResponses := make([]*apimodels.TransactionResponse, len(txs))
for i, tx := range txs {
txResponses[i] = convertTxDBModelToTxResponse(tx)
}
return txResponses, nil
}
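A typical request against this handler, using the route and query-parameter names wired up in apiserver/server/routes.go further down, would look like GET /transactions/address/<address>?skip=0&limit=100, where 100 is the default limit and 1000 (maxGetTransactionsLimit) is the cap.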
func fetchSelectedTip() (*dbmodels.Block, error) {
db, err := database.DB()
if err != nil {
return nil, err
}
block := &dbmodels.Block{}
dbResult := db.Order("blue_score DESC").
Where(&dbmodels.Block{IsChainBlock: true}).
First(block)
dbErrors := dbResult.GetErrors()
if httpserverutils.HasDBError(dbErrors) {
return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading the selected tip from the database:", dbErrors)
}
return block, nil
}
func areTxsInBlock(blockID uint64, txIDs []uint64) (map[uint64]bool, error) {
db, err := database.DB()
if err != nil {
return nil, err
}
transactionBlocks := []*dbmodels.TransactionBlock{}
dbErrors := db.
Where(&dbmodels.TransactionBlock{BlockID: blockID}).
Where("transaction_id in (?)", txIDs).
Find(&transactionBlocks).GetErrors()
if len(dbErrors) > 0 {
return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading transaction blocks from the database:", dbErrors)
}
isInBlock := make(map[uint64]bool)
for _, transactionBlock := range transactionBlocks {
isInBlock[transactionBlock.TransactionID] = true
}
return isInBlock, nil
}
// GetUTXOsByAddressHandler searches for all UTXOs that belong to a certain address.
func GetUTXOsByAddressHandler(address string) (interface{}, error) {
db, err := database.DB()
if err != nil {
return nil, err
}
var transactionOutputs []*dbmodels.TransactionOutput
dbErrors := db.
Joins("LEFT JOIN `addresses` ON `addresses`.`id` = `transaction_outputs`.`address_id`").
Where("`addresses`.`address` = ? AND `transaction_outputs`.`is_spent` = 0", address).
Preload("Transaction.AcceptingBlock").
Preload("Transaction.Subnetwork").
Find(&transactionOutputs).GetErrors()
if len(dbErrors) > 0 {
return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading UTXOs from the database:", dbErrors)
}
nonAcceptedTxIds := make([]uint64, 0, len(transactionOutputs))
for _, txOut := range transactionOutputs {
if txOut.Transaction.AcceptingBlock == nil {
nonAcceptedTxIds = append(nonAcceptedTxIds, txOut.TransactionID)
}
}
var selectedTip *dbmodels.Block
var isTxInSelectedTip map[uint64]bool
if len(nonAcceptedTxIds) != 0 {
selectedTip, err = fetchSelectedTip()
if err != nil {
return nil, err
}
isTxInSelectedTip, err = areTxsInBlock(selectedTip.ID, nonAcceptedTxIds)
if err != nil {
return nil, err
}
}
activeNetParams := config.ActiveConfig().NetParams()
UTXOsResponses := make([]*apimodels.TransactionOutputResponse, len(transactionOutputs))
for i, transactionOutput := range transactionOutputs {
subnetworkID := &subnetworkid.SubnetworkID{}
err := subnetworkid.Decode(subnetworkID, transactionOutput.Transaction.Subnetwork.SubnetworkID)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("Couldn't decode subnetwork id %s", transactionOutput.Transaction.Subnetwork.SubnetworkID))
}
var acceptingBlockHash *string
var confirmations uint64
acceptingBlockBlueScore := blockdag.UnacceptedBlueScore
if isTxInSelectedTip[transactionOutput.TransactionID] {
confirmations = 1
} else if transactionOutput.Transaction.AcceptingBlock != nil {
acceptingBlockHash = btcjson.String(transactionOutput.Transaction.AcceptingBlock.BlockHash)
acceptingBlockBlueScore = transactionOutput.Transaction.AcceptingBlock.BlueScore
confirmations = selectedTip.BlueScore - acceptingBlockBlueScore + 2
}
isCoinbase := subnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase)
UTXOsResponses[i] = &apimodels.TransactionOutputResponse{
TransactionID: transactionOutput.Transaction.TransactionID,
Value: transactionOutput.Value,
ScriptPubKey: hex.EncodeToString(transactionOutput.ScriptPubKey),
AcceptingBlockHash: acceptingBlockHash,
AcceptingBlockBlueScore: acceptingBlockBlueScore,
Index: transactionOutput.Index,
IsCoinbase: btcjson.Bool(isCoinbase),
Confirmations: btcjson.Uint64(confirmations),
IsSpendable: btcjson.Bool(!isCoinbase || confirmations >= activeNetParams.BlockCoinbaseMaturity),
}
}
return UTXOsResponses, nil
}
func addTxPreloadedFields(query *gorm.DB) *gorm.DB {
return query.Preload("AcceptingBlock").
Preload("Subnetwork").
Preload("TransactionOutputs").
Preload("TransactionOutputs.Address").
Preload("TransactionInputs.PreviousTransactionOutput.Transaction").
Preload("TransactionInputs.PreviousTransactionOutput.Address")
}
// PostTransaction forwards a raw transaction to the JSON-RPC API server
func PostTransaction(requestBody []byte) error {
client, err := jsonrpc.GetClient()
if err != nil {
return err
}
rawTx := &apimodels.RawTransaction{}
err = json.Unmarshal(requestBody, rawTx)
if err != nil {
return httpserverutils.NewHandlerErrorWithCustomClientMessage(http.StatusUnprocessableEntity,
errors.Wrap(err, "Error unmarshalling request body"),
"The request body is not json-formatted")
}
txBytes, err := hex.DecodeString(rawTx.RawTransaction)
if err != nil {
return httpserverutils.NewHandlerErrorWithCustomClientMessage(http.StatusUnprocessableEntity,
errors.Wrap(err, "Error decoding hex raw transaction"),
"The raw transaction is not a hex-encoded transaction")
}
txReader := bytes.NewReader(txBytes)
tx := &wire.MsgTx{}
err = tx.BtcDecode(txReader, 0)
if err != nil {
return httpserverutils.NewHandlerErrorWithCustomClientMessage(http.StatusUnprocessableEntity,
errors.Wrap(err, "Error decoding raw transaction"),
"Error decoding raw transaction")
}
_, err = client.SendRawTransaction(tx, true)
if err != nil {
if rpcErr, ok := err.(*btcjson.RPCError); ok && rpcErr.Code == btcjson.ErrRPCVerify {
return httpserverutils.NewHandlerError(http.StatusInternalServerError, err)
}
return err
}
return nil
}
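The request body for this endpoint (exposed as POST /transaction in routes.go below) is JSON carrying the hex-encoded transaction in apimodels.RawTransaction's single field; assuming the conventional lowerCamelCase JSON tag, which this diff does not show, it would look like {"rawTransaction": "<hex-encoded transaction>"}.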


@@ -0,0 +1,142 @@
package database
import (
nativeerrors "errors"
"fmt"
"github.com/pkg/errors"
"os"
"github.com/daglabs/btcd/apiserver/config"
"github.com/golang-migrate/migrate/v4/source"
"github.com/jinzhu/gorm"
"github.com/golang-migrate/migrate/v4"
)
// db is the API server database.
var db *gorm.DB
// DB returns a reference to the database connection
func DB() (*gorm.DB, error) {
if db == nil {
return nil, errors.New("Database is not connected")
}
return db, nil
}
type gormLogger struct{}
func (l gormLogger) Print(v ...interface{}) {
str := fmt.Sprint(v...)
log.Errorf(str)
}
// Connect connects to the database specified in
// the config variable.
func Connect() error {
connectionString := buildConnectionString()
migrator, driver, err := openMigrator(connectionString)
if err != nil {
return err
}
isCurrent, version, err := isCurrent(migrator, driver)
if err != nil {
return errors.Errorf("Error checking whether the database is current: %s", err)
}
if !isCurrent {
return errors.Errorf("Database is not current (version %d). Please migrate"+
" the database by running the server with --migrate flag and then run it again.", version)
}
db, err = gorm.Open("mysql", connectionString)
if err != nil {
return err
}
db.SetLogger(gormLogger{})
return nil
}
// Close closes the connection to the database
func Close() error {
if db == nil {
return nil
}
err := db.Close()
db = nil
return err
}
func buildConnectionString() string {
cfg := config.ActiveConfig()
return fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8&parseTime=True",
cfg.DBUser, cfg.DBPassword, cfg.DBAddress, cfg.DBName)
}
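For illustration, with hypothetical settings DBUser=apiserver, DBPassword=secret, DBAddress=localhost:3306 and DBName=apiserver, the format string above yields the DSN apiserver:secret@tcp(localhost:3306)/apiserver?charset=utf8&parseTime=True.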
// isCurrent resolves whether the database is on the latest
// version of the schema.
func isCurrent(migrator *migrate.Migrate, driver source.Driver) (bool, uint, error) {
// Get the current version
version, isDirty, err := migrator.Version()
if nativeerrors.Is(err, migrate.ErrNilVersion) {
return false, 0, nil
}
if err != nil {
return false, 0, errors.WithStack(err)
}
if isDirty {
return false, 0, errors.Errorf("Database is dirty")
}
// The database is current if Next returns ErrNotExist
_, err = driver.Next(version)
if pathErr, ok := err.(*os.PathError); ok {
if pathErr.Err == os.ErrNotExist {
return true, version, nil
}
}
return false, version, err
}
func openMigrator(connectionString string) (*migrate.Migrate, source.Driver, error) {
driver, err := source.Open("file://migrations")
if err != nil {
return nil, nil, err
}
migrator, err := migrate.NewWithSourceInstance(
"migrations", driver, "mysql://"+connectionString)
if err != nil {
return nil, nil, err
}
return migrator, driver, nil
}
// Migrate database to the latest version.
func Migrate() error {
connectionString := buildConnectionString()
migrator, driver, err := openMigrator(connectionString)
if err != nil {
return err
}
isCurrent, version, err := isCurrent(migrator, driver)
if err != nil {
return errors.Errorf("Error checking whether the database is current: %s", err)
}
if isCurrent {
log.Infof("Database is already up-to-date (version %d)", version)
return nil
}
err = migrator.Up()
if err != nil {
return err
}
version, isDirty, err := migrator.Version()
if err != nil {
return err
}
if isDirty {
return errors.Errorf("error migrating database: database is dirty")
}
log.Infof("Migrated database to the latest version (version %d)", version)
return nil
}


@@ -0,0 +1,9 @@
package database
import "github.com/daglabs/btcd/util/panics"
import "github.com/daglabs/btcd/apiserver/logger"
var (
log = logger.BackendLog.Logger("DTBS")
spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)
)


@@ -0,0 +1,111 @@
package dbmodels
import (
"time"
)
// Block is the gorm model for the 'blocks' table
type Block struct {
ID uint64 `gorm:"primary_key"`
BlockHash string
AcceptingBlockID *uint64
AcceptingBlock *Block
Version int32
HashMerkleRoot string
AcceptedIDMerkleRoot string
UTXOCommitment string
Timestamp time.Time
Bits uint32
Nonce uint64
BlueScore uint64
IsChainBlock bool
Mass uint64
ParentBlocks []Block `gorm:"many2many:parent_blocks;"`
}
// ParentBlock is the gorm model for the 'parent_blocks' table
type ParentBlock struct {
BlockID uint64
Block Block
ParentBlockID uint64
ParentBlock Block
}
// RawBlock is the gorm model for the 'raw_blocks' table
type RawBlock struct {
BlockID uint64
Block Block
BlockData []byte
}
// Subnetwork is the gorm model for the 'subnetworks' table
type Subnetwork struct {
ID uint64 `gorm:"primary_key"`
SubnetworkID string
GasLimit *uint64
}
// Transaction is the gorm model for the 'transactions' table
type Transaction struct {
ID uint64 `gorm:"primary_key"`
AcceptingBlockID *uint64
AcceptingBlock *Block
TransactionHash string
TransactionID string
LockTime uint64
SubnetworkID uint64
Subnetwork Subnetwork
Gas uint64
PayloadHash string
Payload []byte
Mass uint64
Blocks []Block `gorm:"many2many:transactions_to_blocks;"`
TransactionOutputs []TransactionOutput
TransactionInputs []TransactionInput
}
// TransactionBlock is the gorm model for the 'transactions_to_blocks' table
type TransactionBlock struct {
TransactionID uint64
Transaction Transaction
BlockID uint64
Block Block
Index uint32
}
// TableName returns the table name associated to the
// TransactionBlock gorm model
func (TransactionBlock) TableName() string {
return "transactions_to_blocks"
}
// TransactionOutput is the gorm model for the 'transaction_outputs' table
type TransactionOutput struct {
ID uint64 `gorm:"primary_key"`
TransactionID uint64
Transaction Transaction
Index uint32
Value uint64
ScriptPubKey []byte
IsSpent bool
AddressID uint64
Address Address
}
// TransactionInput is the gorm model for the 'transaction_inputs' table
type TransactionInput struct {
ID uint64 `gorm:"primary_key"`
TransactionID uint64
Transaction Transaction
PreviousTransactionOutputID uint64
PreviousTransactionOutput TransactionOutput
Index uint32
SignatureScript []byte
Sequence uint64
}
// Address is the gorm model for the 'addresses' table
type Address struct {
ID uint64 `gorm:"primary_key"`
Address string
}


@@ -0,0 +1,28 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.13-alpine AS build
RUN mkdir -p /go/src/github.com/daglabs/btcd
WORKDIR /go/src/github.com/daglabs/btcd
RUN apk add --no-cache curl git
COPY go.mod .
COPY go.sum .
RUN go mod download
COPY . .
RUN cd apiserver && CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o apiserver .
# --- multistage docker build: stage #2: runtime image
FROM alpine
WORKDIR /app
RUN apk add --no-cache tini
COPY --from=build /go/src/github.com/daglabs/btcd/apiserver/ /app/
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["/app/apiserver"]

apiserver/jsonrpc/client.go

@@ -0,0 +1,125 @@
package jsonrpc
import (
"github.com/pkg/errors"
"io/ioutil"
"time"
"github.com/daglabs/btcd/apiserver/config"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/rpcclient"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/wire"
)
// Client represents a connection to the JSON-RPC API of a full node
type Client struct {
*rpcclient.Client
OnBlockAdded chan *BlockAddedMsg
OnChainChanged chan *ChainChangedMsg
}
var client *Client
// GetClient returns an instance of the JSON-RPC client, provided there is an active connection
func GetClient() (*Client, error) {
if client == nil {
return nil, errors.New("JSON-RPC is not connected")
}
return client, nil
}
// BlockAddedMsg defines the message received in onBlockAdded
type BlockAddedMsg struct {
ChainHeight uint64
Header *wire.BlockHeader
}
// ChainChangedMsg defines the message received in onChainChanged
type ChainChangedMsg struct {
RemovedChainBlockHashes []*daghash.Hash
AddedChainBlocks []*rpcclient.ChainBlock
}
// Close closes the connection to the JSON-RPC API server
func Close() {
if client == nil {
return
}
client.Disconnect()
client = nil
}
// Connect initiates a connection to the JSON-RPC API Server
func Connect() error {
cfg := config.ActiveConfig()
var cert []byte
if !cfg.DisableTLS {
var err error
cert, err = ioutil.ReadFile(cfg.RPCCert)
if err != nil {
return errors.Errorf("Error reading certificates file: %s", err)
}
}
connCfg := &rpcclient.ConnConfig{
Host: cfg.RPCServer,
Endpoint: "ws",
User: cfg.RPCUser,
Pass: cfg.RPCPassword,
DisableTLS: cfg.DisableTLS,
RequestTimeout: time.Second * 5,
}
if !cfg.DisableTLS {
connCfg.Certificates = cert
}
var err error
client, err = newClient(connCfg)
if err != nil {
return errors.Errorf("Error connecting to address %s: %s", cfg.RPCServer, err)
}
return nil
}
func newClient(connCfg *rpcclient.ConnConfig) (*Client, error) {
client = &Client{
OnBlockAdded: make(chan *BlockAddedMsg),
OnChainChanged: make(chan *ChainChangedMsg),
}
notificationHandlers := &rpcclient.NotificationHandlers{
OnFilteredBlockAdded: func(height uint64, header *wire.BlockHeader,
txs []*util.Tx) {
client.OnBlockAdded <- &BlockAddedMsg{
ChainHeight: height,
Header: header,
}
},
OnChainChanged: func(removedChainBlockHashes []*daghash.Hash,
addedChainBlocks []*rpcclient.ChainBlock) {
client.OnChainChanged <- &ChainChangedMsg{
RemovedChainBlockHashes: removedChainBlockHashes,
AddedChainBlocks: addedChainBlocks,
}
},
}
var err error
client.Client, err = rpcclient.New(connCfg, notificationHandlers)
if err != nil {
return nil, errors.Errorf("Error connecting to address %s: %s", connCfg.Host, err)
}
if err = client.NotifyBlocks(); err != nil {
return nil, errors.Errorf("Error while registering client %s for block notifications: %s", client.Host(), err)
}
if err = client.NotifyChainChanges(); err != nil {
return nil, errors.Errorf("Error while registering client %s for chain changes notifications: %s", client.Host(), err)
}
return client, nil
}

apiserver/log.go

@@ -0,0 +1,11 @@
package main
import (
"github.com/daglabs/btcd/logger"
"github.com/daglabs/btcd/util/panics"
)
var (
log = logger.BackendLog.Logger("APIS")
spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)
)


@@ -0,0 +1,24 @@
package logger
import (
"fmt"
"github.com/daglabs/btcd/logs"
"os"
)
// BackendLog is the logging backend used to create all subsystem loggers.
var BackendLog = logs.NewBackend()
// InitLog attaches log file and error log file to the backend log.
func InitLog(logFile, errLogFile string) {
err := BackendLog.AddLogFile(logFile, logs.LevelTrace)
if err != nil {
fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", logFile, logs.LevelTrace, err)
os.Exit(1)
}
err = BackendLog.AddLogFile(errLogFile, logs.LevelWarn)
if err != nil {
fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", errLogFile, logs.LevelWarn, err)
os.Exit(1)
}
}

apiserver/main.go

@@ -0,0 +1,81 @@
package main
import (
"fmt"
"github.com/daglabs/btcd/apiserver/mqtt"
"github.com/pkg/errors"
"os"
"github.com/daglabs/btcd/apiserver/config"
"github.com/daglabs/btcd/apiserver/database"
"github.com/daglabs/btcd/apiserver/jsonrpc"
"github.com/daglabs/btcd/apiserver/server"
"github.com/daglabs/btcd/logger"
"github.com/daglabs/btcd/signal"
"github.com/daglabs/btcd/util/panics"
_ "github.com/golang-migrate/migrate/v4/database/mysql"
_ "github.com/golang-migrate/migrate/v4/source/file"
_ "github.com/jinzhu/gorm/dialects/mysql"
)
func main() {
defer panics.HandlePanic(log, logger.BackendLog, nil)
cfg, err := config.Parse()
if err != nil {
errString := fmt.Sprintf("Error parsing command-line arguments: %s", err)
_, fErr := fmt.Fprint(os.Stderr, errString)
if fErr != nil {
panic(errString)
}
return
}
if cfg.Migrate {
err := database.Migrate()
if err != nil {
panic(errors.Errorf("Error migrating database: %s", err))
}
return
}
err = database.Connect()
if err != nil {
panic(errors.Errorf("Error connecting to database: %s", err))
}
defer func() {
err := database.Close()
if err != nil {
panic(errors.Errorf("Error closing the database: %s", err))
}
}()
err = mqtt.Connect()
if err != nil {
panic(errors.Errorf("Error connecting to MQTT: %s", err))
}
defer mqtt.Close()
err = jsonrpc.Connect()
if err != nil {
panic(errors.Errorf("Error connecting to servers: %s", err))
}
defer jsonrpc.Close()
shutdownServer := server.Start(config.ActiveConfig().HTTPListen)
defer shutdownServer()
doneChan := make(chan struct{}, 1)
spawn(func() {
err := startSync(doneChan)
if err != nil {
panic(err)
}
})
interrupt := signal.InterruptListener()
<-interrupt
// Gracefully stop syncing
doneChan <- struct{}{}
}


@@ -0,0 +1 @@
DROP TABLE `blocks`;


@@ -0,0 +1,23 @@
CREATE TABLE `blocks`
(
`id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
`block_hash` CHAR(64) NOT NULL,
`accepting_block_id` BIGINT UNSIGNED NULL,
`version` INT NOT NULL,
`hash_merkle_root` CHAR(64) NOT NULL,
`accepted_id_merkle_root` CHAR(64) NOT NULL,
`utxo_commitment` CHAR(64) NOT NULL,
`timestamp` DATETIME NOT NULL,
`bits` INT UNSIGNED NOT NULL,
`nonce` BIGINT UNSIGNED NOT NULL,
`blue_score` BIGINT UNSIGNED NOT NULL,
`is_chain_block` TINYINT NOT NULL,
`mass` BIGINT NOT NULL,
PRIMARY KEY (`id`),
UNIQUE INDEX `idx_blocks_block_hash` (`block_hash`),
INDEX `idx_blocks_timestamp` (`timestamp`),
INDEX `idx_blocks_is_chain_block` (`is_chain_block`),
CONSTRAINT `fk_blocks_accepting_block_id`
FOREIGN KEY (`accepting_block_id`)
REFERENCES `blocks` (`id`)
);


@@ -0,0 +1 @@
DROP TABLE `parent_blocks`;


@@ -0,0 +1,12 @@
CREATE TABLE `parent_blocks`
(
`block_id` BIGINT UNSIGNED NOT NULL,
`parent_block_id` BIGINT UNSIGNED NOT NULL,
PRIMARY KEY (`block_id`, `parent_block_id`),
CONSTRAINT `fk_parent_blocks_block_id`
FOREIGN KEY (`block_id`)
REFERENCES `blocks` (`id`),
CONSTRAINT `fk_parent_blocks_parent_block_id`
FOREIGN KEY (`parent_block_id`)
REFERENCES `blocks` (`id`)
);


@@ -0,0 +1 @@
DROP TABLE `raw_blocks`;


@@ -0,0 +1,9 @@
CREATE TABLE `raw_blocks`
(
`block_id` BIGINT UNSIGNED NOT NULL,
`block_data` BLOB NOT NULL,
PRIMARY KEY (`block_id`),
CONSTRAINT `fk_raw_blocks_block_id`
FOREIGN KEY (`block_id`)
REFERENCES `blocks` (`id`)
);


@@ -0,0 +1 @@
DROP TABLE `subnetworks`;


@@ -0,0 +1,8 @@
CREATE TABLE `subnetworks`
(
`id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
`subnetwork_id` CHAR(64) NOT NULL,
`gas_limit` BIGINT UNSIGNED NULL,
PRIMARY KEY (`id`),
UNIQUE INDEX `idx_subnetworks_subnetwork_id` (`subnetwork_id`)
);


@@ -0,0 +1 @@
DROP TABLE `transactions`;


@@ -0,0 +1,19 @@
CREATE TABLE `transactions`
(
`id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
`accepting_block_id` BIGINT UNSIGNED NULL,
`transaction_hash` CHAR(64) NOT NULL,
`transaction_id` CHAR(64) NOT NULL,
`lock_time` BIGINT UNSIGNED NOT NULL,
`subnetwork_id` BIGINT UNSIGNED NOT NULL,
`gas` BIGINT UNSIGNED NOT NULL,
`payload_hash` CHAR(64) NOT NULL,
`payload` BLOB NOT NULL,
`mass` BIGINT NOT NULL,
PRIMARY KEY (`id`),
UNIQUE INDEX `idx_transactions_transaction_hash` (`transaction_hash`),
INDEX `idx_transactions_transaction_id` (`transaction_id`),
CONSTRAINT `fk_transactions_accepting_block_id`
FOREIGN KEY (`accepting_block_id`)
REFERENCES `blocks` (`id`)
);


@@ -0,0 +1 @@
DROP TABLE `transactions_to_blocks`;


@@ -0,0 +1,14 @@
CREATE TABLE `transactions_to_blocks`
(
`transaction_id` BIGINT UNSIGNED NOT NULL,
`block_id` BIGINT UNSIGNED NOT NULL,
`index` INT UNSIGNED NOT NULL,
PRIMARY KEY (`transaction_id`, `block_id`),
INDEX `idx_transactions_to_blocks_index` (`index`),
CONSTRAINT `fk_transactions_to_blocks_block_id`
FOREIGN KEY (`block_id`)
REFERENCES `blocks` (`id`),
CONSTRAINT `fk_transactions_to_blocks_transaction_id`
FOREIGN KEY (`transaction_id`)
REFERENCES `transactions` (`id`)
);


@@ -0,0 +1 @@
DROP TABLE `addresses`;


@@ -0,0 +1,7 @@
CREATE TABLE `addresses`
(
`id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
`address` CHAR(50) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE INDEX `idx_addresses_address` (`address`)
);


@@ -0,0 +1 @@
DROP TABLE `transaction_outputs`;


@@ -0,0 +1,18 @@
CREATE TABLE `transaction_outputs`
(
`id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
`transaction_id` BIGINT UNSIGNED NOT NULL,
`index` INT UNSIGNED NOT NULL,
`value` BIGINT UNSIGNED NOT NULL,
`script_pub_key` BLOB NOT NULL,
`is_spent` TINYINT NOT NULL,
`address_id` BIGINT UNSIGNED NOT NULL,
PRIMARY KEY (`id`),
INDEX `idx_transaction_outputs_transaction_id` (`transaction_id`),
CONSTRAINT `fk_transaction_outputs_transaction_id`
FOREIGN KEY (`transaction_id`)
REFERENCES `transactions` (`id`),
CONSTRAINT `fk_transaction_outputs_address_id`
FOREIGN KEY (`address_id`)
REFERENCES `addresses` (`id`)
);


@@ -0,0 +1 @@
DROP TABLE `transaction_inputs`;


@@ -0,0 +1,18 @@
CREATE TABLE `transaction_inputs`
(
`id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
`transaction_id` BIGINT UNSIGNED NULL,
`previous_transaction_output_id` BIGINT UNSIGNED NOT NULL,
`index` INT UNSIGNED NOT NULL,
`signature_script` BLOB NOT NULL,
`sequence` BIGINT UNSIGNED NOT NULL,
PRIMARY KEY (`id`),
INDEX `idx_transaction_inputs_transaction_id` (`transaction_id`),
INDEX `idx_transaction_inputs_previous_transaction_output_id` (`previous_transaction_output_id`),
CONSTRAINT `fk_transaction_inputs_transaction_id`
FOREIGN KEY (`transaction_id`)
REFERENCES `transactions` (`id`),
CONSTRAINT `fk_transaction_inputs_previous_transaction_output_id`
FOREIGN KEY (`previous_transaction_output_id`)
REFERENCES `transaction_outputs` (`id`)
);

apiserver/mqtt/log.go

@@ -0,0 +1,9 @@
package mqtt
import "github.com/daglabs/btcd/util/panics"
import "github.com/daglabs/btcd/apiserver/logger"
var (
log = logger.BackendLog.Logger("MQTT")
spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)
)

apiserver/mqtt/mqtt.go

@@ -0,0 +1,50 @@
package mqtt
import (
"errors"
"github.com/daglabs/btcd/apiserver/config"
mqtt "github.com/eclipse/paho.mqtt.golang"
)
// client is the MQTT client instance, if there is an active connection
var client mqtt.Client
// GetClient returns the MQTT client instance, provided there is an active connection
func GetClient() (mqtt.Client, error) {
if client == nil {
return nil, errors.New("MQTT is not connected")
}
return client, nil
}
// Connect initiates a connection to the MQTT server, if defined
func Connect() error {
cfg := config.ActiveConfig()
if cfg.MQTTBrokerAddress == "" {
// MQTT broker not defined -- nothing to do
return nil
}
options := mqtt.NewClientOptions()
options.AddBroker(cfg.MQTTBrokerAddress)
options.SetUsername(cfg.MQTTUser)
options.SetPassword(cfg.MQTTPassword)
options.SetAutoReconnect(true)
newClient := mqtt.NewClient(options)
if token := newClient.Connect(); token.Wait() && token.Error() != nil {
return token.Error()
}
client = newClient
return nil
}
// Close closes the connection to the MQTT server, if previously connected
func Close() {
if client == nil {
return
}
client.Disconnect(250)
client = nil
}
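For reference, a minimal sketch of how a caller might publish through the client returned by GetClient, using the standard paho.mqtt.golang Publish API; the helper and any topic passed to it are hypothetical, since this diff only establishes and tears down the connection:

package mqttexample

import (
	mqtt "github.com/eclipse/paho.mqtt.golang"
)

// publishJSON publishes a payload at QoS 0, non-retained. Both the helper
// and the topic argument are illustrative, not part of this diff.
func publishJSON(client mqtt.Client, topic string, payload []byte) error {
	token := client.Publish(topic, 0, false, payload)
	token.Wait()
	return token.Error()
}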

apiserver/server/log.go

@@ -0,0 +1,9 @@
package server
import "github.com/daglabs/btcd/util/panics"
import "github.com/daglabs/btcd/apiserver/logger"
var (
log = logger.BackendLog.Logger("REST")
spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)
)

apiserver/server/routes.go

@@ -0,0 +1,172 @@
package server
import (
"fmt"
"github.com/daglabs/btcd/httpserverutils"
"github.com/pkg/errors"
"net/http"
"strconv"
"github.com/daglabs/btcd/apiserver/controllers"
"github.com/gorilla/mux"
)
const (
routeParamTxID = "txID"
routeParamTxHash = "txHash"
routeParamAddress = "address"
routeParamBlockHash = "blockHash"
)
const (
queryParamSkip = "skip"
queryParamLimit = "limit"
queryParamOrder = "order"
)
const (
defaultGetTransactionsLimit = 100
defaultGetBlocksLimit = 25
defaultGetBlocksOrder = controllers.OrderAscending
)
func mainHandler(_ *httpserverutils.ServerContext, _ *http.Request, _ map[string]string, _ map[string]string, _ []byte) (interface{}, error) {
return struct {
Message string `json:"message"`
}{
Message: "API server is running",
}, nil
}
func addRoutes(router *mux.Router) {
router.HandleFunc("/", httpserverutils.MakeHandler(mainHandler))
router.HandleFunc(
fmt.Sprintf("/transaction/id/{%s}", routeParamTxID),
httpserverutils.MakeHandler(getTransactionByIDHandler)).
Methods("GET")
router.HandleFunc(
fmt.Sprintf("/transaction/hash/{%s}", routeParamTxHash),
httpserverutils.MakeHandler(getTransactionByHashHandler)).
Methods("GET")
router.HandleFunc(
fmt.Sprintf("/transactions/address/{%s}", routeParamAddress),
httpserverutils.MakeHandler(getTransactionsByAddressHandler)).
Methods("GET")
router.HandleFunc(
fmt.Sprintf("/utxos/address/{%s}", routeParamAddress),
httpserverutils.MakeHandler(getUTXOsByAddressHandler)).
Methods("GET")
router.HandleFunc(
fmt.Sprintf("/block/{%s}", routeParamBlockHash),
httpserverutils.MakeHandler(getBlockByHashHandler)).
Methods("GET")
router.HandleFunc(
"/blocks",
httpserverutils.MakeHandler(getBlocksHandler)).
Methods("GET")
router.HandleFunc(
"/fee-estimates",
httpserverutils.MakeHandler(getFeeEstimatesHandler)).
Methods("GET")
router.HandleFunc(
"/transaction",
httpserverutils.MakeHandler(postTransactionHandler)).
Methods("POST")
}
func convertQueryParamToInt(queryParams map[string]string, param string, defaultValue int) (int, error) {
if _, ok := queryParams[param]; ok {
intValue, err := strconv.Atoi(queryParams[param])
if err != nil {
return 0, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity, errors.Wrap(err, fmt.Sprintf("Couldn't parse the '%s' query parameter", param)))
}
return intValue, nil
}
return defaultValue, nil
}
func getTransactionByIDHandler(_ *httpserverutils.ServerContext, _ *http.Request, routeParams map[string]string, _ map[string]string,
_ []byte) (interface{}, error) {
return controllers.GetTransactionByIDHandler(routeParams[routeParamTxID])
}
func getTransactionByHashHandler(_ *httpserverutils.ServerContext, _ *http.Request, routeParams map[string]string, _ map[string]string,
_ []byte) (interface{}, error) {
return controllers.GetTransactionByHashHandler(routeParams[routeParamTxHash])
}
func getTransactionsByAddressHandler(_ *httpserverutils.ServerContext, _ *http.Request, routeParams map[string]string, queryParams map[string]string,
_ []byte) (interface{}, error) {
skip, err := convertQueryParamToInt(queryParams, queryParamSkip, 0)
if err != nil {
return nil, err
}
limit, err := convertQueryParamToInt(queryParams, queryParamLimit, defaultGetTransactionsLimit)
if err != nil {
return nil, err
}
return controllers.GetTransactionsByAddressHandler(routeParams[routeParamAddress], uint64(skip), uint64(limit))
}
func getUTXOsByAddressHandler(_ *httpserverutils.ServerContext, _ *http.Request, routeParams map[string]string, _ map[string]string,
_ []byte) (interface{}, error) {
return controllers.GetUTXOsByAddressHandler(routeParams[routeParamAddress])
}
func getBlockByHashHandler(_ *httpserverutils.ServerContext, _ *http.Request, routeParams map[string]string, _ map[string]string,
_ []byte) (interface{}, error) {
return controllers.GetBlockByHashHandler(routeParams[routeParamBlockHash])
}
func getFeeEstimatesHandler(_ *httpserverutils.ServerContext, _ *http.Request, _ map[string]string, _ map[string]string,
_ []byte) (interface{}, error) {
return controllers.GetFeeEstimatesHandler()
}
func getBlocksHandler(_ *httpserverutils.ServerContext, _ *http.Request, _ map[string]string, queryParams map[string]string,
_ []byte) (interface{}, error) {
skip, err := convertQueryParamToInt(queryParams, queryParamSkip, 0)
if err != nil {
return nil, err
}
limit, err := convertQueryParamToInt(queryParams, queryParamLimit, defaultGetBlocksLimit)
if err != nil {
return nil, err
}
order := defaultGetBlocksOrder
if orderParamValue, ok := queryParams[queryParamOrder]; ok {
if orderParamValue != controllers.OrderAscending && orderParamValue != controllers.OrderDescending {
return nil, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity, errors.Errorf("'%s' is not a valid value for the '%s' query parameter", orderParamValue, queryParamOrder))
}
order = orderParamValue
}
return controllers.GetBlocksHandler(order, uint64(skip), uint64(limit))
}
func postTransactionHandler(_ *httpserverutils.ServerContext, _ *http.Request, _ map[string]string, _ map[string]string,
requestBody []byte) (interface{}, error) {
return nil, controllers.PostTransaction(requestBody)
}


@@ -0,0 +1,40 @@
package server
import (
"context"
"github.com/daglabs/btcd/httpserverutils"
"net/http"
"time"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
)
const gracefulShutdownTimeout = 30 * time.Second
// Start starts the HTTP REST server and returns a
// function to gracefully shutdown it.
func Start(listenAddr string) func() {
router := mux.NewRouter()
router.Use(httpserverutils.AddRequestMetadataMiddleware)
router.Use(httpserverutils.RecoveryMiddleware)
router.Use(httpserverutils.LoggingMiddleware)
router.Use(httpserverutils.SetJSONMiddleware)
addRoutes(router)
httpServer := &http.Server{
Addr: listenAddr,
Handler: handlers.CORS()(router),
}
spawn(func() {
log.Errorf("%s", httpServer.ListenAndServe())
})
return func() {
ctx, cancel := context.WithTimeout(context.Background(), gracefulShutdownTimeout)
defer cancel()
err := httpServer.Shutdown(ctx)
if err != nil {
log.Errorf("Error shutting down HTTP server: %s", err)
}
}
}

apiserver/sync.go (file diff suppressed because it is too large)


@@ -6,11 +6,18 @@ package blockdag
import (
"fmt"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
)
func (dag *BlockDAG) addNodeToIndexWithInvalidAncestor(block *util.Block) error {
blockHeader := &block.MsgBlock().Header
newNode := newBlockNode(blockHeader, newSet(), dag.dagParams.K)
newNode.status = statusInvalidAncestor
dag.index.AddNode(newNode)
return dag.index.flushToDB()
}
// maybeAcceptBlock potentially accepts a block into the block DAG. It
// performs several validation checks which depend on its position within
// the block DAG before adding it. The block is expected to have already
@@ -21,27 +28,29 @@ import (
//
// This function MUST be called with the dagLock held (for writes).
func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) error {
// The height of this block is one more than the referenced previous
// block.
parents, err := lookupParentNodes(block, dag)
if err != nil {
if rErr, ok := err.(RuleError); ok && rErr.ErrorCode == ErrInvalidAncestorBlock {
err := dag.addNodeToIndexWithInvalidAncestor(block)
if err != nil {
return err
}
}
return err
}
bluestParent := parents.bluest()
blockHeight := int32(0)
if !block.IsGenesis() {
blockHeight = parents.maxHeight() + 1
}
block.SetHeight(blockHeight)
// The block must pass all of the validation rules which depend on the
// position of the block within the block DAG.
err = dag.checkBlockContext(block, parents, bluestParent, flags)
err = dag.checkBlockContext(block, parents, flags)
if err != nil {
return err
}
// Create a new block node for the block and add it to the node index.
newNode := newBlockNode(&block.MsgBlock().Header, parents, dag.dagParams.K)
newNode.status = statusDataStored
dag.index.AddNode(newNode)
// Insert the block into the database if it's not already there. Even
// though it is possible the block will ultimately fail to connect, it
// has already passed all proof-of-work and validity tests which means
@@ -52,26 +61,30 @@ func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) er
// such as making blocks that never become part of the DAG or
// blocks that fail to connect available for further analysis.
err = dag.db.Update(func(dbTx database.Tx) error {
return dbStoreBlock(dbTx, block)
err := dbStoreBlock(dbTx, block)
if err != nil {
return err
}
return dag.index.flushToDBWithTx(dbTx)
})
if err != nil {
return err
}
// Create a new block node for the block and add it to the node index.
blockHeader := &block.MsgBlock().Header
newNode := newBlockNode(blockHeader, parents, dag.dagParams.K)
newNode.status = statusDataStored
dag.index.AddNode(newNode)
err = dag.index.flushToDB()
if err != nil {
return err
// Make sure that all the block's transactions are finalized
fastAdd := flags&BFFastAdd == BFFastAdd
bluestParent := parents.bluest()
if !fastAdd {
if err := dag.validateAllTxsFinalized(block, newNode, bluestParent); err != nil {
return err
}
}
block.SetChainHeight(newNode.chainHeight)
// Connect the passed block to the DAG. This also handles validation of the
// transaction scripts.
err = dag.addBlock(newNode, parents, block, flags)
chainUpdates, err := dag.addBlock(newNode, parents, block, flags)
if err != nil {
return err
}
@@ -80,7 +93,16 @@ func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) er
// DAG. The caller would typically want to react by relaying the
// inventory to other peers.
dag.dagLock.Unlock()
dag.sendNotification(NTBlockAdded, block)
dag.sendNotification(NTBlockAdded, &BlockAddedNotificationData{
Block: block,
WasUnorphaned: flags&BFWasUnorphaned != 0,
})
if len(chainUpdates.addedChainBlockHashes) > 0 {
dag.sendNotification(NTChainChanged, &ChainChangedNotificationData{
RemovedChainBlockHashes: chainUpdates.removedChainBlockHashes,
AddedChainBlockHashes: chainUpdates.addedChainBlockHashes,
})
}
dag.dagLock.Lock()
return nil


@@ -1,7 +1,8 @@
package blockdag
import (
"errors"
"github.com/pkg/errors"
"path/filepath"
"strings"
"testing"
@@ -21,11 +22,11 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
}
defer teardownFunc()
dag.TestSetBlockRewardMaturity(1)
dag.TestSetCoinbaseMaturity(0)
// Test rejecting the block if its parents are missing
orphanBlockFile := "blk_3B.dat"
loadedBlocks, err := loadBlocks(orphanBlockFile)
loadedBlocks, err := LoadBlocks(filepath.Join("testdata/", orphanBlockFile))
if err != nil {
t.Fatalf("TestMaybeAcceptBlockErrors: "+
"Error loading file '%s': %s\n", orphanBlockFile, err)
@@ -48,7 +49,7 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
// Test rejecting the block if its parents are invalid
blocksFile := "blk_0_to_4.dat"
blocks, err := loadBlocks(blocksFile)
blocks, err := LoadBlocks(filepath.Join("testdata/", blocksFile))
if err != nil {
t.Fatalf("TestMaybeAcceptBlockErrors: "+
"Error loading file '%s': %s\n", blocksFile, err)
@@ -56,10 +57,13 @@ func TestMaybeAcceptBlockErrors(t *testing.T) {
// Add a valid block and mark it as invalid
block1 := blocks[1]
isOrphan, err := dag.ProcessBlock(block1, BFNone)
isOrphan, delay, err := dag.ProcessBlock(block1, BFNone)
if err != nil {
t.Fatalf("TestMaybeAcceptBlockErrors: Valid block unexpectedly returned an error: %s", err)
}
if delay != 0 {
t.Fatalf("TestMaybeAcceptBlockErrors: block 1 is too far in the future")
}
if isOrphan {
t.Fatalf("TestMaybeAcceptBlockErrors: incorrectly returned block 1 is an orphan")
}


@@ -3,7 +3,7 @@ package blockdag
import (
"container/heap"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/util/daghash"
)
// baseHeap is an implementation for heap.Interface that sorts blocks by their height
@@ -28,61 +28,61 @@ func (h *baseHeap) Pop() interface{} {
type upHeap struct{ baseHeap }
func (h upHeap) Less(i, j int) bool {
if h.baseHeap[i].height == h.baseHeap[j].height {
if h.baseHeap[i].blueScore == h.baseHeap[j].blueScore {
return daghash.HashToBig(h.baseHeap[i].hash).Cmp(daghash.HashToBig(h.baseHeap[j].hash)) < 0
}
return h.baseHeap[i].height < h.baseHeap[j].height
return h.baseHeap[i].blueScore < h.baseHeap[j].blueScore
}
// downHeap extends baseHeap to include Less operation that traverses from top to bottom
type downHeap struct{ baseHeap }
func (h downHeap) Less(i, j int) bool {
if h.baseHeap[i].height == h.baseHeap[j].height {
if h.baseHeap[i].blueScore == h.baseHeap[j].blueScore {
return daghash.HashToBig(h.baseHeap[i].hash).Cmp(daghash.HashToBig(h.baseHeap[j].hash)) > 0
}
return h.baseHeap[i].height > h.baseHeap[j].height
return h.baseHeap[i].blueScore > h.baseHeap[j].blueScore
}
// BlockHeap represents a mutable heap of Blocks, sorted by their height
type BlockHeap struct {
// blockHeap represents a mutable heap of Blocks, sorted by their height
type blockHeap struct {
impl heap.Interface
}
// NewDownHeap initializes and returns a new BlockHeap
func NewDownHeap() BlockHeap {
h := BlockHeap{impl: &downHeap{}}
// newDownHeap initializes and returns a new blockHeap
func newDownHeap() blockHeap {
h := blockHeap{impl: &downHeap{}}
heap.Init(h.impl)
return h
}
// NewUpHeap initializes and returns a new BlockHeap
func NewUpHeap() BlockHeap {
h := BlockHeap{impl: &upHeap{}}
// newUpHeap initializes and returns a new blockHeap
func newUpHeap() blockHeap {
h := blockHeap{impl: &upHeap{}}
heap.Init(h.impl)
return h
}
// pop removes the block with lowest height from this heap and returns it
func (bh BlockHeap) pop() *blockNode {
func (bh blockHeap) pop() *blockNode {
return heap.Pop(bh.impl).(*blockNode)
}
// Push pushes the block onto the heap
func (bh BlockHeap) Push(block *blockNode) {
func (bh blockHeap) Push(block *blockNode) {
heap.Push(bh.impl, block)
}
// pushSet pushes a blockset to the heap.
func (bh BlockHeap) pushSet(bs blockSet) {
func (bh blockHeap) pushSet(bs blockSet) {
for _, block := range bs {
heap.Push(bh.impl, block)
}
}
// Len returns the length of this heap
func (bh BlockHeap) Len() int {
func (bh blockHeap) Len() int {
return bh.impl.Len()
}


@@ -4,7 +4,7 @@ import (
"testing"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/util/daghash"
)
// TestBlockHeap tests pushing, popping, and determining the length of the heap.
@@ -81,7 +81,7 @@ func TestBlockHeap(t *testing.T) {
}
for _, test := range tests {
dHeap := NewDownHeap()
dHeap := newDownHeap()
for _, block := range test.toPush {
dHeap.Push(block)
}
@@ -99,7 +99,7 @@ func TestBlockHeap(t *testing.T) {
"Expected: %v, got: %v", test.name, test.expectedPopDown, poppedBlock)
}
uHeap := NewUpHeap()
uHeap := newUpHeap()
for _, block := range test.toPush {
uHeap.Push(block)
}

blockdag/blockidhash.go

@@ -0,0 +1,136 @@
package blockdag
import (
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util/daghash"
"github.com/pkg/errors"
)
var (
// idByHashIndexBucketName is the name of the db bucket used to house
// the block hash -> block id index.
idByHashIndexBucketName = []byte("idbyhashidx")
// hashByIDIndexBucketName is the name of the db bucket used to house
// the block id -> block hash index.
hashByIDIndexBucketName = []byte("hashbyididx")
currentBlockIDKey = []byte("currentblockid")
)
// -----------------------------------------------------------------------------
// This is a mapping between block hashes and unique IDs. The ID
// is simply a sequentially incremented uint64 that is used instead of block hash
// for the indexers. This is useful because it is only 8 bytes versus 32 bytes
// hashes and thus saves a ton of space when a block is referenced in an index.
// It consists of three buckets: the first bucket maps the hash of each
// block to the unique ID and the second maps that ID back to the block hash.
// The third bucket contains the last received block ID, and is used
// when starting the node to check that the enabled indexes are up to date
// with the latest received block, and if not, initiate recovery process.
//
// The serialized format for keys and values in the block hash to ID bucket is:
// <hash> = <ID>
//
// Field Type Size
// hash daghash.Hash 32 bytes
// ID uint64 8 bytes
// -----
// Total: 40 bytes
//
// The serialized format for keys and values in the ID to block hash bucket is:
// <ID> = <hash>
//
// Field Type Size
// ID uint64 8 bytes
// hash daghash.Hash 32 bytes
// -----
// Total: 40 bytes
//
// -----------------------------------------------------------------------------
const blockIDSize = 8 // 8 bytes for block ID
// DBFetchBlockIDByHash uses an existing database transaction to retrieve the
// block id for the provided hash from the index.
func DBFetchBlockIDByHash(dbTx database.Tx, hash *daghash.Hash) (uint64, error) {
hashIndex := dbTx.Metadata().Bucket(idByHashIndexBucketName)
serializedID := hashIndex.Get(hash[:])
if serializedID == nil {
return 0, errors.Errorf("no entry in the block ID index for block with hash %s", hash)
}
return DeserializeBlockID(serializedID), nil
}
// DBFetchBlockHashBySerializedID uses an existing database transaction to
// retrieve the hash for the provided serialized block id from the index.
func DBFetchBlockHashBySerializedID(dbTx database.Tx, serializedID []byte) (*daghash.Hash, error) {
idIndex := dbTx.Metadata().Bucket(hashByIDIndexBucketName)
hashBytes := idIndex.Get(serializedID)
if hashBytes == nil {
return nil, errors.Errorf("no entry in the block ID index for block with id %d", byteOrder.Uint64(serializedID))
}
var hash daghash.Hash
copy(hash[:], hashBytes)
return &hash, nil
}
// dbPutBlockIDIndexEntry uses an existing database transaction to update or add
// the index entries for the hash to id and id to hash mappings for the provided
// values.
func dbPutBlockIDIndexEntry(dbTx database.Tx, hash *daghash.Hash, serializedID []byte) error {
// Add the block hash to ID mapping to the index.
meta := dbTx.Metadata()
hashIndex := meta.Bucket(idByHashIndexBucketName)
if err := hashIndex.Put(hash[:], serializedID[:]); err != nil {
return err
}
// Add the block ID to hash mapping to the index.
idIndex := meta.Bucket(hashByIDIndexBucketName)
return idIndex.Put(serializedID[:], hash[:])
}
// DBFetchCurrentBlockID returns the last known block ID.
func DBFetchCurrentBlockID(dbTx database.Tx) uint64 {
serializedID := dbTx.Metadata().Get(currentBlockIDKey)
if serializedID == nil {
return 0
}
return DeserializeBlockID(serializedID)
}
// DeserializeBlockID returns a deserialized block id
func DeserializeBlockID(serializedID []byte) uint64 {
return byteOrder.Uint64(serializedID)
}
// SerializeBlockID returns a serialized block id
func SerializeBlockID(blockID uint64) []byte {
serializedBlockID := make([]byte, blockIDSize)
byteOrder.PutUint64(serializedBlockID, blockID)
return serializedBlockID
}
// DBFetchBlockHashByID uses an existing database transaction to retrieve the
// hash for the provided block id from the index.
func DBFetchBlockHashByID(dbTx database.Tx, id uint64) (*daghash.Hash, error) {
return DBFetchBlockHashBySerializedID(dbTx, SerializeBlockID(id))
}
func createBlockID(dbTx database.Tx, blockHash *daghash.Hash) (uint64, error) {
currentBlockID := DBFetchCurrentBlockID(dbTx)
newBlockID := currentBlockID + 1
serializedNewBlockID := SerializeBlockID(newBlockID)
err := dbTx.Metadata().Put(currentBlockIDKey, serializedNewBlockID)
if err != nil {
return 0, err
}
err = dbPutBlockIDIndexEntry(dbTx, blockHash, serializedNewBlockID)
if err != nil {
return 0, err
}
return newBlockID, nil
}
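As a worked example of the 8-byte encoding described above, here is a self-contained round trip. It assumes the package's byteOrder is binary.LittleEndian, which this diff does not show:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Serialize block ID 42 into 8 bytes, mirroring SerializeBlockID
	// under the little-endian assumption.
	serialized := make([]byte, 8)
	binary.LittleEndian.PutUint64(serialized, 42)
	fmt.Printf("%x\n", serialized) // 2a00000000000000
	// Deserialize it back, mirroring DeserializeBlockID.
	fmt.Println(binary.LittleEndian.Uint64(serialized)) // 42
}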


@@ -8,8 +8,8 @@ import (
"sync"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util/daghash"
)
// blockIndex provides facilities for keeping track of an in-memory index of the
@@ -117,27 +117,28 @@ func (bi *blockIndex) UnsetStatusFlags(node *blockNode, flags blockStatus) {
// flushToDB writes all dirty block nodes to the database. If all writes
// succeed, this clears the dirty set.
func (bi *blockIndex) flushToDB() error {
return bi.db.Update(func(dbTx database.Tx) error {
return bi.flushToDBWithTx(dbTx)
})
}
// flushToDBWithTx writes all dirty block nodes to the database. If all
// writes succeed, this clears the dirty set.
func (bi *blockIndex) flushToDBWithTx(dbTx database.Tx) error {
bi.Lock()
defer bi.Unlock()
if len(bi.dirty) == 0 {
bi.Unlock()
return nil
}
err := bi.db.Update(func(dbTx database.Tx) error {
for node := range bi.dirty {
err := dbStoreBlockNode(dbTx, node)
if err != nil {
return err
}
for node := range bi.dirty {
err := dbStoreBlockNode(dbTx, node)
if err != nil {
return err
}
return nil
})
}
// If write was successful, clear the dirty set.
if err == nil {
bi.dirty = make(map[*blockNode]struct{})
}
bi.Unlock()
return err
bi.dirty = make(map[*blockNode]struct{})
return nil
}


@@ -1,7 +1,7 @@
package blockdag
import (
"errors"
"github.com/pkg/errors"
"strings"
"testing"
"time"
@@ -13,7 +13,7 @@ import (
func TestAncestorErrors(t *testing.T) {
node := newTestNode(newSet(), int32(0x10000000), 0, time.Unix(0, 0), dagconfig.MainNetParams.K)
node.height = 2
node.chainHeight = 2
ancestor := node.SelectedAncestor(3)
if ancestor != nil {
t.Errorf("TestAncestorErrors: Ancestor() unexpectedly returned a node. Expected: <nil>")

blockdag/blocklocator.go

@@ -0,0 +1,143 @@
package blockdag
import (
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
)
// BlockLocator is used to help locate a specific block. The algorithm for
// building the block locator is to add block hashes in reverse order on the
// block's selected parent chain until the desired stop block is reached.
// In order to keep the list of locator hashes to a reasonable number of entries,
// the step between each entry is doubled each loop iteration to exponentially
// decrease the number of hashes as a function of the distance from the block
// being located.
//
// For example, assume a selected parent chain with IDs as depicted below, and the
// stop block is genesis:
// genesis -> 1 -> 2 -> ... -> 15 -> 16 -> 17 -> 18
//
// The block locator for block 17 would be the hashes of blocks:
// [17 16 14 11 7 2 genesis]
type BlockLocator []*daghash.Hash
// BlockLocatorFromHashes returns a block locator from start and stop hash.
// See BlockLocator for details on the algorithm used to create a block locator.
//
// In addition to the general algorithm referenced above, this function will
// return the block locator for the selected tip if the passed hash is not currently
// known.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) BlockLocatorFromHashes(startHash, stopHash *daghash.Hash) BlockLocator {
dag.dagLock.RLock()
defer dag.dagLock.RUnlock()
startNode := dag.index.LookupNode(startHash)
var stopNode *blockNode
if !stopHash.IsEqual(&daghash.ZeroHash) {
stopNode = dag.index.LookupNode(stopHash)
}
return dag.blockLocator(startNode, stopNode)
}
// LatestBlockLocator returns a block locator for the current tips of the DAG.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) LatestBlockLocator() BlockLocator {
dag.dagLock.RLock()
defer dag.dagLock.RUnlock()
return dag.blockLocator(nil, nil)
}
// blockLocator returns a block locator for the passed start and stop nodes.
// The default value for the start node is the selected tip, and the default
// values of the stop node is the genesis block.
//
// See the BlockLocator type comments for more details.
//
// This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) blockLocator(startNode, stopNode *blockNode) BlockLocator {
// Use the selected tip if requested.
if startNode == nil {
startNode = dag.virtual.selectedParent
}
if stopNode == nil {
stopNode = dag.genesis
}
// We use the selected parent of the start node, so the
// block locator won't contain the start node.
startNode = startNode.selectedParent
// If the start node or the stop node are not in the
// virtual's selected parent chain, we replace them with their
// closest selected parent that is part of the virtual's
// selected parent chain.
for !dag.IsInSelectedParentChain(stopNode.hash) {
stopNode = stopNode.selectedParent
}
for !dag.IsInSelectedParentChain(startNode.hash) {
startNode = startNode.selectedParent
}
// Calculate the max number of entries that will ultimately be in the
// block locator. See the description of the algorithm for how these
// numbers are derived.
// startNode.hash + stopNode.hash.
// Then floor(log2(startNode.chainHeight-stopNode.chainHeight)) entries for the skip portion.
maxEntries := 2 + util.FastLog2Floor(startNode.chainHeight-stopNode.chainHeight)
locator := make(BlockLocator, 0, maxEntries)
step := uint64(1)
for node := startNode; node != nil; {
locator = append(locator, node.hash)
// Nothing more to add once the stop node has been added.
if node.chainHeight == stopNode.chainHeight {
break
}
// Calculate chainHeight of previous node to include ensuring the
// final node is stopNode.
nextChainHeight := node.chainHeight - step
if nextChainHeight < stopNode.chainHeight {
nextChainHeight = stopNode.chainHeight
}
// walk backwards through the nodes to the correct ancestor.
node = node.SelectedAncestor(nextChainHeight)
// Double the distance between included hashes.
step *= 2
}
return locator
}
// FindNextLocatorBoundaries returns the lowest unknown block locator, hash
// and the highest known block locator hash. This is used to create the
// next block locator to find the highest shared known chain block with the
// sync peer.
//
// This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) FindNextLocatorBoundaries(locator BlockLocator) (startHash, stopHash *daghash.Hash) {
// Find the most recent locator block hash in the DAG. In the case none of
// the hashes in the locator are in the DAG, fall back to the genesis block.
stopNode := dag.genesis
nextBlockLocatorIndex := int64(len(locator) - 1)
for i, hash := range locator {
node := dag.index.LookupNode(hash)
if node != nil {
stopNode = node
nextBlockLocatorIndex = int64(i) - 1
break
}
}
if nextBlockLocatorIndex < 0 {
return nil, stopNode.hash
}
return locator[nextBlockLocatorIndex], stopNode.hash
}
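To make the step-doubling in blockLocator concrete, here is a minimal sketch of the same loop over plain chain heights; locatorHeights is a hypothetical helper, since the real code walks blockNodes via SelectedAncestor:

// locatorHeights includes the start height, then steps back toward the
// stop height with a step that doubles each iteration, always ending
// exactly on stop. Assumes start >= stop.
func locatorHeights(start, stop uint64) []uint64 {
	heights := make([]uint64, 0)
	step := uint64(1)
	for h := start; ; {
		heights = append(heights, h)
		if h == stop {
			break
		}
		next := stop
		if h > stop+step {
			next = h - step
		}
		h = next
		step *= 2
	}
	return heights
}

For instance, locatorHeights(18, 0) returns [18 17 15 11 3 0].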


@@ -6,12 +6,9 @@ package blockdag
import (
"fmt"
"math/big"
"sort"
"time"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
)
@@ -78,67 +75,57 @@ type blockNode struct {
// blueScore is the count of all the blue blocks in this block's past
blueScore uint64
// diff is the UTXO representation of the block
// A block's UTXO is reconstituted by applying diffWith on every block in the chain of diffChildren
// from the virtual block down to the block. See diffChild
diff *UTXODiff
// diffChild is the child that diff will be built from. See diff
diffChild *blockNode
// hash is the double sha 256 of the block.
hash *daghash.Hash
// workSum is the total amount of work in the DAG up to and including
// this node.
workSum *big.Int
// height is the position in the block DAG.
height int32
height uint64
// chainHeight is the number of hops you need to go down the selected parent chain in order to get to the genesis block.
chainHeight uint32
chainHeight uint64
// Some fields from block headers to aid in best chain selection and
// reconstructing headers from memory. These must be treated as
// immutable and are intentionally ordered to avoid padding on 64-bit
// platforms.
version int32
bits uint32
nonce uint64
timestamp int64
hashMerkleRoot *daghash.Hash
idMerkleRoot *daghash.Hash
version int32
bits uint32
nonce uint64
timestamp int64
hashMerkleRoot *daghash.Hash
acceptedIDMerkleRoot *daghash.Hash
utxoCommitment *daghash.Hash
// status is a bitfield representing the validation state of the block. The
// status field, unlike the other fields, may be written to and so should
// only be accessed using the concurrent-safe NodeStatus method on
// blockIndex once the node has been added to the global index.
status blockStatus
// isFinalized determines whether the node is below the finality point.
isFinalized bool
}
// initBlockNode initializes a block node from the given header and parent nodes,
// calculating the height and workSum from the respective fields on the first parent.
// initBlockNode initializes a block node from the given header and parent nodes.
// This function is NOT safe for concurrent access. It must only be called when
// initially creating a node.
func initBlockNode(node *blockNode, blockHeader *wire.BlockHeader, parents blockSet, phantomK uint32) {
*node = blockNode{
parents: parents,
children: make(blockSet),
workSum: big.NewInt(0),
timestamp: time.Now().Unix(),
}
// blockHeader is nil only for the virtual block
if blockHeader != nil {
node.hash = blockHeader.BlockHash()
node.workSum = util.CalcWork(blockHeader.Bits)
node.version = blockHeader.Version
node.bits = blockHeader.Bits
node.nonce = blockHeader.Nonce
node.timestamp = blockHeader.Timestamp.Unix()
node.hashMerkleRoot = blockHeader.HashMerkleRoot
node.idMerkleRoot = blockHeader.IDMerkleRoot
node.acceptedIDMerkleRoot = blockHeader.AcceptedIDMerkleRoot
node.utxoCommitment = blockHeader.UTXOCommitment
} else {
node.hash = &daghash.ZeroHash
}
@@ -147,15 +134,17 @@ func initBlockNode(node *blockNode, blockHeader *wire.BlockHeader, parents block
node.blues, node.selectedParent, node.blueScore = phantom(node, phantomK)
node.height = calculateNodeHeight(node)
node.chainHeight = calculateChainHeight(node)
node.workSum = node.workSum.Add(node.selectedParent.workSum, node.workSum)
}
}
func calculateNodeHeight(node *blockNode) int32 {
func calculateNodeHeight(node *blockNode) uint64 {
if node.isGenesis() {
return 0
}
return node.parents.maxHeight() + 1
}
func calculateChainHeight(node *blockNode) uint32 {
func calculateChainHeight(node *blockNode) uint64 {
if node.isGenesis() {
return 0
}
@@ -163,8 +152,7 @@ func calculateChainHeight(node *blockNode) uint32 {
}
// newBlockNode returns a new block node for the given block header and parent
// nodes, calculating the height and workSum from the respective fields on the
// parent. This function is NOT safe for concurrent access.
// nodes. This function is NOT safe for concurrent access.
func newBlockNode(blockHeader *wire.BlockHeader, parents blockSet, phantomK uint32) *blockNode {
var node blockNode
initBlockNode(&node, blockHeader, parents, phantomK)
@@ -184,67 +172,54 @@ func (node *blockNode) updateParentsChildren() {
func (node *blockNode) Header() *wire.BlockHeader {
// No lock is needed because all accessed fields are immutable.
return &wire.BlockHeader{
Version: node.version,
ParentHashes: node.ParentHashes(),
HashMerkleRoot: node.hashMerkleRoot,
IDMerkleRoot: node.idMerkleRoot,
Timestamp: time.Unix(node.timestamp, 0),
Bits: node.bits,
Nonce: node.nonce,
Version: node.version,
ParentHashes: node.ParentHashes(),
HashMerkleRoot: node.hashMerkleRoot,
AcceptedIDMerkleRoot: node.acceptedIDMerkleRoot,
UTXOCommitment: node.utxoCommitment,
Timestamp: time.Unix(node.timestamp, 0),
Bits: node.bits,
Nonce: node.nonce,
}
}
// SelectedAncestor returns the ancestor block node at the provided height by following
// the selected chain backwards from this node. The returned block will be nil when a
// height is requested that is after the height of the passed node or is less than zero.
// SelectedAncestor returns the ancestor block node at the provided chain-height by following
// the selected-parents chain backwards from this node. The returned block will be nil when
// the requested chain-height is greater than the chain-height of this node.
//
// This function is safe for concurrent access.
func (node *blockNode) SelectedAncestor(height int32) *blockNode {
if height < 0 || height > node.height {
func (node *blockNode) SelectedAncestor(chainHeight uint64) *blockNode {
if chainHeight > node.chainHeight {
return nil
}
n := node
for ; n != nil && n.height != height; n = n.selectedParent {
for ; n != nil && n.chainHeight != chainHeight; n = n.selectedParent {
// Intentionally left blank
}
return n
}
// RelativeAncestor returns the ancestor block node a relative 'distance' blocks
// before this node. This is equivalent to calling Ancestor with the node's
// height minus provided distance.
// RelativeAncestor returns the ancestor block node a relative 'distance' of
// chain-blocks before this node. This is equivalent to calling Ancestor with
// the node's chain-height minus provided distance.
//
// This function is safe for concurrent access.
func (node *blockNode) RelativeAncestor(distance int32) *blockNode {
return node.SelectedAncestor(node.height - distance)
func (node *blockNode) RelativeAncestor(distance uint64) *blockNode {
return node.SelectedAncestor(node.chainHeight - distance)
}
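
The two helpers above walk the selected-parent chain; a minimal self-contained sketch of the same walk (simplified node type, hypothetical heights) shows how RelativeAncestor reduces to SelectedAncestor:

package main

import "fmt"

// node is a stripped-down stand-in for blockNode, keeping only the fields
// the selected-parent walk needs.
type node struct {
	chainHeight    uint64
	selectedParent *node
}

// selectedAncestor mirrors SelectedAncestor above: walk the selected-parent
// chain until a node with the requested chain-height is found.
func (n *node) selectedAncestor(chainHeight uint64) *node {
	if chainHeight > n.chainHeight {
		return nil
	}
	current := n
	for current != nil && current.chainHeight != chainHeight {
		current = current.selectedParent
	}
	return current
}

func main() {
	genesis := &node{chainHeight: 0}
	a := &node{chainHeight: 1, selectedParent: genesis}
	b := &node{chainHeight: 2, selectedParent: a}
	tip := &node{chainHeight: 3, selectedParent: b}

	// RelativeAncestor(distance) is just SelectedAncestor(chainHeight - distance).
	fmt.Println(tip.selectedAncestor(tip.chainHeight - 2).chainHeight) // 1
}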
// PastMedianTime returns the median time of the previous few blocks
// prior to, and including, the block node.
//
// This function is safe for concurrent access.
func (node *blockNode) PastMedianTime() time.Time {
// Create a slice of the previous few block timestamps used to calculate
// the median per the number defined by the constant medianTimeBlocks.
// If there aren't enough blocks yet - pad remaining with genesis block's timestamp.
timestamps := make([]int64, medianTimeBlocks)
iterNode := node
for i := 0; i < medianTimeBlocks; i++ {
timestamps[i] = iterNode.timestamp
if !iterNode.isGenesis() {
iterNode = iterNode.selectedParent
}
func (node *blockNode) PastMedianTime(dag *BlockDAG) time.Time {
window := blueBlockWindow(node, 2*dag.TimestampDeviationTolerance-1)
medianTimestamp, err := window.medianTimestamp()
if err != nil {
panic(fmt.Sprintf("blueBlockWindow: %s", err))
}
sort.Sort(timeSorter(timestamps))
// Note: This works when medianTimeBlockCount is an odd number.
// If it is to be changed to an even number - must take average of two middle values
// Since medianTimeBlockCount is a constant, we can skip the odd/even check
medianTimestamp := timestamps[medianTimeBlocks/2]
return time.Unix(medianTimestamp, 0)
}
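
The rewritten PastMedianTime delegates window construction to blueBlockWindow (added later in this diff) and takes the median of the window's timestamps. As a reference for the median step only, here is a self-contained sketch with made-up timestamps, using the same sort-then-middle-element approach as blockwindow.go:

package main

import (
	"fmt"
	"sort"
)

// medianOfTimestamps mirrors blockWindow.medianTimestamp (further below in
// this diff): sort the timestamps and take the middle element. The window
// size 2*TimestampDeviationTolerance-1 is always odd, so the middle element
// is the true median.
func medianOfTimestamps(timestamps []int64) int64 {
	sorted := make([]int64, len(timestamps))
	copy(sorted, timestamps)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
	return sorted[len(sorted)/2]
}

func main() {
	// Hypothetical Unix timestamps of a five-block blue window.
	window := []int64{1573991000, 1573990940, 1573991060, 1573990880, 1573991120}
	fmt.Println(medianOfTimestamps(window)) // 1573991000
}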
@@ -257,11 +232,11 @@ func (node *blockNode) isGenesis() bool {
return len(node.parents) == 0
}
func (node *blockNode) finalityScore() uint64 {
return node.blueScore / FinalityInterval
func (node *blockNode) finalityScore(dag *BlockDAG) uint64 {
return node.blueScore / uint64(dag.dagParams.FinalityInterval)
}
// String returns a string that contains the block hash and height.
// String returns a string that contains the block hash.
func (node blockNode) String() string {
return fmt.Sprintf("%s (%d)", node.hash, node.height)
return node.hash.String()
}
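
finalityScore now divides blueScore by the network's FinalityInterval, so all blocks inside one finality window share the same score. A small sketch of that bucketing (the interval value of 100 is purely illustrative):

package main

import "fmt"

// finalityScore mirrors blockNode.finalityScore above: blocks whose blue
// scores fall in the same FinalityInterval-sized bucket share a score.
func finalityScore(blueScore, finalityInterval uint64) uint64 {
	return blueScore / finalityInterval
}

func main() {
	// With an illustrative FinalityInterval of 100, blue scores 0-99 map
	// to finality score 0, 100-199 to 1, and so on.
	fmt.Println(finalityScore(99, 100))  // 0
	fmt.Println(finalityScore(100, 100)) // 1
	fmt.Println(finalityScore(250, 100)) // 2
}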

View File

@@ -26,7 +26,7 @@ func TestChainHeight(t *testing.T) {
tests := []struct {
node *blockNode
expectedChainHeight uint32
expectedChainHeight uint64
}{
{
node: node0,

View File

@@ -3,7 +3,7 @@ package blockdag
import (
"strings"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/util/daghash"
)
// blockSet implements a basic unsorted set of blocks
@@ -24,8 +24,8 @@ func setFromSlice(blocks ...*blockNode) blockSet {
}
// maxHeight returns the height of the highest block in the block set
func (bs blockSet) maxHeight() int32 {
var maxHeight int32
func (bs blockSet) maxHeight() uint64 {
var maxHeight uint64
for _, node := range bs {
if maxHeight < node.height {
maxHeight = node.height
@@ -34,19 +34,6 @@ func (bs blockSet) maxHeight() int32 {
return maxHeight
}
func (bs blockSet) highest() *blockNode {
var highest *blockNode
for _, node := range bs {
if highest == nil ||
highest.height < node.height ||
(highest.height == node.height && daghash.Less(node.hash, highest.hash)) {
highest = node
}
}
return highest
}
// add adds a block to this BlockSet
func (bs blockSet) add(block *blockNode) {
bs[*block.hash] = block

View File

@@ -4,7 +4,7 @@ import (
"reflect"
"testing"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/util/daghash"
)
func TestHashes(t *testing.T) {
@@ -35,47 +35,6 @@ func TestHashes(t *testing.T) {
t.Errorf("TestHashes: hashes order is %s but expected %s", hashes, expected)
}
}
func TestBlockSetHighest(t *testing.T) {
node1 := &blockNode{hash: &daghash.Hash{10}, height: 1}
node2a := &blockNode{hash: &daghash.Hash{20}, height: 2}
node2b := &blockNode{hash: &daghash.Hash{21}, height: 2}
node3 := &blockNode{hash: &daghash.Hash{30}, height: 3}
tests := []struct {
name string
set blockSet
expectedHighest *blockNode
}{
{
name: "empty set",
set: setFromSlice(),
expectedHighest: nil,
},
{
name: "set with one member",
set: setFromSlice(node1),
expectedHighest: node1,
},
{
name: "same-height highest members in set",
set: setFromSlice(node2b, node1, node2a),
expectedHighest: node2a,
},
{
name: "typical set",
set: setFromSlice(node2b, node3, node1, node2a),
expectedHighest: node3,
},
}
for _, test := range tests {
highest := test.set.highest()
if highest != test.expectedHighest {
t.Errorf("blockSet.highest: unexpected value in test '%s'. "+
"Expected: %v, got: %v", test.name, test.expectedHighest, highest)
}
}
}
func TestBlockSetSubtract(t *testing.T) {
node1 := &blockNode{hash: &daghash.Hash{10}}

75
blockdag/blockwindow.go Normal file
View File

@@ -0,0 +1,75 @@
package blockdag
import (
"github.com/daglabs/btcd/util"
"github.com/pkg/errors"
"math"
"math/big"
"sort"
)
type blockWindow []*blockNode
// blueBlockWindow returns a blockWindow of the given size that contains the
// blues in the past of startingNode, sorted by phantom order.
// If the number of blues in the past of startingNode is less than windowSize,
// the window will be padded by genesis blocks to achieve a size of windowSize.
func blueBlockWindow(startingNode *blockNode, windowSize uint64) blockWindow {
window := make(blockWindow, 0, windowSize)
currentNode := startingNode
for uint64(len(window)) < windowSize && currentNode.selectedParent != nil {
for _, blue := range currentNode.blues {
window = append(window, blue)
if uint64(len(window)) == windowSize {
break
}
}
currentNode = currentNode.selectedParent
}
if uint64(len(window)) < windowSize {
genesis := currentNode
for uint64(len(window)) < windowSize {
window = append(window, genesis)
}
}
return window
}
func (window blockWindow) minMaxTimestamps() (min, max int64) {
min = math.MaxInt64
max = 0
for _, node := range window {
if node.timestamp < min {
min = node.timestamp
}
if node.timestamp > max {
max = node.timestamp
}
}
return
}
func (window blockWindow) averageTarget() *big.Int {
averageTarget := big.NewInt(0)
for _, node := range window {
target := util.CompactToBig(node.bits)
averageTarget.Add(averageTarget, target)
}
return averageTarget.Div(averageTarget, big.NewInt(int64(len(window))))
}
func (window blockWindow) medianTimestamp() (int64, error) {
if len(window) == 0 {
return 0, errors.New("Cannot calculate median timestamp for an empty block window")
}
timestamps := make([]int64, len(window))
for i, node := range window {
timestamps[i] = node.timestamp
}
sort.Sort(timeSorter(timestamps))
return timestamps[len(timestamps)/2], nil
}
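
To make the target averaging concrete, here is a self-contained sketch of the same computation over hypothetical targets; in the real code each target comes from util.CompactToBig(node.bits):

package main

import (
	"fmt"
	"math/big"
)

// averageTarget mirrors blockWindow.averageTarget: sum the per-block
// targets and divide by the window length (integer division).
func averageTarget(targets []*big.Int) *big.Int {
	sum := big.NewInt(0)
	for _, target := range targets {
		sum.Add(sum, target)
	}
	return sum.Div(sum, big.NewInt(int64(len(targets))))
}

func main() {
	// Hypothetical targets; real ones are derived from compact bits.
	targets := []*big.Int{
		big.NewInt(1000),
		big.NewInt(2000),
		big.NewInt(4000),
	}
	fmt.Println(averageTarget(targets)) // 2333
}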

View File

@@ -0,0 +1,138 @@
package blockdag
import (
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/util/daghash"
"github.com/pkg/errors"
"reflect"
"testing"
"time"
)
func TestBlueBlockWindow(t *testing.T) {
params := dagconfig.SimNetParams
params.K = 1
dag := newTestDAG(&params)
windowSize := uint64(10)
genesisNode := dag.genesis
blockTime := genesisNode.Header().Timestamp
blockByIDMap := make(map[string]*blockNode)
idByBlockMap := make(map[*blockNode]string)
blockByIDMap["A"] = genesisNode
idByBlockMap[genesisNode] = "A"
blockVersion := int32(0x10000000)
blocksData := []*struct {
parents []string
id string // id is a virtual entity that is used only for tests, so we can define relations between blocks without knowing their hashes
expectedWindowWithGenesisPadding []string
}{
{
parents: []string{"A"},
id: "B",
expectedWindowWithGenesisPadding: []string{"A", "A", "A", "A", "A", "A", "A", "A", "A", "A"},
},
{
parents: []string{"B"},
id: "C",
expectedWindowWithGenesisPadding: []string{"B", "A", "A", "A", "A", "A", "A", "A", "A", "A"},
},
{
parents: []string{"B"},
id: "D",
expectedWindowWithGenesisPadding: []string{"B", "A", "A", "A", "A", "A", "A", "A", "A", "A"},
},
{
parents: []string{"C", "D"},
id: "E",
expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"},
},
{
parents: []string{"C", "D"},
id: "F",
expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"},
},
{
parents: []string{"A"},
id: "G",
expectedWindowWithGenesisPadding: []string{"A", "A", "A", "A", "A", "A", "A", "A", "A", "A"},
},
{
parents: []string{"G"},
id: "H",
expectedWindowWithGenesisPadding: []string{"G", "A", "A", "A", "A", "A", "A", "A", "A", "A"},
},
{
parents: []string{"H", "F"},
id: "I",
expectedWindowWithGenesisPadding: []string{"F", "D", "C", "B", "A", "A", "A", "A", "A", "A"},
},
{
parents: []string{"I"},
id: "J",
expectedWindowWithGenesisPadding: []string{"I", "F", "D", "C", "B", "A", "A", "A", "A", "A"},
},
{
parents: []string{"J"},
id: "K",
expectedWindowWithGenesisPadding: []string{"J", "I", "F", "D", "C", "B", "A", "A", "A", "A"},
},
{
parents: []string{"K"},
id: "L",
expectedWindowWithGenesisPadding: []string{"K", "J", "I", "F", "D", "C", "B", "A", "A", "A"},
},
{
parents: []string{"L"},
id: "M",
expectedWindowWithGenesisPadding: []string{"L", "K", "J", "I", "F", "D", "C", "B", "A", "A"},
},
{
parents: []string{"M"},
id: "N",
expectedWindowWithGenesisPadding: []string{"M", "L", "K", "J", "I", "F", "D", "C", "B", "A"},
},
{
parents: []string{"N"},
id: "O",
expectedWindowWithGenesisPadding: []string{"N", "M", "L", "K", "J", "I", "F", "D", "C", "B"},
},
}
for _, blockData := range blocksData {
blockTime = blockTime.Add(time.Second)
parents := blockSet{}
for _, parentID := range blockData.parents {
parent := blockByIDMap[parentID]
parents.add(parent)
}
node := newTestNode(parents, blockVersion, 0, blockTime, dag.dagParams.K)
node.hash = &daghash.Hash{} // It helps to predict hash order
for i, char := range blockData.id {
node.hash[i] = byte(char)
}
dag.index.AddNode(node)
node.updateParentsChildren()
blockByIDMap[blockData.id] = node
idByBlockMap[node] = blockData.id
window := blueBlockWindow(node, windowSize)
if err := checkWindowIDs(window, blockData.expectedWindowWithGenesisPadding, idByBlockMap); err != nil {
t.Errorf("Unexpected values for window for block %s: %s", blockData.id, err)
}
}
}
func checkWindowIDs(window []*blockNode, expectedIDs []string, idByBlockMap map[*blockNode]string) error {
ids := make([]string, len(window))
for i, node := range window {
ids[i] = idByBlockMap[node]
}
if !reflect.DeepEqual(ids, expectedIDs) {
return errors.Errorf("window expected to have blocks %s but got %s", expectedIDs, ids)
}
return nil
}

View File

@@ -6,12 +6,11 @@ package blockdag
import (
"fmt"
"time"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/pkg/errors"
)
// CheckpointConfirmations is the number of blocks before the end of the current
@@ -64,16 +63,16 @@ func (dag *BlockDAG) LatestCheckpoint() *dagconfig.Checkpoint {
return &dag.checkpoints[len(dag.checkpoints)-1]
}
// verifyCheckpoint returns whether the passed block height and hash combination
// verifyCheckpoint returns whether the passed block chain height and hash combination
// match the checkpoint data. It also returns true if there is no checkpoint
// data for the passed block height.
func (dag *BlockDAG) verifyCheckpoint(height int32, hash *daghash.Hash) bool {
// data for the passed block chain height.
func (dag *BlockDAG) verifyCheckpoint(chainHeight uint64, hash *daghash.Hash) bool {
if !dag.HasCheckpoints() {
return true
}
// Nothing to check if there is no checkpoint data for the block height.
checkpoint, exists := dag.checkpointsByHeight[height]
// Nothing to check if there is no checkpoint data for the block chainHeight.
checkpoint, exists := dag.checkpointsByChainHeight[chainHeight]
if !exists {
return true
}
@@ -82,7 +81,7 @@ func (dag *BlockDAG) verifyCheckpoint(height int32, hash *daghash.Hash) bool {
return false
}
log.Infof("Verified checkpoint at height %d/block %s", checkpoint.Height,
log.Infof("Verified checkpoint at chainHeight %d/block %s", checkpoint.ChainHeight,
checkpoint.Hash)
return true
}
@@ -136,10 +135,10 @@ func (dag *BlockDAG) findPreviousCheckpoint() (*blockNode, error) {
return dag.checkpointNode, nil
}
// When there is a next checkpoint and the height of the current best
// chain does not exceed it, the current checkpoint lockin is still
// the latest known checkpoint.
if dag.selectedTip().height < dag.nextCheckpoint.Height {
// When there is a next checkpoint and the chain height of the current
// selected tip of the DAG does not exceed it, the current checkpoint
// lockin is still the latest known checkpoint.
if dag.selectedTip().chainHeight < dag.nextCheckpoint.ChainHeight {
return dag.checkpointNode, nil
}
@@ -181,7 +180,7 @@ func (dag *BlockDAG) findPreviousCheckpoint() (*blockNode, error) {
func isNonstandardTransaction(tx *util.Tx) bool {
// Check all of the output public key scripts for non-standard scripts.
for _, txOut := range tx.MsgTx().TxOut {
scriptClass := txscript.GetScriptClass(txOut.PkScript)
scriptClass := txscript.GetScriptClass(txOut.ScriptPubKey)
if scriptClass == txscript.NonStandardTy {
return true
}
@@ -216,19 +215,19 @@ func (dag *BlockDAG) IsCheckpointCandidate(block *util.Block) (bool, error) {
return false, nil
}
// Ensure the height of the passed block and the entry for the block in
// the main chain match. This should always be the case unless the
// Ensure the chain height of the passed block and the entry for the block
// in the DAG match. This should always be the case unless the
// caller provided an invalid block.
if node.height != block.Height() {
return false, fmt.Errorf("passed block height of %d does not "+
"match the main chain height of %d", block.Height(),
node.height)
if node.chainHeight != block.ChainHeight() {
return false, errors.Errorf("passed block chain height of %d does not "+
"match the its height in the DAG: %d", block.ChainHeight(),
node.chainHeight)
}
// A checkpoint must be at least CheckpointConfirmations blocks
// before the end of the main chain.
dagHeight := dag.selectedTip().height
if node.height > (dagHeight - CheckpointConfirmations) {
dagChainHeight := dag.selectedTip().chainHeight
if node.chainHeight > (dagChainHeight - CheckpointConfirmations) {
return false, nil
}
@@ -237,8 +236,7 @@ func (dag *BlockDAG) IsCheckpointCandidate(block *util.Block) (bool, error) {
// This should always succeed since the check above already made sure it
// is CheckpointConfirmations back, but be safe in case the constant
// changes.
nextNode := node.diffChild
if nextNode == nil {
if len(node.children) == 0 {
return false, nil
}
@@ -247,16 +245,6 @@ func (dag *BlockDAG) IsCheckpointCandidate(block *util.Block) (bool, error) {
return false, nil
}
// A checkpoint must have timestamps for the block and the blocks on
// either side of it in order (due to the median time allowance this is
// not always the case).
prevTime := time.Unix(node.selectedParent.timestamp, 0)
curTime := block.MsgBlock().Header.Timestamp
nextTime := time.Unix(nextNode.timestamp, 0)
if prevTime.After(curTime) || nextTime.Before(curTime) {
return false, nil
}
// A checkpoint must have transactions that only contain standard
// scripts.
for _, tx := range block.Transactions() {

278
blockdag/coinbase.go Normal file
View File

@@ -0,0 +1,278 @@
package blockdag
import (
"bufio"
"bytes"
"encoding/binary"
"github.com/daglabs/btcd/util/subnetworkid"
"github.com/pkg/errors"
"io"
"math"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/util/txsort"
"github.com/daglabs/btcd/wire"
)
// compactFeeData is a specialized data type to store a compact list of fees
// inside a block.
// Every transaction gets a single uint64 value, stored as a plain binary list.
// The transactions are ordered the same way they are ordered inside the block, making it easy
// to traverse every transaction in a block and extract its fee.
//
// compactFeeFactory is used to create such a list.
// compactFeeIterator is used to iterate over such a list.
type compactFeeData []byte
func (cfd compactFeeData) Len() int {
return len(cfd) / 8
}
type compactFeeFactory struct {
buffer *bytes.Buffer
writer *bufio.Writer
}
func newCompactFeeFactory() *compactFeeFactory {
buffer := bytes.NewBuffer([]byte{})
return &compactFeeFactory{
buffer: buffer,
writer: bufio.NewWriter(buffer),
}
}
func (cfw *compactFeeFactory) add(txFee uint64) error {
return binary.Write(cfw.writer, binary.LittleEndian, txFee)
}
func (cfw *compactFeeFactory) data() (compactFeeData, error) {
err := cfw.writer.Flush()
return compactFeeData(cfw.buffer.Bytes()), err
}
type compactFeeIterator struct {
reader io.Reader
}
func (cfd compactFeeData) iterator() *compactFeeIterator {
return &compactFeeIterator{
reader: bufio.NewReader(bytes.NewBuffer(cfd)),
}
}
func (cfr *compactFeeIterator) next() (uint64, error) {
var txFee uint64
err := binary.Read(cfr.reader, binary.LittleEndian, &txFee)
return txFee, err
}
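
As a sanity check of the format described above, a standalone round-trip sketch; it re-creates the encoding with encoding/binary rather than importing the unexported types:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// Encode three hypothetical per-transaction fees the same way
	// compactFeeFactory does: one little-endian uint64 per transaction,
	// in block order.
	fees := []uint64{0, 2500, 10000}
	buf := &bytes.Buffer{}
	for _, fee := range fees {
		if err := binary.Write(buf, binary.LittleEndian, fee); err != nil {
			panic(err)
		}
	}
	fmt.Println(buf.Len()) // 24: 8 bytes per fee

	// Decode them back, mirroring compactFeeIterator.next.
	reader := bytes.NewReader(buf.Bytes())
	for {
		var fee uint64
		if err := binary.Read(reader, binary.LittleEndian, &fee); err != nil {
			break // io.EOF once all fees are consumed
		}
		fmt.Println(fee)
	}
}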
// The following functions relate to storing and retrieving fee data from the database
var feeBucket = []byte("fees")
// getBluesFeeData returns the compactFeeData for all of the node's blues,
// used to calculate the fees this blockNode needs to pay
func (node *blockNode) getBluesFeeData(dag *BlockDAG) (map[daghash.Hash]compactFeeData, error) {
bluesFeeData := make(map[daghash.Hash]compactFeeData)
err := dag.db.View(func(dbTx database.Tx) error {
for _, blueBlock := range node.blues {
feeData, err := dbFetchFeeData(dbTx, blueBlock.hash)
if err != nil {
return errors.Errorf("Error getting fee data for block %s: %s", blueBlock.hash, err)
}
bluesFeeData[*blueBlock.hash] = feeData
}
return nil
})
if err != nil {
return nil, err
}
return bluesFeeData, nil
}
func dbStoreFeeData(dbTx database.Tx, blockHash *daghash.Hash, feeData compactFeeData) error {
feeBucket, err := dbTx.Metadata().CreateBucketIfNotExists(feeBucket)
if err != nil {
return errors.Errorf("Error creating or retrieving fee bucket: %s", err)
}
return feeBucket.Put(blockHash.CloneBytes(), feeData)
}
func dbFetchFeeData(dbTx database.Tx, blockHash *daghash.Hash) (compactFeeData, error) {
feeBucket := dbTx.Metadata().Bucket(feeBucket)
if feeBucket == nil {
return nil, errors.New("Fee bucket does not exist")
}
feeData := feeBucket.Get(blockHash.CloneBytes())
if feeData == nil {
return nil, errors.Errorf("No fee data found for block %s", blockHash)
}
return feeData, nil
}
// The following functions deal with building and validating the coinbase transaction
func (node *blockNode) validateCoinbaseTransaction(dag *BlockDAG, block *util.Block, txsAcceptanceData MultiBlockTxsAcceptanceData) error {
if node.isGenesis() {
return nil
}
blockCoinbaseTx := block.CoinbaseTransaction().MsgTx()
scriptPubKey, extraData, err := DeserializeCoinbasePayload(blockCoinbaseTx)
if err != nil {
return err
}
expectedCoinbaseTransaction, err := node.expectedCoinbaseTransaction(dag, txsAcceptanceData, scriptPubKey, extraData)
if err != nil {
return err
}
if !expectedCoinbaseTransaction.Hash().IsEqual(block.CoinbaseTransaction().Hash()) {
return ruleError(ErrBadCoinbaseTransaction, "Coinbase transaction is not built as expected")
}
return nil
}
// expectedCoinbaseTransaction returns the coinbase transaction for the current block
func (node *blockNode) expectedCoinbaseTransaction(dag *BlockDAG, txsAcceptanceData MultiBlockTxsAcceptanceData, scriptPubKey []byte, extraData []byte) (*util.Tx, error) {
bluesFeeData, err := node.getBluesFeeData(dag)
if err != nil {
return nil, err
}
txIns := []*wire.TxIn{}
txOuts := []*wire.TxOut{}
for _, blue := range node.blues {
txIn, txOut, err := coinbaseInputAndOutputForBlueBlock(dag, blue, txsAcceptanceData, bluesFeeData)
if err != nil {
return nil, err
}
txIns = append(txIns, txIn)
if txOut != nil {
txOuts = append(txOuts, txOut)
}
}
payload, err := SerializeCoinbasePayload(scriptPubKey, extraData)
if err != nil {
return nil, err
}
coinbaseTx := wire.NewSubnetworkMsgTx(wire.TxVersion, txIns, txOuts, subnetworkid.SubnetworkIDCoinbase, 0, payload)
sortedCoinbaseTx := txsort.Sort(coinbaseTx)
return util.NewTx(sortedCoinbaseTx), nil
}
// SerializeCoinbasePayload builds the coinbase payload based on the provided scriptPubKey and extra data.
func SerializeCoinbasePayload(scriptPubKey []byte, extraData []byte) ([]byte, error) {
w := &bytes.Buffer{}
err := wire.WriteVarInt(w, uint64(len(scriptPubKey)))
if err != nil {
return nil, err
}
_, err = w.Write(scriptPubKey)
if err != nil {
return nil, err
}
_, err = w.Write(extraData)
if err != nil {
return nil, err
}
return w.Bytes(), nil
}
// DeserializeCoinbasePayload deserializes the coinbase payload into its components (scriptPubKey and extra data).
func DeserializeCoinbasePayload(tx *wire.MsgTx) (scriptPubKey []byte, extraData []byte, err error) {
r := bytes.NewReader(tx.Payload)
scriptPubKeyLen, err := wire.ReadVarInt(r)
if err != nil {
return nil, nil, err
}
scriptPubKey = make([]byte, scriptPubKeyLen)
_, err = r.Read(scriptPubKey)
if err != nil {
return nil, nil, err
}
extraData = make([]byte, r.Len())
if r.Len() != 0 {
_, err = r.Read(extraData)
if err != nil {
return nil, nil, err
}
}
return scriptPubKey, extraData, nil
}
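
A short round-trip sketch of the two payload functions above. It assumes the blockdag, wire, and subnetworkid packages exactly as shown in this diff; the scriptPubKey and extra-data values are made up:

package main

import (
	"fmt"

	"github.com/daglabs/btcd/blockdag"
	"github.com/daglabs/btcd/util/subnetworkid"
	"github.com/daglabs/btcd/wire"
)

func main() {
	// Hypothetical inputs: a one-byte script (OP_TRUE) and a miner tag.
	scriptPubKey := []byte{0x51}
	extraData := []byte("miner/1.0")

	payload, err := blockdag.SerializeCoinbasePayload(scriptPubKey, extraData)
	if err != nil {
		panic(err)
	}

	// Wrap the payload in a coinbase-subnetwork transaction, the same way
	// expectedCoinbaseTransaction does above.
	tx := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{}, []*wire.TxOut{},
		subnetworkid.SubnetworkIDCoinbase, 0, payload)

	gotScript, gotExtra, err := blockdag.DeserializeCoinbasePayload(tx)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x %q\n", gotScript, gotExtra) // 51 "miner/1.0"
}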
// coinbaseInputAndOutputForBlueBlock calculates the input and output that should go into the coinbase transaction of blueBlock.
// If blueBlock's total reward is 0, only txIn is returned and txOut is nil.
func coinbaseInputAndOutputForBlueBlock(dag *BlockDAG, blueBlock *blockNode,
txsAcceptanceData MultiBlockTxsAcceptanceData, feeData map[daghash.Hash]compactFeeData) (
*wire.TxIn, *wire.TxOut, error) {
blockTxsAcceptanceData, ok := txsAcceptanceData.FindAcceptanceData(blueBlock.hash)
if !ok {
return nil, nil, errors.Errorf("No txsAcceptanceData for block %s", blueBlock.hash)
}
blockFeeData, ok := feeData[*blueBlock.hash]
if !ok {
return nil, nil, errors.Errorf("No feeData for block %s", blueBlock.hash)
}
if len(blockTxsAcceptanceData.TxAcceptanceData) != blockFeeData.Len() {
return nil, nil, errors.Errorf(
"length of accepted transaction data(%d) and fee data(%d) is not equal for block %s",
len(blockTxsAcceptanceData.TxAcceptanceData), blockFeeData.Len(), blueBlock.hash)
}
txIn := &wire.TxIn{
SignatureScript: []byte{},
PreviousOutpoint: wire.Outpoint{
TxID: daghash.TxID(*blueBlock.hash),
Index: math.MaxUint32,
},
Sequence: wire.MaxTxInSequenceNum,
}
totalFees := uint64(0)
feeIterator := blockFeeData.iterator()
for _, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
fee, err := feeIterator.next()
if err != nil {
return nil, nil, errors.Errorf("Error retrieving fee from compactFeeData iterator: %s", err)
}
if txAcceptanceData.IsAccepted {
totalFees += fee
}
}
totalReward := CalcBlockSubsidy(blueBlock.height, dag.dagParams) + totalFees
if totalReward == 0 {
return txIn, nil, nil
}
// the ScriptPubKey for the coinbase is parsed from the coinbase payload
scriptPubKey, _, err := DeserializeCoinbasePayload(blockTxsAcceptanceData.TxAcceptanceData[0].Tx.MsgTx())
if err != nil {
return nil, nil, err
}
txOut := &wire.TxOut{
Value: totalReward,
ScriptPubKey: scriptPubKey,
}
return txIn, txOut, nil
}
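
Note that the fee-accumulation loop above only counts the fees of transactions that were actually accepted. A tiny self-contained sketch of that rule with hypothetical values:

package main

import "fmt"

// acceptedFeesTotal mirrors the loop above: a transaction's fee counts
// toward the coinbase reward only if the transaction was accepted.
func acceptedFeesTotal(fees []uint64, accepted []bool) uint64 {
	total := uint64(0)
	for i, fee := range fees {
		if accepted[i] {
			total += fee
		}
	}
	return total
}

func main() {
	fees := []uint64{1000, 250, 4000}
	accepted := []bool{true, false, true}
	fmt.Println(acceptedFeesTotal(fees, accepted)) // 5000
}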

View File

@@ -7,78 +7,31 @@ package blockdag
import (
"compress/bzip2"
"encoding/binary"
"fmt"
"github.com/pkg/errors"
"io"
"os"
"path/filepath"
"reflect"
"strings"
"testing"
"time"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/dagconfig/daghash"
_ "github.com/daglabs/btcd/database/ffldb"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
)
// loadBlocks reads files containing bitcoin block data (gzipped but otherwise
// in the format bitcoind writes) from disk and returns them as an array of
// util.Block. This is largely borrowed from the test code in btcdb.
func loadBlocks(filename string) (blocks []*util.Block, err error) {
filename = filepath.Join("testdata/", filename)
var network = wire.MainNet
var dr io.Reader
var fi io.ReadCloser
fi, err = os.Open(filename)
if err != nil {
return
func loadBlocksWithLog(t *testing.T, filename string) ([]*util.Block, error) {
blocks, err := LoadBlocks(filename)
if err == nil {
t.Logf("Loaded %d blocks from file %s", len(blocks), filename)
for i, b := range blocks {
t.Logf("Block #%d: %s", i, b.Hash())
}
}
if strings.HasSuffix(filename, ".bz2") {
dr = bzip2.NewReader(fi)
} else {
dr = fi
}
defer fi.Close()
var block *util.Block
err = nil
for height := 0; err == nil; height++ {
var rintbuf uint32
err = binary.Read(dr, binary.LittleEndian, &rintbuf)
if err == io.EOF {
// hit end of file at expected offset: no warning
height--
err = nil
break
}
if err != nil {
break
}
if rintbuf != uint32(network) {
break
}
err = binary.Read(dr, binary.LittleEndian, &rintbuf)
blocklen := rintbuf
rbytes := make([]byte, blocklen)
// read block
dr.Read(rbytes)
block, err = util.NewBlockFromBytes(rbytes)
if err != nil {
return
}
block.SetHeight(int32(height))
blocks = append(blocks, block)
}
return
return blocks, err
}
// loadUTXOSet returns a utxo view loaded from a file.
@@ -143,16 +96,16 @@ func loadUTXOSet(filename string) (UTXOSet, error) {
if err != nil {
return nil, err
}
utxoSet.utxoCollection[wire.OutPoint{TxID: txID, Index: index}] = entry
utxoSet.utxoCollection[wire.Outpoint{TxID: txID, Index: index}] = entry
}
return utxoSet, nil
}
// TestSetBlockRewardMaturity makes the ability to set the block reward maturity
// TestSetCoinbaseMaturity makes the ability to set the coinbase maturity
// available when running tests.
func (dag *BlockDAG) TestSetBlockRewardMaturity(maturity uint16) {
dag.dagParams.BlockRewardMaturity = maturity
func (dag *BlockDAG) TestSetCoinbaseMaturity(maturity uint64) {
dag.dagParams.BlockCoinbaseMaturity = maturity
}
// newTestDAG returns a DAG that is usable for synthetic tests. It is
@@ -166,20 +119,19 @@ func newTestDAG(params *dagconfig.Params) *BlockDAG {
index := newBlockIndex(nil, params)
index.AddNode(node)
targetTimespan := int64(params.TargetTimespan / time.Second)
targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second)
adjustmentFactor := params.RetargetAdjustmentFactor
return &BlockDAG{
dagParams: params,
timeSource: NewMedianTime(),
minRetargetTimespan: targetTimespan / adjustmentFactor,
maxRetargetTimespan: targetTimespan * adjustmentFactor,
blocksPerRetarget: int32(targetTimespan / targetTimePerBlock),
index: index,
virtual: newVirtualBlock(setFromSlice(node), params.K),
genesis: index.LookupNode(params.GenesisHash),
warningCaches: newThresholdCaches(vbNumBits),
deploymentCaches: newThresholdCaches(dagconfig.DefinedDeployments),
dagParams: params,
timeSource: NewMedianTime(),
targetTimePerBlock: targetTimePerBlock,
difficultyAdjustmentWindowSize: params.DifficultyAdjustmentWindowSize,
TimestampDeviationTolerance: params.TimestampDeviationTolerance,
powMaxBits: util.BigToCompact(params.PowMax),
index: index,
virtual: newVirtualBlock(setFromSlice(node), params.K),
genesis: index.LookupNode(params.GenesisHash),
warningCaches: newThresholdCaches(vbNumBits),
deploymentCaches: newThresholdCaches(dagconfig.DefinedDeployments),
}
}
@@ -188,12 +140,13 @@ func newTestDAG(params *dagconfig.Params) *BlockDAG {
func newTestNode(parents blockSet, blockVersion int32, bits uint32, timestamp time.Time, phantomK uint32) *blockNode {
// Make up a header and create a block node from it.
header := &wire.BlockHeader{
Version: blockVersion,
ParentHashes: parents.hashes(),
Bits: bits,
Timestamp: timestamp,
HashMerkleRoot: &daghash.ZeroHash,
IDMerkleRoot: &daghash.ZeroHash,
Version: blockVersion,
ParentHashes: parents.hashes(),
Bits: bits,
Timestamp: timestamp,
HashMerkleRoot: &daghash.ZeroHash,
AcceptedIDMerkleRoot: &daghash.ZeroHash,
UTXOCommitment: &daghash.ZeroHash,
}
return newBlockNode(header, parents, phantomK)
}
@@ -232,7 +185,7 @@ func checkRuleError(gotErr, wantErr error) error {
// Ensure the error code is of the expected type and the error
// code matches the value specified in the test instance.
if reflect.TypeOf(gotErr) != reflect.TypeOf(wantErr) {
return fmt.Errorf("wrong error - got %T (%[1]v), want %T",
return errors.Errorf("wrong error - got %T (%[1]v), want %T",
gotErr, wantErr)
}
if gotErr == nil {
@@ -242,7 +195,7 @@ func checkRuleError(gotErr, wantErr error) error {
// Ensure the want error type is a script error.
werr, ok := wantErr.(RuleError)
if !ok {
return fmt.Errorf("unexpected test error type %T", wantErr)
return errors.Errorf("unexpected test error type %T", wantErr)
}
// Ensure the error codes match. It's safe to use a raw type assert
@@ -250,7 +203,7 @@ func checkRuleError(gotErr, wantErr error) error {
// the want error is a script error.
gotErrorCode := gotErr.(RuleError).ErrorCode
if gotErrorCode != werr.ErrorCode {
return fmt.Errorf("mismatched error code - got %v (%v), want %v",
return errors.Errorf("mismatched error code - got %v (%v), want %v",
gotErrorCode, gotErr, werr.ErrorCode)
}

View File

@@ -241,27 +241,27 @@ func isPubKey(script []byte) (bool, []byte) {
// compressedScriptSize returns the number of bytes the passed script would take
// when encoded with the domain specific compression algorithm described above.
func compressedScriptSize(pkScript []byte) int {
func compressedScriptSize(scriptPubKey []byte) int {
// Pay-to-pubkey-hash script.
if valid, _ := isPubKeyHash(pkScript); valid {
if valid, _ := isPubKeyHash(scriptPubKey); valid {
return 21
}
// Pay-to-script-hash script.
if valid, _ := isScriptHash(pkScript); valid {
if valid, _ := isScriptHash(scriptPubKey); valid {
return 21
}
// Pay-to-pubkey (compressed or uncompressed) script.
if valid, _ := isPubKey(pkScript); valid {
if valid, _ := isPubKey(scriptPubKey); valid {
return 33
}
// When none of the above special cases apply, encode the script as is
// preceded by the sum of its size and the number of special cases
// encoded as a variable length quantity.
return serializeSizeVLQ(uint64(len(pkScript)+numSpecialScripts)) +
len(pkScript)
return serializeSizeVLQ(uint64(len(scriptPubKey)+numSpecialScripts)) +
len(scriptPubKey)
}
// decodeCompressedScriptSize treats the passed serialized bytes as a compressed
@@ -296,23 +296,23 @@ func decodeCompressedScriptSize(serialized []byte) int {
// target byte slice. The target byte slice must be at least large enough to
// handle the number of bytes returned by the compressedScriptSize function or
// it will panic.
func putCompressedScript(target, pkScript []byte) int {
func putCompressedScript(target, scriptPubKey []byte) int {
// Pay-to-pubkey-hash script.
if valid, hash := isPubKeyHash(pkScript); valid {
if valid, hash := isPubKeyHash(scriptPubKey); valid {
target[0] = cstPayToPubKeyHash
copy(target[1:21], hash)
return 21
}
// Pay-to-script-hash script.
if valid, hash := isScriptHash(pkScript); valid {
if valid, hash := isScriptHash(scriptPubKey); valid {
target[0] = cstPayToScriptHash
copy(target[1:21], hash)
return 21
}
// Pay-to-pubkey (compressed or uncompressed) script.
if valid, serializedPubKey := isPubKey(pkScript); valid {
if valid, serializedPubKey := isPubKey(scriptPubKey); valid {
pubKeyFormat := serializedPubKey[0]
switch pubKeyFormat {
case 0x02, 0x03:
@@ -331,10 +331,10 @@ func putCompressedScript(target, pkScript []byte) int {
// When none of the above special cases apply, encode the unmodified
// script preceded by the sum of its size and the number of special
// cases encoded as a variable length quantity.
encodedSize := uint64(len(pkScript) + numSpecialScripts)
encodedSize := uint64(len(scriptPubKey) + numSpecialScripts)
vlqSizeLen := putVLQ(target, encodedSize)
copy(target[vlqSizeLen:], pkScript)
return vlqSizeLen + len(pkScript)
copy(target[vlqSizeLen:], scriptPubKey)
return vlqSizeLen + len(scriptPubKey)
}
// decompressScript returns the original script obtained by decompressing the
@@ -344,50 +344,50 @@ func putCompressedScript(target, pkScript []byte) int {
// NOTE: The script parameter must already have been proven to be long enough
// to contain the number of bytes returned by decodeCompressedScriptSize or it
// will panic. This is acceptable since it is only an internal function.
func decompressScript(compressedPkScript []byte) []byte {
func decompressScript(compressedScriptPubKey []byte) []byte {
// In practice this function will not be called with a zero-length or
// nil script since the nil script encoding includes the length, however
// the code below assumes the length exists, so just return nil now if
// the function ever ends up being called with a nil script in the
// future.
if len(compressedPkScript) == 0 {
if len(compressedScriptPubKey) == 0 {
return nil
}
// Decode the script size and examine it for the special cases.
encodedScriptSize, bytesRead := deserializeVLQ(compressedPkScript)
encodedScriptSize, bytesRead := deserializeVLQ(compressedScriptPubKey)
switch encodedScriptSize {
// Pay-to-pubkey-hash script. The resulting script is:
// <OP_DUP><OP_HASH160><20 byte hash><OP_EQUALVERIFY><OP_CHECKSIG>
case cstPayToPubKeyHash:
pkScript := make([]byte, 25)
pkScript[0] = txscript.OpDup
pkScript[1] = txscript.OpHash160
pkScript[2] = txscript.OpData20
copy(pkScript[3:], compressedPkScript[bytesRead:bytesRead+20])
pkScript[23] = txscript.OpEqualVerify
pkScript[24] = txscript.OpCheckSig
return pkScript
scriptPubKey := make([]byte, 25)
scriptPubKey[0] = txscript.OpDup
scriptPubKey[1] = txscript.OpHash160
scriptPubKey[2] = txscript.OpData20
copy(scriptPubKey[3:], compressedScriptPubKey[bytesRead:bytesRead+20])
scriptPubKey[23] = txscript.OpEqualVerify
scriptPubKey[24] = txscript.OpCheckSig
return scriptPubKey
// Pay-to-script-hash script. The resulting script is:
// <OP_HASH160><20 byte script hash><OP_EQUAL>
case cstPayToScriptHash:
pkScript := make([]byte, 23)
pkScript[0] = txscript.OpHash160
pkScript[1] = txscript.OpData20
copy(pkScript[2:], compressedPkScript[bytesRead:bytesRead+20])
pkScript[22] = txscript.OpEqual
return pkScript
scriptPubKey := make([]byte, 23)
scriptPubKey[0] = txscript.OpHash160
scriptPubKey[1] = txscript.OpData20
copy(scriptPubKey[2:], compressedScriptPubKey[bytesRead:bytesRead+20])
scriptPubKey[22] = txscript.OpEqual
return scriptPubKey
// Pay-to-compressed-pubkey script. The resulting script is:
// <OP_DATA_33><33 byte compressed pubkey><OP_CHECKSIG>
case cstPayToPubKeyComp2, cstPayToPubKeyComp3:
pkScript := make([]byte, 35)
pkScript[0] = txscript.OpData33
pkScript[1] = byte(encodedScriptSize)
copy(pkScript[2:], compressedPkScript[bytesRead:bytesRead+32])
pkScript[34] = txscript.OpCheckSig
return pkScript
scriptPubKey := make([]byte, 35)
scriptPubKey[0] = txscript.OpData33
scriptPubKey[1] = byte(encodedScriptSize)
copy(scriptPubKey[2:], compressedScriptPubKey[bytesRead:bytesRead+32])
scriptPubKey[34] = txscript.OpCheckSig
return scriptPubKey
// Pay-to-uncompressed-pubkey script. The resulting script is:
// <OP_DATA_65><65 byte uncompressed pubkey><OP_CHECKSIG>
@@ -398,26 +398,26 @@ func decompressScript(compressedPkScript []byte) []byte {
// encoding ensures it is valid before compressing to this type.
compressedKey := make([]byte, 33)
compressedKey[0] = byte(encodedScriptSize - 2)
copy(compressedKey[1:], compressedPkScript[1:])
copy(compressedKey[1:], compressedScriptPubKey[1:])
key, err := btcec.ParsePubKey(compressedKey, btcec.S256())
if err != nil {
return nil
}
pkScript := make([]byte, 67)
pkScript[0] = txscript.OpData65
copy(pkScript[1:], key.SerializeUncompressed())
pkScript[66] = txscript.OpCheckSig
return pkScript
scriptPubKey := make([]byte, 67)
scriptPubKey[0] = txscript.OpData65
copy(scriptPubKey[1:], key.SerializeUncompressed())
scriptPubKey[66] = txscript.OpCheckSig
return scriptPubKey
}
// When none of the special cases apply, the script was encoded using
// the general format, so reduce the script size by the number of
// special cases and return the unmodified script.
scriptSize := int(encodedScriptSize - numSpecialScripts)
pkScript := make([]byte, scriptSize)
copy(pkScript, compressedPkScript[bytesRead:bytesRead+scriptSize])
return pkScript
scriptPubKey := make([]byte, scriptSize)
copy(scriptPubKey, compressedScriptPubKey[bytesRead:bytesRead+scriptSize])
return scriptPubKey
}
// -----------------------------------------------------------------------------
@@ -543,9 +543,9 @@ func decompressTxOutAmount(amount uint64) uint64 {
// compressedTxOutSize returns the number of bytes the passed transaction output
// fields would take when encoded with the format described above.
func compressedTxOutSize(amount uint64, pkScript []byte) int {
func compressedTxOutSize(amount uint64, scriptPubKey []byte) int {
return serializeSizeVLQ(compressTxOutAmount(amount)) +
compressedScriptSize(pkScript)
compressedScriptSize(scriptPubKey)
}
// putCompressedTxOut compresses the passed amount and script according to their
@@ -553,9 +553,9 @@ func compressedTxOutSize(amount uint64, pkScript []byte) int {
// passed target byte slice with the format described above. The target byte
// slice must be at least large enough to handle the number of bytes returned by
// the compressedTxOutSize function or it will panic.
func putCompressedTxOut(target []byte, amount uint64, pkScript []byte) int {
func putCompressedTxOut(target []byte, amount uint64, scriptPubKey []byte) int {
offset := putVLQ(target, compressTxOutAmount(amount))
offset += putCompressedScript(target[offset:], pkScript)
offset += putCompressedScript(target[offset:], scriptPubKey)
return offset
}
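
For the pay-to-pubkey-hash case above, the 25-byte standard script collapses to a type byte plus the 20-byte hash. A self-contained sketch of just that branch (the type-byte value 0x00 matches the test vectors below, but treat it as illustrative):

package main

import "fmt"

// compressP2PKH mirrors the pay-to-pubkey-hash branch of
// putCompressedScript: a single type byte (cstPayToPubKeyHash, assumed 0
// here) followed by the 20-byte pubkey hash.
func compressP2PKH(pubKeyHash [20]byte) []byte {
	out := make([]byte, 21)
	out[0] = 0x00 // cstPayToPubKeyHash
	copy(out[1:], pubKeyHash[:])
	return out
}

func main() {
	var hash [20]byte
	for i := range hash {
		hash[i] = byte(i)
	}
	// The 25-byte OP_DUP OP_HASH160 <20 bytes> OP_EQUALVERIFY OP_CHECKSIG
	// script compresses to 21 bytes.
	fmt.Println(len(compressP2PKH(hash))) // 21
}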

View File

@@ -162,11 +162,6 @@ func TestScriptCompression(t *testing.T) {
uncompressed: hexToBytes("3302aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
compressed: hexToBytes("293302aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
},
{
name: "null data",
uncompressed: hexToBytes("6a200102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20"),
compressed: hexToBytes("286a200102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20"),
},
{
name: "requires 2 size bytes - data push 200 bytes",
uncompressed: append(hexToBytes("4cc8"), bytes.Repeat([]byte{0x00}, 200)...),
@@ -265,7 +260,7 @@ func TestAmountCompression(t *testing.T) {
compressed uint64
}{
{
name: "0 BTC (sometimes used in nulldata)",
name: "0 BTC",
uncompressed: 0,
compressed: 0,
},
@@ -338,35 +333,29 @@ func TestCompressedTxOut(t *testing.T) {
t.Parallel()
tests := []struct {
name string
amount uint64
pkScript []byte
compressed []byte
name string
amount uint64
scriptPubKey []byte
compressed []byte
}{
{
name: "nulldata with 0 BTC",
amount: 0,
pkScript: hexToBytes("6a200102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20"),
compressed: hexToBytes("00286a200102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20"),
name: "pay-to-pubkey-hash dust",
amount: 546,
scriptPubKey: hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac"),
compressed: hexToBytes("a52f001018853670f9f3b0582c5b9ee8ce93764ac32b93"),
},
{
name: "pay-to-pubkey-hash dust",
amount: 546,
pkScript: hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac"),
compressed: hexToBytes("a52f001018853670f9f3b0582c5b9ee8ce93764ac32b93"),
},
{
name: "pay-to-pubkey uncompressed 1 BTC",
amount: 100000000,
pkScript: hexToBytes("4104192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b40d45264838c0bd96852662ce6a847b197376830160c6d2eb5e6a4c44d33f453eac"),
compressed: hexToBytes("0904192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
name: "pay-to-pubkey uncompressed 1 BTC",
amount: 100000000,
scriptPubKey: hexToBytes("4104192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b40d45264838c0bd96852662ce6a847b197376830160c6d2eb5e6a4c44d33f453eac"),
compressed: hexToBytes("0904192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
},
}
for _, test := range tests {
// Ensure the function to calculate the serialized size without
// actually serializing the txout is calculated properly.
gotSize := compressedTxOutSize(test.amount, test.pkScript)
gotSize := compressedTxOutSize(test.amount, test.scriptPubKey)
if gotSize != len(test.compressed) {
t.Errorf("compressedTxOutSize (%s): did not get "+
"expected size - got %d, want %d", test.name,
@@ -377,7 +366,7 @@ func TestCompressedTxOut(t *testing.T) {
// Ensure the txout compresses to the expected value.
gotCompressed := make([]byte, gotSize)
gotBytesWritten := putCompressedTxOut(gotCompressed,
test.amount, test.pkScript)
test.amount, test.scriptPubKey)
if !bytes.Equal(gotCompressed, test.compressed) {
t.Errorf("compressTxOut (%s): did not get expected "+
"bytes - got %x, want %x", test.name,
@@ -407,10 +396,10 @@ func TestCompressedTxOut(t *testing.T) {
test.name, gotAmount, test.amount)
continue
}
if !bytes.Equal(gotScript, test.pkScript) {
if !bytes.Equal(gotScript, test.scriptPubKey) {
t.Errorf("decodeCompressedTxOut (%s): did not get "+
"expected script - got %x, want %x",
test.name, gotScript, test.pkScript)
test.name, gotScript, test.scriptPubKey)
continue
}
if gotBytesRead != len(test.compressed) {

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -9,11 +9,14 @@ import (
"encoding/binary"
"encoding/json"
"fmt"
"github.com/pkg/errors"
"io"
"sync"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/binaryserializer"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/util/subnetworkid"
"github.com/daglabs/btcd/wire"
)
@@ -34,14 +37,6 @@ var (
// block headers and contextual information.
blockIndexBucketName = []byte("blockheaderidx")
// hashIndexBucketName is the name of the db bucket used to house to the
// block hash -> block height index.
hashIndexBucketName = []byte("hashidx")
// heightIndexBucketName is the name of the db bucket used to house to
// the block height -> block hash index.
heightIndexBucketName = []byte("heightidx")
// dagStateKeyName is the name of the db key used to store the DAG
// tip hashes.
dagStateKeyName = []byte("dagstate")
@@ -54,6 +49,10 @@ var (
// unspent transaction output set.
utxoSetBucketName = []byte("utxoset")
// utxoDiffsBucketName is the name of the db bucket used to house the
// diffs and diff children of blocks.
utxoDiffsBucketName = []byte("utxodiffs")
// subnetworksBucketName is the name of the db bucket used to store the
// subnetwork registry.
subnetworksBucketName = []byte("subnetworks")
@@ -137,7 +136,7 @@ func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error {
// compressed script []byte variable
//
// The serialized header code format is:
// bit 0 - containing transaction is a block reward
// bit 0 - containing transaction is a coinbase
// bits 1-x - height of the block that contains the unspent txout
//
// Example 1:
@@ -205,7 +204,7 @@ var outpointKeyPool = sync.Pool{
// returned to the free list by using the recycleOutpointKey function when the
// caller is done with it _unless_ the slice will need to live for longer than
// the caller can calculate such as when used to write to the database.
func outpointKey(outpoint wire.OutPoint) *[]byte {
func outpointKey(outpoint wire.Outpoint) *[]byte {
// A VLQ employs an MSB encoding, so they are useful not only to reduce
// the amount of storage space, but also so iteration of UTXOs when
// doing byte-wise comparisons will produce them in order.
@@ -223,101 +222,14 @@ func recycleOutpointKey(key *[]byte) {
outpointKeyPool.Put(key)
}
// utxoEntryHeaderCode returns the calculated header code to be used when
// serializing the provided utxo entry.
func utxoEntryHeaderCode(entry *UTXOEntry) uint64 {
// As described in the serialization format comments, the header code
// encodes the height shifted over one bit and the block reward flag in the
// lowest bit.
headerCode := uint64(entry.BlockHeight()) << 1
if entry.IsBlockReward() {
headerCode |= 0x01
}
return headerCode
}
// serializeUTXOEntry returns the entry serialized to a format that is suitable
// for long-term storage. The format is described in detail above.
func serializeUTXOEntry(entry *UTXOEntry) ([]byte, error) {
// Encode the header code.
headerCode := utxoEntryHeaderCode(entry)
// Calculate the size needed to serialize the entry.
size := serializeSizeVLQ(headerCode) +
compressedTxOutSize(uint64(entry.Amount()), entry.PkScript())
// Serialize the header code followed by the compressed unspent
// transaction output.
serialized := make([]byte, size)
offset := putVLQ(serialized, headerCode)
offset += putCompressedTxOut(serialized[offset:], uint64(entry.Amount()),
entry.PkScript())
return serialized, nil
}
// deserializeOutPoint decodes an outPoint from the passed serialized byte
// slice into a new wire.OutPoint using a format that is suitable for long-
// term storage. this format is described in detail above.
func deserializeOutPoint(serialized []byte) (*wire.OutPoint, error) {
if len(serialized) <= daghash.HashSize {
return nil, errDeserialize("unexpected end of data")
}
txID := daghash.TxID{}
txID.SetBytes(serialized[:daghash.HashSize])
index, _ := deserializeVLQ(serialized[daghash.HashSize:])
return wire.NewOutPoint(&txID, uint32(index)), nil
}
// deserializeUTXOEntry decodes a UTXO entry from the passed serialized byte
// slice into a new UTXOEntry using a format that is suitable for long-term
// storage. The format is described in detail above.
func deserializeUTXOEntry(serialized []byte) (*UTXOEntry, error) {
// Deserialize the header code.
code, offset := deserializeVLQ(serialized)
if offset >= len(serialized) {
return nil, errDeserialize("unexpected end of data after header")
}
// Decode the header code.
//
// Bit 0 indicates whether the containing transaction is a block reward.
// Bits 1-x encode height of containing transaction.
isBlockReward := code&0x01 != 0
blockHeight := int32(code >> 1)
// Decode the compressed unspent transaction output.
amount, pkScript, _, err := decodeCompressedTxOut(serialized[offset:])
if err != nil {
return nil, errDeserialize(fmt.Sprintf("unable to decode "+
"UTXO: %s", err))
}
entry := &UTXOEntry{
amount: amount,
pkScript: pkScript,
blockHeight: blockHeight,
packedFlags: 0,
}
if isBlockReward {
entry.packedFlags |= tfBlockReward
}
return entry, nil
}
// dbPutUTXODiff uses an existing database transaction to update the UTXO set
// in the database based on the provided UTXO view contents and state. In
// particular, only the entries that have been marked as modified are written
// to the database.
func dbPutUTXODiff(dbTx database.Tx, diff *UTXODiff) error {
utxoBucket := dbTx.Metadata().Bucket(utxoSetBucketName)
for outPoint := range diff.toRemove {
key := outpointKey(outPoint)
for outpoint := range diff.toRemove {
key := outpointKey(outpoint)
err := utxoBucket.Delete(*key)
recycleOutpointKey(key)
if err != nil {
@@ -325,15 +237,12 @@ func dbPutUTXODiff(dbTx database.Tx, diff *UTXODiff) error {
}
}
for outPoint, entry := range diff.toAdd {
for outpoint, entry := range diff.toAdd {
// Serialize and store the UTXO entry.
serialized, err := serializeUTXOEntry(entry)
if err != nil {
return err
}
serialized := serializeUTXOEntry(entry)
key := outpointKey(outPoint)
err = utxoBucket.Put(*key, serialized)
key := outpointKey(outpoint)
err := utxoBucket.Put(*key, serialized)
// NOTE: The key is intentionally not recycled here since the
// database interface contract prohibits modifications. It will
// be garbage collected normally when the database is done with
@@ -346,58 +255,6 @@ func dbPutUTXODiff(dbTx database.Tx, diff *UTXODiff) error {
return nil
}
// -----------------------------------------------------------------------------
// The block index consists of two buckets with an entry for every block in the
// main chain. One bucket is for the hash to height mapping and the other is
// for the height to hash mapping.
//
// The serialized format for values in the hash to height bucket is:
// <height>
//
// Field Type Size
// height uint32 4 bytes
//
// The serialized format for values in the height to hash bucket is:
// <hash>
//
// Field Type Size
// hash daghash.Hash daghash.HashSize
// -----------------------------------------------------------------------------
// dbPutBlockIndex uses an existing database transaction to update or add the
// block index entries for the hash to height and height to hash mappings for
// the provided values.
func dbPutBlockIndex(dbTx database.Tx, hash *daghash.Hash, height int32) error {
// Serialize the height for use in the index entries.
var serializedHeight [4]byte
byteOrder.PutUint32(serializedHeight[:], uint32(height))
// Add the block hash to height mapping to the index.
meta := dbTx.Metadata()
hashIndex := meta.Bucket(hashIndexBucketName)
if err := hashIndex.Put(hash[:], serializedHeight[:]); err != nil {
return err
}
// Add the block height to hash mapping to the index.
heightIndex := meta.Bucket(heightIndexBucketName)
return heightIndex.Put(serializedHeight[:], hash[:])
}
// dbFetchHeightByHash uses an existing database transaction to retrieve the
// height for the provided hash from the index.
func dbFetchHeightByHash(dbTx database.Tx, hash *daghash.Hash) (int32, error) {
meta := dbTx.Metadata()
hashIndex := meta.Bucket(hashIndexBucketName)
serializedHeight := hashIndex.Get(hash[:])
if serializedHeight == nil {
str := fmt.Sprintf("block %s is not in the main chain", hash)
return 0, errNotInDAG(str)
}
return int32(byteOrder.Uint32(serializedHeight)), nil
}
type dagState struct {
TipHashes []*daghash.Hash
LastFinalityPoint *daghash.Hash
@@ -452,28 +309,18 @@ func (dag *BlockDAG) createDAGState() error {
return err
}
// Create the bucket that houses the chain block hash to height
// index.
_, err = meta.CreateBucket(hashIndexBucketName)
if err != nil {
return err
}
// Create the bucket that houses the chain block height to hash
// index.
_, err = meta.CreateBucket(heightIndexBucketName)
if err != nil {
return err
}
// Create the bucket that houses the utxo set and store its
// version. Note that the genesis block coinbase transaction is
// intentionally not inserted here since it is not spendable by
// consensus rules.
// Create the buckets that house the utxo set, the utxo diffs, and their
// version.
_, err = meta.CreateBucket(utxoSetBucketName)
if err != nil {
return err
}
_, err = meta.CreateBucket(utxoDiffsBucketName)
if err != nil {
return err
}
err = dbPutVersion(dbTx, utxoSetVersionKeyName,
latestUTXOSetBucketVersion)
if err != nil {
@@ -489,6 +336,56 @@ func (dag *BlockDAG) createDAGState() error {
if err := dbPutLocalSubnetworkID(dbTx, dag.subnetworkID); err != nil {
return err
}
if _, err := meta.CreateBucketIfNotExists(idByHashIndexBucketName); err != nil {
return err
}
if _, err := meta.CreateBucketIfNotExists(hashByIDIndexBucketName); err != nil {
return err
}
return nil
})
if err != nil {
return err
}
return nil
}
func (dag *BlockDAG) removeDAGState() error {
err := dag.db.Update(func(dbTx database.Tx) error {
meta := dbTx.Metadata()
err := meta.DeleteBucket(blockIndexBucketName)
if err != nil {
return err
}
err = meta.DeleteBucket(utxoSetBucketName)
if err != nil {
return err
}
err = meta.DeleteBucket(utxoDiffsBucketName)
if err != nil {
return err
}
err = dbTx.Metadata().Delete(utxoSetVersionKeyName)
if err != nil {
return err
}
err = meta.DeleteBucket(subnetworksBucketName)
if err != nil {
return err
}
err = dbTx.Metadata().Delete(localSubnetworkKeyName)
if err != nil {
return err
}
return nil
})
@@ -522,7 +419,7 @@ func (dag *BlockDAG) initDAGState() error {
localSubnetworkID.SetBytes(localSubnetworkIDBytes)
}
if !localSubnetworkID.IsEqual(dag.subnetworkID) {
return fmt.Errorf("Cannot start btcd with subnetwork ID %s because"+
return errors.Errorf("Cannot start btcd with subnetwork ID %s because"+
" its database is already built with subnetwork ID %s. If you"+
" want to switch to a new database, please reset the"+
" database by starting btcd with --reset-db flag", dag.subnetworkID, localSubnetworkID)
@@ -562,55 +459,42 @@ func (dag *BlockDAG) initDAGState() error {
blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
// Determine how many blocks will be loaded into the index so we can
// allocate the right amount.
var blockCount int32
cursor := blockIndexBucket.Cursor()
for ok := cursor.First(); ok; ok = cursor.Next() {
blockCount++
}
blockNodes := make([]blockNode, blockCount)
var i int32
var lastNode *blockNode
cursor = blockIndexBucket.Cursor()
var unprocessedBlockNodes []*blockNode
cursor := blockIndexBucket.Cursor()
for ok := cursor.First(); ok; ok = cursor.Next() {
header, status, err := deserializeBlockRow(cursor.Value())
node, err := dag.deserializeBlockNode(cursor.Value())
if err != nil {
return err
}
parents := newSet()
// Check to see if this node had been stored in the the block DB
// but not yet accepted. If so, add it to a slice to be processed later.
if node.status == statusDataStored {
unprocessedBlockNodes = append(unprocessedBlockNodes, node)
continue
}
if lastNode == nil {
blockHash := header.BlockHash()
if !blockHash.IsEqual(dag.dagParams.GenesisHash) {
if !node.hash.IsEqual(dag.dagParams.GenesisHash) {
return AssertError(fmt.Sprintf("initDAGState: Expected "+
"first entry in block index to be genesis block, "+
"found %s", blockHash))
"found %s", node.hash))
}
} else {
for _, hash := range header.ParentHashes {
parent := dag.index.LookupNode(hash)
if parent == nil {
return AssertError(fmt.Sprintf("initDAGState: Could "+
"not find parent %s for block %s", hash, header.BlockHash()))
}
parents.add(parent)
}
if len(parents) == 0 {
if len(node.parents) == 0 {
return AssertError(fmt.Sprintf("initDAGState: Could "+
"not find any parent for block %s", header.BlockHash()))
"not find any parent for block %s", node.hash))
}
}
// Initialize the block node for the block, connect it,
// Add the node to its parents children, connect it,
// and add it to the block index.
node := &blockNodes[i]
initBlockNode(node, header, parents, dag.dagParams.K)
node.status = status
node.updateParentsChildren()
dag.index.addNode(node)
if blockStatus(status).KnownValid() {
if node.status.KnownValid() {
dag.blockCount++
}
@@ -636,15 +520,15 @@ func (dag *BlockDAG) initDAGState() error {
fullUTXOCollection := make(utxoCollection, utxoEntryCount)
for ok := cursor.First(); ok; ok = cursor.Next() {
// Deserialize the outPoint
outPoint, err := deserializeOutPoint(cursor.Key())
// Deserialize the outpoint
outpoint, err := deserializeOutpoint(cursor.Key())
if err != nil {
// Ensure any deserialization errors are returned as database
// corruption errors.
if isDeserializeErr(err) {
return database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("corrupt outPoint: %s", err),
Description: fmt.Sprintf("corrupt outpoint: %s", err),
}
}
@@ -666,11 +550,14 @@ func (dag *BlockDAG) initDAGState() error {
return err
}
fullUTXOCollection[*outPoint] = entry
fullUTXOCollection[*outpoint] = entry
}
// Apply the loaded utxoCollection to the virtual block.
dag.virtual.utxoSet.utxoCollection = fullUTXOCollection
dag.virtual.utxoSet, err = newFullUTXOSetFromUTXOCollection(fullUTXOCollection)
if err != nil {
return AssertError(fmt.Sprintf("Error loading UTXOSet: %s", err))
}
// Apply the stored tips to the virtual block.
tips := newSet()
@@ -686,33 +573,126 @@ func (dag *BlockDAG) initDAGState() error {
// Set the last finality point
dag.lastFinalityPoint = dag.index.LookupNode(state.LastFinalityPoint)
dag.finalizeNodesBelowFinalityPoint(false)
// Go over any unprocessed blockNodes and process them now.
for _, node := range unprocessedBlockNodes {
// Check to see if the block exists in the block DB. If it
// doesn't, the database has certainly been corrupted.
blockExists, err := dbTx.HasBlock(node.hash)
if err != nil {
return AssertError(fmt.Sprintf("initDAGState: HasBlock "+
"for block %s failed: %s", node.hash, err))
}
if !blockExists {
return AssertError(fmt.Sprintf("initDAGState: block %s "+
"exists in block index but not in block db", node.hash))
}
// Attempt to accept the block.
block, err := dbFetchBlockByNode(dbTx, node)
if err != nil {
return err
}
isOrphan, delay, err := dag.ProcessBlock(block, BFWasStored)
if err != nil {
log.Warnf("Block %s, which was not previously processed, "+
"failed to be accepted to the DAG: %s", node.hash, err)
continue
}
// If the block is an orphan or is delayed then it couldn't have
// possibly been written to the block index in the first place.
if isOrphan {
return AssertError(fmt.Sprintf("Block %s, which was not "+
"previously processed, turned out to be an orphan, which is "+
"impossible.", node.hash))
}
if delay != 0 {
return AssertError(fmt.Sprintf("Block %s, which was not "+
"previously processed, turned out to be delayed, which is "+
"impossible.", node.hash))
}
}
return nil
})
}
// deserializeBlockRow parses a value in the block index bucket into a block
// header and block status bitfield.
func deserializeBlockRow(blockRow []byte) (*wire.BlockHeader, blockStatus, error) {
// deserializeBlockNode parses a value in the block index bucket and returns a block node.
func (dag *BlockDAG) deserializeBlockNode(blockRow []byte) (*blockNode, error) {
buffer := bytes.NewReader(blockRow)
var header wire.BlockHeader
err := header.Deserialize(buffer)
if err != nil {
return nil, statusNone, err
return nil, err
}
node := &blockNode{
hash: header.BlockHash(),
version: header.Version,
bits: header.Bits,
nonce: header.Nonce,
timestamp: header.Timestamp.Unix(),
hashMerkleRoot: header.HashMerkleRoot,
acceptedIDMerkleRoot: header.AcceptedIDMerkleRoot,
utxoCommitment: header.UTXOCommitment,
}
node.children = newSet()
node.parents = newSet()
for _, hash := range header.ParentHashes {
parent := dag.index.LookupNode(hash)
if parent == nil {
return nil, AssertError(fmt.Sprintf("deserializeBlockNode: Could "+
"not find parent %s for block %s", hash, header.BlockHash()))
}
node.parents.add(parent)
}
statusByte, err := buffer.ReadByte()
if err != nil {
return nil, statusNone, err
return nil, err
}
node.status = blockStatus(statusByte)
selectedParentHash := &daghash.Hash{}
if _, err := io.ReadFull(buffer, selectedParentHash[:]); err != nil {
return nil, err
}
return &header, blockStatus(statusByte), nil
// Because genesis doesn't have a selected parent, it's serialized as the zero hash
if !selectedParentHash.IsEqual(&daghash.ZeroHash) {
node.selectedParent = dag.index.LookupNode(selectedParentHash)
}
node.blueScore, err = binaryserializer.Uint64(buffer, byteOrder)
if err != nil {
return nil, err
}
bluesCount, err := wire.ReadVarInt(buffer)
if err != nil {
return nil, err
}
node.blues = make([]*blockNode, bluesCount)
for i := uint64(0); i < bluesCount; i++ {
hash := &daghash.Hash{}
if _, err := io.ReadFull(buffer, hash[:]); err != nil {
return nil, err
}
node.blues[i] = dag.index.LookupNode(hash)
}
node.height = calculateNodeHeight(node)
node.chainHeight = calculateChainHeight(node)
return node, nil
}
// dbFetchBlockByNode uses an existing database transaction to retrieve the
// raw block for the provided node, deserialize it, and return a util.Block
// with the height set.
// of it.
func dbFetchBlockByNode(dbTx database.Tx, node *blockNode) (*util.Block, error) {
// Load the raw block bytes from the database.
blockBytes, err := dbTx.FetchBlock(node.hash)
@@ -720,17 +700,15 @@ func dbFetchBlockByNode(dbTx database.Tx, node *blockNode) (*util.Block, error)
return nil, err
}
// Create the encapsulated block and set the height appropriately.
// Create the encapsulated block.
block, err := util.NewBlockFromBytes(blockBytes)
if err != nil {
return nil, err
}
block.SetHeight(node.height)
return block, nil
}
// dbStoreBlockNode stores the block header and validation status to the block
// dbStoreBlockNode stores the block node data into the block
// index bucket. This overwrites the current entry if there exists one.
func dbStoreBlockNode(dbTx database.Tx, node *blockNode) error {
// Serialize block data to be stored.
@@ -740,15 +718,44 @@ func dbStoreBlockNode(dbTx database.Tx, node *blockNode) error {
if err != nil {
return err
}
err = w.WriteByte(byte(node.status))
if err != nil {
return err
}
// Because genesis doesn't have a selected parent, it's serialized as the zero hash
selectedParentHash := &daghash.ZeroHash
if node.selectedParent != nil {
selectedParentHash = node.selectedParent.hash
}
_, err = w.Write(selectedParentHash[:])
if err != nil {
return err
}
err = binaryserializer.PutUint64(w, byteOrder, node.blueScore)
if err != nil {
return err
}
err = wire.WriteVarInt(w, uint64(len(node.blues)))
if err != nil {
return err
}
for _, blue := range node.blues {
_, err = w.Write(blue.hash[:])
if err != nil {
return err
}
}
value := w.Bytes()
// Write block header data to block index bucket.
blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
key := blockIndexKey(node.hash, uint32(node.height))
key := BlockIndexKey(node.hash, node.blueScore)
return blockIndexBucket.Put(key, value)
}
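For reference, a sketch (not part of the diff) of the block-index value layout that dbStoreBlockNode writes and deserializeBlockNode reads back; field sizes follow the serializers used in the code above, and the endianness of blueScore is whatever the package-level byteOrder variable selects:

// Block-index value layout, in write order (sketch):
//
//   [variable] serialized block header  (wire.BlockHeader.Serialize)
//   [1 byte]   status                   (blockStatus)
//   [32 bytes] selected parent hash     (daghash.ZeroHash for genesis)
//   [8 bytes]  blueScore                (binaryserializer, byteOrder)
//   [varint]   number of blues          (wire.WriteVarInt)
//   [32 bytes] per blue block hash      (in node.blues order)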
@@ -765,23 +772,26 @@ func dbStoreBlock(dbTx database.Tx, block *util.Block) error {
return dbTx.StoreBlock(block)
}
// blockIndexKey generates the binary key for an entry in the block index
// bucket. The key is composed of the block height encoded as a big-endian
// 32-bit unsigned int followed by the 32 byte block hash.
func blockIndexKey(blockHash *daghash.Hash, blockHeight uint32) []byte {
indexKey := make([]byte, daghash.HashSize+4)
binary.BigEndian.PutUint32(indexKey[0:4], blockHeight)
copy(indexKey[4:daghash.HashSize+4], blockHash[:])
// BlockIndexKey generates the binary key for an entry in the block index
// bucket. The key is composed of the block blue score encoded as a big-endian
// 64-bit unsigned int followed by the 32 byte block hash.
// The blue score component is important for iteration order.
func BlockIndexKey(blockHash *daghash.Hash, blueScore uint64) []byte {
indexKey := make([]byte, daghash.HashSize+8)
binary.BigEndian.PutUint64(indexKey[0:8], blueScore)
copy(indexKey[8:daghash.HashSize+8], blockHash[:])
return indexKey
}
// BlockByHash returns the block from the main chain with the given hash with
// the appropriate chain height set.
func blockHashFromBlockIndexKey(BlockIndexKey []byte) (*daghash.Hash, error) {
return daghash.NewHash(BlockIndexKey[8 : daghash.HashSize+8])
}
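A minimal sketch of how these two helpers round-trip (the `block` and `node` values here are hypothetical, not from the diff). Because the blue score prefix is big-endian, lexicographic key order equals numeric blue score order, which is what lets BlockHashesFrom below scan the index with a cursor:

    // Sketch: key round trip with hypothetical block and node values.
    key := BlockIndexKey(block.Hash(), node.blueScore) // 8-byte big-endian score ++ 32-byte hash
    hash, err := blockHashFromBlockIndexKey(key)       // recovers only the hash part
    if err == nil && hash.IsEqual(block.Hash()) {
        // round trip succeeded; the score prefix exists purely for ordering
    }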
// BlockByHash returns the block from the DAG with the given hash.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) BlockByHash(hash *daghash.Hash) (*util.Block, error) {
// Lookup the block hash in block index and ensure it is in the best
// chain.
// Lookup the block hash in block index and ensure it is in the DAG
node := dag.index.LookupNode(hash)
if node == nil {
str := fmt.Sprintf("block %s is not in the main chain", hash)
@@ -797,3 +807,49 @@ func (dag *BlockDAG) BlockByHash(hash *daghash.Hash) (*util.Block, error) {
})
return block, err
}
// BlockHashesFrom returns a slice of block hashes starting from startHash
// ordered by blueScore. If startHash is nil then the genesis block is used.
//
// This method MUST be called with the DAG lock held
func (dag *BlockDAG) BlockHashesFrom(startHash *daghash.Hash, limit int) ([]*daghash.Hash, error) {
blockHashes := make([]*daghash.Hash, 0, limit)
if startHash == nil {
startHash = dag.genesis.hash
// If we're starting from the beginning we should include the
// genesis hash in the result
blockHashes = append(blockHashes, dag.genesis.hash)
}
if !dag.BlockExists(startHash) {
return nil, errors.Errorf("block %s not found", startHash)
}
blueScore, err := dag.BlueScoreByBlockHash(startHash)
if err != nil {
return nil, err
}
err = dag.index.db.View(func(dbTx database.Tx) error {
blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
startKey := BlockIndexKey(startHash, blueScore)
cursor := blockIndexBucket.Cursor()
cursor.Seek(startKey)
for ok := cursor.Next(); ok; ok = cursor.Next() {
key := cursor.Key()
blockHash, err := blockHashFromBlockIndexKey(key)
if err != nil {
return err
}
blockHashes = append(blockHashes, blockHash)
if len(blockHashes) == limit {
break
}
}
return nil
})
if err != nil {
return nil, err
}
return blockHashes, nil
}
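A hedged usage sketch of BlockHashesFrom as a pager (hypothetical caller; it assumes the DAG lock is already held, per the method's contract). Since iteration starts at cursor.Next() after the seek, startHash itself is excluded, so feeding the last returned hash into the next call continues the scan without duplicates:

    // Sketch: paging through the DAG in blue score order.
    var startHash *daghash.Hash // nil means "start from genesis (inclusive)"
    for {
        hashes, err := dag.BlockHashesFrom(startHash, 1000)
        if err != nil {
            return err
        }
        if len(hashes) == 0 {
            break // reached the current tips
        }
        // ... consume hashes ...
        startHash = hashes[len(hashes)-1]
    }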

View File

@@ -6,12 +6,12 @@ package blockdag
import (
"bytes"
"errors"
"github.com/pkg/errors"
"reflect"
"testing"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util/daghash"
)
// TestErrNotInDAG ensures the functions related to errNotInDAG work
@@ -49,24 +49,24 @@ func TestUtxoSerialization(t *testing.T) {
// From tx in main blockchain:
// b7c3332bc138e2c9429818f5fed500bcc1746544218772389054dc8047d7cd3f:0
{
name: "height 1, coinbase",
name: "blue score 1, coinbase",
entry: &UTXOEntry{
amount: 5000000000,
pkScript: hexToBytes("410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858eeac"),
blockHeight: 1,
packedFlags: tfBlockReward,
amount: 5000000000,
scriptPubKey: hexToBytes("410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858eeac"),
blockBlueScore: 1,
packedFlags: tfCoinbase,
},
serialized: hexToBytes("03320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52"),
},
// From tx in main blockchain:
// 8131ffb0a2c945ecaf9b9063e59558784f9c3a74741ce6ae2a18d0571dac15bb:1
{
name: "height 100001, not coinbase",
name: "blue score 100001, not coinbase",
entry: &UTXOEntry{
amount: 1000000,
pkScript: hexToBytes("76a914ee8bd501094a7d5ca318da2506de35e1cb025ddc88ac"),
blockHeight: 100001,
packedFlags: 0,
amount: 1000000,
scriptPubKey: hexToBytes("76a914ee8bd501094a7d5ca318da2506de35e1cb025ddc88ac"),
blockBlueScore: 100001,
packedFlags: 0,
},
serialized: hexToBytes("8b99420700ee8bd501094a7d5ca318da2506de35e1cb025ddc"),
},
@@ -74,12 +74,7 @@ func TestUtxoSerialization(t *testing.T) {
for i, test := range tests {
// Ensure the utxo entry serializes to the expected value.
gotBytes, err := serializeUTXOEntry(test.entry)
if err != nil {
t.Errorf("serializeUTXOEntry #%d (%s) unexpected "+
"error: %v", i, test.name, err)
continue
}
gotBytes := serializeUTXOEntry(test.entry)
if !bytes.Equal(gotBytes, test.serialized) {
t.Errorf("serializeUTXOEntry #%d (%s): mismatched "+
"bytes - got %x, want %x", i, test.name,
@@ -104,22 +99,22 @@ func TestUtxoSerialization(t *testing.T) {
continue
}
if !bytes.Equal(utxoEntry.PkScript(), test.entry.PkScript()) {
if !bytes.Equal(utxoEntry.ScriptPubKey(), test.entry.ScriptPubKey()) {
t.Errorf("deserializeUTXOEntry #%d (%s) mismatched "+
"scripts: got %x, want %x", i, test.name,
utxoEntry.PkScript(), test.entry.PkScript())
utxoEntry.ScriptPubKey(), test.entry.ScriptPubKey())
continue
}
if utxoEntry.BlockHeight() != test.entry.BlockHeight() {
if utxoEntry.BlockBlueScore() != test.entry.BlockBlueScore() {
t.Errorf("deserializeUTXOEntry #%d (%s) mismatched "+
"block height: got %d, want %d", i, test.name,
utxoEntry.BlockHeight(), test.entry.BlockHeight())
"block blue score: got %d, want %d", i, test.name,
utxoEntry.BlockBlueScore(), test.entry.BlockBlueScore())
continue
}
if utxoEntry.IsBlockReward() != test.entry.IsBlockReward() {
if utxoEntry.IsCoinbase() != test.entry.IsCoinbase() {
t.Errorf("deserializeUTXOEntry #%d (%s) mismatched "+
"coinbase flag: got %v, want %v", i, test.name,
utxoEntry.IsBlockReward(), test.entry.IsBlockReward())
utxoEntry.IsCoinbase(), test.entry.IsCoinbase())
continue
}
}

View File

@@ -11,159 +11,42 @@ import (
"github.com/daglabs/btcd/util"
)
// calcEasiestDifficulty calculates the easiest possible difficulty that a block
// can have given starting difficulty bits and a duration. It is mainly used to
// verify that claimed proof of work by a block is sane as compared to a
// known good checkpoint.
func (dag *BlockDAG) calcEasiestDifficulty(bits uint32, duration time.Duration) uint32 {
// Convert types used in the calculations below.
durationVal := int64(duration / time.Second)
adjustmentFactor := big.NewInt(dag.dagParams.RetargetAdjustmentFactor)
// The test network rules allow minimum difficulty blocks after more
// than twice the desired amount of time needed to generate a block has
// elapsed.
if dag.dagParams.ReduceMinDifficulty {
reductionTime := int64(dag.dagParams.MinDiffReductionTime /
time.Second)
if durationVal > reductionTime {
return dag.dagParams.PowLimitBits
}
}
// Since easier difficulty equates to higher numbers, the easiest
// difficulty for a given duration is the largest value possible given
// the number of retargets for the duration and starting difficulty
// multiplied by the max adjustment factor.
newTarget := util.CompactToBig(bits)
for durationVal > 0 && newTarget.Cmp(dag.dagParams.PowLimit) < 0 {
newTarget.Mul(newTarget, adjustmentFactor)
durationVal -= dag.maxRetargetTimespan
}
// Limit new value to the proof of work limit.
if newTarget.Cmp(dag.dagParams.PowLimit) > 0 {
newTarget.Set(dag.dagParams.PowLimit)
}
return util.BigToCompact(newTarget)
}
// findPrevTestNetDifficulty returns the difficulty of the previous block which
// did not have the special testnet minimum difficulty rule applied.
//
// This function MUST be called with the chain state lock held (for writes).
func (dag *BlockDAG) findPrevTestNetDifficulty(startNode *blockNode) uint32 {
// Search backwards through the chain for the last block without
// the special rule applied.
iterNode := startNode
for iterNode != nil && iterNode.height%dag.blocksPerRetarget != 0 &&
iterNode.bits == dag.dagParams.PowLimitBits {
iterNode = iterNode.selectedParent
}
// Return the found difficulty or the minimum difficulty if no
// appropriate block was found.
lastBits := dag.dagParams.PowLimitBits
if iterNode != nil {
lastBits = iterNode.bits
}
return lastBits
}
// calcNextRequiredDifficulty calculates the required difficulty for the block
// after the passed previous block node based on the difficulty retarget rules.
// This function differs from the exported CalcNextRequiredDifficulty in that
// the exported version uses the current best chain as the previous block node
// while this function accepts any block node.
func (dag *BlockDAG) calcNextRequiredDifficulty(bluestParent *blockNode, newBlockTime time.Time) (uint32, error) {
// requiredDifficulty calculates the required difficulty for a
// block given its bluest parent.
func (dag *BlockDAG) requiredDifficulty(bluestParent *blockNode, newBlockTime time.Time) uint32 {
// Genesis block.
if bluestParent == nil {
return dag.dagParams.PowLimitBits, nil
if bluestParent == nil || bluestParent.blueScore < dag.difficultyAdjustmentWindowSize+1 {
return dag.powMaxBits
}
// Return the previous block's difficulty requirements if this block
// is not at a difficulty retarget interval.
if (bluestParent.height+1)%dag.blocksPerRetarget != 0 {
// For networks that support it, allow special reduction of the
// required difficulty once too much time has elapsed without
// mining a block.
if dag.dagParams.ReduceMinDifficulty {
// Return minimum difficulty when more than the desired
// amount of time has elapsed without mining a block.
reductionTime := int64(dag.dagParams.MinDiffReductionTime /
time.Second)
allowMinTime := bluestParent.timestamp + reductionTime
if newBlockTime.Unix() > allowMinTime {
return dag.dagParams.PowLimitBits, nil
}
// Fetch window of dag.difficultyAdjustmentWindowSize + 1 so we can have dag.difficultyAdjustmentWindowSize block intervals
timestampsWindow := blueBlockWindow(bluestParent, dag.difficultyAdjustmentWindowSize+1)
windowMinTimestamp, windowMaxTimeStamp := timestampsWindow.minMaxTimestamps()
// The block was mined within the desired timeframe, so
// return the difficulty for the last block which did
// not have the special minimum difficulty rule applied.
return dag.findPrevTestNetDifficulty(bluestParent), nil
}
// For the main network (or any unrecognized networks), simply
// return the previous block's difficulty requirements.
return bluestParent.bits, nil
}
// Get the block node at the previous retarget (targetTimespan days
// worth of blocks).
firstNode := bluestParent.RelativeAncestor(dag.blocksPerRetarget - 1)
if firstNode == nil {
return 0, AssertError("unable to obtain previous retarget block")
}
// Limit the amount of adjustment that can occur to the previous
// difficulty.
actualTimespan := bluestParent.timestamp - firstNode.timestamp
adjustedTimespan := actualTimespan
if actualTimespan < dag.minRetargetTimespan {
adjustedTimespan = dag.minRetargetTimespan
} else if actualTimespan > dag.maxRetargetTimespan {
adjustedTimespan = dag.maxRetargetTimespan
}
// Remove the last block from the window so we can calculate the average target of dag.difficultyAdjustmentWindowSize blocks
targetsWindow := timestampsWindow[:dag.difficultyAdjustmentWindowSize]
// Calculate new target difficulty as:
// currentDifficulty * (adjustedTimespan / targetTimespan)
// averageWindowTarget * ((windowMaxTimestamp - windowMinTimestamp) / (targetTimePerBlock * windowSize))
// The result uses integer division which means it will be slightly
// rounded down. Bitcoind also uses integer division to calculate this
// result.
oldTarget := util.CompactToBig(bluestParent.bits)
newTarget := new(big.Int).Mul(oldTarget, big.NewInt(adjustedTimespan))
targetTimeSpan := int64(dag.dagParams.TargetTimespan / time.Second)
newTarget.Div(newTarget, big.NewInt(targetTimeSpan))
// Limit new value to the proof of work limit.
if newTarget.Cmp(dag.dagParams.PowLimit) > 0 {
newTarget.Set(dag.dagParams.PowLimit)
// rounded down.
newTarget := targetsWindow.averageTarget()
newTarget.
Mul(newTarget, big.NewInt(windowMaxTimeStamp-windowMinTimestamp)).
Div(newTarget, big.NewInt(dag.targetTimePerBlock)).
Div(newTarget, big.NewInt(int64(dag.difficultyAdjustmentWindowSize)))
if newTarget.Cmp(dag.dagParams.PowMax) > 0 {
return dag.powMaxBits
}
// Log new target difficulty and return it. The new target logging is
// intentionally converting the bits back to a number instead of using
// newTarget since conversion to the compact representation loses
// precision.
newTargetBits := util.BigToCompact(newTarget)
log.Debugf("Difficulty retarget at block height %d", bluestParent.height+1)
log.Debugf("Old target %08x (%064x)", bluestParent.bits, oldTarget)
log.Debugf("New target %08x (%064x)", newTargetBits, util.CompactToBig(newTargetBits))
log.Debugf("Actual timespan %s, adjusted timespan %s, target timespan %s",
time.Duration(actualTimespan)*time.Second,
time.Duration(adjustedTimespan)*time.Second,
dag.dagParams.TargetTimespan)
return newTargetBits, nil
return newTargetBits
}
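A worked instance of the retarget formula above with made-up numbers (none of these values come from the diff): a window of 10 intervals that took 25 seconds against a 2-second block target scales the average target by 25 / (2 * 10) = 1.25, i.e. the target grows and the difficulty drops by a quarter:

    // Sketch: the retarget arithmetic with hypothetical values.
    avgTarget := big.NewInt(1000000)  // stand-in for targetsWindow.averageTarget()
    windowSpan := int64(25)           // windowMaxTimeStamp - windowMinTimestamp, in seconds
    targetTimePerBlock := int64(2)    // dag.targetTimePerBlock, in seconds
    windowSize := int64(10)           // dag.difficultyAdjustmentWindowSize
    newTarget := new(big.Int).Mul(avgTarget, big.NewInt(windowSpan))
    newTarget.Div(newTarget, big.NewInt(targetTimePerBlock))
    newTarget.Div(newTarget, big.NewInt(windowSize))
    // newTarget is now 1250000: a 25% easier target for a 25% slower window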
// CalcNextRequiredDifficulty calculates the required difficulty for the block
// after the end of the current best chain based on the difficulty retarget
// rules.
// NextRequiredDifficulty calculates the required difficulty for a block that will
// be built on top of the current tips.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) CalcNextRequiredDifficulty(timestamp time.Time) (uint32, error) {
difficulty, err := dag.calcNextRequiredDifficulty(dag.selectedTip(), timestamp)
return difficulty, err
func (dag *BlockDAG) NextRequiredDifficulty(timestamp time.Time) uint32 {
difficulty := dag.requiredDifficulty(dag.virtual.parents.bluest(), timestamp)
return difficulty
}

View File

@@ -5,8 +5,12 @@
package blockdag
import (
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
"math/big"
"testing"
"time"
"github.com/daglabs/btcd/util"
)
@@ -75,3 +79,120 @@ func TestCalcWork(t *testing.T) {
}
}
}
func TestDifficulty(t *testing.T) {
params := dagconfig.SimNetParams
params.K = 1
dag := newTestDAG(&params)
nonce := uint64(0)
zeroTime := time.Unix(0, 0)
addNode := func(parents blockSet, blockTime time.Time) *blockNode {
bluestParent := parents.bluest()
if blockTime == zeroTime {
blockTime = time.Unix(bluestParent.timestamp+1, 0)
}
header := &wire.BlockHeader{
ParentHashes: parents.hashes(),
Bits: dag.requiredDifficulty(bluestParent, blockTime),
Nonce: nonce,
Timestamp: blockTime,
HashMerkleRoot: &daghash.ZeroHash,
AcceptedIDMerkleRoot: &daghash.ZeroHash,
UTXOCommitment: &daghash.ZeroHash,
}
node := newBlockNode(header, parents, dag.dagParams.K)
node.updateParentsChildren()
nonce++
return node
}
tip := dag.genesis
for i := uint64(0); i < dag.difficultyAdjustmentWindowSize; i++ {
tip = addNode(setFromSlice(tip), zeroTime)
if tip.bits != dag.genesis.bits {
t.Fatalf("As long as the bluest parent's blue score is less then the difficulty adjustment window size, the difficulty should be the same as genesis'")
}
}
for i := uint64(0); i < dag.difficultyAdjustmentWindowSize+1000; i++ {
tip = addNode(setFromSlice(tip), zeroTime)
if tip.bits != dag.genesis.bits {
t.Fatalf("As long as the block rate remains the same, the difficulty shouldn't change")
}
}
nodeInThePast := addNode(setFromSlice(tip), tip.PastMedianTime(dag))
if nodeInThePast.bits != tip.bits {
t.Fatalf("The difficulty should only change when nodeInThePast is in the past of a block bluest parent")
}
tip = nodeInThePast
tip = addNode(setFromSlice(tip), zeroTime)
if tip.bits != nodeInThePast.bits {
t.Fatalf("The difficulty should only change when nodeInThePast is in the past of a block bluest parent")
}
tip = addNode(setFromSlice(tip), zeroTime)
if compareBits(tip.bits, nodeInThePast.bits) >= 0 {
t.Fatalf("tip.bits should be smaller than nodeInThePast.bits because nodeInThePast increased the block rate, so the difficulty should increase as well")
}
expectedBits := uint32(0x207ff395)
if tip.bits != expectedBits {
t.Errorf("tip.bits was expected to be %x but got %x", expectedBits, tip.bits)
}
// Increase block rate to increase difficulty
for i := uint64(0); i < dag.difficultyAdjustmentWindowSize; i++ {
tip = addNode(setFromSlice(tip), tip.PastMedianTime(dag))
if compareBits(tip.bits, tip.parents.bluest().bits) > 0 {
t.Fatalf("Because we're increasing the block rate, the difficulty can't decrease")
}
}
// Add blocks until difficulty stabilizes
lastBits := tip.bits
sameBitsCount := uint64(0)
for sameBitsCount < dag.difficultyAdjustmentWindowSize+1 {
tip = addNode(setFromSlice(tip), zeroTime)
if tip.bits == lastBits {
sameBitsCount++
} else {
lastBits = tip.bits
sameBitsCount = 0
}
}
slowNode := addNode(setFromSlice(tip), time.Unix(tip.timestamp+2, 0))
if slowNode.bits != tip.bits {
t.Fatalf("The difficulty should only change when slowNode is in the past of a block bluest parent")
}
tip = slowNode
tip = addNode(setFromSlice(tip), zeroTime)
if tip.bits != slowNode.bits {
t.Fatalf("The difficulty should only change when slowNode is in the past of a block bluest parent")
}
tip = addNode(setFromSlice(tip), zeroTime)
if compareBits(tip.bits, slowNode.bits) <= 0 {
t.Fatalf("tip.bits should be smaller than slowNode.bits because slowNode decreased the block rate, so the difficulty should decrease as well")
}
splitNode := addNode(setFromSlice(tip), zeroTime)
tip = splitNode
for i := 0; i < 100; i++ {
tip = addNode(setFromSlice(tip), zeroTime)
}
blueTip := tip
redChainTip := splitNode
for i := 0; i < 10; i++ {
redChainTip = addNode(setFromSlice(redChainTip), redChainTip.PastMedianTime(dag))
}
tipWithRedPast := addNode(setFromSlice(redChainTip, blueTip), zeroTime)
tipWithoutRedPast := addNode(setFromSlice(blueTip), zeroTime)
if tipWithoutRedPast.bits != tipWithRedPast.bits {
t.Fatalf("tipWithoutRedPast.bits should be the same as tipWithRedPast.bits because red blocks shouldn't affect the difficulty")
}
}
func compareBits(a uint32, b uint32) int {
aTarget := util.CompactToBig(a)
bTarget := util.CompactToBig(b)
return aTarget.Cmp(bTarget)
}

View File

@@ -37,9 +37,9 @@ const (
// exists.
ErrDuplicateBlock ErrorCode = iota
// ErrBlockTooBig indicates the serialized block size exceeds the
// maximum allowed size.
ErrBlockTooBig
// ErrBlockMassTooHigh indicates the mass of a block exceeds the maximum
// allowed limits.
ErrBlockMassTooHigh
// ErrBlockVersionTooOld indicates the block version is too old and is
// no longer accepted since the majority of the network has upgraded
@@ -84,17 +84,17 @@ const (
// the expected value.
ErrBadMerkleRoot
// ErrBadUTXOCommitment indicates the calculated UTXO commitment does not match
// the expected value.
ErrBadUTXOCommitment
// ErrBadCheckpoint indicates a block that is expected to be at a
// checkpoint height does not match the expected one.
ErrBadCheckpoint
// ErrForkTooOld indicates a block is attempting to fork the block chain
// before the most recent checkpoint.
ErrForkTooOld
// ErrCheckpointTimeTooOld indicates a block has a timestamp before the
// most recent checkpoint.
ErrCheckpointTimeTooOld
// ErrFinalityPointTimeTooOld indicates a block has a timestamp before the
// last finality point.
ErrFinalityPointTimeTooOld
// ErrNoTransactions indicates the block does not have at least one
// transaction. A valid block must have at least the coinbase
@@ -105,9 +105,9 @@ const (
// valid transaction must have at least one input.
ErrNoTxInputs
// ErrTxTooBig indicates a transaction exceeds the maximum allowed size
// when serialized.
ErrTxTooBig
// ErrTxMassTooHigh indicates the mass of a transaction exceeds the maximum
// allowed limits.
ErrTxMassTooHigh
// ErrBadTxOutValue indicates an output value for a transaction is
// invalid in some way such as being out of range.
@@ -141,7 +141,7 @@ const (
ErrOverwriteTx
// ErrImmatureSpend indicates a transaction is attempting to spend a
// block reward that has not yet reached the required maturity.
// coinbase that has not yet reached the required maturity.
ErrImmatureSpend
// ErrSpendTooHigh indicates a transaction is attempting to spend more
@@ -164,34 +164,12 @@ const (
// coinbase transaction.
ErrMultipleCoinbases
// ErrBadCoinbaseScriptLen indicates the length of the signature script
// for a coinbase transaction is not within the valid range.
ErrBadCoinbaseScriptLen
// ErrBadCoinbasePayloadLen indicates the length of the payload
// for a coinbase transaction is too high.
ErrBadCoinbasePayloadLen
// ErrBadCoinbaseValue indicates the amount of a coinbase value does
// not match the expected value of the subsidy plus the sum of all fees.
ErrBadCoinbaseValue
// ErrMissingCoinbaseHeight indicates the coinbase transaction for a
// block does not start with the serialized block height as
// required for version 2 and higher blocks.
ErrMissingCoinbaseHeight
// ErrBadCoinbaseHeight indicates the serialized block height in the
// coinbase transaction for version 2 and higher blocks does not match
// the expected value.
ErrBadCoinbaseHeight
// ErrSecondTxNotFeeTransaction indicates the second transaction in
// a block is not a fee transaction.
ErrSecondTxNotFeeTransaction
// ErrBadFeeTransaction indicates that the block's fee transaction is not built as expected
ErrBadFeeTransaction
// ErrMultipleFeeTransactions indicates a block contains more than one
// fee transaction.
ErrMultipleFeeTransactions
// ErrBadCoinbaseTransaction indicates that the block's coinbase transaction is not built as expected
ErrBadCoinbaseTransaction
// ErrScriptMalformed indicates a transaction script is malformed in
// some way. For example, it might be longer than the maximum allowed
@@ -240,12 +218,16 @@ const (
// ErrSubnetwork indicates that a block doesn't adhere to the subnetwork
// registry rules
ErrSubnetworkRegistry
// ErrInvalidParentsRelation indicates that one of the parents of a block
// is also an ancestor of another parent
ErrInvalidParentsRelation
)
// Map of ErrorCode values back to their constant names for pretty printing.
var errorCodeStrings = map[ErrorCode]string{
ErrDuplicateBlock: "ErrDuplicateBlock",
ErrBlockTooBig: "ErrBlockTooBig",
ErrBlockMassTooHigh: "ErrBlockMassTooHigh",
ErrBlockVersionTooOld: "ErrBlockVersionTooOld",
ErrInvalidTime: "ErrInvalidTime",
ErrTimeTooOld: "ErrTimeTooOld",
@@ -257,11 +239,10 @@ var errorCodeStrings = map[ErrorCode]string{
ErrHighHash: "ErrHighHash",
ErrBadMerkleRoot: "ErrBadMerkleRoot",
ErrBadCheckpoint: "ErrBadCheckpoint",
ErrForkTooOld: "ErrForkTooOld",
ErrCheckpointTimeTooOld: "ErrCheckpointTimeTooOld",
ErrFinalityPointTimeTooOld: "ErrFinalityPointTimeTooOld",
ErrNoTransactions: "ErrNoTransactions",
ErrNoTxInputs: "ErrNoTxInputs",
ErrTxTooBig: "ErrTxTooBig",
ErrTxMassTooHigh: "ErrTxMassTooHigh",
ErrBadTxOutValue: "ErrBadTxOutValue",
ErrDuplicateTxInputs: "ErrDuplicateTxInputs",
ErrBadTxInput: "ErrBadTxInput",
@@ -275,13 +256,8 @@ var errorCodeStrings = map[ErrorCode]string{
ErrTooManySigOps: "ErrTooManySigOps",
ErrFirstTxNotCoinbase: "ErrFirstTxNotCoinbase",
ErrMultipleCoinbases: "ErrMultipleCoinbases",
ErrBadCoinbaseScriptLen: "ErrBadCoinbaseScriptLen",
ErrBadCoinbaseValue: "ErrBadCoinbaseValue",
ErrMissingCoinbaseHeight: "ErrMissingCoinbaseHeight",
ErrBadCoinbaseHeight: "ErrBadCoinbaseHeight",
ErrSecondTxNotFeeTransaction: "ErrSecondTxNotFeeTransaction",
ErrBadFeeTransaction: "ErrBadFeeTransaction",
ErrMultipleFeeTransactions: "ErrMultipleFeeTransactions",
ErrBadCoinbasePayloadLen: "ErrBadCoinbasePayloadLen",
ErrBadCoinbaseTransaction: "ErrBadCoinbaseTransaction",
ErrScriptMalformed: "ErrScriptMalformed",
ErrScriptValidation: "ErrScriptValidation",
ErrParentBlockUnknown: "ErrParentBlockUnknown",
@@ -293,6 +269,7 @@ var errorCodeStrings = map[ErrorCode]string{
ErrInvalidGas: "ErrInvalidGas",
ErrInvalidPayload: "ErrInvalidPayload",
ErrInvalidPayloadHash: "ErrInvalidPayloadHash",
ErrInvalidParentsRelation: "ErrInvalidParentsRelation",
}
// String returns the ErrorCode as a human-readable name.

View File

@@ -16,7 +16,7 @@ func TestErrorCodeStringer(t *testing.T) {
want string
}{
{ErrDuplicateBlock, "ErrDuplicateBlock"},
{ErrBlockTooBig, "ErrBlockTooBig"},
{ErrBlockMassTooHigh, "ErrBlockMassTooHigh"},
{ErrBlockVersionTooOld, "ErrBlockVersionTooOld"},
{ErrInvalidTime, "ErrInvalidTime"},
{ErrTimeTooOld, "ErrTimeTooOld"},
@@ -28,11 +28,10 @@ func TestErrorCodeStringer(t *testing.T) {
{ErrHighHash, "ErrHighHash"},
{ErrBadMerkleRoot, "ErrBadMerkleRoot"},
{ErrBadCheckpoint, "ErrBadCheckpoint"},
{ErrForkTooOld, "ErrForkTooOld"},
{ErrCheckpointTimeTooOld, "ErrCheckpointTimeTooOld"},
{ErrFinalityPointTimeTooOld, "ErrFinalityPointTimeTooOld"},
{ErrNoTransactions, "ErrNoTransactions"},
{ErrNoTxInputs, "ErrNoTxInputs"},
{ErrTxTooBig, "ErrTxTooBig"},
{ErrTxMassTooHigh, "ErrTxMassTooHigh"},
{ErrBadTxOutValue, "ErrBadTxOutValue"},
{ErrDuplicateTxInputs, "ErrDuplicateTxInputs"},
{ErrBadTxInput, "ErrBadTxInput"},
@@ -47,13 +46,8 @@ func TestErrorCodeStringer(t *testing.T) {
{ErrTooManySigOps, "ErrTooManySigOps"},
{ErrFirstTxNotCoinbase, "ErrFirstTxNotCoinbase"},
{ErrMultipleCoinbases, "ErrMultipleCoinbases"},
{ErrBadCoinbaseScriptLen, "ErrBadCoinbaseScriptLen"},
{ErrBadCoinbaseValue, "ErrBadCoinbaseValue"},
{ErrMissingCoinbaseHeight, "ErrMissingCoinbaseHeight"},
{ErrBadCoinbaseHeight, "ErrBadCoinbaseHeight"},
{ErrSecondTxNotFeeTransaction, "ErrSecondTxNotFeeTransaction"},
{ErrBadFeeTransaction, "ErrBadFeeTransaction"},
{ErrMultipleFeeTransactions, "ErrMultipleFeeTransactions"},
{ErrBadCoinbasePayloadLen, "ErrBadCoinbasePayloadLen"},
{ErrBadCoinbaseTransaction, "ErrBadCoinbaseTransaction"},
{ErrScriptMalformed, "ErrScriptMalformed"},
{ErrScriptValidation, "ErrScriptValidation"},
{ErrParentBlockUnknown, "ErrParentBlockUnknown"},
@@ -65,6 +59,7 @@ func TestErrorCodeStringer(t *testing.T) {
{ErrInvalidGas, "ErrInvalidGas"},
{ErrInvalidPayload, "ErrInvalidPayload"},
{ErrInvalidPayloadHash, "ErrInvalidPayloadHash"},
{ErrInvalidParentsRelation, "ErrInvalidParentsRelation"},
{0xffff, "Unknown ErrorCode (65535)"},
}

View File

@@ -1,71 +0,0 @@
// Copyright (c) 2014-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag_test
import (
"fmt"
"os"
"path/filepath"
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/database"
_ "github.com/daglabs/btcd/database/ffldb"
"github.com/daglabs/btcd/util"
)
// This example demonstrates how to create a new chain instance and use
// ProcessBlock to attempt to add a block to the chain. As the package
// overview documentation describes, this includes all of the Bitcoin consensus
// rules. This example intentionally attempts to insert a duplicate genesis
// block to illustrate how an invalid block is handled.
func ExampleBlockDAG_ProcessBlock() {
// Create a new database to store the accepted blocks into. Typically
// this would be opening an existing database and would not be deleting
// and creating a new database like this, but it is done here so this is
// a complete working example and does not leave temporary files lying
// around.
dbPath := filepath.Join(os.TempDir(), "exampleprocessblock")
_ = os.RemoveAll(dbPath)
db, err := database.Create("ffldb", dbPath, dagconfig.MainNetParams.Net)
if err != nil {
fmt.Printf("Failed to create database: %v\n", err)
return
}
defer os.RemoveAll(dbPath)
defer db.Close()
// Create a new BlockDAG instance using the underlying database for
// the main bitcoin network. This example does not demonstrate some
// of the other available configuration options such as specifying a
// notification callback and signature cache. Also, the caller would
// ordinarily keep a reference to the median time source and add time
// values obtained from other peers on the network so the local time is
// adjusted to be in agreement with other peers.
chain, err := blockdag.New(&blockdag.Config{
DB: db,
DAGParams: &dagconfig.MainNetParams,
TimeSource: blockdag.NewMedianTime(),
})
if err != nil {
fmt.Printf("Failed to create chain instance: %v\n", err)
return
}
// Process a block. For this example, we are going to intentionally
// cause an error by trying to process the genesis block which already
// exists.
genesisBlock := util.NewBlock(dagconfig.MainNetParams.GenesisBlock)
isOrphan, err := chain.ProcessBlock(genesisBlock,
blockdag.BFNone)
if err != nil {
fmt.Printf("Failed to process block: %v\n", err)
return
}
fmt.Printf("Block accepted. Is it an orphan?: %v", isOrphan)
// Output:
// Failed to process block: already have block 6477863f190fac902e556da4671c7537da4fe367022b1f00fa5270e0d073cc08
}

View File

@@ -2,30 +2,32 @@ package blockdag_test
import (
"fmt"
"github.com/pkg/errors"
"math"
"testing"
"github.com/daglabs/btcd/util/subnetworkid"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/util/testtools"
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/mining"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/wire"
)
// TestFinality checks that the finality mechanism works as expected.
// This is how the flow goes:
// 1) We build a chain of blockdag.FinalityInterval blocks and call its tip altChainTip.
// 2) We build another chain (let's call it mainChain) of 2 * blockdag.FinalityInterval
// 1) We build a chain of params.FinalityInterval blocks and call its tip altChainTip.
// 2) We build another chain (let's call it mainChain) of 2 * params.FinalityInterval
// blocks, which points to genesis, and then we check that the block in that
// chain with height of blockdag.FinalityInterval is marked as finality point (This is
// chain with height of params.FinalityInterval is marked as finality point (This is
// very predictable, because the blue score of each new block in a chain is the
parent's plus one).
// 3) We make a new child to block with height (2 * blockdag.FinalityInterval - 1)
// 3) We make a new child to block with height (2 * params.FinalityInterval - 1)
// in mainChain, and we check that connecting it to the DAG
// doesn't affect the last finality point.
// 4) We make a block that points to genesis, and check that it
@@ -37,6 +39,7 @@ import (
func TestFinality(t *testing.T) {
params := dagconfig.SimNetParams
params.K = 1
params.FinalityInterval = 100
dag, teardownFunc, err := blockdag.DAGSetup("TestFinality", blockdag.Config{
DAGParams: &params,
})
@@ -45,18 +48,22 @@ func TestFinality(t *testing.T) {
}
defer teardownFunc()
buildNodeToDag := func(parentHashes []*daghash.Hash) (*util.Block, error) {
msgBlock, err := mining.PrepareBlockForTest(dag, &params, parentHashes, nil, false, 1)
msgBlock, err := mining.PrepareBlockForTest(dag, &params, parentHashes, nil, false)
if err != nil {
return nil, err
}
block := util.NewBlock(msgBlock)
isOrphan, err := dag.ProcessBlock(block, blockdag.BFNoPoWCheck)
isOrphan, delay, err := dag.ProcessBlock(block, blockdag.BFNoPoWCheck)
if err != nil {
return nil, err
}
if delay != 0 {
return nil, errors.Errorf("ProcessBlock: block " +
"is too far in the future")
}
if isOrphan {
return nil, fmt.Errorf("ProcessBlock: unexpected returned orphan block")
return nil, errors.Errorf("ProcessBlock: unexpected returned orphan block")
}
return block, nil
@@ -65,8 +72,8 @@ func TestFinality(t *testing.T) {
genesis := util.NewBlock(params.GenesisBlock)
currentNode := genesis
// First we build a chain of blockdag.FinalityInterval blocks for future use
for i := 0; i < blockdag.FinalityInterval; i++ {
// First we build a chain of params.FinalityInterval blocks for future use
for i := 0; i < params.FinalityInterval; i++ {
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@@ -75,10 +82,10 @@ func TestFinality(t *testing.T) {
altChainTip := currentNode
// Now we build a new chain of 2 * blockdag.FinalityInterval blocks, pointed to genesis, and
// we expect the block with height 1 * blockdag.FinalityInterval to be the last finality point
// Now we build a new chain of 2 * params.FinalityInterval blocks, pointed to genesis, and
// we expect the block with height 1 * params.FinalityInterval to be the last finality point
currentNode = genesis
for i := 0; i < blockdag.FinalityInterval; i++ {
for i := 0; i < params.FinalityInterval; i++ {
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@@ -87,7 +94,7 @@ func TestFinality(t *testing.T) {
expectedFinalityPoint := currentNode
for i := 0; i < blockdag.FinalityInterval; i++ {
for i := 0; i < params.FinalityInterval; i++ {
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@@ -111,7 +118,22 @@ func TestFinality(t *testing.T) {
// Here we check that a block with lower blue score than the last finality
// point will get rejected
_, err = buildNodeToDag([]*daghash.Hash{genesis.Hash()})
fakeCoinbaseTx, err := dag.NextBlockCoinbaseTransaction(nil, nil)
if err != nil {
t.Errorf("NextBlockCoinbaseTransaction: %s", err)
}
merkleRoot := blockdag.BuildHashMerkleTreeStore([]*util.Tx{fakeCoinbaseTx}).Root()
beforeFinalityBlock := wire.NewMsgBlock(&wire.BlockHeader{
Version: 0x10000000,
ParentHashes: []*daghash.Hash{genesis.Hash()},
HashMerkleRoot: merkleRoot,
AcceptedIDMerkleRoot: &daghash.ZeroHash,
UTXOCommitment: &daghash.ZeroHash,
Timestamp: dag.SelectedTipHeader().Timestamp,
Bits: genesis.MsgBlock().Header.Bits,
})
beforeFinalityBlock.AddTransaction(fakeCoinbaseTx.MsgTx())
_, _, err = dag.ProcessBlock(util.NewBlock(beforeFinalityBlock), blockdag.BFNoPoWCheck)
if err == nil {
t.Errorf("TestFinality: buildNodeToDag expected an error but got <nil>")
}
@@ -121,7 +143,7 @@ func TestFinality(t *testing.T) {
t.Errorf("TestFinality: buildNodeToDag expected an error with code %v but instead got %v", blockdag.ErrFinality, rErr.ErrorCode)
}
} else {
t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", rErr)
t.Errorf("TestFinality: buildNodeToDag got unexpected error: %v", err)
}
// Here we check that a block that doesn't have the last finality point in
@@ -140,11 +162,22 @@ func TestFinality(t *testing.T) {
}
}
// TestFinalityInterval tests that the finality interval is
// smaller than wire.MaxInvPerMsg, so when a peer receives
// a getblocks message it should always be able to send
// all the necessary invs.
func TestFinalityInterval(t *testing.T) {
params := dagconfig.SimNetParams
if params.FinalityInterval > wire.MaxInvPerMsg {
t.Errorf("dagconfig.SimNetParams.FinalityInterval should be lower or equal to wire.MaxInvPerMsg")
}
}
// TestSubnetworkRegistry tests the full subnetwork registry flow
func TestSubnetworkRegistry(t *testing.T) {
params := dagconfig.SimNetParams
params.K = 1
params.BlockRewardMaturity = 1
params.BlockCoinbaseMaturity = 0
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", blockdag.Config{
DAGParams: &params,
})
@@ -169,7 +202,7 @@ func TestSubnetworkRegistry(t *testing.T) {
func TestChainedTransactions(t *testing.T) {
params := dagconfig.SimNetParams
params.BlockRewardMaturity = 1
params.BlockCoinbaseMaturity = 0
// Create a new database and dag instance to run tests against.
dag, teardownFunc, err := blockdag.DAGSetup("TestChainedTransactions", blockdag.Config{
DAGParams: &params,
@@ -179,48 +212,61 @@ func TestChainedTransactions(t *testing.T) {
}
defer teardownFunc()
block1, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{params.GenesisHash}, nil, false, 1)
block1, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{params.GenesisHash}, nil, false)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
isOrphan, err := dag.ProcessBlock(util.NewBlock(block1), blockdag.BFNoPoWCheck)
isOrphan, delay, err := dag.ProcessBlock(util.NewBlock(block1), blockdag.BFNoPoWCheck)
if err != nil {
t.Fatalf("ProcessBlock: %v", err)
}
if delay != 0 {
t.Fatalf("ProcessBlock: block1 " +
"is too far in the future")
}
if isOrphan {
t.Fatalf("ProcessBlock: block1 got unexpectedly orphaned")
}
cbTx := block1.Transactions[0]
signatureScript, err := txscript.PayToScriptHashSignatureScript(blockdag.OpTrueScript, nil)
if err != nil {
t.Fatalf("Failed to build signature script: %s", err)
}
txIn := &wire.TxIn{
PreviousOutPoint: wire.OutPoint{TxID: cbTx.TxID(), Index: 0},
SignatureScript: nil,
PreviousOutpoint: wire.Outpoint{TxID: *cbTx.TxID(), Index: 0},
SignatureScript: signatureScript,
Sequence: wire.MaxTxInSequenceNum,
}
txOut := &wire.TxOut{
PkScript: blockdag.OpTrueScript,
Value: uint64(1),
ScriptPubKey: blockdag.OpTrueScript,
Value: uint64(1),
}
tx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut})
chainedTxIn := &wire.TxIn{
PreviousOutPoint: wire.OutPoint{TxID: tx.TxID(), Index: 0},
SignatureScript: nil,
PreviousOutpoint: wire.Outpoint{TxID: *tx.TxID(), Index: 0},
SignatureScript: signatureScript,
Sequence: wire.MaxTxInSequenceNum,
}
scriptPubKey, err := txscript.PayToScriptHashScript(blockdag.OpTrueScript)
if err != nil {
t.Fatalf("Failed to build public key script: %s", err)
}
chainedTxOut := &wire.TxOut{
PkScript: blockdag.OpTrueScript,
Value: uint64(1),
ScriptPubKey: scriptPubKey,
Value: uint64(1),
}
chainedTx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{chainedTxIn}, []*wire.TxOut{chainedTxOut})
block2, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{tx, chainedTx}, true, 1)
block2, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{tx, chainedTx}, true)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
// Checks that dag.ProcessBlock fails because we don't allow a transaction to spend another transaction from the same block
isOrphan, err = dag.ProcessBlock(util.NewBlock(block2), blockdag.BFNoPoWCheck)
isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(block2), blockdag.BFNoPoWCheck)
if err == nil {
t.Errorf("ProcessBlock expected an error")
} else if rErr, ok := err.(blockdag.RuleError); ok {
@@ -230,41 +276,121 @@ func TestChainedTransactions(t *testing.T) {
} else {
t.Errorf("ProcessBlock expected a blockdag.RuleError but got %v", err)
}
if delay != 0 {
t.Fatalf("ProcessBlock: block2 " +
"is too far in the future")
}
if isOrphan {
t.Errorf("ProcessBlock: block2 got unexpectedly orphaned")
}
nonChainedTxIn := &wire.TxIn{
PreviousOutPoint: wire.OutPoint{TxID: cbTx.TxID(), Index: 0},
SignatureScript: nil,
PreviousOutpoint: wire.Outpoint{TxID: *cbTx.TxID(), Index: 0},
SignatureScript: signatureScript,
Sequence: wire.MaxTxInSequenceNum,
}
nonChainedTxOut := &wire.TxOut{
PkScript: blockdag.OpTrueScript,
Value: uint64(1),
ScriptPubKey: scriptPubKey,
Value: uint64(1),
}
nonChainedTx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{nonChainedTxIn}, []*wire.TxOut{nonChainedTxOut})
block3, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{nonChainedTx}, false, 1)
block3, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{nonChainedTx}, false)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
// Checks that dag.ProcessBlock doesn't fail because all of its transactions are dependent on transactions from previous blocks
isOrphan, err = dag.ProcessBlock(util.NewBlock(block3), blockdag.BFNoPoWCheck)
isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(block3), blockdag.BFNoPoWCheck)
if err != nil {
t.Errorf("ProcessBlock: %v", err)
}
if delay != 0 {
t.Fatalf("ProcessBlock: block3 " +
"is too far in the future")
}
if isOrphan {
t.Errorf("ProcessBlock: block3 got unexpectedly orphaned")
}
}
// TestOrderInDiffFromAcceptanceData makes sure that the order of transactions in
// dag.diffFromAcceptanceData is such that if txA is spent by txB then txA is processed
// before txB.
func TestOrderInDiffFromAcceptanceData(t *testing.T) {
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimNetParams
params.K = math.MaxUint32
dag, teardownFunc, err := blockdag.DAGSetup("TestOrderInDiffFromAcceptanceData", blockdag.Config{
DAGParams: &params,
})
if err != nil {
t.Fatalf("Failed to setup DAG instance: %v", err)
}
defer teardownFunc()
dag.TestSetCoinbaseMaturity(0)
createBlock := func(previousBlock *util.Block) *util.Block {
// Prepare a transaction that spends the previous block's coinbase transaction
var txs []*wire.MsgTx
if !previousBlock.IsGenesis() {
previousCoinbaseTx := previousBlock.MsgBlock().Transactions[0]
signatureScript, err := txscript.PayToScriptHashSignatureScript(blockdag.OpTrueScript, nil)
if err != nil {
t.Fatalf("TestOrderInDiffFromAcceptanceData: Failed to build signature script: %s", err)
}
txIn := &wire.TxIn{
PreviousOutpoint: wire.Outpoint{TxID: *previousCoinbaseTx.TxID(), Index: 0},
SignatureScript: signatureScript,
Sequence: wire.MaxTxInSequenceNum,
}
txOut := &wire.TxOut{
ScriptPubKey: blockdag.OpTrueScript,
Value: uint64(1),
}
txs = append(txs, wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut}))
}
// Create the block
msgBlock, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{previousBlock.Hash()}, txs, false)
if err != nil {
t.Fatalf("TestOrderInDiffFromAcceptanceData: Failed to prepare block: %s", err)
}
// Add the block to the DAG
newBlock := util.NewBlock(msgBlock)
isOrphan, delay, err := dag.ProcessBlock(newBlock, blockdag.BFNoPoWCheck)
if err != nil {
t.Errorf("TestOrderInDiffFromAcceptanceData: %s", err)
}
if delay != 0 {
t.Fatalf("TestOrderInDiffFromAcceptanceData: block is too far in the future")
}
if isOrphan {
t.Fatalf("TestOrderInDiffFromAcceptanceData: block got unexpectedly orphaned")
}
return newBlock
}
// Create two block chains starting from the genesis block. Every time a block is added
// one of the chains is selected as the selected parent chain while all the blocks in
// the other chain (and their transactions) get accepted by the new virtual. If the
// transactions in the non-selected parent chain get processed in the wrong order then
// diffFromAcceptanceData panics.
blockAmountPerChain := 100
chainATip := util.NewBlock(params.GenesisBlock)
chainBTip := chainATip
for i := 0; i < blockAmountPerChain; i++ {
chainATip = createBlock(chainATip)
chainBTip = createBlock(chainBTip)
}
}
// TestGasLimit tests the gas limit rules
func TestGasLimit(t *testing.T) {
params := dagconfig.SimNetParams
params.K = 1
params.BlockRewardMaturity = 1
params.BlockCoinbaseMaturity = 0
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", blockdag.Config{
DAGParams: &params,
})
@@ -273,56 +399,74 @@ func TestGasLimit(t *testing.T) {
}
defer teardownFunc()
// First we prepare a subnetwrok and a block with coinbase outputs to fund our tests
// First we prepare a subnetwork and a block with coinbase outputs to fund our tests
gasLimit := uint64(12345)
subnetworkID, err := testtools.RegisterSubnetworkForTest(dag, &params, gasLimit)
if err != nil {
t.Fatalf("could not register network: %s", err)
}
fundsBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), nil, false, 2)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
isOrphan, err := dag.ProcessBlock(util.NewBlock(fundsBlock), blockdag.BFNoPoWCheck)
if err != nil {
t.Fatalf("ProcessBlock: %v", err)
}
if isOrphan {
t.Fatalf("ProcessBlock: funds block got unexpectedly orphan")
cbTxs := []*wire.MsgTx{}
for i := 0; i < 4; i++ {
fundsBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), nil, false)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
isOrphan, delay, err := dag.ProcessBlock(util.NewBlock(fundsBlock), blockdag.BFNoPoWCheck)
if err != nil {
t.Fatalf("ProcessBlock: %v", err)
}
if delay != 0 {
t.Fatalf("ProcessBlock: the funds block " +
"is too far in the future")
}
if isOrphan {
t.Fatalf("ProcessBlock: fundsBlock got unexpectedly orphan")
}
cbTxs = append(cbTxs, fundsBlock.Transactions[util.CoinbaseTransactionIndex])
}
cbTxValue := fundsBlock.Transactions[0].TxOut[0].Value
cbTxID := fundsBlock.Transactions[0].TxID()
signatureScript, err := txscript.PayToScriptHashSignatureScript(blockdag.OpTrueScript, nil)
if err != nil {
t.Fatalf("Failed to build signature script: %s", err)
}
scriptPubKey, err := txscript.PayToScriptHashScript(blockdag.OpTrueScript)
if err != nil {
t.Fatalf("Failed to build public key script: %s", err)
}
tx1In := &wire.TxIn{
PreviousOutPoint: *wire.NewOutPoint(&cbTxID, 0),
PreviousOutpoint: *wire.NewOutpoint(cbTxs[0].TxID(), 0),
Sequence: wire.MaxTxInSequenceNum,
SignatureScript: signatureScript,
}
tx1Out := &wire.TxOut{
Value: cbTxValue,
PkScript: blockdag.OpTrueScript,
Value: cbTxs[0].TxOut[0].Value,
ScriptPubKey: scriptPubKey,
}
tx1 := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{tx1In}, []*wire.TxOut{tx1Out}, subnetworkID, 10000, []byte{})
tx2In := &wire.TxIn{
PreviousOutPoint: *wire.NewOutPoint(&cbTxID, 1),
PreviousOutpoint: *wire.NewOutpoint(cbTxs[1].TxID(), 0),
Sequence: wire.MaxTxInSequenceNum,
SignatureScript: signatureScript,
}
tx2Out := &wire.TxOut{
Value: cbTxValue,
PkScript: blockdag.OpTrueScript,
Value: cbTxs[1].TxOut[0].Value,
ScriptPubKey: scriptPubKey,
}
tx2 := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{tx2In}, []*wire.TxOut{tx2Out}, subnetworkID, 10000, []byte{})
// Here we check that we can't process a block that has transactions that exceed the gas limit
overLimitBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1, tx2}, true, 1)
overLimitBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1, tx2}, true)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
isOrphan, err = dag.ProcessBlock(util.NewBlock(overLimitBlock), blockdag.BFNoPoWCheck)
isOrphan, delay, err := dag.ProcessBlock(util.NewBlock(overLimitBlock), blockdag.BFNoPoWCheck)
if err == nil {
t.Fatalf("ProcessBlock expected to have an error")
t.Fatalf("ProcessBlock expected to have an error in block that exceeds gas limit")
}
rErr, ok := err.(blockdag.RuleError)
if !ok {
@@ -330,27 +474,32 @@ func TestGasLimit(t *testing.T) {
} else if rErr.ErrorCode != blockdag.ErrInvalidGas {
t.Fatalf("ProcessBlock expected error code %s but got %s", blockdag.ErrInvalidGas, rErr.ErrorCode)
}
if delay != 0 {
t.Fatalf("ProcessBlock: overLimitBlock " +
"is too far in the future")
}
if isOrphan {
t.Fatalf("ProcessBlock: overLimitBlock got unexpectedly orphan")
}
overflowGasTxIn := &wire.TxIn{
PreviousOutPoint: *wire.NewOutPoint(&cbTxID, 1),
PreviousOutpoint: *wire.NewOutpoint(cbTxs[2].TxID(), 0),
Sequence: wire.MaxTxInSequenceNum,
SignatureScript: signatureScript,
}
overflowGasTxOut := &wire.TxOut{
Value: cbTxValue,
PkScript: blockdag.OpTrueScript,
Value: cbTxs[2].TxOut[0].Value,
ScriptPubKey: scriptPubKey,
}
overflowGasTx := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{overflowGasTxIn}, []*wire.TxOut{overflowGasTxOut},
subnetworkID, math.MaxUint64, []byte{})
// Here we check that we can't process a block whose transactions' total gas overflows uint64
overflowGasBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1, overflowGasTx}, true, 1)
overflowGasBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1, overflowGasTx}, true)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
isOrphan, err = dag.ProcessBlock(util.NewBlock(overflowGasBlock), blockdag.BFNoPoWCheck)
isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(overflowGasBlock), blockdag.BFNoPoWCheck)
if err == nil {
t.Fatalf("ProcessBlock expected to have an error")
}
@@ -366,37 +515,43 @@ func TestGasLimit(t *testing.T) {
nonExistentSubnetwork := &subnetworkid.SubnetworkID{123}
nonExistentSubnetworkTxIn := &wire.TxIn{
PreviousOutPoint: *wire.NewOutPoint(&cbTxID, 0),
PreviousOutpoint: *wire.NewOutpoint(cbTxs[3].TxID(), 0),
Sequence: wire.MaxTxInSequenceNum,
SignatureScript: signatureScript,
}
nonExistentSubnetworkTxOut := &wire.TxOut{
Value: cbTxValue,
PkScript: blockdag.OpTrueScript,
Value: cbTxs[3].TxOut[0].Value,
ScriptPubKey: scriptPubKey,
}
nonExistentSubnetworkTx := wire.NewSubnetworkMsgTx(wire.TxVersion, []*wire.TxIn{nonExistentSubnetworkTxIn},
[]*wire.TxOut{nonExistentSubnetworkTxOut}, nonExistentSubnetwork, 1, []byte{})
nonExistentSubnetworkBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{nonExistentSubnetworkTx, overflowGasTx}, true, 1)
nonExistentSubnetworkBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{nonExistentSubnetworkTx, overflowGasTx}, true)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
// Here we check that we can't process a block with a transaction from a non-existent subnetwork
isOrphan, err = dag.ProcessBlock(util.NewBlock(nonExistentSubnetworkBlock), blockdag.BFNoPoWCheck)
expectedErrStr := fmt.Sprintf("subnetwork '%s' not found", nonExistentSubnetwork)
isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(nonExistentSubnetworkBlock), blockdag.BFNoPoWCheck)
expectedErrStr := fmt.Sprintf("Error getting gas limit for subnetworkID '%s': subnetwork '%s' not found",
nonExistentSubnetwork, nonExistentSubnetwork)
if err.Error() != expectedErrStr {
t.Fatalf("ProcessBlock expected error %v but got %v", expectedErrStr, err)
t.Fatalf("ProcessBlock expected error \"%v\" but got \"%v\"", expectedErrStr, err)
}
// Here we check that we can process a block with a transaction that doesn't exceed the gas limit
validBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1}, true, 1)
validBlock, err := mining.PrepareBlockForTest(dag, &params, dag.TipHashes(), []*wire.MsgTx{tx1}, true)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
isOrphan, err = dag.ProcessBlock(util.NewBlock(validBlock), blockdag.BFNoPoWCheck)
isOrphan, delay, err = dag.ProcessBlock(util.NewBlock(validBlock), blockdag.BFNoPoWCheck)
if err != nil {
t.Fatalf("ProcessBlock: %v", err)
}
if delay != 0 {
t.Fatalf("ProcessBlock: overLimitBlock " +
"is too far in the future")
}
if isOrphan {
t.Fatalf("ProcessBlock: overLimitBlock got unexpectedly orphan")
}


@@ -1,219 +0,0 @@
package blockdag
import (
"bufio"
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"math"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/txsort"
"github.com/daglabs/btcd/wire"
)
// compactFeeData is a specialized data type to store a compact list of fees
// inside a block.
// Every transaction gets a single uint64 value, stored as a plain binary list.
// The fees are stored in the same order as their transactions appear inside the block, making it easy
// to traverse every transaction in a block and extract its fee.
//
// compactFeeFactory is used to create such a list.
// compactFeeIterator is used to iterate over such a list.
type compactFeeData []byte
func (cfd compactFeeData) Len() int {
return len(cfd) / 8
}
type compactFeeFactory struct {
buffer *bytes.Buffer
writer *bufio.Writer
}
func newCompactFeeFactory() *compactFeeFactory {
buffer := bytes.NewBuffer([]byte{})
return &compactFeeFactory{
buffer: buffer,
writer: bufio.NewWriter(buffer),
}
}
func (cfw *compactFeeFactory) add(txFee uint64) error {
return binary.Write(cfw.writer, binary.LittleEndian, txFee)
}
func (cfw *compactFeeFactory) data() (compactFeeData, error) {
err := cfw.writer.Flush()
return compactFeeData(cfw.buffer.Bytes()), err
}
type compactFeeIterator struct {
reader io.Reader
}
func (cfd compactFeeData) iterator() *compactFeeIterator {
return &compactFeeIterator{
reader: bufio.NewReader(bytes.NewBuffer(cfd)),
}
}
func (cfr *compactFeeIterator) next() (uint64, error) {
var txFee uint64
err := binary.Read(cfr.reader, binary.LittleEndian, &txFee)
return txFee, err
}
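// Example (illustrative sketch, not part of this file's original code): a
// compactFeeData round-trip. Each fee is written as a little-endian uint64,
// so n fees occupy exactly 8*n bytes and are read back in the same order
// they were added.
func exampleCompactFeeRoundTrip() ([]uint64, error) {
    factory := newCompactFeeFactory()
    for _, fee := range []uint64{1000, 2500, 0} {
        if err := factory.add(fee); err != nil {
            return nil, err
        }
    }
    cfd, err := factory.data()
    if err != nil {
        return nil, err
    }
    fees := make([]uint64, 0, cfd.Len()) // cfd.Len() == 3
    feeIterator := cfd.iterator()
    for i := 0; i < cfd.Len(); i++ {
        fee, err := feeIterator.next()
        if err != nil {
            return nil, err
        }
        fees = append(fees, fee)
    }
    return fees, nil // [1000 2500 0]
}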
// The following functions relate to storing and retrieving fee data from the database
var feeBucket = []byte("fees")
// getBluesFeeData returns the compactFeeData for all of this node's blues,
// used to calculate the fees this blockNode needs to pay
func (node *blockNode) getBluesFeeData(dag *BlockDAG) (map[daghash.Hash]compactFeeData, error) {
bluesFeeData := make(map[daghash.Hash]compactFeeData)
err := dag.db.View(func(dbTx database.Tx) error {
for _, blueBlock := range node.blues {
feeData, err := dbFetchFeeData(dbTx, blueBlock.hash)
if err != nil {
return fmt.Errorf("Error getting fee data for block %s: %s", blueBlock.hash, err)
}
bluesFeeData[*blueBlock.hash] = feeData
}
return nil
})
if err != nil {
return nil, err
}
return bluesFeeData, nil
}
func dbStoreFeeData(dbTx database.Tx, blockHash *daghash.Hash, feeData compactFeeData) error {
feeBucket, err := dbTx.Metadata().CreateBucketIfNotExists(feeBucket)
if err != nil {
return fmt.Errorf("Error creating or retrieving fee bucket: %s", err)
}
return feeBucket.Put(blockHash.CloneBytes(), feeData)
}
func dbFetchFeeData(dbTx database.Tx, blockHash *daghash.Hash) (compactFeeData, error) {
feeBucket := dbTx.Metadata().Bucket(feeBucket)
if feeBucket == nil {
return nil, errors.New("Fee bucket does not exist")
}
feeData := feeBucket.Get(blockHash.CloneBytes())
if feeData == nil {
return nil, fmt.Errorf("No fee data found for block %s", blockHash)
}
return feeData, nil
}
// The following functions deal with building and validating the fee transaction
func (node *blockNode) validateFeeTransaction(dag *BlockDAG, block *util.Block, txsAcceptanceData MultiBlockTxsAcceptanceData) error {
if node.isGenesis() {
return nil
}
expectedFeeTransaction, err := node.buildFeeTransaction(dag, txsAcceptanceData)
if err != nil {
return err
}
if !expectedFeeTransaction.TxHash().IsEqual(block.FeeTransaction().Hash()) {
return ruleError(ErrBadFeeTransaction, "Fee transaction is not built as expected")
}
return nil
}
// buildFeeTransaction returns the expected fee transaction for the current block
func (node *blockNode) buildFeeTransaction(dag *BlockDAG, txsAcceptanceData MultiBlockTxsAcceptanceData) (*wire.MsgTx, error) {
bluesFeeData, err := node.getBluesFeeData(dag)
if err != nil {
return nil, err
}
txIns := []*wire.TxIn{}
txOuts := []*wire.TxOut{}
for _, blue := range node.blues {
txIn, txOut, err := feeInputAndOutputForBlueBlock(blue, txsAcceptanceData, bluesFeeData)
if err != nil {
return nil, err
}
txIns = append(txIns, txIn)
if txOut != nil {
txOuts = append(txOuts, txOut)
}
}
feeTx := wire.NewNativeMsgTx(wire.TxVersion, txIns, txOuts)
return txsort.Sort(feeTx), nil
}
// feeInputAndOutputForBlueBlock calculates the input and output that should go into the fee transaction of blueBlock.
// If blueBlock gets no fee, it returns only txIn and nil for txOut.
func feeInputAndOutputForBlueBlock(blueBlock *blockNode, txsAcceptanceData MultiBlockTxsAcceptanceData, feeData map[daghash.Hash]compactFeeData) (
*wire.TxIn, *wire.TxOut, error) {
blockTxsAcceptanceData, ok := txsAcceptanceData[*blueBlock.hash]
if !ok {
return nil, nil, fmt.Errorf("No txsAcceptanceData for block %s", blueBlock.hash)
}
blockFeeData, ok := feeData[*blueBlock.hash]
if !ok {
return nil, nil, fmt.Errorf("No feeData for block %s", blueBlock.hash)
}
if len(blockTxsAcceptanceData) != blockFeeData.Len() {
return nil, nil, fmt.Errorf(
"length of accepted transaction data(%d) and fee data(%d) is not equal for block %s",
len(blockTxsAcceptanceData), blockFeeData.Len(), blueBlock.hash)
}
txIn := &wire.TxIn{
SignatureScript: []byte{},
PreviousOutPoint: wire.OutPoint{
TxID: daghash.TxID(*blueBlock.hash),
Index: math.MaxUint32,
},
Sequence: wire.MaxTxInSequenceNum,
}
totalFees := uint64(0)
feeIterator := blockFeeData.iterator()
for _, txAcceptanceData := range blockTxsAcceptanceData {
fee, err := feeIterator.next()
if err != nil {
return nil, nil, fmt.Errorf("Error retrieving fee from compactFeeData iterator: %s", err)
}
if txAcceptanceData.IsAccepted {
totalFees += fee
}
}
if totalFees == 0 {
return txIn, nil, nil
}
// the scriptPubKey for the fee is the same as the coinbase's first scriptPubKey
pkScript := blockTxsAcceptanceData[0].Tx.MsgTx().TxOut[0].PkScript
txOut := &wire.TxOut{
Value: totalFees,
PkScript: pkScript,
}
return txIn, txOut, nil
}
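// Illustrative sketch (hypothetical values, not part of this file): for a blue
// block B whose accepted transactions paid a total of 1500 in fees, the fee
// transaction gains
//
//    txIn:  PreviousOutPoint{TxID: daghash.TxID(*B.hash), Index: math.MaxUint32}
//    txOut: {Value: 1500, PkScript: <first scriptPubKey of B's coinbase>}
//
// while a blue block whose accepted fees sum to zero contributes only the txIn.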


@@ -7,7 +7,7 @@ package blockdag_test
import (
"bytes"
"fmt"
"github.com/pkg/errors"
"os"
"path/filepath"
"testing"
@@ -15,11 +15,11 @@ import (
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/blockdag/fullblocktests"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/database"
_ "github.com/daglabs/btcd/database/ffldb"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
)
@@ -62,7 +62,7 @@ func isSupportedDbType(dbType string) bool {
// a teardown function the caller should invoke when done testing to clean up.
func DAGSetup(dbName string, params *dagconfig.Params) (*blockdag.BlockDAG, func(), error) {
if !isSupportedDbType(testDbType) {
return nil, nil, fmt.Errorf("unsupported db type %v", testDbType)
return nil, nil, errors.Errorf("unsupported db type %v", testDbType)
}
// Handle memory database specially since it doesn't need the disk
@@ -72,7 +72,7 @@ func DAGSetup(dbName string, params *dagconfig.Params) (*blockdag.BlockDAG, func
if testDbType == "memdb" {
ndb, err := database.Create(testDbType)
if err != nil {
return nil, nil, fmt.Errorf("error creating db: %v", err)
return nil, nil, errors.Errorf("error creating db: %v", err)
}
db = ndb
@@ -85,7 +85,7 @@ func DAGSetup(dbName string, params *dagconfig.Params) (*blockdag.BlockDAG, func
// Create the root directory for test databases.
if !fileExists(testDbRoot) {
if err := os.MkdirAll(testDbRoot, 0700); err != nil {
err := fmt.Errorf("unable to create test db "+
err := errors.Errorf("unable to create test db "+
"root: %v", err)
return nil, nil, err
}
@@ -96,7 +96,7 @@ func DAGSetup(dbName string, params *dagconfig.Params) (*blockdag.BlockDAG, func
_ = os.RemoveAll(dbPath)
ndb, err := database.Create(testDbType, dbPath, blockDataNet)
if err != nil {
return nil, nil, fmt.Errorf("error creating db: %v", err)
return nil, nil, errors.Errorf("error creating db: %v", err)
}
db = ndb
@@ -110,7 +110,7 @@ func DAGSetup(dbName string, params *dagconfig.Params) (*blockdag.BlockDAG, func
}
// Copy the chain params to ensure any modifications the tests do to
// the chain parameters do not affect the global instance.
// the DAG parameters do not affect the global instance.
paramsCopy := *params
// Create the main chain instance.
@@ -123,7 +123,7 @@ func DAGSetup(dbName string, params *dagconfig.Params) (*blockdag.BlockDAG, func
})
if err != nil {
teardown()
err := fmt.Errorf("failed to create chain instance: %v", err)
err := errors.Errorf("failed to create chain instance: %v", err)
return nil, nil, err
}
return chain, teardown, nil
@@ -156,11 +156,11 @@ func TestFullBlocks(t *testing.T) {
testAcceptedBlock := func(item fullblocktests.AcceptedBlock) {
blockHeight := item.Height
block := util.NewBlock(item.Block)
block.SetHeight(blockHeight)
block.SetChainHeight(blockHeight)
t.Logf("Testing block %s (hash %s, height %d)",
item.Name, block.Hash(), blockHeight)
isOrphan, err := dag.ProcessBlock(block,
isOrphan, delay, err := dag.ProcessBlock(block,
blockdag.BFNone)
if err != nil {
t.Fatalf("block %q (hash %s, height %d) should "+
@@ -168,6 +168,13 @@ func TestFullBlocks(t *testing.T) {
block.Hash(), blockHeight, err)
}
if delay != item.Delay {
t.Fatalf("block %q (hash %s, height %d) unexpected "+
"delay -- got %v, want %v", item.Name,
block.Hash(), blockHeight, delay,
item.Delay)
}
if isOrphan != item.IsOrphan {
t.Fatalf("block %q (hash %s, height %d) unexpected "+
"orphan flag -- got %v, want %v", item.Name,
@@ -182,11 +189,11 @@ func TestFullBlocks(t *testing.T) {
testRejectedBlock := func(item fullblocktests.RejectedBlock) {
blockHeight := item.Height
block := util.NewBlock(item.Block)
block.SetHeight(blockHeight)
block.SetChainHeight(blockHeight)
t.Logf("Testing block %s (hash %s, height %d)",
item.Name, block.Hash(), blockHeight)
_, err := dag.ProcessBlock(block, blockdag.BFNone)
_, _, err := dag.ProcessBlock(block, blockdag.BFNone)
if err == nil {
t.Fatalf("block %q (hash %s, height %d) should not "+
"have been accepted", item.Name, block.Hash(),
@@ -239,11 +246,11 @@ func TestFullBlocks(t *testing.T) {
testOrphanOrRejectedBlock := func(item fullblocktests.OrphanOrRejectedBlock) {
blockHeight := item.Height
block := util.NewBlock(item.Block)
block.SetHeight(blockHeight)
block.SetChainHeight(blockHeight)
t.Logf("Testing block %s (hash %s, height %d)",
item.Name, block.Hash(), blockHeight)
isOrphan, err := dag.ProcessBlock(block, blockdag.BFNone)
isOrphan, delay, err := dag.ProcessBlock(block, blockdag.BFNone)
if err != nil {
// Ensure the error code is of the expected type.
if _, ok := err.(blockdag.RuleError); !ok {
@@ -255,6 +262,12 @@ func TestFullBlocks(t *testing.T) {
}
}
if delay != 0 {
t.Fatalf("block %q (hash %s, height %d) "+
"is too far in the future",
item.Name, block.Hash(), blockHeight)
}
if !isOrphan {
t.Fatalf("block %q (hash %s, height %d) was accepted, "+
"but is not considered an orphan", item.Name,
@@ -267,18 +280,18 @@ func TestFullBlocks(t *testing.T) {
testExpectedTip := func(item fullblocktests.ExpectedTip) {
blockHeight := item.Height
block := util.NewBlock(item.Block)
block.SetHeight(blockHeight)
block.SetChainHeight(blockHeight)
t.Logf("Testing tip for block %s (hash %s, height %d)",
item.Name, block.Hash(), blockHeight)
// Ensure hash and height match.
if dag.HighestTipHash() != item.Block.BlockHash() ||
dag.Height() != blockHeight { //TODO: (Ori) the use of dag.Height() and virtualBlock.HighestTipHash() is wrong, and was done only for compilation
if dag.SelectedTipHash() != item.Block.BlockHash() ||
dag.ChainHeight() != blockHeight { //TODO: (Ori) the use of dag.ChainHeight() and virtualBlock.HighestTipHash() is wrong, and was done only for compilation
t.Fatalf("block %q (hash %s, height %d) should be "+
"the current tip -- got (hash %s, height %d)",
item.Name, block.Hash(), blockHeight, dag.HighestTipHash(),
dag.Height()) //TODO: (Ori) the use of dag.Height() and virtualBlock.HighestTipHash() is wrong, and was done only for compilation
item.Name, block.Hash(), blockHeight, dag.SelectedTipHash(),
dag.ChainHeight()) //TODO: (Ori) the use of dag.ChainHeight() and virtualBlock.HighestTipHash() is wrong, and was done only for compilation
}
}


@@ -12,8 +12,8 @@ package fullblocktests
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"github.com/pkg/errors"
"math"
"runtime"
"time"
@@ -21,9 +21,9 @@ import (
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/btcec"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/util/random"
"github.com/daglabs/btcd/wire"
)
@@ -67,8 +67,9 @@ type TestInstance interface {
type AcceptedBlock struct {
Name string
Block *wire.MsgBlock
Height int32
Height uint64
IsOrphan bool
Delay time.Duration
}
// Ensure AcceptedBlock implements the TestInstance interface.
@@ -85,7 +86,7 @@ func (b AcceptedBlock) FullBlockTestInstance() {}
type RejectedBlock struct {
Name string
Block *wire.MsgBlock
Height int32
Height uint64
RejectCode blockdag.ErrorCode
}
@@ -107,7 +108,7 @@ func (b RejectedBlock) FullBlockTestInstance() {}
type OrphanOrRejectedBlock struct {
Name string
Block *wire.MsgBlock
Height int32
Height uint64
}
// Ensure ExpectedTip implements the TestInstance interface.
@@ -124,7 +125,7 @@ func (b OrphanOrRejectedBlock) FullBlockTestInstance() {}
type ExpectedTip struct {
Name string
Block *wire.MsgBlock
Height int32
Height uint64
}
// Ensure ExpectedTip implements the TestInstance interface.
@@ -141,7 +142,7 @@ func (b ExpectedTip) FullBlockTestInstance() {}
type RejectedNonCanonicalBlock struct {
Name string
RawBlock []byte
Height int32
Height uint64
}
// FullBlockTestInstance only exists to allow RejectedNonCanonicalBlock to be treated as
@@ -153,7 +154,7 @@ func (b RejectedNonCanonicalBlock) FullBlockTestInstance() {}
// spendableOut represents a transaction output that is spendable along with
// additional metadata such as the block it's in and how much it pays.
type spendableOut struct {
prevOut wire.OutPoint
prevOut wire.Outpoint
amount util.Amount
}
@@ -161,8 +162,8 @@ type spendableOut struct {
// and transaction output index within the transaction.
func makeSpendableOutForTx(tx *wire.MsgTx, txOutIndex uint32) spendableOut {
return spendableOut{
prevOut: wire.OutPoint{
TxID: tx.TxID(),
prevOut: wire.Outpoint{
TxID: *tx.TxID(),
Index: txOutIndex,
},
amount: util.Amount(tx.TxOut[txOutIndex].Value),
@@ -182,10 +183,10 @@ type testGenerator struct {
params *dagconfig.Params
tip *wire.MsgBlock
tipName string
tipHeight int32
tipHeight uint64
blocks map[daghash.Hash]*wire.MsgBlock
blocksByName map[string]*wire.MsgBlock
blockHeights map[string]int32
blockHeights map[string]uint64
// Used for tracking spendable coinbase outputs.
spendableOuts []spendableOut
@@ -193,6 +194,8 @@ type testGenerator struct {
// Common key for any tests which require signed transactions.
privKey *btcec.PrivateKey
powMaxBits uint32
}
// makeTestGenerator returns a test generator instance initialized with the
@@ -205,11 +208,12 @@ func makeTestGenerator(params *dagconfig.Params) (testGenerator, error) {
params: params,
blocks: map[daghash.Hash]*wire.MsgBlock{*genesisHash: genesis},
blocksByName: map[string]*wire.MsgBlock{"genesis": genesis},
blockHeights: map[string]int32{"genesis": 0},
blockHeights: map[string]uint64{"genesis": 0},
tip: genesis,
tipName: "genesis",
tipHeight: 0,
privKey: privKey,
powMaxBits: util.BigToCompact(params.PowMax),
}, nil
}
@@ -243,8 +247,8 @@ func pushDataScript(items ...[]byte) []byte {
// standardCoinbaseScript returns a standard script suitable for use as the
// signature script of the coinbase transaction of a new block.
func standardCoinbaseScript(blockHeight int32, extraNonce uint64) ([]byte, error) {
return txscript.NewScriptBuilder().AddInt64(int64(blockHeight)).
func standardCoinbaseScript(extraNonce uint64) ([]byte, error) {
return txscript.NewScriptBuilder().
AddInt64(int64(extraNonce)).Script()
}
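// For example (illustrative, relying on txscript's small-integer opcodes):
// standardCoinbaseScript(0) yields the one-byte script [OP_0] and
// standardCoinbaseScript(5) yields [OP_5]; nonces above 16 are encoded as
// minimal data pushes.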
@@ -273,11 +277,10 @@ func uniqueOpReturnScript() []byte {
}
// createCoinbaseTx returns a coinbase transaction paying an appropriate
// subsidy based on the passed block height. The coinbase signature script
// conforms to the requirements of version 2 blocks.
func (g *testGenerator) createCoinbaseTx(blockHeight int32) *wire.MsgTx {
// subsidy based on the passed block height.
func (g *testGenerator) createCoinbaseTx(blueScore uint64) *wire.MsgTx {
extraNonce := uint64(0)
coinbaseScript, err := standardCoinbaseScript(blockHeight, extraNonce)
coinbaseScript, err := standardCoinbaseScript(extraNonce)
if err != nil {
panic(err)
}
@@ -285,14 +288,14 @@ func (g *testGenerator) createCoinbaseTx(blockHeight int32) *wire.MsgTx {
txIn := &wire.TxIn{
// Coinbase transactions have no inputs, so previous outpoint is
// zero hash and max index.
PreviousOutPoint: *wire.NewOutPoint(&daghash.TxID{},
PreviousOutpoint: *wire.NewOutpoint(&daghash.TxID{},
wire.MaxPrevOutIndex),
Sequence: wire.MaxTxInSequenceNum,
SignatureScript: coinbaseScript,
}
txOut := &wire.TxOut{
Value: blockdag.CalcBlockSubsidy(blockHeight, g.params),
PkScript: opTrueScript,
Value: blockdag.CalcBlockSubsidy(blueScore, g.params),
ScriptPubKey: opTrueScript,
}
return wire.NewNativeMsgTx(1, []*wire.TxIn{txIn}, []*wire.TxOut{txOut})
}
@@ -407,9 +410,9 @@ func additionalSpendFee(fee util.Amount) func(*wire.MsgBlock) {
// replaceSpendScript returns a function that itself takes a block and modifies
// it by replacing the public key script of the spending transaction.
func replaceSpendScript(pkScript []byte) func(*wire.MsgBlock) {
func replaceSpendScript(scriptPubKey []byte) func(*wire.MsgBlock) {
return func(b *wire.MsgBlock) {
b.Transactions[1].TxOut[0].PkScript = pkScript
b.Transactions[1].TxOut[0].ScriptPubKey = scriptPubKey
}
}
@@ -436,7 +439,7 @@ func additionalTx(tx *wire.MsgTx) func(*wire.MsgBlock) {
// tests.
func createSpendTx(spend *spendableOut, fee util.Amount) *wire.MsgTx {
txIn := &wire.TxIn{
PreviousOutPoint: spend.prevOut,
PreviousOutpoint: spend.prevOut,
Sequence: wire.MaxTxInSequenceNum,
SignatureScript: nil,
}
@@ -512,7 +515,7 @@ func (g *testGenerator) nextBlock(blockName string, spend *spendableOut, mungers
Version: 1,
ParentHashes: []*daghash.Hash{g.tip.BlockHash()}, // TODO: (Stas) This is wrong. Modified only to satisfy compilation.
HashMerkleRoot: calcHashMerkleRoot(txns),
Bits: g.params.PowLimitBits,
Bits: g.powMaxBits,
Timestamp: ts,
Nonce: 0, // To be solved.
},
@@ -606,7 +609,8 @@ func (g *testGenerator) saveSpendableCoinbaseOuts() {
// reaching the block that has already had the coinbase outputs
// collected.
var collectBlocks []*wire.MsgBlock
for b := g.tip; b != nil; b = g.blocks[*b.Header.SelectedParentHash()] {
// TODO: (Evgeny) This is wrong. Modified only to satisfy compilation.
for b := g.tip; b != nil; b = g.blocks[*b.Header.ParentHashes[0]] {
if b.BlockHash() == g.prevCollectedHash {
break
}
@@ -680,7 +684,7 @@ func countBlockSigOps(block *wire.MsgBlock) int {
totalSigOps += numSigOps
}
for _, txOut := range tx.TxOut {
numSigOps := txscript.GetSigOpCount(txOut.PkScript)
numSigOps := txscript.GetSigOpCount(txOut.ScriptPubKey)
totalSigOps += numSigOps
}
}
@@ -773,7 +777,7 @@ func (g *testGenerator) assertTipBlockTxOutOpReturn(txIndex, txOutIndex uint32)
}
txOut := tx.TxOut[txOutIndex]
if txOut.PkScript[0] != txscript.OpReturn {
if txOut.ScriptPubKey[0] != txscript.OpReturn {
panic(fmt.Sprintf("transaction index %d output %d in block %q "+
"(height %d) is not an OP_RETURN", txIndex, txOutIndex,
g.tipName, g.tipHeight))
@@ -836,7 +840,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// block to be the current tip of the block chain.
acceptBlock := func(blockName string, block *wire.MsgBlock, isOrphan bool) TestInstance {
blockHeight := g.blockHeights[blockName]
return AcceptedBlock{blockName, block, blockHeight, isOrphan}
return AcceptedBlock{blockName, block, blockHeight, isOrphan, 0}
}
rejectBlock := func(blockName string, block *wire.MsgBlock, code blockdag.ErrorCode) TestInstance {
blockHeight := g.blockHeights[blockName]
@@ -910,9 +914,9 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// genesis -> bm0 -> bm1 -> ... -> bm99
// ---------------------------------------------------------------------
coinbaseMaturity := g.params.BlockRewardMaturity
coinbaseMaturity := g.params.BlockCoinbaseMaturity
var testInstances []TestInstance
for i := uint16(0); i < coinbaseMaturity; i++ {
for i := uint64(0); i < coinbaseMaturity; i++ {
blockName := fmt.Sprintf("bm%d", i)
g.nextBlock(blockName, nil)
g.saveTipCoinbaseOut()
@@ -923,7 +927,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// Collect spendable outputs. This simplifies the code below.
var outs []*spendableOut
for i := uint16(0); i < coinbaseMaturity; i++ {
for i := uint64(0); i < coinbaseMaturity; i++ {
op := g.oldestCoinbaseOut()
outs = append(outs, &op)
}
@@ -1001,45 +1005,6 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// Too much proof-of-work coinbase tests.
// ---------------------------------------------------------------------
// Create a block that generates too much coinbase.
//
// ... -> b1(0) -> b2(1) -> b5(2) -> b6(3)
// \-> b9(4)
// \-> b3(1) -> b4(2)
g.setTip("b6")
g.nextBlock("b9", outs[4], additionalCoinbase(1))
rejected(blockdag.ErrBadCoinbaseValue)
// Create a fork that ends with block that generates too much coinbase.
//
// ... -> b1(0) -> b2(1) -> b5(2) -> b6(3)
// \-> b10(3) -> b11(4)
// \-> b3(1) -> b4(2)
g.setTip("b5")
g.nextBlock("b10", outs[3])
acceptedToSideChainWithExpectedTip("b6")
g.nextBlock("b11", outs[4], additionalCoinbase(1))
rejected(blockdag.ErrBadCoinbaseValue)
// Create a fork that ends with block that generates too much coinbase
// as before, but with a valid fork first.
//
// ... -> b1(0) -> b2(1) -> b5(2) -> b6(3)
// | \-> b12(3) -> b13(4) -> b14(5)
// | (b12 added last)
// \-> b3(1) -> b4(2)
g.setTip("b5")
b12 := g.nextBlock("b12", outs[3])
b13 := g.nextBlock("b13", outs[4])
b14 := g.nextBlock("b14", outs[5], additionalCoinbase(1))
tests = append(tests, []TestInstance{
acceptBlock("b13", b13, true),
acceptBlock("b14", b14, true),
rejectBlock("b12", b12, blockdag.ErrBadCoinbaseValue),
expectTipBlock("b13", b13),
})
// ---------------------------------------------------------------------
// Checksig signature operation count tests.
// ---------------------------------------------------------------------
@@ -1141,7 +1106,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
replaceSpendScript(sizePadScript)(b)
})
g.assertTipBlockSize(maxBlockSize + 1)
rejected(blockdag.ErrBlockTooBig)
rejected(blockdag.ErrBlockMassTooHigh)
// Parent was rejected, so this block must either be an orphan or
// outright rejected due to an invalid parent.
@@ -1161,7 +1126,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
g.setTip("b15")
tooSmallCbScript := repeatOpcode(0x00, minCoinbaseScriptLen-1)
g.nextBlock("b26", outs[6], replaceCoinbaseSigScript(tooSmallCbScript))
rejected(blockdag.ErrBadCoinbaseScriptLen)
rejected(blockdag.ErrBadCoinbasePayloadLen)
// Parent was rejected, so this block must either be an orphan or
// outright rejected due to an invalid parent.
@@ -1177,7 +1142,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
g.setTip("b15")
tooLargeCbScript := repeatOpcode(0x00, maxCoinbaseScriptLen+1)
g.nextBlock("b28", outs[6], replaceCoinbaseSigScript(tooLargeCbScript))
rejected(blockdag.ErrBadCoinbaseScriptLen)
rejected(blockdag.ErrBadCoinbasePayloadLen)
// Parent was rejected, so this block must either be an orphan or
// outright rejected due to an invalid parent.
@@ -1349,7 +1314,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
fill := maxBlockSigOps - (txnsNeeded * redeemScriptSigOps) + 1
finalTx := b.Transactions[len(b.Transactions)-1]
tx := createSpendTxForTx(finalTx, lowFee)
tx.TxOut[0].PkScript = repeatOpcode(txscript.OpCheckSig, fill)
tx.TxOut[0].ScriptPubKey = repeatOpcode(txscript.OpCheckSig, fill)
b.AddTransaction(tx)
})
rejected(blockdag.ErrTooManySigOps)
@@ -1383,7 +1348,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
}
finalTx := b.Transactions[len(b.Transactions)-1]
tx := createSpendTxForTx(finalTx, lowFee)
tx.TxOut[0].PkScript = repeatOpcode(txscript.OpCheckSig, fill)
tx.TxOut[0].ScriptPubKey = repeatOpcode(txscript.OpCheckSig, fill)
b.AddTransaction(tx)
})
accepted()
@@ -1444,7 +1409,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
b46.Header.Nonce++
blockHash := b46.BlockHash()
hashNum := daghash.HashToBig(blockHash)
if hashNum.Cmp(g.params.PowLimit) >= 0 {
if hashNum.Cmp(g.params.PowMax) >= 0 {
break
}
}
@@ -1531,8 +1496,8 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
g.nextBlock("b52", outs[14], func(b *wire.MsgBlock) {
txID := newTxIDFromStr("00000000000000000000000000000000" +
"00000000000000000123456789abcdef")
b.Transactions[1].TxIn[0].PreviousOutPoint.TxID = *txID
b.Transactions[1].TxIn[0].PreviousOutPoint.Index = 0
b.Transactions[1].TxIn[0].PreviousOutpoint.TxID = *txID
b.Transactions[1].TxIn[0].PreviousOutpoint.Index = 0
})
rejected(blockdag.ErrMissingTxOut)
@@ -1553,9 +1518,11 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// ... -> b33(9) -> b35(10) -> b39(11) -> b42(12) -> b43(13) -> b53(14)
// \-> b54(15)
g.nextBlock("b54", outs[15], func(b *wire.MsgBlock) {
medianBlock := g.blocks[*b.Header.SelectedParentHash()]
// TODO: (Evgeny) This is wrong. Modified only to satisfy compilation.
medianBlock := g.blocks[*b.Header.ParentHashes[0]]
for i := 0; i < medianTimeBlocks/2; i++ {
medianBlock = g.blocks[*medianBlock.Header.SelectedParentHash()]
// TODO: (Evgeny) This is wrong. Modified only to satisfy compilation.
medianBlock = g.blocks[*medianBlock.Header.ParentHashes[0]]
}
b.Header.Timestamp = medianBlock.Header.Timestamp
})
@@ -1567,9 +1534,11 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// ... -> b33(9) -> b35(10) -> b39(11) -> b42(12) -> b43(13) -> b53(14) -> b55(15)
g.setTip("b53")
g.nextBlock("b55", outs[15], func(b *wire.MsgBlock) {
medianBlock := g.blocks[*b.Header.SelectedParentHash()]
// TODO: (Evgeny) This is wrong. Modified only to satisfy compilation.
medianBlock := g.blocks[*b.Header.ParentHashes[0]]
for i := 0; i < medianTimeBlocks/2; i++ {
medianBlock = g.blocks[*medianBlock.Header.SelectedParentHash()]
// TODO: (Evgeny) This is wrong. Modified only to satisfy compilation.
medianBlock = g.blocks[*medianBlock.Header.ParentHashes[0]]
}
medianBlockTime := medianBlock.Header.Timestamp
b.Header.Timestamp = medianBlockTime.Add(time.Second)
@@ -1684,7 +1653,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// \-> b58(17)
g.setTip("b57")
g.nextBlock("b58", outs[17], func(b *wire.MsgBlock) {
b.Transactions[1].TxIn[0].PreviousOutPoint.Index = 42
b.Transactions[1].TxIn[0].PreviousOutpoint.Index = 42
})
rejected(blockdag.ErrMissingTxOut)
@@ -1717,7 +1686,8 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
g.nextBlock("b61", outs[18], func(b *wire.MsgBlock) {
// Duplicate the coinbase of the parent block to force the
// condition.
parent := g.blocks[*b.Header.SelectedParentHash()]
// TODO: (Evgeny) This is wrong. Modified only to satisfy compilation.
parent := g.blocks[*b.Header.ParentHashes[0]]
b.Transactions[0] = parent.Transactions[0]
})
rejected(blockdag.ErrOverwriteTx)
@@ -1842,7 +1812,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// \-> b68(20)
g.setTip("b65")
g.nextBlock("b68", outs[20], additionalCoinbase(10), additionalSpendFee(9))
rejected(blockdag.ErrBadCoinbaseValue)
rejected(blockdag.ErrBadCoinbaseTransaction)
// Create block that pays 10 extra to the coinbase and a tx that pays
// the extra 10 fee.
@@ -2059,7 +2029,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// Collect all of the spendable coinbase outputs from the previous
// collection point up to the current tip.
g.saveSpendableCoinbaseOuts()
spendableOutOffset := g.tipHeight - int32(coinbaseMaturity)
spendableOutOffset := g.tipHeight - coinbaseMaturity
// Extend the main chain by a large number of max size blocks.
//
@@ -2068,7 +2038,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
reorgSpend := *outs[spendableOutOffset]
reorgStartBlockName := g.tipName
chain1TipName := g.tipName
for i := int32(0); i < numLargeReorgBlocks; i++ {
for i := uint64(0); i < numLargeReorgBlocks; i++ {
chain1TipName = fmt.Sprintf("br%d", i)
g.nextBlock(chain1TipName, &reorgSpend, func(b *wire.MsgBlock) {
bytesToMaxSize := maxBlockSize - b.SerializeSize() - 3
@@ -2083,7 +2053,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
// Use the next available spendable output. First use up any
// remaining spendable outputs that were already popped into the
// outs slice, then just pop them from the stack.
if spendableOutOffset+1+i < int32(len(outs)) {
if spendableOutOffset+1+i < uint64(len(outs)) {
reorgSpend = *outs[spendableOutOffset+1+i]
} else {
reorgSpend = g.oldestCoinbaseOut()


@@ -14,7 +14,7 @@ import (
"github.com/daglabs/btcd/util/subnetworkid"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
)
@@ -77,7 +77,7 @@ var (
Transactions: []*wire.MsgTx{{
Version: 1,
TxIn: []*wire.TxIn{{
PreviousOutPoint: wire.OutPoint{
PreviousOutpoint: wire.Outpoint{
TxID: daghash.TxID{},
Index: 0xffffffff,
},
@@ -90,7 +90,7 @@ var (
}},
TxOut: []*wire.TxOut{{
Value: 0,
PkScript: fromHex("4104678afdb0fe5548271967f1" +
ScriptPubKey: fromHex("4104678afdb0fe5548271967f1" +
"a67130b7105cd6a828e03909a67962e0ea1f" +
"61deb649f6bc3f4cef38c4f35504e51ec138" +
"c4f35504e51ec112de5c384df7ba0b8d578a" +
@@ -111,22 +111,19 @@ var (
// allow them to change out from under the tests potentially invalidating them.
var regressionNetParams = &dagconfig.Params{
Name: "regtest",
Net: wire.TestNet,
Net: wire.RegTest,
DefaultPort: "18444",
// Chain parameters
GenesisBlock: &regTestGenesisBlock,
GenesisHash: newHashFromStr("5bec7567af40504e0994db3b573c186fffcc4edefe096ff2e58d00523bd7e8a6"),
PowLimit: regressionPowLimit,
PowLimitBits: 0x207fffff,
BlockRewardMaturity: 100,
SubsidyReductionInterval: 150,
TargetTimespan: time.Hour * 24 * 14, // 14 days
TargetTimePerBlock: time.Second * 10, // 10 seconds
RetargetAdjustmentFactor: 4, // 25% less, 400% more
ReduceMinDifficulty: true,
MinDiffReductionTime: time.Minute * 20, // TargetTimePerBlock * 2
GenerateSupported: true,
// DAG parameters
GenesisBlock: &regTestGenesisBlock,
GenesisHash: newHashFromStr("5bec7567af40504e0994db3b573c186fffcc4edefe096ff2e58d00523bd7e8a6"),
PowMax: regressionPowLimit,
BlockCoinbaseMaturity: 100,
SubsidyReductionInterval: 150,
TargetTimePerBlock: time.Second * 10, // 10 seconds
DifficultyAdjustmentWindowSize: 2640,
TimestampDeviationTolerance: 132,
GenerateSupported: true,
// Checkpoints ordered from oldest to newest.
Checkpoints: nil,


@@ -0,0 +1,234 @@
package indexers
import (
"bytes"
"encoding/gob"
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
"github.com/pkg/errors"
)
const (
// acceptanceIndexName is the human-readable name for the index.
acceptanceIndexName = "acceptance index"
)
var (
// acceptanceIndexKey is the key of the acceptance index and the db bucket used
// to house it.
acceptanceIndexKey = []byte("acceptanceidx")
)
// AcceptanceIndex implements an index of txAcceptanceData by block hash. That is to say,
// it stores a mapping between a block's hash and the set of transactions that the
// block accepts among its blue blocks.
type AcceptanceIndex struct {
db database.DB
dag *blockdag.BlockDAG
}
// Ensure the AcceptanceIndex type implements the Indexer interface.
var _ Indexer = (*AcceptanceIndex)(nil)
// NewAcceptanceIndex returns a new instance of an indexer that is used to create a
// mapping between block hashes and their txAcceptanceData.
//
// It implements the Indexer interface which plugs into the IndexManager that in
// turn is used by the blockdag package. This allows the index to be
// seamlessly maintained along with the DAG.
func NewAcceptanceIndex() *AcceptanceIndex {
return &AcceptanceIndex{}
}
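// Example (illustrative sketch): wiring the acceptance index into a DAG and
// then querying it, mirroring the pattern used by the tests below. NewManager,
// blockdag.Config and blockdag.DAGSetup appear elsewhere in this change;
// params, db and blockHash are assumed to be prepared by the caller.
//
//    acceptanceIndex := NewAcceptanceIndex()
//    indexManager := NewManager([]Indexer{acceptanceIndex})
//    config := blockdag.Config{IndexManager: indexManager, DAGParams: params, DB: db}
//    dag, teardown, err := blockdag.DAGSetup("", config)
//    ...
//    txsAcceptanceData, err := acceptanceIndex.TxsAcceptanceData(blockHash)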
// DropAcceptanceIndex drops the acceptance index from the provided database if it
// exists.
func DropAcceptanceIndex(db database.DB, interrupt <-chan struct{}) error {
return dropIndex(db, acceptanceIndexKey, acceptanceIndexName, interrupt)
}
// Key returns the database key to use for the index as a byte slice.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Key() []byte {
return acceptanceIndexKey
}
// Name returns the human-readable name of the index.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Name() string {
return acceptanceIndexName
}
// Create is invoked when the indexer manager determines the index needs
// to be created for the first time. It creates the bucket for the
// acceptance index.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Create(dbTx database.Tx) error {
_, err := dbTx.Metadata().CreateBucket(acceptanceIndexKey)
return err
}
// Init initializes the hash-based acceptance index.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Init(db database.DB, dag *blockdag.BlockDAG) error {
idx.db = db
idx.dag = dag
return nil
}
// ConnectBlock is invoked by the index manager when a new block has been
// connected to the DAG.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) ConnectBlock(dbTx database.Tx, _ *util.Block, blockID uint64, _ *blockdag.BlockDAG,
txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData, _ blockdag.MultiBlockTxsAcceptanceData) error {
return dbPutTxsAcceptanceData(dbTx, blockID, txsAcceptanceData)
}
// TxsAcceptanceData returns the acceptance data of all the transactions that
// were accepted by the block with hash blockHash.
func (idx *AcceptanceIndex) TxsAcceptanceData(blockHash *daghash.Hash) (blockdag.MultiBlockTxsAcceptanceData, error) {
var txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData
err := idx.db.View(func(dbTx database.Tx) error {
var err error
txsAcceptanceData, err = dbFetchTxsAcceptanceDataByHash(dbTx, blockHash)
return err
})
if err != nil {
return nil, err
}
return txsAcceptanceData, nil
}
// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
for blockID := currentBlockID + 1; blockID <= lastKnownBlockID; blockID++ {
hash, err := blockdag.DBFetchBlockHashByID(dbTx, blockID)
if err != nil {
return err
}
txAcceptanceData, err := idx.dag.TxsAcceptedByBlockHash(hash)
if err != nil {
return err
}
err = idx.ConnectBlock(dbTx, nil, blockID, nil, txAcceptanceData, nil)
if err != nil {
return err
}
}
return nil
}
func dbPutTxsAcceptanceData(dbTx database.Tx, blockID uint64,
txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
serializedTxsAcceptanceData, err := serializeMultiBlockTxsAcceptanceData(txsAcceptanceData)
if err != nil {
return err
}
bucket := dbTx.Metadata().Bucket(acceptanceIndexKey)
return bucket.Put(blockdag.SerializeBlockID(blockID), serializedTxsAcceptanceData)
}
func dbFetchTxsAcceptanceDataByHash(dbTx database.Tx,
hash *daghash.Hash) (blockdag.MultiBlockTxsAcceptanceData, error) {
blockID, err := blockdag.DBFetchBlockIDByHash(dbTx, hash)
if err != nil {
return nil, err
}
return dbFetchTxsAcceptanceDataByID(dbTx, blockID)
}
func dbFetchTxsAcceptanceDataByID(dbTx database.Tx,
blockID uint64) (blockdag.MultiBlockTxsAcceptanceData, error) {
serializedBlockID := blockdag.SerializeBlockID(blockID)
bucket := dbTx.Metadata().Bucket(acceptanceIndexKey)
serializedTxsAcceptanceData := bucket.Get(serializedBlockID)
if serializedTxsAcceptanceData == nil {
return nil, errors.Errorf("no entry in the accpetance index for block id %d", blockID)
}
return deserializeMultiBlockTxsAcceptanceData(serializedTxsAcceptanceData)
}
type serializableTxAcceptanceData struct {
MsgTx wire.MsgTx
IsAccepted bool
}
type serializableBlockTxsAcceptanceData struct {
BlockHash daghash.Hash
TxAcceptanceData []serializableTxAcceptanceData
}
type serializableMultiBlockTxsAcceptanceData []serializableBlockTxsAcceptanceData
func serializeMultiBlockTxsAcceptanceData(
multiBlockTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) ([]byte, error) {
// Convert MultiBlockTxsAcceptanceData to a serializable format
serializableData := make(serializableMultiBlockTxsAcceptanceData, len(multiBlockTxsAcceptanceData))
for i, blockTxsAcceptanceData := range multiBlockTxsAcceptanceData {
serializableBlockData := serializableBlockTxsAcceptanceData{
BlockHash: blockTxsAcceptanceData.BlockHash,
TxAcceptanceData: make([]serializableTxAcceptanceData, len(blockTxsAcceptanceData.TxAcceptanceData)),
}
for i, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
serializableBlockData.TxAcceptanceData[i] = serializableTxAcceptanceData{
MsgTx: *txAcceptanceData.Tx.MsgTx(),
IsAccepted: txAcceptanceData.IsAccepted,
}
}
serializableData[i] = serializableBlockData
}
// Serialize
var buffer bytes.Buffer
encoder := gob.NewEncoder(&buffer)
err := encoder.Encode(serializableData)
if err != nil {
return nil, err
}
return buffer.Bytes(), nil
}
func deserializeMultiBlockTxsAcceptanceData(
serializedTxsAcceptanceData []byte) (blockdag.MultiBlockTxsAcceptanceData, error) {
// Deserialize
buffer := bytes.NewBuffer(serializedTxsAcceptanceData)
decoder := gob.NewDecoder(buffer)
var serializedData serializableMultiBlockTxsAcceptanceData
err := decoder.Decode(&serializedData)
if err != nil {
return nil, err
}
// Convert serializable format to MultiBlockTxsAcceptanceData
multiBlockTxsAcceptanceData := make(blockdag.MultiBlockTxsAcceptanceData, len(serializedData))
for i, serializableBlockData := range serializedData {
blockTxsAcceptanceData := blockdag.BlockTxsAcceptanceData{
BlockHash: serializableBlockData.BlockHash,
TxAcceptanceData: make([]blockdag.TxAcceptanceData, len(serializableBlockData.TxAcceptanceData)),
}
for i, txData := range serializableBlockData.TxAcceptanceData {
msgTx := txData.MsgTx
blockTxsAcceptanceData.TxAcceptanceData[i] = blockdag.TxAcceptanceData{
Tx: util.NewTx(&msgTx),
IsAccepted: txData.IsAccepted,
}
}
multiBlockTxsAcceptanceData[i] = blockTxsAcceptanceData
}
return multiBlockTxsAcceptanceData, nil
}


@@ -0,0 +1,340 @@
package indexers
import (
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
"github.com/pkg/errors"
"io"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"syscall"
"testing"
)
func TestAcceptanceIndexSerializationAndDeserialization(t *testing.T) {
// Create test data
hash, _ := daghash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")
txIn1 := &wire.TxIn{SignatureScript: []byte{1}, PreviousOutpoint: wire.Outpoint{Index: 1}, Sequence: 0}
txIn2 := &wire.TxIn{SignatureScript: []byte{2}, PreviousOutpoint: wire.Outpoint{Index: 2}, Sequence: 0}
txOut1 := &wire.TxOut{ScriptPubKey: []byte{1}, Value: 10}
txOut2 := &wire.TxOut{ScriptPubKey: []byte{2}, Value: 20}
blockTxsAcceptanceData := blockdag.BlockTxsAcceptanceData{
BlockHash: *hash,
TxAcceptanceData: []blockdag.TxAcceptanceData{
{
Tx: util.NewTx(wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn1}, []*wire.TxOut{txOut1})),
IsAccepted: true,
},
{
Tx: util.NewTx(wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn2}, []*wire.TxOut{txOut2})),
IsAccepted: false,
},
},
}
multiBlockTxsAcceptanceData := blockdag.MultiBlockTxsAcceptanceData{blockTxsAcceptanceData}
// Serialize
serializedTxsAcceptanceData, err := serializeMultiBlockTxsAcceptanceData(multiBlockTxsAcceptanceData)
if err != nil {
t.Fatalf("TestAcceptanceIndexSerializationAndDeserialization: serialization failed: %s", err)
}
// Deserialize
deserializedTxsAcceptanceData, err := deserializeMultiBlockTxsAcceptanceData(serializedTxsAcceptanceData)
if err != nil {
t.Fatalf("TestAcceptanceIndexSerializationAndDeserialization: deserialization failed: %s", err)
}
// Check that they're the same
if !reflect.DeepEqual(multiBlockTxsAcceptanceData, deserializedTxsAcceptanceData) {
t.Fatalf("TestAcceptanceIndexSerializationAndDeserialization: original data and deseralize data aren't equal")
}
}
// TestAcceptanceIndexRecover tests the recoverability of the
// acceptance index.
// It does so by following these steps:
// * It creates a DAG with the acceptance index enabled (let's call it dag1) and
// makes it process some blocks.
// * It creates a copy of dag1 (let's call it dag2) and disables the acceptance
// index in it.
// * It processes two more blocks in both dag1 and dag2.
// * A copy of dag2 is created (let's call it dag3) with the acceptance
// index enabled.
// * It checks that the two missing blocks are added to dag3's acceptance index by
// comparing dag1's last block acceptance data with dag3's last block acceptance
// data.
func TestAcceptanceIndexRecover(t *testing.T) {
params := &dagconfig.SimNetParams
params.BlockCoinbaseMaturity = 0
testFiles := []string{
"blk_0_to_4.dat",
"blk_3B.dat",
}
var blocks []*util.Block
for _, file := range testFiles {
blockTmp, err := blockdag.LoadBlocks(filepath.Join("../testdata/", file))
if err != nil {
t.Fatalf("Error loading file: %v\n", err)
}
blocks = append(blocks, blockTmp...)
}
db1AcceptanceIndex := NewAcceptanceIndex()
db1IndexManager := NewManager([]Indexer{db1AcceptanceIndex})
db1Path, err := ioutil.TempDir("", "TestAcceptanceIndexRecover1")
if err != nil {
t.Fatalf("Error creating temporary directory: %s", err)
}
defer os.RemoveAll(db1Path)
db1, err := database.Create("ffldb", db1Path, params.Net)
if err != nil {
t.Fatalf("error creating db: %s", err)
}
db1Config := blockdag.Config{
IndexManager: db1IndexManager,
DAGParams: params,
DB: db1,
}
db1DAG, teardown, err := blockdag.DAGSetup("", db1Config)
if err != nil {
t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
}
if teardown != nil {
defer teardown()
}
for i := 1; i < len(blocks)-2; i++ {
isOrphan, delay, err := db1DAG.ProcessBlock(blocks[i], blockdag.BFNone)
if err != nil {
t.Fatalf("ProcessBlock fail on block %v: %v\n", i, err)
}
if delay != 0 {
t.Fatalf("ProcessBlock: block %d "+
"is too far in the future", i)
}
if isOrphan {
t.Fatalf("ProcessBlock incorrectly returned block %v "+
"is an orphan\n", i)
}
}
err = db1.FlushCache()
if err != nil {
t.Fatalf("Error flushing database to disk: %s", err)
}
db2Path, err := ioutil.TempDir("", "TestAcceptanceIndexRecover2")
if err != nil {
t.Fatalf("Error creating temporary directory: %s", err)
}
defer os.RemoveAll(db2Path)
err = copyDirectory(db1Path, db2Path)
if err != nil {
t.Fatalf("copyDirectory: %s", err)
}
for i := len(blocks) - 2; i < len(blocks); i++ {
isOrphan, delay, err := db1DAG.ProcessBlock(blocks[i], blockdag.BFNone)
if err != nil {
t.Fatalf("ProcessBlock fail on block %v: %v\n", i, err)
}
if delay != 0 {
t.Fatalf("ProcessBlock: block %d "+
"is too far in the future", i)
}
if isOrphan {
t.Fatalf("ProcessBlock incorrectly returned block %v "+
"is an orphan\n", i)
}
}
db1LastBlockAcceptanceData, err := db1AcceptanceIndex.TxsAcceptanceData(blocks[len(blocks)-1].Hash())
if err != nil {
t.Fatalf("Error fetching acceptance data: %s", err)
}
db2, err := database.Open("ffldb", db2Path, params.Net)
if err != nil {
t.Fatalf("Error opening database: %s", err)
}
db2Config := blockdag.Config{
DAGParams: params,
DB: db2,
}
db2DAG, teardown, err := blockdag.DAGSetup("", db2Config)
if err != nil {
t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
}
if teardown != nil {
defer teardown()
}
for i := len(blocks) - 2; i < len(blocks); i++ {
isOrphan, delay, err := db2DAG.ProcessBlock(blocks[i], blockdag.BFNone)
if err != nil {
t.Fatalf("ProcessBlock fail on block %v: %v\n", i, err)
}
if delay != 0 {
t.Fatalf("ProcessBlock: block %d "+
"is too far in the future", i)
}
if isOrphan {
t.Fatalf("ProcessBlock incorrectly returned block %v "+
"is an orphan\n", i)
}
}
err = db2.FlushCache()
if err != nil {
t.Fatalf("Error flushing database to disk: %s", err)
}
db3Path, err := ioutil.TempDir("", "TestAcceptanceIndexRecover3")
if err != nil {
t.Fatalf("Error creating temporary directory: %s", err)
}
defer os.RemoveAll(db3Path)
err = copyDirectory(db2Path, db3Path)
if err != nil {
t.Fatalf("copyDirectory: %s", err)
}
db3, err := database.Open("ffldb", db3Path, params.Net)
if err != nil {
t.Fatalf("Error opening database: %s", err)
}
db3AcceptanceIndex := NewAcceptanceIndex()
db3IndexManager := NewManager([]Indexer{db3AcceptanceIndex})
db3Config := blockdag.Config{
IndexManager: db3IndexManager,
DAGParams: params,
DB: db3,
}
_, teardown, err = blockdag.DAGSetup("", db3Config)
if err != nil {
t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
}
if teardown != nil {
defer teardown()
}
db3LastBlockAcceptanceData, err := db3AcceptanceIndex.TxsAcceptanceData(blocks[len(blocks)-1].Hash())
if err != nil {
t.Fatalf("Error fetching acceptance data: %s", err)
}
if !reflect.DeepEqual(db1LastBlockAcceptanceData, db3LastBlockAcceptanceData) {
t.Fatalf("recovery failed")
}
}
// This function is copied and modified from this stackoverflow answer: https://stackoverflow.com/a/56314145/2413761
func copyDirectory(srcDir, dest string) error {
entries, err := ioutil.ReadDir(srcDir)
if err != nil {
return err
}
for _, entry := range entries {
sourcePath := filepath.Join(srcDir, entry.Name())
destPath := filepath.Join(dest, entry.Name())
fileInfo, err := os.Stat(sourcePath)
if err != nil {
return err
}
stat, ok := fileInfo.Sys().(*syscall.Stat_t)
if !ok {
return errors.Errorf("failed to get raw syscall.Stat_t data for '%s'", sourcePath)
}
switch fileInfo.Mode() & os.ModeType {
case os.ModeDir:
if err := createIfNotExists(destPath, 0755); err != nil {
return err
}
if err := copyDirectory(sourcePath, destPath); err != nil {
return err
}
case os.ModeSymlink:
if err := copySymLink(sourcePath, destPath); err != nil {
return err
}
default:
if err := copyFile(sourcePath, destPath); err != nil {
return err
}
}
if err := os.Lchown(destPath, int(stat.Uid), int(stat.Gid)); err != nil {
return err
}
isSymlink := entry.Mode()&os.ModeSymlink != 0
if !isSymlink {
if err := os.Chmod(destPath, entry.Mode()); err != nil {
return err
}
}
}
return nil
}
// This function is copied and modified from this stackoverflow answer: https://stackoverflow.com/a/56314145/2413761
func copyFile(srcFile, dstFile string) error {
out, err := os.Create(dstFile)
if err != nil {
return err
}
defer out.Close()
in, err := os.Open(srcFile)
if err != nil {
return err
}
defer in.Close()
_, err = io.Copy(out, in)
if err != nil {
return err
}
return nil
}
// This function is copied and modified from this stackoverflow answer: https://stackoverflow.com/a/56314145/2413761
func createIfNotExists(dir string, perm os.FileMode) error {
if blockdag.FileExists(dir) {
return nil
}
if err := os.MkdirAll(dir, perm); err != nil {
return errors.Errorf("failed to create directory: '%s', error: '%s'", dir, err.Error())
}
return nil
}
// This function is copied and modified from this stackoverflow answer: https://stackoverflow.com/a/56314145/2413761
func copySymLink(source, dest string) error {
link, err := os.Readlink(source)
if err != nil {
return err
}
return os.Symlink(link, dest)
}


@@ -5,16 +5,16 @@
package indexers
import (
"errors"
"fmt"
"github.com/pkg/errors"
"sync"
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
)
@@ -51,9 +51,9 @@ const (
// hash.
addrKeyTypeScriptHash = 1
// Size of a transaction entry. It consists of 4 bytes block id + 4
// Size of a transaction entry. It consists of 8 bytes block id + 4
// bytes offset + 4 bytes length.
txEntrySize = 4 + 4 + 4
txEntrySize = 8 + 4 + 4
)
var (
@@ -117,11 +117,11 @@ var (
// [<block id><start offset><tx length>,...]
//
// Field Type Size
// block id uint32 4 bytes
// block id uint64 8 bytes
// start offset uint32 4 bytes
// tx length uint32 4 bytes
// -----
// Total: 12 bytes per indexed tx
// Total: 16 bytes per indexed tx
// -----------------------------------------------------------------------------
// fetchBlockHashFunc defines a callback function to use in order to convert a
@@ -130,12 +130,12 @@ type fetchBlockHashFunc func(serializedID []byte) (*daghash.Hash, error)
// serializeAddrIndexEntry serializes the provided block id and transaction
// location according to the format described in detail above.
func serializeAddrIndexEntry(blockID uint32, txLoc wire.TxLoc) []byte {
func serializeAddrIndexEntry(blockID uint64, txLoc wire.TxLoc) []byte {
// Serialize the entry.
serialized := make([]byte, 12)
byteOrder.PutUint32(serialized, blockID)
byteOrder.PutUint32(serialized[4:], uint32(txLoc.TxStart))
byteOrder.PutUint32(serialized[8:], uint32(txLoc.TxLen))
serialized := make([]byte, 16)
byteOrder.PutUint64(serialized, blockID)
byteOrder.PutUint32(serialized[8:], uint32(txLoc.TxStart))
byteOrder.PutUint32(serialized[12:], uint32(txLoc.TxLen))
return serialized
}
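// Example (illustrative sketch, assuming byteOrder is little-endian as used
// throughout this package): an entry for blockID 5 whose transaction starts at
// byte offset 80 with length 200 serializes to these 16 bytes:
//
//    entry := serializeAddrIndexEntry(5, wire.TxLoc{TxStart: 80, TxLen: 200})
//    // 05 00 00 00 00 00 00 00  50 00 00 00  c8 00 00 00
//    // |------ block id -----|  |- offset -|  |-- len --|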
@@ -149,13 +149,13 @@ func deserializeAddrIndexEntry(serialized []byte, region *database.BlockRegion,
return errDeserialize("unexpected end of data")
}
hash, err := fetchBlockHash(serialized[0:4])
hash, err := fetchBlockHash(serialized[0:8])
if err != nil {
return err
}
region.Hash = hash
region.Offset = byteOrder.Uint32(serialized[4:8])
region.Len = byteOrder.Uint32(serialized[8:12])
region.Offset = byteOrder.Uint32(serialized[8:12])
region.Len = byteOrder.Uint32(serialized[12:16])
return nil
}
@@ -170,7 +170,7 @@ func keyForLevel(addrKey [addrKeySize]byte, level uint8) [levelKeySize]byte {
// dbPutAddrIndexEntry updates the address index to include the provided entry
// according to the level-based scheme described in detail above.
func dbPutAddrIndexEntry(bucket internalBucket, addrKey [addrKeySize]byte, blockID uint32, txLoc wire.TxLoc) error {
func dbPutAddrIndexEntry(bucket internalBucket, addrKey [addrKeySize]byte, blockID uint64, txLoc wire.TxLoc) error {
// Start with level 0 and its initial max number of entries.
curLevel := uint8(0)
maxLevelBytes := level0MaxEntries * txEntrySize
@@ -528,12 +528,6 @@ func addrToKey(addr util.Address) ([addrKeySize]byte, error) {
result[0] = addrKeyTypeScriptHash
copy(result[1:], addr.Hash160()[:])
return result, nil
case *util.AddressPubKey:
var result [addrKeySize]byte
result[0] = addrKeyTypePubKeyHash
copy(result[1:], addr.AddressPubKeyHash().Hash160()[:])
return result, nil
}
return [addrKeySize]byte{}, errUnsupportedAddressType
@@ -591,7 +585,7 @@ func (idx *AddrIndex) NeedsInputs() bool {
// initialize for this index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Init(db database.DB) error {
func (idx *AddrIndex) Init(db database.DB, _ *blockdag.BlockDAG) error {
idx.db = db
return nil
}
@@ -626,37 +620,35 @@ func (idx *AddrIndex) Create(dbTx database.Tx) error {
// stored in the order they appear in the block.
type writeIndexData map[[addrKeySize]byte][]int
// indexPkScript extracts all standard addresses from the passed public key
// indexScriptPubKey extracts all standard addresses from the passed public key
// script and maps each of them to the associated transaction using the passed
// map.
func (idx *AddrIndex) indexPkScript(data writeIndexData, pkScript []byte, txIdx int) {
func (idx *AddrIndex) indexScriptPubKey(data writeIndexData, scriptPubKey []byte, txIdx int) {
// Nothing to index if the script is non-standard or otherwise doesn't
// contain any addresses.
_, addrs, _, err := txscript.ExtractPkScriptAddrs(pkScript,
_, addr, err := txscript.ExtractScriptPubKeyAddress(scriptPubKey,
idx.dagParams)
if err != nil || len(addrs) == 0 {
if err != nil || addr == nil {
return
}
for _, addr := range addrs {
addrKey, err := addrToKey(addr)
if err != nil {
// Ignore unsupported address types.
continue
}
// Avoid inserting the transaction more than once. Since the
// transactions are indexed serially any duplicates will be
// indexed in a row, so checking the most recent entry for the
// address is enough to detect duplicates.
indexedTxns := data[addrKey]
numTxns := len(indexedTxns)
if numTxns > 0 && indexedTxns[numTxns-1] == txIdx {
continue
}
indexedTxns = append(indexedTxns, txIdx)
data[addrKey] = indexedTxns
addrKey, err := addrToKey(addr)
if err != nil {
// Ignore unsupported address types.
return
}
// Avoid inserting the transaction more than once. Since the
// transactions are indexed serially any duplicates will be
// indexed in a row, so checking the most recent entry for the
// address is enough to detect duplicates.
indexedTxns := data[addrKey]
numTxns := len(indexedTxns)
if numTxns > 0 && indexedTxns[numTxns-1] == txIdx {
return
}
indexedTxns = append(indexedTxns, txIdx)
data[addrKey] = indexedTxns
}
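// Example (illustrative sketch; idx is an *AddrIndex and scriptPubKey a
// standard pay-to-pubkey-hash script): indexing the same script twice for the
// same transaction records it only once, thanks to the duplicate check above.
//
//    data := make(writeIndexData)
//    idx.indexScriptPubKey(data, scriptPubKey, 3)
//    idx.indexScriptPubKey(data, scriptPubKey, 3) // no-op: txIdx 3 already recorded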
// indexBlock extracts all of the standard addresses from all of the transactions
@@ -667,23 +659,23 @@ func (idx *AddrIndex) indexBlock(data writeIndexData, block *util.Block, dag *bl
// Coinbases do not reference any inputs. Since the block is
// required to have already gone through full validation, it has
// already been proven on the first transaction in the block is
// a coinbase, and the second one is a fee transaction.
if txIdx > 1 {
// a coinbase.
if txIdx > util.CoinbaseTransactionIndex {
for _, txIn := range tx.MsgTx().TxIn {
// The UTXO should always have the input since
// the index contract requires it, however, be
// safe and simply ignore any missing entries.
entry, ok := dag.GetUTXOEntry(txIn.PreviousOutPoint)
entry, ok := dag.GetUTXOEntry(txIn.PreviousOutpoint)
if !ok {
continue
}
idx.indexPkScript(data, entry.PkScript(), txIdx)
idx.indexScriptPubKey(data, entry.ScriptPubKey(), txIdx)
}
}
for _, txOut := range tx.MsgTx().TxOut {
idx.indexPkScript(data, txOut.PkScript, txIdx)
idx.indexScriptPubKey(data, txOut.ScriptPubKey, txIdx)
}
}
}
@@ -693,7 +685,9 @@ func (idx *AddrIndex) indexBlock(data writeIndexData, block *util.Block, dag *bl
// the transactions in the block involve.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) ConnectBlock(dbTx database.Tx, block *util.Block, dag *blockdag.BlockDAG, _ blockdag.MultiBlockTxsAcceptanceData) error {
func (idx *AddrIndex) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
_ blockdag.MultiBlockTxsAcceptanceData, _ blockdag.MultiBlockTxsAcceptanceData) error {
// The offset and length of the transactions within the serialized
// block.
txLocs, err := block.TxLoc()
@@ -701,12 +695,6 @@ func (idx *AddrIndex) ConnectBlock(dbTx database.Tx, block *util.Block, dag *blo
return err
}
// Get the internal block ID associated with the block.
blockID, err := dbFetchBlockIDByHash(dbTx, block.Hash())
if err != nil {
return err
}
// Build all of the address to transaction mappings in a local map.
addrsToTxns := make(writeIndexData)
idx.indexBlock(addrsToTxns, block, dag)
@@ -772,7 +760,7 @@ func (idx *AddrIndex) TxRegionsForAddress(dbTx database.Tx, addr util.Address, n
// the database transaction.
fetchBlockHash := func(id []byte) (*daghash.Hash, error) {
// Deserialize and populate the result.
return dbFetchBlockHashBySerializedID(dbTx, id)
return blockdag.DBFetchBlockHashBySerializedID(dbTx, id)
}
var err error
@@ -791,37 +779,35 @@ func (idx *AddrIndex) TxRegionsForAddress(dbTx database.Tx, addr util.Address, n
// script to the transaction.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) indexUnconfirmedAddresses(pkScript []byte, tx *util.Tx) {
func (idx *AddrIndex) indexUnconfirmedAddresses(scriptPubKey []byte, tx *util.Tx) {
// The error is ignored here since the only reason it can fail is if the
// script fails to parse and it was already validated before being
// admitted to the mempool.
_, addresses, _, _ := txscript.ExtractPkScriptAddrs(pkScript,
_, addr, _ := txscript.ExtractScriptPubKeyAddress(scriptPubKey,
idx.dagParams)
for _, addr := range addresses {
// Ignore unsupported address types.
addrKey, err := addrToKey(addr)
if err != nil {
continue
}
// Add a mapping from the address to the transaction.
idx.unconfirmedLock.Lock()
addrIndexEntry := idx.txnsByAddr[addrKey]
if addrIndexEntry == nil {
addrIndexEntry = make(map[daghash.TxID]*util.Tx)
idx.txnsByAddr[addrKey] = addrIndexEntry
}
addrIndexEntry[*tx.ID()] = tx
// Add a mapping from the transaction to the address.
addrsByTxEntry := idx.addrsByTx[*tx.ID()]
if addrsByTxEntry == nil {
addrsByTxEntry = make(map[[addrKeySize]byte]struct{})
idx.addrsByTx[*tx.ID()] = addrsByTxEntry
}
addrsByTxEntry[addrKey] = struct{}{}
idx.unconfirmedLock.Unlock()
// Ignore unsupported address types.
addrKey, err := addrToKey(addr)
if err != nil {
return
}
// Add a mapping from the address to the transaction.
idx.unconfirmedLock.Lock()
addrIndexEntry := idx.txnsByAddr[addrKey]
if addrIndexEntry == nil {
addrIndexEntry = make(map[daghash.TxID]*util.Tx)
idx.txnsByAddr[addrKey] = addrIndexEntry
}
addrIndexEntry[*tx.ID()] = tx
// Add a mapping from the transaction to the address.
addrsByTxEntry := idx.addrsByTx[*tx.ID()]
if addrsByTxEntry == nil {
addrsByTxEntry = make(map[[addrKeySize]byte]struct{})
idx.addrsByTx[*tx.ID()] = addrsByTxEntry
}
addrsByTxEntry[addrKey] = struct{}{}
idx.unconfirmedLock.Unlock()
}
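Note that the rewritten body maintains two mirrored maps under a single lock, so the mempool index can be queried by address as well as by transaction. A reduced sketch with string stand-ins for the real address-key and TxID types:

import "sync"

type unconfirmedIndex struct {
	mtx        sync.Mutex
	txnsByAddr map[string]map[string]bool // address key -> set of tx IDs
	addrsByTx  map[string]map[string]bool // tx ID -> set of address keys
}

// add records the addrKey<->txID relation in both directions so that
// lookup by address and removal by transaction are both O(1).
func (u *unconfirmedIndex) add(addrKey, txID string) {
	u.mtx.Lock()
	defer u.mtx.Unlock()
	if u.txnsByAddr[addrKey] == nil {
		u.txnsByAddr[addrKey] = make(map[string]bool)
	}
	u.txnsByAddr[addrKey][txID] = true
	if u.addrsByTx[txID] == nil {
		u.addrsByTx[txID] = make(map[string]bool)
	}
	u.addrsByTx[txID][addrKey] = true
}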
// AddUnconfirmedTx adds all addresses related to the transaction to the
@@ -840,19 +826,19 @@ func (idx *AddrIndex) AddUnconfirmedTx(tx *util.Tx, utxoSet blockdag.UTXOSet) {
// transaction has already been validated and thus all inputs are
// already known to exist.
for _, txIn := range tx.MsgTx().TxIn {
entry, ok := utxoSet.Get(txIn.PreviousOutPoint)
entry, ok := utxoSet.Get(txIn.PreviousOutpoint)
if !ok {
// Ignore missing entries. This should never happen
// in practice since the function comments specifically
// call out all inputs must be available.
continue
}
idx.indexUnconfirmedAddresses(entry.PkScript(), tx)
idx.indexUnconfirmedAddresses(entry.ScriptPubKey(), tx)
}
// Index addresses of all created outputs.
for _, txOut := range tx.MsgTx().TxOut {
idx.indexUnconfirmedAddresses(txOut.PkScript, tx)
idx.indexUnconfirmedAddresses(txOut.ScriptPubKey, tx)
}
}
@@ -907,6 +893,15 @@ func (idx *AddrIndex) UnconfirmedTxnsForAddress(addr util.Address) []*util.Tx {
return nil
}
// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
return errors.Errorf("addrindex was turned off for %d blocks and can't be recovered."+
" To resume working drop the addrindex with --dropaddrindex", lastKnownBlockID-currentBlockID)
}
// NewAddrIndex returns a new instance of an indexer that is used to create a
// mapping of all addresses in the blockchain to the respective transactions
// that involve them.

View File

@@ -7,6 +7,7 @@ package indexers
import (
"bytes"
"fmt"
"github.com/pkg/errors"
"testing"
"github.com/daglabs/btcd/wire"
@@ -127,17 +128,17 @@ func (b *addrIndexBucket) sanityCheck(addrKey [addrKeySize]byte, expectedTotal i
if (highestLevel != 0 && numEntries == 0) ||
numEntries > maxEntries {
return fmt.Errorf("level %d has %d entries",
return errors.Errorf("level %d has %d entries",
level, numEntries)
}
} else if numEntries != maxEntries && numEntries != maxEntries/2 {
return fmt.Errorf("level %d has %d entries", level,
return errors.Errorf("level %d has %d entries", level,
numEntries)
}
maxEntries *= 2
}
if totalEntries != expectedTotal {
return fmt.Errorf("expected %d entries - got %d", expectedTotal,
return errors.Errorf("expected %d entries - got %d", expectedTotal,
totalEntries)
}
@@ -151,7 +152,7 @@ func (b *addrIndexBucket) sanityCheck(addrKey [addrKeySize]byte, expectedTotal i
start := i * txEntrySize
num := byteOrder.Uint32(data[start:])
if num != expectedNum {
return fmt.Errorf("level %d offset %d does "+
return errors.Errorf("level %d offset %d does "+
"not contain the expected number of "+
"%d - got %d", level, i, num,
expectedNum)
@@ -222,7 +223,7 @@ nextTest:
for i := 0; i < test.numInsert; i++ {
txLoc := wire.TxLoc{TxStart: i * 2}
err := dbPutAddrIndexEntry(populatedBucket, test.key,
uint32(i), txLoc)
uint64(i), txLoc)
if err != nil {
t.Errorf("dbPutAddrIndexEntry #%d (%s) - "+
"unexpected error: %v", testNum,

View File

@@ -1,76 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package indexers
import (
"sync"
"time"
"github.com/btcsuite/btclog"
"github.com/daglabs/btcd/util"
)
// blockProgressLogger provides periodic logging for other services in order
// to show users progress of certain "actions" involving some or all current
// blocks. Ex: syncing to best chain, indexing all blocks, etc.
type blockProgressLogger struct {
receivedLogBlocks int64
receivedLogTx int64
lastBlockLogTime time.Time
subsystemLogger btclog.Logger
progressAction string
sync.Mutex
}
// newBlockProgressLogger returns a new block progress logger.
// The progress message is templated as follows:
// {progressAction} {numProcessed} {blocks|block} in the last {timePeriod}
// ({numTxs}, height {lastBlockHeight}, {lastBlockTimeStamp})
func newBlockProgressLogger(progressMessage string, logger btclog.Logger) *blockProgressLogger {
return &blockProgressLogger{
lastBlockLogTime: time.Now(),
progressAction: progressMessage,
subsystemLogger: logger,
}
}
// LogBlockHeight logs a new block height as an information message to show
// progress to the user. In order to prevent spam, it limits logging to one
// message every 10 seconds with duration and totals included.
func (b *blockProgressLogger) LogBlockHeight(block *util.Block) {
b.Lock()
defer b.Unlock()
b.receivedLogBlocks++
b.receivedLogTx += int64(len(block.MsgBlock().Transactions))
now := time.Now()
duration := now.Sub(b.lastBlockLogTime)
if duration < time.Second*10 {
return
}
// Truncate the duration to tens of milliseconds.
durationMillis := int64(duration / time.Millisecond)
tDuration := 10 * time.Millisecond * time.Duration(durationMillis/10)
// Log information about new block height.
blockStr := "blocks"
if b.receivedLogBlocks == 1 {
blockStr = "block"
}
txStr := "transactions"
if b.receivedLogTx == 1 {
txStr = "transaction"
}
b.subsystemLogger.Infof("%s %d %s in the last %s (%d %s, height %d, %s)",
b.progressAction, b.receivedLogBlocks, blockStr, tDuration, b.receivedLogTx,
txStr, block.Height(), block.MsgBlock().Header.Timestamp)
b.receivedLogBlocks = 0
b.receivedLogTx = 0
b.lastBlockLogTime = now
}
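For concreteness, the truncation above works out as follows (a hedged worked fragment, not code from the repo):

// 12.3456s elapsed -> 12345 whole milliseconds -> truncated to 12.34s
// for the log line, i.e. tens-of-milliseconds precision.
duration := 12345600 * time.Microsecond
durationMillis := int64(duration / time.Millisecond)                  // 12345
tDuration := 10 * time.Millisecond * time.Duration(durationMillis/10) // 12.34s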

View File

@@ -5,13 +5,13 @@
package indexers
import (
"errors"
"github.com/pkg/errors"
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/util/gcs"
"github.com/daglabs/btcd/util/gcs/builder"
"github.com/daglabs/btcd/wire"
@@ -84,7 +84,7 @@ var _ Indexer = (*CfIndex)(nil)
// Init initializes the hash-based cf index. This is part of the Indexer
// interface.
func (idx *CfIndex) Init(db database.DB) error {
func (idx *CfIndex) Init(db database.DB, _ *blockdag.BlockDAG) error {
idx.db = db
return nil
}
@@ -176,14 +176,19 @@ func storeFilter(dbTx database.Tx, block *util.Block, f *gcs.Filter,
if header.IsGenesis() {
prevHeader = &daghash.ZeroHash
} else {
ph := header.SelectedParentHash()
pfh, err := dbFetchFilterIdxEntry(dbTx, hkey, ph)
// TODO(Evgeny): Current implementation of GCS filter inherited from chain
// (single parent) and must be ported to DAG (multiple parents)
var parentHash *daghash.Hash
if header.NumParentBlocks() != 0 {
parentHash = header.ParentHashes[0]
}
prevFilterHashBytes, err := dbFetchFilterIdxEntry(dbTx, hkey, parentHash)
if err != nil {
return err
}
// Construct the new block's filter header, and store it.
prevHeader, err = daghash.NewHash(pfh)
prevHeader, err = daghash.NewHash(prevFilterHashBytes)
if err != nil {
return err
}
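The prevFilterHashBytes fetched above feed the usual BIP157-style header chain, in which each filter header commits to the filter's hash and the previous header. A hedged sketch of that construction; daghash.DoubleHashH is assumed to be this fork's double-SHA256 helper:

// makeFilterHeader chains a filter hash onto the previous filter header:
// header_n = doubleSHA256(filterHash_n || header_{n-1}).
func makeFilterHeader(filterHash, prevHeader *daghash.Hash) daghash.Hash {
	var buf [64]byte
	copy(buf[:32], filterHash[:])
	copy(buf[32:], prevHeader[:])
	return daghash.DoubleHashH(buf[:])
}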
@@ -199,8 +204,8 @@ func storeFilter(dbTx database.Tx, block *util.Block, f *gcs.Filter,
// ConnectBlock is invoked by the index manager when a new block has been
// connected to the main chain. This indexer adds a hash-to-cf mapping for
// every passed block. This is part of the Indexer interface.
func (idx *CfIndex) ConnectBlock(dbTx database.Tx, block *util.Block,
_ *blockdag.BlockDAG, _ blockdag.MultiBlockTxsAcceptanceData) error {
func (idx *CfIndex) ConnectBlock(dbTx database.Tx, block *util.Block, _ uint64,
_ *blockdag.BlockDAG, _ blockdag.MultiBlockTxsAcceptanceData, _ blockdag.MultiBlockTxsAcceptanceData) error {
f, err := builder.BuildBasicFilter(block.MsgBlock())
if err != nil {
@@ -335,6 +340,15 @@ func (idx *CfIndex) FilterHashesByBlockHashes(blockHashes []*daghash.Hash,
return idx.entriesByBlockHashes(cfHashKeys, filterType, blockHashes)
}
// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *CfIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
return errors.Errorf("cfindex was turned off for %d blocks and can't be recovered."+
" To resume working drop the cfindex with --dropcfindex", lastKnownBlockID-currentBlockID)
}
// NewCfIndex returns a new instance of an indexer that is used to create a
// mapping of the hashes of all blocks in the blockchain to their respective
// committed filters.

View File

@@ -9,11 +9,10 @@ package indexers
import (
"encoding/binary"
"errors"
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/pkg/errors"
)
var (
@@ -48,11 +47,20 @@ type Indexer interface {
// Init is invoked when the index manager is first initializing the
// index. This differs from the Create method in that it is called on
// every load, including the case the index was just created.
Init(db database.DB) error
Init(db database.DB, dag *blockdag.BlockDAG) error
// ConnectBlock is invoked when the index manager is notified that a new
// block has been connected to the DAG.
ConnectBlock(dbTx database.Tx, block *util.Block, dag *blockdag.BlockDAG, _ blockdag.MultiBlockTxsAcceptanceData) error
ConnectBlock(dbTx database.Tx,
block *util.Block,
blockID uint64,
dag *blockdag.BlockDAG,
acceptedTxsData blockdag.MultiBlockTxsAcceptanceData,
virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error
// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error
}
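For reference, a minimal no-op indexer satisfying the reshaped methods in this hunk might look as follows (a sketch only; the full Indexer interface also carries the Key/Name/Create methods not shown here):

// nullIndex is a hypothetical no-op indexer illustrating the new shape:
// Init now receives the DAG, ConnectBlock receives the block ID plus the
// virtual's acceptance data, and Recover is expected to close gaps.
type nullIndex struct{}

func (idx *nullIndex) Init(db database.DB, dag *blockdag.BlockDAG) error {
	return nil // nothing to initialize
}

func (idx *nullIndex) ConnectBlock(dbTx database.Tx, block *util.Block,
	blockID uint64, dag *blockdag.BlockDAG,
	acceptedTxsData blockdag.MultiBlockTxsAcceptanceData,
	virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
	return nil // nothing to index
}

func (idx *nullIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
	return nil // a no-op index has no gaps to close
}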
// AssertError identifies an error that indicates an internal code consistency

View File

@@ -5,16 +5,9 @@
package indexers
import (
"github.com/btcsuite/btclog"
"github.com/daglabs/btcd/logger"
"github.com/daglabs/btcd/util/panics"
)
// log is a logger that is initialized with no output filters. This
// means the package will not perform any logging by default until the caller
// requests it.
var log btclog.Logger
// The default amount of logging is none.
func init() {
log, _ = logger.Get(logger.SubsystemTags.INDX)
}
var log, _ = logger.Get(logger.SubsystemTags.INDX)
var spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)

View File

@@ -8,12 +8,15 @@ import (
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
)
var (
// indexTipsBucketName is the name of the db bucket used to house the
// current tip of each index.
indexTipsBucketName = []byte("idxtips")
indexCurrentBlockIDBucketName = []byte("idxcurrentblockid")
)
// Manager defines an index manager that manages multiple optional indexes and
@@ -146,6 +149,9 @@ func (m *Manager) Init(db database.DB, blockDAG *blockdag.BlockDAG, interrupt <-
if err != nil {
return err
}
if _, err := meta.CreateBucketIfNotExists(indexCurrentBlockIDBucketName); err != nil {
return err
}
return m.maybeCreateIndexes(dbTx)
})
@@ -155,12 +161,35 @@ func (m *Manager) Init(db database.DB, blockDAG *blockdag.BlockDAG, interrupt <-
// Initialize each of the enabled indexes.
for _, indexer := range m.enabledIndexes {
if err := indexer.Init(db); err != nil {
if err := indexer.Init(db, blockDAG); err != nil {
return err
}
}
return nil
return m.recoverIfNeeded()
}
// recoverIfNeeded checks whether the node ran for some time with one of the
// currently enabled indexes turned off and, if so, recovers the missing
// blocks for that index.
func (m *Manager) recoverIfNeeded() error {
return m.db.Update(func(dbTx database.Tx) error {
lastKnownBlockID := blockdag.DBFetchCurrentBlockID(dbTx)
for _, indexer := range m.enabledIndexes {
serializedCurrentIdxBlockID := dbTx.Metadata().Bucket(indexCurrentBlockIDBucketName).Get(indexer.Key())
currentIdxBlockID := uint64(0)
if serializedCurrentIdxBlockID != nil {
currentIdxBlockID = blockdag.DeserializeBlockID(serializedCurrentIdxBlockID)
}
if lastKnownBlockID > currentIdxBlockID {
err := indexer.Recover(dbTx, currentIdxBlockID, lastKnownBlockID)
if err != nil {
return err
}
}
}
return nil
})
}
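The comparison above hinges on block IDs serializing to a fixed 8-byte form. A round-trip sketch of that encoding, with hypothetical stand-ins for blockdag.SerializeBlockID and blockdag.DeserializeBlockID (the little-endian byte order is an assumption):

import "encoding/binary"

// serializeBlockID encodes a block ID as the fixed 8 bytes stored under
// indexCurrentBlockIDBucketName; deserializeBlockID reverses it.
func serializeBlockID(blockID uint64) []byte {
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, blockID)
	return buf
}

func deserializeBlockID(buf []byte) uint64 {
	return binary.LittleEndian.Uint64(buf)
}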
// ConnectBlock must be invoked when a block is extending the main chain. It
@@ -168,12 +197,32 @@ func (m *Manager) Init(db database.DB, blockDAG *blockdag.BlockDAG, interrupt <-
// checks, and invokes each indexer.
//
// This is part of the blockchain.IndexManager interface.
func (m *Manager) ConnectBlock(dbTx database.Tx, block *util.Block, dag *blockdag.BlockDAG, txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
func (m *Manager) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
// Call each of the currently active optional indexes with the block
// being connected so they can update accordingly.
for _, index := range m.enabledIndexes {
// Notify the indexer with the connected block so it can index it.
if err := index.ConnectBlock(dbTx, block, dag, txsAcceptanceData); err != nil {
if err := index.ConnectBlock(dbTx, block, blockID, dag, txsAcceptanceData, virtualTxsAcceptanceData); err != nil {
return err
}
}
// Add the new block ID index entry for the block being connected and
// update the current internal block ID accordingly.
err := m.updateIndexersWithCurrentBlockID(dbTx, block.Hash(), blockID)
if err != nil {
return err
}
return nil
}
func (m *Manager) updateIndexersWithCurrentBlockID(dbTx database.Tx, blockHash *daghash.Hash, blockID uint64) error {
serializedBlockID := blockdag.SerializeBlockID(blockID)
for _, index := range m.enabledIndexes {
err := dbTx.Metadata().Bucket(indexCurrentBlockIDBucketName).Put(index.Key(), serializedBlockID)
if err != nil {
return err
}
}
@@ -316,13 +365,6 @@ func dropIndex(db database.DB, idxKey []byte, idxName string, interrupt <-chan s
})
}
// Call extra index specific deinitialization for the transaction index.
if idxName == txIndexName {
if err := dropBlockIDIndex(db); err != nil {
return err
}
}
// Remove the index tip, index bucket, and in-progress drop flag now
// that all index entries have been removed.
err = db.Update(func(dbTx database.Tx) error {
@@ -332,6 +374,10 @@ func dropIndex(db database.DB, idxKey []byte, idxName string, interrupt <-chan s
return err
}
if err := meta.Bucket(indexCurrentBlockIDBucketName).Delete(idxKey); err != nil {
return err
}
return indexesBucket.Delete(indexDropKey(idxKey))
})
if err != nil {

View File

@@ -6,12 +6,12 @@ package indexers
import (
"fmt"
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
"github.com/pkg/errors"
)
const (
@@ -19,40 +19,26 @@ const (
txIndexName = "transaction index"
includingBlocksIndexKeyEntrySize = 8 // 4 bytes for offset + 4 bytes for transaction length
acceptingBlocksIndexKeyEntrySize = 4 // 4 bytes for accepting block ID
)
var (
includingBlocksIndexKey = []byte("includingblocksidx")
acceptingBlocksIndexKey = []byte("acceptingblocksidx")
// idByHashIndexBucketName is the name of the db bucket used to house
// the block id -> block hash index.
idByHashIndexBucketName = []byte("idbyhashidx")
// hashByIDIndexBucketName is the name of the db bucket used to house
// the block hash -> block id index.
hashByIDIndexBucketName = []byte("hashbyididx")
)
// txsAcceptedByVirtual is the in-memory index of txIDs that were accepted
// by the current virtual
var txsAcceptedByVirtual map[daghash.TxID]bool
// -----------------------------------------------------------------------------
// The transaction index consists of an entry for every transaction in the DAG.
// In order to significantly optimize the space requirements, a separate
// index provides an internal mapping between each block that has been
// indexed and a unique ID for use within the hash to location mappings. The ID
// is simply a sequentially incremented uint32. This is useful because it is
// only 4 bytes versus 32 bytes hashes and thus saves a ton of space in the
// index.
//
// There are four buckets used in total. The first bucket maps the hash of
// There are two buckets used in total. The first bucket maps the hash of
// each transaction to its location in each block it's included in. The second bucket
// contains all of the blocks from whose viewpoint the transaction has been
// accepted (i.e. the transaction is found in their blue set without double spends),
// and their blue block (or themselves) that included the transaction. The third
// bucket maps the hash of each block to the unique ID and the fourth maps
// that ID back to the block hash.
// and their blue block (or themselves) that included the transaction.
//
// NOTE: Although it is technically possible for multiple transactions to have
// the same hash as long as the previous transaction with the same hash is fully
@@ -67,123 +53,43 @@ var (
// <block id> = <start offset><tx length>
//
// Field Type Size
// block id uint32 4 bytes
// block id uint64 8 bytes
// start offset uint32 4 bytes
// tx length uint32 4 bytes
// -----
// Total: 12 bytes
// Total: 16 bytes
//
// The accepting blocks index contains a sub bucket for each transaction hash (32 byte each), that its serialized format is:
//
// <accepting block id> = <including block id>
//
// Field Type Size
// accepting block id uint32 4 bytes
// including block id uint32 4 bytes
// accepting block id uint64 8 bytes
// including block id uint64 8 bytes
// -----
// Total: 8 bytes
//
// The serialized format for keys and values in the block hash to ID bucket is:
// <hash> = <ID>
//
// Field Type Size
// hash daghash.Hash 32 bytes
// ID uint32 4 bytes
// -----
// Total: 36 bytes
//
// The serialized format for keys and values in the ID to block hash bucket is:
// <ID> = <hash>
//
// Field Type Size
// ID uint32 4 bytes
// hash daghash.Hash 32 bytes
// -----
// Total: 36 bytes
// Total: 16 bytes
//
// -----------------------------------------------------------------------------
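As a worked example of the 16-byte including-blocks entry described above, the 8-byte value stored under the 8-byte block-ID key packs the start offset and transaction length back to back (the byte order here is hedged; the real code uses the package's byteOrder, as in putIncludingBlocksEntry below):

import "encoding/binary"

// packIncludingBlocksValue builds the <start offset><tx length> value that
// sits under an 8-byte block-ID key, so each entry totals 16 bytes.
func packIncludingBlocksValue(txStart, txLen uint32) []byte {
	buf := make([]byte, 8) // includingBlocksIndexKeyEntrySize
	binary.LittleEndian.PutUint32(buf[0:], txStart)
	binary.LittleEndian.PutUint32(buf[4:], txLen)
	return buf
}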
// dbPutBlockIDIndexEntry uses an existing database transaction to update or add
// the index entries for the hash to id and id to hash mappings for the provided
// values.
func dbPutBlockIDIndexEntry(dbTx database.Tx, hash *daghash.Hash, id uint32) error {
// Serialize the height for use in the index entries.
var serializedID [4]byte
byteOrder.PutUint32(serializedID[:], id)
// Add the block hash to ID mapping to the index.
meta := dbTx.Metadata()
hashIndex := meta.Bucket(idByHashIndexBucketName)
if err := hashIndex.Put(hash[:], serializedID[:]); err != nil {
return err
}
// Add the block ID to hash mapping to the index.
idIndex := meta.Bucket(hashByIDIndexBucketName)
return idIndex.Put(serializedID[:], hash[:])
}
// dbFetchBlockIDByHash uses an existing database transaction to retrieve the
// block id for the provided hash from the index.
func dbFetchBlockIDByHash(dbTx database.Tx, hash *daghash.Hash) (uint32, error) {
hashIndex := dbTx.Metadata().Bucket(idByHashIndexBucketName)
serializedID := hashIndex.Get(hash[:])
if serializedID == nil {
return 0, fmt.Errorf("No entry in the block ID index for block with hash %s", hash)
}
return byteOrder.Uint32(serializedID), nil
}
// dbFetchBlockHashBySerializedID uses an existing database transaction to
// retrieve the hash for the provided serialized block id from the index.
func dbFetchBlockHashBySerializedID(dbTx database.Tx, serializedID []byte) (*daghash.Hash, error) {
idIndex := dbTx.Metadata().Bucket(hashByIDIndexBucketName)
hashBytes := idIndex.Get(serializedID)
if hashBytes == nil {
return nil, fmt.Errorf("No entry in the block ID index for block with id %d", byteOrder.Uint32(serializedID))
}
var hash daghash.Hash
copy(hash[:], hashBytes)
return &hash, nil
}
// dbFetchBlockHashByID uses an existing database transaction to retrieve the
// hash for the provided block id from the index.
func dbFetchBlockHashByID(dbTx database.Tx, id uint32) (*daghash.Hash, error) {
var serializedID [4]byte
byteOrder.PutUint32(serializedID[:], id)
return dbFetchBlockHashBySerializedID(dbTx, serializedID[:])
}
func putIncludingBlocksEntry(target []byte, txLoc wire.TxLoc) {
byteOrder.PutUint32(target, uint32(txLoc.TxStart))
byteOrder.PutUint32(target[4:], uint32(txLoc.TxLen))
}
func putAcceptingBlocksEntry(target []byte, includingBlockID uint32) {
byteOrder.PutUint32(target, includingBlockID)
}
func dbPutIncludingBlocksEntry(dbTx database.Tx, txID *daghash.TxID, blockID uint32, serializedData []byte) error {
func dbPutIncludingBlocksEntry(dbTx database.Tx, txID *daghash.TxID, blockID uint64, serializedData []byte) error {
bucket, err := dbTx.Metadata().Bucket(includingBlocksIndexKey).CreateBucketIfNotExists(txID[:])
if err != nil {
return err
}
blockIDBytes := make([]byte, 4)
byteOrder.PutUint32(blockIDBytes, uint32(blockID))
return bucket.Put(blockIDBytes, serializedData)
return bucket.Put(blockdag.SerializeBlockID(blockID), serializedData)
}
func dbPutAcceptingBlocksEntry(dbTx database.Tx, txID *daghash.TxID, blockID uint32, serializedData []byte) error {
func dbPutAcceptingBlocksEntry(dbTx database.Tx, txID *daghash.TxID, blockID uint64, serializedData []byte) error {
bucket, err := dbTx.Metadata().Bucket(acceptingBlocksIndexKey).CreateBucketIfNotExists(txID[:])
if err != nil {
return err
}
blockIDBytes := make([]byte, 4)
byteOrder.PutUint32(blockIDBytes, uint32(blockID))
return bucket.Put(blockIDBytes, serializedData)
return bucket.Put(blockdag.SerializeBlockID(blockID), serializedData)
}
// dbFetchFirstTxRegion uses an existing database transaction to fetch the block
@@ -199,7 +105,7 @@ func dbFetchFirstTxRegion(dbTx database.Tx, txID *daghash.TxID) (*database.Block
if txBucket == nil {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("No block region"+
Description: fmt.Sprintf("No block region "+
"was found for %s", txID),
}
}
@@ -207,11 +113,11 @@ func dbFetchFirstTxRegion(dbTx database.Tx, txID *daghash.TxID) (*database.Block
if ok := cursor.First(); !ok {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("No block region"+
Description: fmt.Sprintf("No block region "+
"was found for %s", txID),
}
}
blockIDBytes := cursor.Key()
serializedBlockID := cursor.Key()
serializedData := cursor.Value()
if len(serializedData) == 0 {
return nil, nil
@@ -227,7 +133,7 @@ func dbFetchFirstTxRegion(dbTx database.Tx, txID *daghash.TxID) (*database.Block
}
// Load the block hash associated with the block ID.
hash, err := dbFetchBlockHashBySerializedID(dbTx, blockIDBytes)
hash, err := blockdag.DBFetchBlockHashBySerializedID(dbTx, serializedBlockID)
if err != nil {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
@@ -247,7 +153,7 @@ func dbFetchFirstTxRegion(dbTx database.Tx, txID *daghash.TxID) (*database.Block
// dbAddTxIndexEntries uses an existing database transaction to add a
// transaction index entry for every transaction in the passed block.
func dbAddTxIndexEntries(dbTx database.Tx, block *util.Block, blockID uint32, txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
func dbAddTxIndexEntries(dbTx database.Tx, block *util.Block, blockID uint64, multiBlockTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
// The offset and length of the transactions within the serialized
// block.
txLocs, err := block.TxLoc()
@@ -273,22 +179,21 @@ func dbAddTxIndexEntries(dbTx database.Tx, block *util.Block, blockID uint32, tx
includingBlocksOffset += includingBlocksIndexKeyEntrySize
}
for includingBlockHash, blockTxsAcceptanceData := range txsAcceptanceData {
var includingBlockID uint32
if includingBlockHash.IsEqual(block.Hash()) {
for _, blockTxsAcceptanceData := range multiBlockTxsAcceptanceData {
var includingBlockID uint64
if blockTxsAcceptanceData.BlockHash.IsEqual(block.Hash()) {
includingBlockID = blockID
} else {
includingBlockID, err = dbFetchBlockIDByHash(dbTx, &includingBlockHash)
includingBlockID, err = blockdag.DBFetchBlockIDByHash(dbTx, &blockTxsAcceptanceData.BlockHash)
if err != nil {
return err
}
}
includingBlockIDBytes := make([]byte, 4)
byteOrder.PutUint32(includingBlockIDBytes, uint32(includingBlockID))
serializedIncludingBlockID := blockdag.SerializeBlockID(includingBlockID)
for _, txAcceptanceData := range blockTxsAcceptanceData {
err = dbPutAcceptingBlocksEntry(dbTx, txAcceptanceData.Tx.ID(), blockID, includingBlockIDBytes)
for _, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
err = dbPutAcceptingBlocksEntry(dbTx, txAcceptanceData.Tx.ID(), blockID, serializedIncludingBlockID)
if err != nil {
return err
}
@@ -298,11 +203,28 @@ func dbAddTxIndexEntries(dbTx database.Tx, block *util.Block, blockID uint32, tx
return nil
}
func updateTxsAcceptedByVirtual(virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
// Initialize a new txsAcceptedByVirtual
entries := 0
for _, blockTxsAcceptanceData := range virtualTxsAcceptanceData {
entries += len(blockTxsAcceptanceData.TxAcceptanceData)
}
txsAcceptedByVirtual = make(map[daghash.TxID]bool, entries)
// Copy virtualTxsAcceptanceData to txsAcceptedByVirtual
for _, blockTxsAcceptanceData := range virtualTxsAcceptanceData {
for _, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
txsAcceptedByVirtual[*txAcceptanceData.Tx.ID()] = true
}
}
return nil
}
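A hypothetical helper showing how this in-memory set is meant to be consulted before hitting the database (it mirrors the early return added to dbFetchTxAcceptingBlock further down):

// acceptingBlockOrVirtual short-circuits the lookup for transactions the
// current virtual has accepted: they have no accepting block on disk yet,
// so the zero hash serves as a sentinel.
func acceptingBlockOrVirtual(txID daghash.TxID) (*daghash.Hash, bool) {
	if txsAcceptedByVirtual[txID] {
		return &daghash.ZeroHash, true
	}
	return nil, false // fall through to the database lookup
}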
// TxIndex implements a transaction by hash index. That is to say, it supports
// querying all transactions by their hash.
type TxIndex struct {
db database.DB
curBlockID uint32
db database.DB
}
// Ensure the TxIndex type implements the Indexer interface.
@@ -313,63 +235,18 @@ var _ Indexer = (*TxIndex)(nil)
// disconnecting blocks.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Init(db database.DB) error {
func (idx *TxIndex) Init(db database.DB, dag *blockdag.BlockDAG) error {
idx.db = db
// Find the latest known block id field for the internal block id
// index and initialize it. This is done because it's a lot more
// efficient to do a single search at initialize time than it is to
// write another value to the database on every update.
err := idx.db.View(func(dbTx database.Tx) error {
// Scan forward in large gaps to find a block id that doesn't
// exist yet to serve as an upper bound for the binary search
// below.
var highestKnown, nextUnknown uint32
testBlockID := uint32(1)
increment := uint32(100000)
for {
_, err := dbFetchBlockHashByID(dbTx, testBlockID)
if err != nil {
nextUnknown = testBlockID
break
}
highestKnown = testBlockID
testBlockID += increment
}
log.Tracef("Forward scan (highest known %d, next unknown %d)",
highestKnown, nextUnknown)
// No used block IDs due to new database.
if nextUnknown == 1 {
return nil
}
// Use a binary search to find the final highest used block id.
// This will take at most ceil(log_2(increment)) attempts.
for {
testBlockID = (highestKnown + nextUnknown) / 2
_, err := dbFetchBlockHashByID(dbTx, testBlockID)
if err != nil {
nextUnknown = testBlockID
} else {
highestKnown = testBlockID
}
log.Tracef("Binary scan (highest known %d, next "+
"unknown %d)", highestKnown, nextUnknown)
if highestKnown+1 == nextUnknown {
break
}
}
idx.curBlockID = highestKnown
return nil
})
// Initialize the txsAcceptedByVirtual index
virtualTxsAcceptanceData, err := dag.TxsAcceptedByVirtual()
if err != nil {
return err
}
err = updateTxsAcceptedByVirtual(virtualTxsAcceptanceData)
if err != nil {
return err
}
log.Debugf("Current internal block ID: %d", idx.curBlockID)
return nil
}
@@ -394,12 +271,6 @@ func (idx *TxIndex) Name() string {
// This is part of the Indexer interface.
func (idx *TxIndex) Create(dbTx database.Tx) error {
meta := dbTx.Metadata()
if _, err := meta.CreateBucket(idByHashIndexBucketName); err != nil {
return err
}
if _, err := meta.CreateBucket(hashByIDIndexBucketName); err != nil {
return err
}
if _, err := meta.CreateBucket(includingBlocksIndexKey); err != nil {
return err
}
@@ -413,24 +284,16 @@ func (idx *TxIndex) Create(dbTx database.Tx) error {
// for every transaction in the passed block.
//
// This is part of the Indexer interface.
func (idx *TxIndex) ConnectBlock(dbTx database.Tx, block *util.Block, _ *blockdag.BlockDAG, acceptedTxsData blockdag.MultiBlockTxsAcceptanceData) error {
// Increment the internal block ID to use for the block being connected
// and add all of the transactions in the block to the index.
newBlockID := idx.curBlockID + 1
if block.MsgBlock().Header.IsGenesis() {
newBlockID = 0
}
if err := dbAddTxIndexEntries(dbTx, block, newBlockID, acceptedTxsData); err != nil {
func (idx *TxIndex) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
acceptedTxsData blockdag.MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
if err := dbAddTxIndexEntries(dbTx, block, blockID, acceptedTxsData); err != nil {
return err
}
// Add the new block ID index entry for the block being connected and
// update the current internal block ID accordingly.
err := dbPutBlockIDIndexEntry(dbTx, block.Hash(), newBlockID)
err := updateTxsAcceptedByVirtual(virtualTxsAcceptanceData)
if err != nil {
return err
}
idx.curBlockID = newBlockID
return nil
}
@@ -474,9 +337,8 @@ func dbFetchTxBlocks(dbTx database.Tx, txHash *daghash.Hash) ([]*daghash.Hash, e
"were found for %s", txHash),
}
}
err := bucket.ForEach(func(blockIDBytes, _ []byte) error {
blockID := byteOrder.Uint32(blockIDBytes)
blockHash, err := dbFetchBlockHashByID(dbTx, blockID)
err := bucket.ForEach(func(serializedBlockID, _ []byte) error {
blockHash, err := blockdag.DBFetchBlockHashBySerializedID(dbTx, serializedBlockID)
if err != nil {
return err
}
@@ -501,29 +363,34 @@ func (idx *TxIndex) BlockThatAcceptedTx(dag *blockdag.BlockDAG, txID *daghash.Tx
}
func dbFetchTxAcceptingBlock(dbTx database.Tx, txID *daghash.TxID, dag *blockdag.BlockDAG) (*daghash.Hash, error) {
// If the transaction was accepted by the current virtual,
// return the zeroHash immediately
if _, ok := txsAcceptedByVirtual[*txID]; ok {
return &daghash.ZeroHash, nil
}
bucket := dbTx.Metadata().Bucket(acceptingBlocksIndexKey).Bucket(txID[:])
if bucket == nil {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("No accepting blocks "+
"were found for %s", txID),
Description: fmt.Sprintf("No accepting blocks bucket "+
"exists for %s", txID),
}
}
cursor := bucket.Cursor()
if !cursor.First() {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("No accepting blocks "+
"were found for %s", txID),
Description: fmt.Sprintf("Accepting blocks bucket is "+
"empty for %s", txID),
}
}
for ; cursor.Key() != nil; cursor.Next() {
blockID := byteOrder.Uint32(cursor.Key())
blockHash, err := dbFetchBlockHashByID(dbTx, blockID)
blockHash, err := blockdag.DBFetchBlockHashBySerializedID(dbTx, cursor.Key())
if err != nil {
return nil, err
}
if dag.IsInSelectedPathChain(blockHash) {
if dag.IsInSelectedParentChain(blockHash) {
return blockHash, nil
}
}
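The loop above encodes the selection rule: among all blocks that accepted the transaction, only the one lying on the selected parent chain counts. As a free-standing sketch over a precomputed candidate list (hypothetical helper):

// pickAcceptingBlock returns the candidate accepting block that lies on the
// selected parent chain, or nil if none does.
func pickAcceptingBlock(dag *blockdag.BlockDAG, candidates []*daghash.Hash) *daghash.Hash {
	for _, blockHash := range candidates {
		if dag.IsInSelectedParentChain(blockHash) {
			return blockHash
		}
	}
	return nil
}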
@@ -541,19 +408,6 @@ func NewTxIndex() *TxIndex {
return &TxIndex{}
}
// dropBlockIDIndex drops the internal block id index.
func dropBlockIDIndex(db database.DB) error {
return db.Update(func(dbTx database.Tx) error {
meta := dbTx.Metadata()
err := meta.DeleteBucket(idByHashIndexBucketName)
if err != nil {
return err
}
return meta.DeleteBucket(hashByIDIndexBucketName)
})
}
// DropTxIndex drops the transaction index from the provided database if it
// exists. Since the address index relies on it, the address index will also be
// dropped when it exists.
@@ -570,3 +424,12 @@ func DropTxIndex(db database.DB, interrupt <-chan struct{}) error {
return dropIndex(db, acceptingBlocksIndexKey, txIndexName, interrupt)
}
// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
return errors.Errorf("txindex was turned off for %d blocks and can't be recovered."+
" To resume working drop the txindex with --droptxindex", lastKnownBlockID-currentBlockID)
}

View File

@@ -7,19 +7,25 @@ import (
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/mining"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
)
func createTransaction(value uint64, originTx *wire.MsgTx, outputIndex uint32) *wire.MsgTx {
func createTransaction(t *testing.T, value uint64, originTx *wire.MsgTx, outputIndex uint32) *wire.MsgTx {
signatureScript, err := txscript.PayToScriptHashSignatureScript(blockdag.OpTrueScript, nil)
if err != nil {
t.Fatalf("Error creating signature script: %s", err)
}
txIn := &wire.TxIn{
PreviousOutPoint: wire.OutPoint{
TxID: originTx.TxID(),
PreviousOutpoint: wire.Outpoint{
TxID: *originTx.TxID(),
Index: outputIndex,
},
Sequence: wire.MaxTxInSequenceNum,
Sequence: wire.MaxTxInSequenceNum,
SignatureScript: signatureScript,
}
txOut := wire.NewTxOut(value, blockdag.OpTrueScript)
tx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut})
@@ -34,7 +40,7 @@ func TestTxIndexConnectBlock(t *testing.T) {
indexManager := NewManager([]Indexer{txIndex})
params := dagconfig.SimNetParams
params.BlockRewardMaturity = 1
params.BlockCoinbaseMaturity = 0
params.K = 1
config := blockdag.Config{
@@ -51,16 +57,20 @@ func TestTxIndexConnectBlock(t *testing.T) {
}
prepareAndProcessBlock := func(parentHashes []*daghash.Hash, transactions []*wire.MsgTx, blockName string) *wire.MsgBlock {
block, err := mining.PrepareBlockForTest(dag, &params, parentHashes, transactions, false, 1)
block, err := mining.PrepareBlockForTest(dag, &params, parentHashes, transactions, false)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: block %v got unexpected error from PrepareBlockForTest: %v", blockName, err)
}
utilBlock := util.NewBlock(block)
blocks[*block.BlockHash()] = utilBlock
isOrphan, err := dag.ProcessBlock(utilBlock, blockdag.BFNoPoWCheck)
isOrphan, delay, err := dag.ProcessBlock(utilBlock, blockdag.BFNoPoWCheck)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: dag.ProcessBlock got unexpected error for block %v: %v", blockName, err)
}
if delay != 0 {
t.Fatalf("TestTxIndexConnectBlock: block %s "+
"is too far in the future", blockName)
}
if isOrphan {
t.Fatalf("TestTxIndexConnectBlock: block %v was unexpectedly orphan", blockName)
}
@@ -68,37 +78,47 @@ func TestTxIndexConnectBlock(t *testing.T) {
}
block1 := prepareAndProcessBlock([]*daghash.Hash{params.GenesisHash}, nil, "1")
block2Tx := createTransaction(block1.Transactions[0].TxOut[0].Value, block1.Transactions[0], 0)
block2Tx := createTransaction(t, block1.Transactions[0].TxOut[0].Value, block1.Transactions[0], 0)
block2 := prepareAndProcessBlock([]*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{block2Tx}, "2")
block3Tx := createTransaction(block2.Transactions[0].TxOut[0].Value, block2.Transactions[0], 0)
block3Tx := createTransaction(t, block2.Transactions[0].TxOut[0].Value, block2.Transactions[0], 0)
block3 := prepareAndProcessBlock([]*daghash.Hash{block2.BlockHash()}, []*wire.MsgTx{block3Tx}, "3")
block3TxID := block3Tx.TxID()
block3TxNewAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, &block3TxID)
block2TxID := block2Tx.TxID()
block2TxNewAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, block2TxID)
if err != nil {
t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
}
block3Hash := block3.BlockHash()
if !block3TxNewAcceptedBlock.IsEqual(block3Hash) {
if !block2TxNewAcceptedBlock.IsEqual(block3Hash) {
t.Errorf("TestTxIndexConnectBlock: block2Tx should've "+
"been accepted in block %v but instead got accepted in block %v", block3Hash, block2TxNewAcceptedBlock)
}
block3TxID := block3Tx.TxID()
block3TxNewAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, block3TxID)
if err != nil {
t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
}
if !block3TxNewAcceptedBlock.IsEqual(&daghash.ZeroHash) {
t.Errorf("TestTxIndexConnectBlock: block3Tx should've "+
"been accepted in block %v but instead got accepted in block %v", block3Hash, block3TxNewAcceptedBlock)
"been accepted by the virtual block but instead got accepted in block %v", block3TxNewAcceptedBlock)
}
block3A := prepareAndProcessBlock([]*daghash.Hash{block2.BlockHash()}, []*wire.MsgTx{block3Tx}, "3A")
block4 := prepareAndProcessBlock([]*daghash.Hash{block3.BlockHash()}, nil, "4")
prepareAndProcessBlock([]*daghash.Hash{block3A.BlockHash(), block4.BlockHash()}, nil, "5")
block3TxAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, &block3TxID)
block2TxAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, block2TxID)
if err != nil {
t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
}
block3AHash := block3A.BlockHash()
if !block3TxAcceptedBlock.IsEqual(block3AHash) {
t.Errorf("TestTxIndexConnectBlock: block3Tx should've "+
"been accepted in block %v but instead got accepted in block %v", block3AHash, block3TxAcceptedBlock)
if !block2TxAcceptedBlock.IsEqual(block3AHash) {
t.Errorf("TestTxIndexConnectBlock: block2Tx should've "+
"been accepted in block %v but instead got accepted in block %v", block3AHash, block2TxAcceptedBlock)
}
region, err := txIndex.TxFirstBlockRegion(&block3TxID)
region, err := txIndex.TxFirstBlockRegion(block3TxID)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: no block region was found for block3Tx")
}

View File

@@ -5,16 +5,9 @@
package blockdag
import (
"github.com/btcsuite/btclog"
"github.com/daglabs/btcd/logger"
"github.com/daglabs/btcd/util/panics"
)
// log is a logger that is initialized with no output filters. This
// means the package will not perform any logging by default until the caller
// requests it.
var log btclog.Logger
// The default amount of logging is none.
func init() {
log, _ = logger.Get(logger.SubsystemTags.CHAN)
}
var log, _ = logger.Get(logger.SubsystemTags.BDAG)
var spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)

View File

@@ -7,8 +7,8 @@ package blockdag
import (
"math"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
)
// MerkleTree holds the hashes of a merkle tree

View File

@@ -5,6 +5,7 @@
package blockdag
import (
"github.com/daglabs/btcd/util/daghash"
"testing"
"github.com/daglabs/btcd/util"
@@ -24,8 +25,11 @@ func TestMerkle(t *testing.T) {
idMerkleTree := BuildIDMerkleTreeStore(block.Transactions())
calculatedIDMerkleRoot := idMerkleTree.Root()
wantIDMerkleRoot := Block100000.Header.IDMerkleRoot
if !wantIDMerkleRoot.IsEqual(calculatedIDMerkleRoot) {
wantIDMerkleRoot, err := daghash.NewHashFromStr("3f69feb7edf5d0d67930afc990c8ec931e3428d7c7a65d7af6b81079319eb110")
if err != nil {
t.Errorf("BuildIDMerkleTreeStore: unexpected error: %s", err)
}
if !calculatedIDMerkleRoot.IsEqual(wantIDMerkleRoot) {
t.Errorf("BuildIDMerkleTreeStore: ID merkle root mismatch - "+
"got %v, want %v", calculatedIDMerkleRoot, wantIDMerkleRoot)
}

View File

@@ -6,6 +6,8 @@ package blockdag
import (
"fmt"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
)
// NotificationType represents the type of a notification message.
@@ -20,12 +22,17 @@ const (
// NTBlockAdded indicates the associated block was added into
// the blockDAG.
NTBlockAdded NotificationType = iota
// NTChainChanged indicates that selected parent
// chain had changed.
NTChainChanged
)
// notificationTypeStrings is a map of notification types back to their constant
// names for pretty printing.
var notificationTypeStrings = map[NotificationType]string{
NTBlockAdded: "NTBlockAdded",
NTBlockAdded: "NTBlockAdded",
NTChainChanged: "NTChainChanged",
}
// String returns the NotificationType in human-readable form.
@@ -66,3 +73,17 @@ func (dag *BlockDAG) sendNotification(typ NotificationType, data interface{}) {
}
dag.notificationsLock.RUnlock()
}
// BlockAddedNotificationData defines data to be sent along with a BlockAdded
// notification
type BlockAddedNotificationData struct {
Block *util.Block
WasUnorphaned bool
}
// ChainChangedNotificationData defines data to be sent along with a ChainChanged
// notification
type ChainChangedNotificationData struct {
RemovedChainBlockHashes []*daghash.Hash
AddedChainBlockHashes []*daghash.Hash
}
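A hedged usage sketch for the new payload, assuming the btcd-style subscription callback that receives a *Notification carrying Type and Data fields (as exercised in the notifications test below):

dag.Subscribe(func(notification *blockdag.Notification) {
	if notification.Type != blockdag.NTChainChanged {
		return
	}
	data, ok := notification.Data.(*blockdag.ChainChangedNotificationData)
	if !ok {
		return
	}
	log.Infof("selected parent chain changed: %d hashes removed, %d added",
		len(data.RemovedChainBlockHashes), len(data.AddedChainBlockHashes))
})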

View File

@@ -5,6 +5,7 @@
package blockdag
import (
"path/filepath"
"testing"
"github.com/daglabs/btcd/dagconfig"
@@ -12,7 +13,7 @@ import (
// TestNotifications ensures that notification callbacks are fired on events.
func TestNotifications(t *testing.T) {
blocks, err := loadBlocks("blk_0_to_4.dat")
blocks, err := LoadBlocks(filepath.Join("testdata/blk_0_to_4.dat"))
if err != nil {
t.Fatalf("Error loading file: %v\n", err)
}
@@ -40,14 +41,18 @@ func TestNotifications(t *testing.T) {
dag.Subscribe(callback)
}
isOrphan, err := dag.ProcessBlock(blocks[1], BFNone)
isOrphan, delay, err := dag.ProcessBlock(blocks[1], BFNone)
if err != nil {
t.Fatalf("ProcessBlock fail on block 1: %v\n", err)
}
if delay != 0 {
t.Fatalf("ProcessBlock: block 1 " +
"is too far in the future")
}
if isOrphan {
t.Fatalf("ProcessBlock incorrectly returned block " +
"is an orphan\n")
}
if notificationCount != numSubscribers {
t.Fatalf("Expected notification callback to be executed %d "+

View File

@@ -1,7 +1,7 @@
package blockdag
import (
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/util/daghash"
)
// phantom calculates and returns the block's blue set, selected parent and blue score.
@@ -77,7 +77,7 @@ func blueCandidates(chainStart *blockNode) blockSet {
func traverseCandidates(newBlock *blockNode, candidates blockSet, selectedParent *blockNode) []*blockNode {
blues := []*blockNode{}
selectedParentPast := newSet()
queue := NewDownHeap()
queue := newDownHeap()
visited := newSet()
for _, parent := range newBlock.parents {

View File

@@ -7,7 +7,7 @@ import (
"testing"
"time"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/dagconfig"
)
@@ -20,11 +20,6 @@ type testBlockData struct {
expectedBlues []string
}
type hashIDPair struct {
hash *daghash.Hash
id string
}
// TestPhantom iterates over several DAG simulations and checks
// that the blue score, blue set and selected parent of each
// block are calculated as expected
@@ -113,7 +108,7 @@ func TestPhantom(t *testing.T) {
id: "K",
expectedScore: 9,
expectedSelectedParent: "H",
expectedBlues: []string{"I", "J", "G", "F", "H"},
expectedBlues: []string{"I", "G", "J", "F", "H"},
},
},
},

Some files were not shown because too many files have changed in this diff.