Compare commits


24 Commits

Author SHA1 Message Date
Svarog
686c25c72d [NOD-1064] Don't send GetBlockInvsMsg with lowHash = nil (#769) 2020-06-21 08:58:29 +03:00
stasatdaglabs
956b6f7d95 [NOD-900] Fix bad key in Seek (#687)
* [NOD-900] Fix Seek not working as expected.

* [NOD-900] Wrap error messages.

* [NOD-900] Use ldbIterator.Key instead of LevelDBCursor.Key.

* [NOD-900] Add a comment.
2020-04-02 17:47:51 +03:00
stasatdaglabs
c1a039de3f [NOD-900] Fix Seek not working as expected (#686)
* [NOD-900] Fix Seek not working as expected.

* [NOD-900] Wrap error messages.
2020-04-02 17:05:58 +03:00
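The two NOD-900 commits above revolve around how bucket-scoped keys interact with Seek: keys are stored with the bucket prefix internally, while the cursor exposes trimmed keys to callers. Below is a minimal, illustrative sketch of that discipline; the type, prefix layout, and method names are assumptions for illustration, not the actual kaspad LevelDBCursor.

package main

import (
	"bytes"
	"fmt"
)

// bucketCursor is a toy stand-in for a bucket-scoped cursor over a sorted
// key/value store. It only models the key handling, not real LevelDB iteration.
type bucketCursor struct {
	prefix  []byte   // e.g. []byte("block-index/")
	keys    [][]byte // full key paths, sorted
	current int
}

// Seek positions the cursor at the first key >= the given key. The important
// detail (and the shape of the NOD-900 fix) is that the caller passes a key
// relative to the bucket, so the full path must be built before comparing.
func (c *bucketCursor) Seek(key []byte) bool {
	fullKey := append(append([]byte{}, c.prefix...), key...)
	for i, candidate := range c.keys {
		if bytes.Compare(candidate, fullKey) >= 0 {
			c.current = i
			return true
		}
	}
	return false
}

// Key returns the current key with the bucket prefix trimmed off, mirroring
// "Make Cursor.Key not return the entire key path".
func (c *bucketCursor) Key() []byte {
	return bytes.TrimPrefix(c.keys[c.current], c.prefix)
}

func main() {
	c := &bucketCursor{
		prefix: []byte("block-index/"),
		keys:   [][]byte{[]byte("block-index/0001"), []byte("block-index/0002")},
	}
	if c.Seek([]byte("0002")) {
		fmt.Printf("%s\n", c.Key()) // prints 0002
	}
}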
stasatdaglabs
f8b18e09d6 [NOD-805] Redesign the database (#685)
* [NOD-828] Reimplement FFLDB (#663)

* [NOD-828] Create the database2 package that will some day replace the database package.

* [NOD-828] Implement a "bucket" key mechanism.

* [NOD-828] Move bucket.go into the ffldb2 package.

* [NOD-828] Delete the un-interfaced ffldb package from database2, since we aren't going to be using it anyway.

* [NOD-828] Copy over + fixup flat file structs from the old ffldb.

* [NOD-828] Implement flatFilePath.

* [NOD-828] Implement flatFileStore.write().

* [NOD-828] Implement flatFileStore.read().

* [NOD-828] Implement flatFileStore.rollback().

* [NOD-828] Sync the file to disk at the end of write().

* [NOD-828] Extract crc32ByteOrder to a separate variable.

* [NOD-828] Add a sanity test.

* [NOD-828] Remove context-unrelated methods from the Database interface.

* [NOD-828] Create an ffldb object. Simply work against a context.

* [NOD-828] Open the new database on start.

* [NOD-828] Create the leveldb package.

* [NOD-828] Implement opening/closing leveldb.

* [NOD-828] Implement get/put out of/into leveldb.

* [NOD-828] Implement transactions and make them implement a generic database interface.

* [NOD-828] Write sanity tests for leveldb with and without transactions.

* [NOD-828] Add another case to the transaction sanity test.

* [NOD-828] Implement AppendBlock/RetrieveBlock.

* [NOD-828] Refactor so that concepts such as "block" and "metadata" don't leak into the database package.

* [NOD-828] Add RollbackFlatData to the database interface.

* [NOD-828] Remove anything from dbaccess that I'm not planning to implement as part of this ticket.

* [NOD-828] Implement StoreBlock.

* [NOD-828] Implement FetchBlock.

* [NOD-828] Implement HasBlock.

* [NOD-828] Write a sanity test for block insertion.

* [NOD-828] Implement CurrentFlatDataLocation.

* [NOD-828] Implement storing the current block location.

* [NOD-828] Implement initializing/syncing the flat file block store and the "metadata".

* [NOD-828] Add InitBlockStore to TestBlockStoreSanity.

* [NOD-828] Fix rename errors.

* [NOD-828] Fix lint errors in the root database package.

* [NOD-828] Fix lint errors in the ffldb.go.

* [NOD-828] Fix lint errors in the flatfile/db.go.

* [NOD-828] Rename packages in such a way to make the linter happy.

* [NOD-828] Finish satisfying the linter.

* [NOD-828] Fix doc.go.

* [NOD-828] Fix comments in block.go.

* [NOD-828] Move dbaccess out of the database package.

* [NOD-828] Move opening/closing the database to dbaccess.

* [NOD-828] Move the Database interface to the root database package, since it's meant to be generic.

* [NOD-828] Make ffldb generic to later support additional databases.

* [NOD-828] Make ffldb.Open return DatabaseHandle, since ffldb is no longer exported.

* [NOD-828] Fix comments.

* [NOD-828] Rename AppendFlatData to AppendToStore and RetrieveFlatData to RetrieveFromStore.

* [NOD-828] Make buckets nicer to use.

* [NOD-828] Implement cursors that iterate over some bucket.

* [NOD-828] Generalize flat-file repairing and move block database repairing into ffldb.

* [NOD-828] Write a test making sure that flat file repair works.

* [NOD-828] Properly close the database in TestRepairFlatFiles.

* [NOD-828] Add a comment warning against putting and getting the same data within the same transaction.

* [NOD-828] Fix the flatFilesBucket description.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Fix the ffldb description.

* [NOD-828] Rename Database to DataAccessor and rename Handle to Database.

* [NOD-828] Make Get return nil if the value doesn't exist.

* [NOD-828] Attempt to close leveldb even if closing ffdb failed.

* [NOD-828] Fix a bug where the wrong location would be written to the current store location bucket.

* [NOD-828] Fix not updating the store location in ffldb transactions.

* [NOD-828] Make scanFlatFiles return an error if os.Stat fails for any reason other than file-not-found.

* [NOD-828] Update the README and doc.go.

* [NOD-828] Simplify Bucket.Path().

* [NOD-828] Since LevelDBCursor satisfied the database2.Cursor interface, use it directly.

* [NOD-828] Combine two lines into one.

* [NOD-828] Combine another two lines into one.

* [NOD-828] Move a misplaced comment.

* [NOD-828] Use Wrapf instead of Errorf where appropriate.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Fix Wrapf calls.

* [NOD-828] Fix comments.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Add a comment explaining the use of batches and snapshots.

* [NOD-828] Implement RollbackUnlessClosed().

* [NOD-828] Return both errors in StoreBlock rollback.

* [NOD-828] Move rollback-on-error logic into ffldb. Remove CurrentStoreLocation and RollbackStore from DataAccessor.

* [NOD-828] Make bucket a type alias instead of a struct.

* [NOD-828] Fix a typo.

* [NOD-828] Use copy instead of append in Bucket.

* [NOD-828] Extract flatFileLocationSerializedSize to a const.

* [NOD-828] Debugf -> Warnf in rollback.go.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Remove data length from flat file data format.

* [NOD-828] Rearrange TestLevelDBTransactionSanity a bit.

* [NOD-828] Add stack traces to all errors that come out of library functions.

* [NOD-828] Return errors from rollback().

* [NOD-828] Remove an irrelevant comment.

* [NOD-828] Remove redundant whitespace.

* [NOD-828] Handle nil in FetchBlock.

* [NOD-828] Move the explanation about batches and snapshots to the LevelDBTransaction struct.

* Revert "[NOD-828] Make bucket a type alias instead of a struct."

This reverts commit 1fd39652

* [NOD-828] Fix revert errors.

* Revert "[NOD-828] Remove data length from flat file data format."

This reverts commit ef408e32

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-828] Add Delete to DataAccessor.

* [NOD-828] Fix a couple of places that erroneously referenced blocks.

* [NOD-828] Add a comment on top of flatFileLocationSerializedSize.

* [NOD-828] Add Seek to Cursor.

* [NOD-828] Add First to Cursor.

* [NOD-828] Rename db to accessor in Context.

* [NOD-828] Make Get/Fetch calls return a boolean to indicate whether the requested item was found.

* [NOD-828] Name the output parameters of all Get functions.

* [NOD-828] Make RetrieveFromStore return whether the data was found.

* [NOD-887] Add a couple of QoL features to Cursor (#674)

* [NOD-887] Changed First to not return an error.

* [NOD-887] Fix merge error.

* [NOD-887] Make Cursor.Key not return the entire key path.

* [NOD-888] Add RollbackUnlessClosed to Context (#676)

* [NOD-888] Add RollbackUnlessClosed to Context.

* [NOD-888] Fix copy+paste error.

* [NOD-889] Instead of returning a boolean for not-found, return an error (#677)

* [NOD-889] Instead of returning a boolean for not-found, return an error.

* [NOD-889] Wrapped ErrNotFound for Get calls with nicer error messages.

* [NOD-889] Fix format.

* [NOD-889] Fix double space in a comment.

* [NOD-889] Add IsNotFoundError to dbaccess.

* [NOD-862] Replace calls to Tx.StoreBlock, Tx.HasBlock, Tx.FetchBlock with appropriate calls in dbaccess (#672)

* [NOD-828] Create the database2 package that will some day replace the database package.

* [NOD-828] Implement a "bucket" key mechanism.

* [NOD-828] Move bucket.go into the ffldb2 package.

* [NOD-828] Delete the un-interfaced ffldb package from database2, since we aren't going to be using it anyway.

* [NOD-828] Copy over + fixup flat file structs from the old ffldb.

* [NOD-828] Implement flatFilePath.

* [NOD-828] Implement flatFileStore.write().

* [NOD-828] Implement flatFileStore.read().

* [NOD-828] Implement flatFileStore.rollback().

* [NOD-828] Sync the file to disk at the end of write().

* [NOD-828] Extract crc32ByteOrder to a separate variable.

* [NOD-828] Add a sanity test.

* [NOD-828] Remove context-unrelated methods from the Database interface.

* [NOD-828] Create an ffldb object. Simply work against a context.

* [NOD-828] Open the new database on start.

* [NOD-828] Create the leveldb package.

* [NOD-828] Implement opening/closing leveldb.

* [NOD-828] Implement get/put out of/into leveldb.

* [NOD-828] Implement transactions and make them implement a generic database interface.

* [NOD-828] Write sanity tests for leveldb with and without transactions.

* [NOD-828] Add another case to the transaction sanity test.

* [NOD-828] Implement AppendBlock/RetrieveBlock.

* [NOD-828] Refactor so that concepts such as "block" and "metadata" don't leak into the database package.

* [NOD-828] Add RollbackFlatData to the database interface.

* [NOD-828] Remove anything from dbaccess that I'm not planning to implement as part of this ticket.

* [NOD-828] Implement StoreBlock.

* [NOD-828] Implement FetchBlock.

* [NOD-828] Implement HasBlock.

* [NOD-828] Write a sanity test for block insertion.

* [NOD-828] Implement CurrentFlatDataLocation.

* [NOD-828] Implement storing the current block location.

* [NOD-828] Implement initializing/syncing the flat file block store and the "metadata".

* [NOD-828] Add InitBlockStore to TestBlockStoreSanity.

* [NOD-828] Fix rename errors.

* [NOD-828] Fix lint errors in the root database package.

* [NOD-828] Fix lint errors in the ffldb.go.

* [NOD-828] Fix lint errors in the flatfile/db.go.

* [NOD-828] Rename packages in such a way to make the linter happy.

* [NOD-828] Finish satisfying the linter.

* [NOD-828] Fix doc.go.

* [NOD-828] Fix comments in block.go.

* [NOD-828] Move dbaccess out of the database package.

* [NOD-828] Move opening/closing the database to dbaccess.

* [NOD-828] Move the Database interface to the root database package, since it's meant to be generic.

* [NOD-828] Make ffldb generic to later support additional databases.

* [NOD-828] Make ffldb.Open return DatabaseHandle, since ffldb is no longer exported.

* [NOD-828] Fix comments.

* [NOD-828] Rename AppendFlatData to AppendToStore and RetrieveFlatData to RetrieveFromStore.

* [NOD-828] Make buckets nicer to use.

* [NOD-828] Implement cursors that iterate over some bucket.

* [NOD-828] Generalize flat-file repairing and move block database repairing into ffldb.

* [NOD-828] Write a test making sure that flat file repair works.

* [NOD-828] Properly close the database in TestRepairFlatFiles.

* [NOD-828] Add a comment warning against putting and getting the same data within the same transaction.

* [NOD-862] Use dbaccess.HasBlock instead of Tx.HasBlock in initDAGState.

* [NOD-862] Use dbaccess.StoreBlock instead of dbStoreBlock.

* [NOD-862] Use dbaccess.FetchBlock instead of various block fetching mechanisms.

* [NOD-828] Fix the flatFilesBucket description.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Fix the ffldb description.

* [NOD-828] Rename Database to DataAccessor and rename Handle to Database.

* [NOD-828] Make Get return nil if the value doesn't exist.

* [NOD-828] Attempt to close leveldb even if closing ffdb failed.

* [NOD-828] Fix a bug where the wrong location would be written to the current store location bucket.

* [NOD-828] Fix not updating the store location in ffldb transactions.

* [NOD-828] Make scanFlatFiles return an error if os.Stat fails for any reason other than file-not-found.

* [NOD-828] Update the README and doc.go.

* [NOD-828] Simplify Bucket.Path().

* [NOD-828] Since LevelDBCursor satisfied the database2.Cursor interface, use it directly.

* [NOD-828] Combine two lines into one.

* [NOD-828] Combine another two lines into one.

* [NOD-828] Move a misplaced comment.

* [NOD-828] Use Wrapf instead of Errorf where appropriate.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Fix Wrapf calls.

* [NOD-828] Fix comments.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Add a comment explaining the use of batches and snapshots.

* [NOD-828] Implement RollbackUnlessClosed().

* [NOD-828] Return both errors in StoreBlock rollback.

* [NOD-828] Move rollback-on-error logic into ffldb. Remove CurrentStoreLocation and RollbackStore from DataAccessor.

* [NOD-828] Make bucket a type alias instead of a struct.

* [NOD-828] Fix a typo.

* [NOD-828] Use copy instead of append in Bucket.

* [NOD-828] Extract flatFileLocationSerializedSize to a const.

* [NOD-828] Debugf -> Warnf in rollback.go.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Remove data length from flat file data format.

* [NOD-828] Rearrange TestLevelDBTransactionSanity a bit.

* [NOD-828] Add stack traces to all errors that come out of library functions.

* [NOD-828] Return errors from rollback().

* [NOD-828] Remove an irrelevant comment.

* [NOD-828] Remove redundant whitespace.

* [NOD-828] Handle nil in FetchBlock.

* [NOD-828] Implement a dbaccess.BlockNode struct.

* [NOD-828] Move the explanation about batches and snapshots to the LevelDBTransaction struct.

* [NOD-828] Implement toDBBlockNode and fromDBBlockNode.

* Revert "[NOD-828] Make bucket a type alias instead of a struct."

This reverts commit 1fd39652

* [NOD-828] Fix revert errors.

* Revert "[NOD-828] Remove data length from flat file data format."

This reverts commit ef408e32

* [NOD-862] Implement storing index blocks.

* [NOD-862] Use database transactions where appropriate.

* [NOD-862] Fix tests failing on DAGSetup.

* [NOD-862] Fix bad make call.

* [NOD-862] Fix remaining database opening problems in tests.

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-862] Iterate over the new block index in dagio.

* [NOD-862] Fix block index key.

* [NOD-828] Add Delete to DataAccessor.

* [NOD-862] Pass byte slices to dbaccess instead of objects.

* [NOD-862] Fix errors.

* [NOD-862] Fix maybeAcceptBlock not checking block existence.

* [NOD-862] Fix TestAcceptanceIndexRecover.

* [NOD-862] Add comments to StoreBlockIndex and BlockIndexCursor.

* [NOD-828] Fix a couple of places that erroneously referenced blocks.

* [NOD-828] Add a comment on top of flatFileLocationSerializedSize.

* [NOD-828] Add Seek to Cursor.

* [NOD-828] Add First to Cursor.

* [NOD-828] Rename db to accessor in Context.

* [NOD-828] Make Get/Fetch calls return a boolean to indicate whether the requested item was found.

* [NOD-828] Name the output parameters of all Get functions.

* [NOD-828] Make RetrieveFromStore return whether the data was found.

* [NOD-862] Fix merge errors.

* [NOD-862] Fix DAGSetup using bad temp directories.

* [NOD-862] Fix TestProcessDelayedBlocks not closing the database properly.

* [NOD-862] Fix merge errors.

* [NOD-862] Merge flushToDBWithContext and flushToDB.

* [NOD-862] Remove TODO.

* [NOD-862] Add prefix to the temp dir in DAGSetup.

* [NOD-862] Bring back dbFetchBlockByHash.

* [NOD-862] Use BlockDAG.BlockByHash in p2p and rpc.

* [NOD-862] Use daghash.Hash in dbaccess.

* [NOD-862] Add defer to RollbackUnlessClosed after NewTx().

* [NOD-862] Extract dbStoreBlock to a separate function.

* [NOD-862] Fix grammar in comment.

* [NOD-862] Fix merge errors.

* [NOD-867] Migrate database logic in blockdag/dagio.go to dbaccess (#675)

* [NOD-828] Create the database2 package that will some day replace the database package.

* [NOD-828] Implement a "bucket" key mechanism.

* [NOD-828] Move bucket.go into the ffldb2 package.

* [NOD-828] Delete the un-interfaced ffldb package from database2, since we aren't going to be using it anyway.

* [NOD-828] Copy over + fixup flat file structs from the old ffldb.

* [NOD-828] Implement flatFilePath.

* [NOD-828] Implement flatFileStore.write().

* [NOD-828] Implement flatFileStore.read().

* [NOD-828] Implement flatFileStore.rollback().

* [NOD-828] Sync the file to disk at the end of write().

* [NOD-828] Extract crc32ByteOrder to a separate variable.

* [NOD-828] Add a sanity test.

* [NOD-828] Remove context-unrelated methods from the Database interface.

* [NOD-828] Create an ffldb object. Simply work against a context.

* [NOD-828] Open the new database on start.

* [NOD-828] Create the leveldb package.

* [NOD-828] Implement opening/closing leveldb.

* [NOD-828] Implement get/put out of/into leveldb.

* [NOD-828] Implement transactions and make them implement a generic database interface.

* [NOD-828] Write sanity tests for leveldb with and without transactions.

* [NOD-828] Add another case to the transaction sanity test.

* [NOD-828] Implement AppendBlock/RetrieveBlock.

* [NOD-828] Refactor so that concepts such as "block" and "metadata" don't leak into the database package.

* [NOD-828] Add RollbackFlatData to the database interface.

* [NOD-828] Remove anything from dbaccess that I'm not planning to implement as part of this ticket.

* [NOD-828] Implement StoreBlock.

* [NOD-828] Implement FetchBlock.

* [NOD-828] Implement HasBlock.

* [NOD-828] Write a sanity test for block insertion.

* [NOD-828] Implement CurrentFlatDataLocation.

* [NOD-828] Implement storing the current block location.

* [NOD-828] Implement initializing/syncing the flat file block store and the "metadata".

* [NOD-828] Add InitBlockStore to TestBlockStoreSanity.

* [NOD-828] Fix rename errors.

* [NOD-828] Fix lint errors in the root database package.

* [NOD-828] Fix lint errors in the ffldb.go.

* [NOD-828] Fix lint errors in the flatfile/db.go.

* [NOD-828] Rename packages in such a way to make the linter happy.

* [NOD-828] Finish satisfying the linter.

* [NOD-828] Fix doc.go.

* [NOD-828] Fix comments in block.go.

* [NOD-828] Move dbaccess out of the database package.

* [NOD-828] Move opening/closing the database to dbaccess.

* [NOD-828] Move the Database interface to the root database package, since it's meant to be generic.

* [NOD-828] Make ffldb generic to later support additional databases.

* [NOD-828] Make ffldb.Open return DatabaseHandle, since ffldb is no longer exported.

* [NOD-828] Fix comments.

* [NOD-828] Rename AppendFlatData to AppendToStore and RetrieveFlatData to RetrieveFromStore.

* [NOD-828] Make buckets nicer to use.

* [NOD-828] Implement cursors that iterate over some bucket.

* [NOD-828] Generalize flat-file repairing and move block database repairing into ffldb.

* [NOD-828] Write a test making sure that flat file repair works.

* [NOD-828] Properly close the database in TestRepairFlatFiles.

* [NOD-828] Add a comment warning against putting and getting the same data within the same transaction.

* [NOD-862] Use dbaccess.HasBlock instead of Tx.HasBlock in initDAGState.

* [NOD-862] Use dbaccess.StoreBlock instead of dbStoreBlock.

* [NOD-862] Use dbaccess.FetchBlock instead of various block fetching mechanisms.

* [NOD-828] Fix the flatFilesBucket description.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Fix the ffldb description.

* [NOD-828] Rename Database to DataAccessor and rename Handle to Database.

* [NOD-828] Make Get return nil if the value doesn't exist.

* [NOD-828] Attempt to close leveldb even if closing ffdb failed.

* [NOD-828] Fix a bug where the wrong location would be written to the current store location bucket.

* [NOD-828] Fix not updating the store location in ffldb transactions.

* [NOD-828] Make scanFlatFiles return an error if os.Stat fails for any reason other than file-not-found.

* [NOD-828] Update the README and doc.go.

* [NOD-828] Simplify Bucket.Path().

* [NOD-828] Since LevelDBCursor satisfied the database2.Cursor interface, use it directly.

* [NOD-828] Combine two lines into one.

* [NOD-828] Combine another two lines into one.

* [NOD-828] Move a misplaced comment.

* [NOD-828] Use Wrapf instead of Errorf where appropriate.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Fix Wrapf calls.

* [NOD-828] Fix comments.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Add a comment explaining the use of batches and snapshots.

* [NOD-828] Implement RollbackUnlessClosed().

* [NOD-828] Return both errors in StoreBlock rollback.

* [NOD-828] Move rollback-on-error logic into ffldb. Remove CurrentStoreLocation and RollbackStore from DataAccessor.

* [NOD-828] Make bucket a type alias instead of a struct.

* [NOD-828] Fix a typo.

* [NOD-828] Use copy instead of append in Bucket.

* [NOD-828] Extract flatFileLocationSerializedSize to a const.

* [NOD-828] Debugf -> Warnf in rollback.go.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Remove data length from flat file data format.

* [NOD-828] Rearrange TestLevelDBTransactionSanity a bit.

* [NOD-828] Add stack traces to all errors that come out of library functions.

* [NOD-828] Return errors from rollback().

* [NOD-828] Remove an irrelevant comment.

* [NOD-828] Remove redundant whitespace.

* [NOD-828] Handle nil in FetchBlock.

* [NOD-828] Implement a dbaccess.BlockNode struct.

* [NOD-828] Move the explanation about batches and snapshots to the LevelDBTransaction struct.

* [NOD-828] Implement toDBBlockNode and fromDBBlockNode.

* Revert "[NOD-828] Make bucket a type alias instead of a struct."

This reverts commit 1fd39652

* [NOD-828] Fix revert errors.

* Revert "[NOD-828] Remove data length from flat file data format."

This reverts commit ef408e32

* [NOD-862] Implement storing index blocks.

* [NOD-862] Use database transactions where appropriate.

* [NOD-862] Fix tests failing on DAGSetup.

* [NOD-862] Fix bad make call.

* [NOD-862] Fix remaining database opening problems in tests.

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-862] Iterate over the new block index in dagio.

* [NOD-862] Fix block index key.

* [NOD-828] Add Delete to DataAccessor.

* [NOD-862] Pass byte slices to dbaccess instead of objects.

* [NOD-862] Fix errors.

* [NOD-862] Fix maybeAcceptBlock not checking block existence.

* [NOD-862] Fix TestAcceptanceIndexRecover.

* [NOD-862] Add comments to StoreBlockIndex and BlockIndexCursor.

* [NOD-828] Fix a couple of places that erroneously referenced blocks.

* [NOD-828] Add a comment on top of flatFileLocationSerializedSize.

* [NOD-828] Add Seek to Cursor.

* [NOD-828] Add First to Cursor.

* [NOD-828] Rename db to accessor in Context.

* [NOD-828] Make Get/Fetch calls return a boolean to indicate whether the requested item was found.

* [NOD-828] Name the output parameters of all Get functions.

* [NOD-828] Make RetrieveFromStore return whether the data was found.

* [NOD-862] Fix merge errors.

* [NOD-862] Fix DAGSetup using bad temp directories.

* [NOD-862] Fix TestProcessDelayedBlocks not closing the database properly.

* [NOD-867] Remove blockIndexBucket from dagio.

* [NOD-867] Fix wrong key in StoreIndexBucket.

* [NOD-867] Migrate DAG state to dbaccess.

* [NOD-867] Remove utxoSetVersionKeyName.

* [NOD-862] Fix merge errors.

* [NOD-867] Move localSubnetworkID into dagState.

* [NOD-867] Fix a comment.

* [NOD-867] Remove an unused function.

* [NOD-867] Migrate the database's UTXO set to dbaccess.

* [NOD-867] Add missing error check.

* [NOD-867] Changed First to not return an error.

* [NOD-867] Make Cursor.Key not return the entire key path.

* [NOD-887] Fix the comment above BlockIndexCursorFrom.

* [NOD-862] Merge flushToDBWithContext and flushToDB.

* [NOD-862] Remove TODO.

* [NOD-862] Add prefix to the temp dir in DAGSetup.

* [NOD-862] Bring back dbFetchBlockByHash.

* [NOD-862] Use BlockDAG.BlockByHash in p2p and rpc.

* [NOD-862] Use daghash.Hash in dbaccess.

* [NOD-862] Add defer to RollbackUnlessClosed after NewTx().

* [NOD-862] Extract dbStoreBlock to a separate function.

* [NOD-867] Remove TODOs.

* [NOD-867] Fix merge errors.

* [NOD-867] Fix comments and errors.

* [NOD-867] Unexport blockIndexKey.

* [NOD-867] Fix merge errors.

* [NOD-867] Move a misplaced comment.

* [NOD-867] Fix an error message.

* [NOD-867] Remove preallocation in initDAGState.

* [NOD-866] Migrate database logic in blockdag/indexers package to dbaccess (#682)

* [NOD-865] Delete blockidhash.go.

* [NOD-865] Remove a lot of no-longer relevant logic from indexers.

* [NOD-865] Pass TxContext to ConnectBlock.

* [NOD-865] Migrate the acceptance index to dbaccess.

* [NOD-865] Fix a block not being sent to ConnectBlock.

* [NOD-865] Pass the block's hash instead of the whole block.

* [NOD-865] Add forgotten Commit call.

* [NOD-865] Add comments.

* [NOD-866] Fix a comment.

* [NOD-866] Fix a comment.

* [NOD-866] Remove pointless indirection in acceptanceindex.

* [NOD-866] Fix comment over ForEachHash.

* [NOD-866] Rename ClearAcceptanceIndex to DropAcceptanceIndex.

* [NOD-866] Explain collecting keys before deleting them.

* [NOD-865] Move misc db logic to db access (#681)

* [NOD-828] Create the database2 package that will some day replace the database package.

* [NOD-828] Implement a "bucket" key mechanism.

* [NOD-828] Move bucket.go into the ffldb2 package.

* [NOD-828] Delete the un-interfaced ffldb package from database2, since we aren't going to be using it anyway.

* [NOD-828] Copy over + fixup flat file structs from the old ffldb.

* [NOD-828] Implement flatFilePath.

* [NOD-828] Implement flatFileStore.write().

* [NOD-828] Implement flatFileStore.read().

* [NOD-828] Implement flatFileStore.rollback().

* [NOD-828] Sync the file to disk at the end of write().

* [NOD-828] Extract crc32ByteOrder to a separate variable.

* [NOD-828] Add a sanity test.

* [NOD-828] Remove context-unrelated methods from the Database interface.

* [NOD-828] Create an ffldb object. Simply work against a context.

* [NOD-828] Open the new database on start.

* [NOD-828] Create the leveldb package.

* [NOD-828] Implement opening/closing leveldb.

* [NOD-828] Implement get/put out of/into leveldb.

* [NOD-828] Implement transactions and make them implement a generic database interface.

* [NOD-828] Write sanity tests for leveldb with and without transactions.

* [NOD-828] Add another case to the transaction sanity test.

* [NOD-828] Implement AppendBlock/RetrieveBlock.

* [NOD-828] Refactor so that concepts such as "block" and "metadata" don't leak into the database package.

* [NOD-828] Add RollbackFlatData to the database interface.

* [NOD-828] Remove anything from dbaccess that I'm not planning to implement as part of this ticket.

* [NOD-828] Implement StoreBlock.

* [NOD-828] Implement FetchBlock.

* [NOD-828] Implement HasBlock.

* [NOD-828] Write a sanity test for block insertion.

* [NOD-828] Implement CurrentFlatDataLocation.

* [NOD-828] Implement storing the current block location.

* [NOD-828] Implement initializing/syncing the flat file block store and the "metadata".

* [NOD-828] Add InitBlockStore to TestBlockStoreSanity.

* [NOD-828] Fix rename errors.

* [NOD-828] Fix lint errors in the root database package.

* [NOD-828] Fix lint errors in the ffldb.go.

* [NOD-828] Fix lint errors in the flatfile/db.go.

* [NOD-828] Rename packages in such a way to make the linter happy.

* [NOD-828] Finish satisfying the linter.

* [NOD-828] Fix doc.go.

* [NOD-828] Fix comments in block.go.

* [NOD-828] Move dbaccess out of the database package.

* [NOD-828] Move opening/closing the database to dbaccess.

* [NOD-828] Move the Database interface to the root database package, since it's meant to be generic.

* [NOD-828] Make ffldb generic to later support additional databases.

* [NOD-828] Make ffldb.Open return DatabaseHandle, since ffldb is no longer exported.

* [NOD-828] Fix comments.

* [NOD-828] Rename AppendFlatData to AppendToStore and RetrieveFlatData to RetrieveFromStore.

* [NOD-828] Make buckets nicer to use.

* [NOD-828] Implement cursors that iterate over some bucket.

* [NOD-828] Generalize flat-file repairing and move block database repairing into ffldb.

* [NOD-828] Write a test making sure that flat file repair works.

* [NOD-828] Properly close the database in TestRepairFlatFiles.

* [NOD-828] Add a comment warning against putting and getting the same data within the same transaction.

* [NOD-828] Fix the flatFilesBucket description.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Fix the ffldb description.

* [NOD-828] Rename Database to DataAccessor and rename Handle to Database.

* [NOD-828] Make Get return nil if the value doesn't exist.

* [NOD-828] Attempt to close leveldb even if closing ffdb failed.

* [NOD-828] Fix a bug where the wrong location would be written to the current store location bucket.

* [NOD-828] Fix not updating the store location in ffldb transactions.

* [NOD-828] Make scanFlatFiles return an error if os.Stat fails for any reason other than file-not-found.

* [NOD-828] Update the README and doc.go.

* [NOD-828] Simplify Bucket.Path().

* [NOD-828] Since LevelDBCursor satisfied the database2.Cursor interface, use it directly.

* [NOD-828] Combine two lines into one.

* [NOD-828] Combine another two lines into one.

* [NOD-828] Move a misplaced comment.

* [NOD-828] Use Wrapf instead of Errorf where appropriate.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Fix Wrapf calls.

* [NOD-828] Fix comments.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Add a comment explaining the use of batches and snapshots.

* [NOD-828] Implement RollbackUnlessClosed().

* [NOD-828] Return both errors in StoreBlock rollback.

* [NOD-828] Move rollback-on-error logic into ffldb. Remove CurrentStoreLocation and RollbackStore from DataAccessor.

* [NOD-828] Make bucket a type alias instead of a struct.

* [NOD-828] Fix a typo.

* [NOD-828] Use copy instead of append in Bucket.

* [NOD-828] Extract flatFileLocationSerializedSize to a const.

* [NOD-828] Debugf -> Warnf in rollback.go.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Remove data length from flat file data format.

* [NOD-828] Rearrange TestLevelDBTransactionSanity a bit.

* [NOD-828] Add stack traces to all errors that come out of library functions.

* [NOD-828] Return errors from rollback().

* [NOD-828] Remove an irrelevant comment.

* [NOD-828] Remove redundant whitespace.

* [NOD-828] Handle nil in FetchBlock.

* [NOD-828] Move the explanation about batches and snapshots to the LevelDBTransaction struct.

* Revert "[NOD-828] Make bucket a type alias instead of a struct."

This reverts commit 1fd39652

* [NOD-828] Fix revert errors.

* Revert "[NOD-828] Remove data length from flat file data format."

This reverts commit ef408e32

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-828] Add Delete to DataAccessor.

* [NOD-865] Move fee data db operations to dbaccess

* [NOD-865] Move reachability data db operations to dbaccess

* [NOD-865] Move UTXO diff data db operations to dbaccess

* [NOD-865] Move subnetwork data db operations to dbaccess

* [NOD-865] Fix createDAGState

* [NOD-865] Remove old Get signature with "exists"

* [NOD-865] Move multiset db operations to dbaccess

* [NOD-865] Use dbaccess transactions where possible

* [NOD-865] Remove old Get signature with "exists"

* [NOD-881] Recover TestGHOSTDAGErrors

* [NOD-865] Create function for db keys

* [NOD-865] Change Exists to Has, and use accessor.Has where possible

* [NOD-865] Make ClearReachabilityData transactive

* [NOD-865] Don't iterate cursors while changing db data

* [NOD-865] Rename RegisterSubnetwork -> StoreSubnetwork

* [NOD-865] Change bucket from utxodiffs to utxo-diffs

* [NOD-865] Rename SubnetworkExists->HasSubnetwork

* [NOD-865] Change a comment

* [NOD-865] Fix tests

* [NOD-865] Fix comment

* [NOD-865] Remove the prefix "db" from some functions

* [NOD-865] Remove redundant comments

* [NOD-865] Make clearBucket function

* [NOD-865] Make clear functions get a dbTx as an arg

* [NOD-865] Remove erroneous tx commit

Co-authored-by: stasatdaglabs <stas@daglabs.com>

* [NOD-868] Delete the old database package (#683)

* [NOD-828] Create the database2 package that will some day replace the database package.

* [NOD-828] Implement a "bucket" key mechanism.

* [NOD-828] Move bucket.go into the ffldb2 package.

* [NOD-828] Delete the un-interfaced ffldb package from database2, since we aren't going to be using it anyway.

* [NOD-828] Copy over + fixup flat file structs from the old ffldb.

* [NOD-828] Implement flatFilePath.

* [NOD-828] Implement flatFileStore.write().

* [NOD-828] Implement flatFileStore.read().

* [NOD-828] Implement flatFileStore.rollback().

* [NOD-828] Sync the file to disk at the end of write().

* [NOD-828] Extract crc32ByteOrder to a separate variable.

* [NOD-828] Add a sanity test.

* [NOD-828] Remove context-unrelated methods from the Database interface.

* [NOD-828] Create an ffldb object. Simply work against a context.

* [NOD-828] Open the new database on start.

* [NOD-828] Create the leveldb package.

* [NOD-828] Implement opening/closing leveldb.

* [NOD-828] Implement get/put out of/into leveldb.

* [NOD-828] Implement transactions and make them implement a generic database interface.

* [NOD-828] Write sanity tests for leveldb with and without transactions.

* [NOD-828] Add another case to the transaction sanity test.

* [NOD-828] Implement AppendBlock/RetrieveBlock.

* [NOD-828] Refactor so that concepts such as "block" and "metadata" don't leak into the database package.

* [NOD-828] Add RollbackFlatData to the database interface.

* [NOD-828] Remove anything from dbaccess that I'm not planning to implement as part of this ticket.

* [NOD-828] Implement StoreBlock.

* [NOD-828] Implement FetchBlock.

* [NOD-828] Implement HasBlock.

* [NOD-828] Write a sanity test for block insertion.

* [NOD-828] Implement CurrentFlatDataLocation.

* [NOD-828] Implement storing the current block location.

* [NOD-828] Implement initializing/syncing the flat file block store and the "metadata".

* [NOD-828] Add InitBlockStore to TestBlockStoreSanity.

* [NOD-828] Fix rename errors.

* [NOD-828] Fix lint errors in the root database package.

* [NOD-828] Fix lint errors in the ffldb.go.

* [NOD-828] Fix lint errors in the flatfile/db.go.

* [NOD-828] Rename packages in such a way to make the linter happy.

* [NOD-828] Finish satisfying the linter.

* [NOD-828] Fix doc.go.

* [NOD-828] Fix comments in block.go.

* [NOD-828] Move dbaccess out of the database package.

* [NOD-828] Move opening/closing the database to dbaccess.

* [NOD-828] Move the Database interface to the root database package, since it's meant to be generic.

* [NOD-828] Make ffldb generic to later support additional databases.

* [NOD-828] Make ffldb.Open return DatabaseHandle, since ffldb is no longer exported.

* [NOD-828] Fix comments.

* [NOD-828] Rename AppendFlatData to AppendToStore and RetrieveFlatData to RetrieveFromStore.

* [NOD-828] Make buckets nicer to use.

* [NOD-828] Implement cursors that iterate over some bucket.

* [NOD-828] Generalize flat-file repairing and move block database repairing into ffldb.

* [NOD-828] Write a test making sure that flat file repair works.

* [NOD-828] Properly close the database in TestRepairFlatFiles.

* [NOD-828] Add a comment warning against putting and getting the same data within the same transaction.

* [NOD-828] Fix the flatFilesBucket description.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Fix the ffldb description.

* [NOD-828] Rename Database to DataAccessor and rename Handle to Database.

* [NOD-828] Make Get return nil if the value doesn't exist.

* [NOD-828] Attempt to close leveldb even if closing ffdb failed.

* [NOD-828] Fix a bug where the wrong location would be written to the current store location bucket.

* [NOD-828] Fix not updating the store location in ffldb transactions.

* [NOD-828] Make scanFlatFiles return an error if os.Stat fails for any reason other than file-not-found.

* [NOD-828] Update the README and doc.go.

* [NOD-828] Simplify Bucket.Path().

* [NOD-828] Since LevelDBCursor satisfied the database2.Cursor interface, use it directly.

* [NOD-828] Combine two lines into one.

* [NOD-828] Combine another two lines into one.

* [NOD-828] Move a misplaced comment.

* [NOD-828] Use Wrapf instead of Errorf where appropriate.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Fix Wrapf calls.

* [NOD-828] Fix comments.

* [NOD-828] Remove superfluous whitespace.

* [NOD-828] Add a comment explaining the use of batches and snapshots.

* [NOD-828] Implement RollbackUnlessClosed().

* [NOD-828] Return both errors in StoreBlock rollback.

* [NOD-828] Move rollback-on-error logic into ffldb. Remove CurrentStoreLocation and RollbackStore from DataAccessor.

* [NOD-828] Make bucket a type alias instead of a struct.

* [NOD-828] Fix a typo.

* [NOD-828] Use copy instead of append in Bucket.

* [NOD-828] Extract flatFileLocationSerializedSize to a const.

* [NOD-828] Debugf -> Warnf in rollback.go.

* [NOD-828] Fix a comment.

* [NOD-828] Fix a comment.

* [NOD-828] Remove data length from flat file data format.

* [NOD-828] Rearrange TestLevelDBTransactionSanity a bit.

* [NOD-828] Add stack traces to all errors that come out of library functions.

* [NOD-828] Return errors from rollback().

* [NOD-828] Remove an irrelevant comment.

* [NOD-828] Remove redundant whitespace.

* [NOD-828] Handle nil in FetchBlock.

* [NOD-828] Move the explanation about batches and snapshots to the LevelDBTransaction struct.

* Revert "[NOD-828] Make bucket a type alias instead of a struct."

This reverts commit 1fd39652

* [NOD-828] Fix revert errors.

* Revert "[NOD-828] Remove data length from flat file data format."

This reverts commit ef408e32

* [NOD-862] Move Cursor() into the DataAccessor interface.

* [NOD-828] Add Delete to DataAccessor.

* [NOD-865] Move fee data db operations to dbaccess

* [NOD-865] Move reachability data db operations to dbaccess

* [NOD-865] Move UTXO diff data db operations to dbaccess

* [NOD-865] Move subnetwork data db operations to dbaccess

* [NOD-865] Fix createDAGState

* [NOD-865] Remove old Get signature with "exists"

* [NOD-865] Move multiset db operations to dbaccess

* [NOD-865] Use dbaccess transactions where possible

* [NOD-865] Remove old Get signature with "exists"

* [NOD-881] Recover TestGHOSTDAGErrors

* [NOD-865] Create function for db keys

* [NOD-865] Change Exists to Has, and use accessor.Has where possible

* [NOD-865] Make ClearReachabilityData transactive

* [NOD-865] Don't iterate cursors while changing db data

* [NOD-865] Rename RegisterSubnetwork -> StoreSubnetwork

* [NOD-865] Change bucket from utxodiffs to utxo-diffs

* [NOD-865] Rename SubnetworkExists->HasSubnetwork

* [NOD-865] Change a comment

* [NOD-868] Remove all tests from old database.

* [NOD-868] Remove all unused methods from the old database's interfaces.

* [NOD-865] Fix tests

* [NOD-868] Remove references to DB.

* [NOD-865] Fix comment

* [NOD-868] Remove the old ffldb besides the interface and errors.go.

* [NOD-868] Remove errors.go.

* [NOD-868] Remove the old database package.

* [NOD-868] Add openDB to DAGSetup to emulate the old dbpath in dag.config.

* [NOD-868] Rename database2 to database.

* [NOD-868] Use NewTx instead of NoTx where required.

* [NOD-868] Fix merge errors.

* [NOD-868] Rename dbXXX functions to just xxx.

* [NOD-868] Rename putDAGState to saveDAGState.

* [NOD-868] Replace comments in initDAGState with logs.

* [NOD-868] Explain the openDB parameter in DAGSetup.

* [NOD-868] Fixup doc.go and README.md.

* [NOD-868] Remove pointless transactions.

Co-authored-by: Ori Newman <orinewman1@gmail.com>

* [NOD-805] Fix merge errors.

* [NOD-805] Fix a comment.

* [NOD-805] Don't return virtualTxsAcceptanceData from applyDAGChanges.

* [NOD-805] Add missing error handling in TestAcceptanceDataIndexRecover.

* [NOD-805] Rename blockDAG to dag in indexers/manager.go.

* [NOD-805] Defer cursor.Close() everywhere.

* [NOD-805] Rename scanFlatFiles to findCurrentLocation.

* [NOD-805] Extract crc32ChecksumLength and dataLengthLength to constants.

* [NOD-805] Handle open files properly in rollback.go.

* [NOD-805] Remove unnecessary func wrapper.

* [NOD-805] Remove unnecessary trimming in initialize.

* [NOD-805] Made StoreBlock accept only TxContext.

* [NOD-805] Changed the log level of an error message to Error.

* [NOD-805] Add a note about holding mutexes over deleteFile.

* [NOD-805] Remove a false comment.

* [NOD-805] Fix a comment.

* [NOD-805] Rename blk to block.

* [NOD-805] Extract utxoKey to a separate function.

* [NOD-805] Move dbaccess.xxxKey functions to the tops of their respective files.

* [NOD-805] Fix grammar in dbaccess/db.go.

* [NOD-805] Wrap a failed database corruption recovery error.

* [NOD-805] Split lines with WithStack in them.

* [NOD-805] Fix the comment over initialize.

* [NOD-805] Rename ffdb to flatFileDB and ldb to levelDB.

* [NOD-805] Fix a comment.

* [NOD-805] Fix a comment.

* [NOD-805] Use s.writeCursor instead of cursor.

* [NOD-805] Embed file in lockableFile.

* [NOD-805] the the -> the

* [NOD-805] openDB -> db

* [NOD-805] Use TxContext in all flushToDB functions.

* [NOD-805] Rename context -> dbContext.

* [NOD-805] Reword the comment at the beginning on initDAGState.

* [NOD-805] Explain cursor key trimming.

* [NOD-805] Remove Error from Cursor.

* [NOD-805] Return ErrNotFound from done Cursor Key and Value.

* [NOD-805] Add missing error handling.

* [NOD-805] Fix a comment.

* [NOD-805] Fix a variable name.

* [NOD-805] Remove pointless underscore.

* [NOD-805] Fix a comment.

* [NOD-805] Fix a variable name.

Co-authored-by: Mike Zak <feanorr@gmail.com>
Co-authored-by: Ori Newman <orinewman1@gmail.com>
2020-04-02 13:56:32 +03:00
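The redesign above replaces the old db.Update callback style with explicit dbaccess transactions, which is exactly the pattern visible in the diff hunks below (dbaccess.NewTx, a deferred RollbackUnlessClosed, and an explicit Commit). A condensed sketch of that calling pattern follows; only the dbaccess calls that actually appear in this change are used, and the wrapper function itself is illustrative.

package sketch

import "github.com/kaspanet/kaspad/dbaccess"

// flushNodeSketch condenses the calling pattern visible in the diffs below:
// open a transaction, defer a rollback that becomes a no-op once Commit
// closes the transaction, write, then commit explicitly. NewTx,
// RollbackUnlessClosed, StoreIndexBlock and Commit appear in this change;
// the wrapper function itself is illustrative.
func flushNodeSketch(key, serializedNode []byte) error {
	dbTx, err := dbaccess.NewTx()
	if err != nil {
		return err
	}
	defer dbTx.RollbackUnlessClosed()

	err = dbaccess.StoreIndexBlock(dbTx, key, serializedNode)
	if err != nil {
		return err
	}
	return dbTx.Commit()
}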
Ori Newman
b20a7a679b [NOD-874] If the node is not current call sm.restartSyncIfNeeded() on handleInvMsg (#684)
* [NOD-874] If the node is not current call sm.restartSyncIfNeeded() on handleInvMsg

* [NOD-874] Check haveUnknownInvBlock before restartSyncIfNeeded

* [NOD-874] Fix comment

* [NOD-874] Fix comment

* [NOD-874] Fix comment
2020-04-01 12:56:10 +03:00
Ori Newman
36d866375e [NOD-881] Don't recalculate subtreesize for children (#678)
* [NOD-881] Don't recalculate subtreesize for children

* [NOD-881] Make BenchmarkReindexInterval clearer

* [NOD-881] Use b.ResetTimer

* [NOD-881] Fix BenchmarkReindexInterval to use b.N
2020-03-31 12:43:02 +03:00
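The benchmark cleanups in this commit (use b.ResetTimer and drive the loop with b.N) follow the standard testing.B pattern. A generic sketch of that shape, meant for a _test.go file, is shown below; the fixture and the measured operation are placeholders, not the real BenchmarkReindexInterval.

package sketch

import "testing"

// buildFixture stands in for whatever expensive setup the real benchmark
// needs; its cost should not be charged to the measured loop.
func buildFixture() []int {
	return make([]int, 1<<16)
}

// work stands in for the operation being measured.
func work(data []int) int {
	sum := 0
	for _, v := range data {
		sum += v
	}
	return sum
}

func BenchmarkExample(b *testing.B) {
	data := buildFixture()

	b.ResetTimer() // exclude setup cost, as the NOD-881 fix does
	for i := 0; i < b.N; i++ {
		work(data) // scale the measured work with b.N
	}
}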
Svarog
024edc30a3 [NOD-857] Add generalized profiler package and use it everywhere (#679)
* [NOD-857] Add generalized profiler package and use it everywhere

* [NOD-857] Dependency-inject log into profiling.Start()
2020-03-31 12:41:21 +03:00
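The generalized profiler package described above wraps Go's built-in net/http/pprof endpoints and, per the second bullet, takes the log as a parameter. A minimal sketch of what such a Start helper can look like is given below; the Logger interface and the exact signature are assumptions, not necessarily the kaspad profiling package.

package profiling

import (
	"net"
	"net/http"
	_ "net/http/pprof" // registers the /debug/pprof handlers on DefaultServeMux
)

// Logger is a minimal interface so callers can inject their own log,
// as the commit does for profiling.Start.
type Logger interface {
	Infof(format string, params ...interface{})
	Errorf(format string, params ...interface{})
}

// Start launches an HTTP server exposing pprof data on the given port,
// in the background, logging through the injected log.
func Start(port string, log Logger) {
	go func() {
		addr := net.JoinHostPort("localhost", port)
		log.Infof("Profiling server listening on %s", addr)
		if err := http.ListenAndServe(addr, nil); err != nil {
			log.Errorf("Profiling server error: %s", err)
		}
	}()
}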
Ori Newman
6aa5e0b5a8 [NOD-882] Remove ecc and hdkeychain (#680)
* [NOD-882] Remove ecc and hdkeychain

* [NOD-882] Remove HDCoinType from dagParams
2020-03-31 10:58:11 +03:00
Mike Zak
1a38550fdd Update to version 0.3.0 2020-03-29 14:15:17 +03:00
stasatdaglabs
3e7ebb5a84 [NOD-861] Get rid of dbtool/fetchblockregion.go. (#667) 2020-03-29 12:47:13 +03:00
Svarog
4bca7342d3 [NOD-883] Fix dockerfile in kaspaminer + set real version for go-libsecp256k1 (#673) 2020-03-26 17:50:09 +02:00
Elichai Turkel
f80908fb4e [NOD-876] Replace ecc with go-secp256k1 for public keys (#670)
* Replace ecc with go-secp256k1 in txscript

* Replace ecc with go-secp256k1 in util and cmd

* Replace ecc.Multiset with secp256k1.MultiSet
2020-03-26 17:03:39 +02:00
stasatdaglabs
e000e10738 [NOD-880] Remove CGO_ENABLED=0 from Dockerfile. (#671) 2020-03-26 14:02:57 +02:00
Ori Newman
d83862f36c [NOD-855] Save ECMH for block utxo and not diff utxo (#669)
* [NOD-855] Save ECMH for each block UTXO

* [NOD-855] Remove UpdateExtraNonce method

* [NOD-855] Remove multiset data from UTXO diffs

* [NOD-855] Fix to fetch multiset of selected parent

* [NOD-855] Don't remove coinbase inputs from multiset

* [NOD-855] Create multisetBucketName on startup

* [NOD-855] Remove multiset from UTXO diff tests

* [NOD-855] clear new entries from multisetstore on saveChangesFromBlock

* [NOD-855] Fix tests

* [NOD-855] Use UnacceptedBlueScore when adding current block transactions to multiset

* [NOD-855] Hash utxo before adding it to multiset

* [NOD-855] Pass isCoinbase to NewUTXOEntry

* [NOD-855] Do not use hash when adding entries to multiset

* [NOD-855] When calculating multiset, replace the unaccepted blue score of selected parent transaction with the block blue score

* [NOD-855] Manually add a chained transaction to a block in TestChainedTransactions

* [NOD-855] Change name and comments

* [NOD-855] Use FindAcceptanceData to find a specific block acceptance data

* [NOD-855] Remove redundant copy of txIn.PreviousOutpoint

* [NOD-855] Use fmt.Sprintf when creating internalRPCError
2020-03-26 13:06:12 +02:00
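The multiset work above keeps one commutative hash (an ECMH) per block's UTXO set, so spent entries can be removed and new entries added in any order and still produce the same digest. A toy illustration of that add/remove bookkeeping follows; the multiset interface here is hypothetical and is not the real go-secp256k1 MultiSet API.

package sketch

// multiset is a hypothetical stand-in for an ECMH-style multiset hash:
// elements can be added and removed commutatively.
type multiset interface {
	Add(data []byte)
	Remove(data []byte)
}

// applyTransaction sketches the NOD-855 bookkeeping: the serialized UTXO
// entries a transaction spends leave the multiset, and the entries it
// creates are added.
func applyTransaction(ms multiset, spentEntries, createdEntries [][]byte) {
	for _, entry := range spentEntries {
		ms.Remove(entry)
	}
	for _, entry := range createdEntries {
		ms.Add(entry)
	}
}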
Svarog
1020402b34 [NOD-869] Close panicHandlerDone instead of sending an empty struct + use time.After instead of time.Tick (#668) 2020-03-25 16:14:08 +02:00
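Both halves of this change are standard Go idioms: closing a channel signals completion to any number of waiters without needing a matching receive, and time.After gives a one-shot timer where time.Tick would leak a ticker that can never be stopped. A small self-contained sketch:

package main

import (
	"fmt"
	"time"
)

func main() {
	panicHandlerDone := make(chan struct{})

	go func() {
		// ... guarded work happens here ...
		close(panicHandlerDone) // signal completion instead of sending struct{}{}
	}()

	select {
	case <-panicHandlerDone:
		fmt.Println("handler finished")
	case <-time.After(5 * time.Second): // one-shot timeout; time.Tick would leak its ticker
		fmt.Println("timed out waiting for the handler")
	}
}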
Mike Zak
bc6ce6ed53 Update version to v0.2.0 2020-03-25 11:51:14 +02:00
Ori Newman
d3b1953deb [NOD-848] optimize utxo diffs serialize allocations (#666)
* [NOD-848] Optimize allocations when serializing UTXO diffs

* [NOD-848] Use same UTXO serialization everywhere, and use compression as well

* [NOD-848] Fix usage of wrong buffer

* [NOD-848] Fix tests

* [NOD-848] Fix wire tests

* [NOD-848] Fix tests

* [NOD-848] Remove VLQ

* [NOD-848] Fix comments

* [NOD-848] Add varint for big endian encoding

* [NOD-848] In TestVarIntWire, assume the expected decoded value is the same as the serialization input

* [NOD-848] Serialize outpoint index with big endian varint

* [NOD-848] Remove p2pk from compression support

* [NOD-848] Fix comments

* [NOD-848] Remove p2pk from decompression support

* [NOD-848] Make entry compression optional

* [NOD-848] Fix tests

* [NOD-848] Fix comments and var names

* [NOD-848] Remove UTXO compression

* [NOD-848] Fix tests

* [NOD-848] Remove big endian varint

* [NOD-848] Fix comments

* [NOD-848] Rename ReadVarIntLittleEndian->ReadVarInt and fix WriteVarInt comment

* [NOD-848] Add outpointIndexByteOrder variable

* [NOD-848] Remove redundant comment

* [NOD-848] Fix outpointMaxSerializeSize to the correct value

* [NOD-848] Move subBuffer to utils
2020-03-24 16:44:41 +02:00
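After the back-and-forth above, the serialization settles on a single little-endian varint (ReadVarInt/WriteVarInt) for values like the outpoint index. Go's encoding/binary varint uses the same least-significant-group-first, base-128 layout, so a round-trip can be sketched with it; kaspad's own wire functions have their own signatures and error handling.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// putVarInt appends a base-128 varint to the buffer, the same kind of
// encoding the wire package's WriteVarInt produces for an outpoint index.
func putVarInt(buf *bytes.Buffer, value uint64) {
	var scratch [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(scratch[:], value)
	buf.Write(scratch[:n])
}

func main() {
	var buf bytes.Buffer
	putVarInt(&buf, 300) // encodes in 2 bytes instead of a fixed 8

	decoded, n := binary.Uvarint(buf.Bytes())
	fmt.Println(decoded, n) // 300 2
}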
Svarog
3c67215e76 [NOD-796] Upgrade to go 1.14 (#665) 2020-03-22 14:50:13 +02:00
Svarog
586624c836 [NOD-853] Add profiler server to kaspaminer (#664) 2020-03-19 17:19:31 +02:00
Svarog
49855e6333 [NOD-823] Use WithDiffInPlace for the implementation of WithDiff (#657)
* [NOD-823] Use WithDiffInPlace for the implementation of WithDiff

* [NOD-823] Unexport withDiffInPlace
2020-03-17 11:19:02 +02:00
Ori Newman
624249c0f3 [NOD-842] Use flushToDB with the same transaction as everything else in saveChangesFromBlock and never ignore flushToDB errors (#662) 2020-03-16 11:05:17 +02:00
Ori Newman
1cf443a63b [NOD-841] Fix tests to not be dependent on block rate (#661)
* [NOD-841] Fix TestDifficulty

* [NOD-841] Fix TestProcessDelayedBlocks

* [NOD-841] Fix TestCheckBlockSanity

* [NOD-841] Fix TestProcessDelayedBlocks

* [NOD-841] Shorten long lines
2020-03-15 18:08:03 +02:00
Ori Newman
8909679f44 [NOD-818] Remove time adjustment (#658)
* [NOD-818] Remove time adjustment

* [NOD-818] Remove interface ensuring and copyright message

* [NOD-818] Update comment
2020-03-15 17:37:01 +02:00
Ori Newman
e58efbf0ea [NOD-839] Panic from non-rule error from ProcessBlock (#660) 2020-03-15 17:26:53 +02:00
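Panicking on a non-rule error draws a line between consensus rejections, which are expected and reported, and internal faults (database corruption, programming errors), which should crash the node rather than let it continue in an unknown state. A sketch of that call-site discipline follows; ruleError here is a hypothetical stand-in for the blockdag rule-error type, not the real one.

package main

import (
	"errors"
	"log"
)

// ruleError is a hypothetical stand-in for the consensus rule-error type.
type ruleError struct{ msg string }

func (e ruleError) Error() string { return e.msg }

// handleProcessBlockError sketches the NOD-839 discipline: rule violations
// are logged and handled, anything else panics.
func handleProcessBlockError(err error) {
	if err == nil {
		return
	}
	var rErr ruleError
	if errors.As(err, &rErr) {
		log.Printf("block rejected: %s", err) // expected: the block broke a rule
		return
	}
	panic(err) // unexpected internal error: fail loudly instead of limping on
}

func main() {
	handleProcessBlockError(ruleError{msg: "bad merkle root"})
}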
239 changed files with 5210 additions and 26319 deletions

View File

@@ -6,7 +6,7 @@ package blockdag
 import (
 	"fmt"
-	"github.com/kaspanet/kaspad/database"
+	"github.com/kaspanet/kaspad/dbaccess"
 	"github.com/kaspanet/kaspad/util"
 	"github.com/pkg/errors"
 )
@@ -16,7 +16,17 @@ func (dag *BlockDAG) addNodeToIndexWithInvalidAncestor(block *util.Block) error
newNode, _ := dag.newBlockNode(blockHeader, newBlockSet())
newNode.status = statusInvalidAncestor
dag.index.AddNode(newNode)
return dag.index.flushToDB()
dbTx, err := dbaccess.NewTx()
if err != nil {
return err
}
defer dbTx.RollbackUnlessClosed()
err = dag.index.flushToDB(dbTx)
if err != nil {
return err
}
return dbTx.Commit()
}
// maybeAcceptBlock potentially accepts a block into the block DAG. It
@@ -62,13 +72,26 @@ func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) er
// expensive connection logic. It also has some other nice properties
// such as making blocks that never become part of the DAG or
// blocks that fail to connect available for further analysis.
err = dag.db.Update(func(dbTx database.Tx) error {
err := dbStoreBlock(dbTx, block)
dbTx, err := dbaccess.NewTx()
if err != nil {
return err
}
defer dbTx.RollbackUnlessClosed()
blockExists, err := dbaccess.HasBlock(dbTx, block.Hash())
if err != nil {
return err
}
if !blockExists {
err := storeBlock(dbTx, block)
if err != nil {
return err
}
return dag.index.flushToDBWithTx(dbTx)
})
}
err = dag.index.flushToDB(dbTx)
if err != nil {
return err
}
err = dbTx.Commit()
if err != nil {
return err
}

View File

@@ -10,7 +10,7 @@ import (
func TestMaybeAcceptBlockErrors(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestMaybeAcceptBlockErrors", Config{
dag, teardownFunc, err := DAGSetup("TestMaybeAcceptBlockErrors", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {

View File

@@ -10,7 +10,7 @@ import (
 // TestBlockHeap tests pushing, popping, and determining the length of the heap.
 func TestBlockHeap(t *testing.T) {
 	// Create a new database and DAG instance to run tests against.
-	dag, teardownFunc, err := DAGSetup("TestBlockHeap", Config{
+	dag, teardownFunc, err := DAGSetup("TestBlockHeap", true, Config{
 		DAGParams: &dagconfig.MainnetParams,
 	})
 	if err != nil {

View File

@@ -1,136 +0,0 @@
package blockdag
import (
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/pkg/errors"
)
var (
// idByHashIndexBucketName is the name of the db bucket used to house
// the block hash -> block id index.
idByHashIndexBucketName = []byte("idbyhashidx")
// hashByIDIndexBucketName is the name of the db bucket used to house
// the block id -> block hash index.
hashByIDIndexBucketName = []byte("hashbyididx")
currentBlockIDKey = []byte("currentblockid")
)
// -----------------------------------------------------------------------------
// This is a mapping between block hashes and unique IDs. The ID
// is simply a sequentially incremented uint64 that is used instead of block hash
// for the indexers. This is useful because it is only 8 bytes versus 32 bytes
// hashes and thus saves a ton of space when a block is referenced in an index.
// It consists of three buckets: the first bucket maps the hash of each
// block to the unique ID and the second maps that ID back to the block hash.
// The third bucket contains the last received block ID, and is used
// when starting the node to check that the enabled indexes are up to date
// with the latest received block, and if not, initiate recovery process.
//
// The serialized format for keys and values in the block hash to ID bucket is:
// <hash> = <ID>
//
// Field Type Size
// hash daghash.Hash 32 bytes
// ID uint64 8 bytes
// -----
// Total: 40 bytes
//
// The serialized format for keys and values in the ID to block hash bucket is:
// <ID> = <hash>
//
// Field Type Size
// ID uint64 8 bytes
// hash daghash.Hash 32 bytes
// -----
// Total: 40 bytes
//
// -----------------------------------------------------------------------------
const blockIDSize = 8 // 8 bytes for block ID
// DBFetchBlockIDByHash uses an existing database transaction to retrieve the
// block id for the provided hash from the index.
func DBFetchBlockIDByHash(dbTx database.Tx, hash *daghash.Hash) (uint64, error) {
hashIndex := dbTx.Metadata().Bucket(idByHashIndexBucketName)
serializedID := hashIndex.Get(hash[:])
if serializedID == nil {
return 0, errors.Errorf("no entry in the block ID index for block with hash %s", hash)
}
return DeserializeBlockID(serializedID), nil
}
// DBFetchBlockHashBySerializedID uses an existing database transaction to
// retrieve the hash for the provided serialized block id from the index.
func DBFetchBlockHashBySerializedID(dbTx database.Tx, serializedID []byte) (*daghash.Hash, error) {
idIndex := dbTx.Metadata().Bucket(hashByIDIndexBucketName)
hashBytes := idIndex.Get(serializedID)
if hashBytes == nil {
return nil, errors.Errorf("no entry in the block ID index for block with id %d", byteOrder.Uint64(serializedID))
}
var hash daghash.Hash
copy(hash[:], hashBytes)
return &hash, nil
}
// dbPutBlockIDIndexEntry uses an existing database transaction to update or add
// the index entries for the hash to id and id to hash mappings for the provided
// values.
func dbPutBlockIDIndexEntry(dbTx database.Tx, hash *daghash.Hash, serializedID []byte) error {
// Add the block hash to ID mapping to the index.
meta := dbTx.Metadata()
hashIndex := meta.Bucket(idByHashIndexBucketName)
if err := hashIndex.Put(hash[:], serializedID[:]); err != nil {
return err
}
// Add the block ID to hash mapping to the index.
idIndex := meta.Bucket(hashByIDIndexBucketName)
return idIndex.Put(serializedID[:], hash[:])
}
// DBFetchCurrentBlockID returns the last known block ID.
func DBFetchCurrentBlockID(dbTx database.Tx) uint64 {
serializedID := dbTx.Metadata().Get(currentBlockIDKey)
if serializedID == nil {
return 0
}
return DeserializeBlockID(serializedID)
}
// DeserializeBlockID returns a deserialized block id
func DeserializeBlockID(serializedID []byte) uint64 {
return byteOrder.Uint64(serializedID)
}
// SerializeBlockID returns a serialized block id
func SerializeBlockID(blockID uint64) []byte {
serializedBlockID := make([]byte, blockIDSize)
byteOrder.PutUint64(serializedBlockID, blockID)
return serializedBlockID
}
// DBFetchBlockHashByID uses an existing database transaction to retrieve the
// hash for the provided block id from the index.
func DBFetchBlockHashByID(dbTx database.Tx, id uint64) (*daghash.Hash, error) {
return DBFetchBlockHashBySerializedID(dbTx, SerializeBlockID(id))
}
func createBlockID(dbTx database.Tx, blockHash *daghash.Hash) (uint64, error) {
currentBlockID := DBFetchCurrentBlockID(dbTx)
newBlockID := currentBlockID + 1
serializedNewBlockID := SerializeBlockID(newBlockID)
err := dbTx.Metadata().Put(currentBlockIDKey, serializedNewBlockID)
if err != nil {
return 0, err
}
err = dbPutBlockIDIndexEntry(dbTx, blockHash, serializedNewBlockID)
if err != nil {
return 0, err
}
return newBlockID, nil
}

View File

@@ -5,10 +5,10 @@
package blockdag
import (
"github.com/kaspanet/kaspad/dbaccess"
"sync"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/daghash"
)
@@ -18,7 +18,6 @@ type blockIndex struct {
// The following fields are set when the instance is created and can't
// be changed afterwards, so there is no need to protect them with a
// separate mutex.
db database.DB
dagParams *dagconfig.Params
sync.RWMutex
@@ -29,9 +28,8 @@ type blockIndex struct {
// newBlockIndex returns a new empty instance of a block index. The index will
// be dynamically populated as block nodes are loaded from the database and
// manually added.
func newBlockIndex(db database.DB, dagParams *dagconfig.Params) *blockIndex {
func newBlockIndex(dagParams *dagconfig.Params) *blockIndex {
return &blockIndex{
db: db,
dagParams: dagParams,
index: make(map[daghash.Hash]*blockNode),
dirty: make(map[*blockNode]struct{}),
@@ -111,17 +109,8 @@ func (bi *blockIndex) UnsetStatusFlags(node *blockNode, flags blockStatus) {
bi.dirty[node] = struct{}{}
}
// flushToDB writes all dirty block nodes to the database. If all writes
// succeed, this clears the dirty set.
func (bi *blockIndex) flushToDB() error {
return bi.db.Update(func(dbTx database.Tx) error {
return bi.flushToDBWithTx(dbTx)
})
}
// flushToDBWithTx writes all dirty block nodes to the database. If all
// writes succeed, this clears the dirty set.
func (bi *blockIndex) flushToDBWithTx(dbTx database.Tx) error {
// flushToDB writes all dirty block nodes to the database.
func (bi *blockIndex) flushToDB(dbContext *dbaccess.TxContext) error {
bi.Lock()
defer bi.Unlock()
if len(bi.dirty) == 0 {
@@ -129,7 +118,12 @@ func (bi *blockIndex) flushToDBWithTx(dbTx database.Tx) error {
}
for node := range bi.dirty {
err := dbStoreBlockNode(dbTx, node)
serializedBlockNode, err := serializeBlockNode(node)
if err != nil {
return err
}
key := blockIndexKey(node.hash, node.blueScore)
err = dbaccess.StoreIndexBlock(dbContext, key, serializedBlockNode)
if err != nil {
return err
}
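A minimal sketch of how a caller might drive the new flushToDB signature, following the dbaccess transaction pattern that appears later in this changeset (NewTx, RollbackUnlessClosed, Commit). blockIndex and dbaccess come from the surrounding packages, so this is a package-level helper rather than a standalone program, and flushIndex itself is hypothetical.

// flushIndex is a hypothetical helper illustrating the intended call pattern.
func flushIndex(index *blockIndex) error {
	dbTx, err := dbaccess.NewTx()
	if err != nil {
		return err
	}
	// Roll back if we return before the commit below succeeds.
	defer dbTx.RollbackUnlessClosed()

	if err := index.flushToDB(dbTx); err != nil {
		return err
	}
	return dbTx.Commit()
}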


@@ -10,7 +10,7 @@ import (
func TestAncestorErrors(t *testing.T) {
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimnetParams
dag, teardownFunc, err := DAGSetup("TestAncestorErrors", Config{
dag, teardownFunc, err := DAGSetup("TestAncestorErrors", true, Config{
DAGParams: &params,
})
if err != nil {


@@ -110,7 +110,7 @@ func (dag *BlockDAG) newBlockNode(blockHeader *wire.BlockHeader, parents blockSe
parents: parents,
children: make(blockSet),
blueScore: math.MaxUint64, // Initialized to the max value to avoid collisions with the genesis block
timestamp: dag.AdjustedTime().Unix(),
timestamp: dag.Now().Unix(),
bluesAnticoneSizes: make(map[*blockNode]dagconfig.KType),
}


@@ -9,7 +9,7 @@ import (
// This test is to ensure the size BlueAnticoneSizesSize is serialized to the size of KType.
// We verify that by serializing and deserializing the block while making sure that we stay within the expected range.
func TestBlueAnticoneSizesSize(t *testing.T) {
dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizesSize", Config{
dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizesSize", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {


@@ -12,7 +12,7 @@ import (
func TestBlueBlockWindow(t *testing.T) {
params := dagconfig.SimnetParams
params.K = 1
dag, teardownFunc, err := DAGSetup("TestBlueBlockWindow", Config{
dag, teardownFunc, err := DAGSetup("TestBlueBlockWindow", true, Config{
DAGParams: &params,
})
if err != nil {


@@ -4,12 +4,12 @@ import (
"bufio"
"bytes"
"encoding/binary"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/pkg/errors"
"io"
"math"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/txsort"
@@ -73,55 +73,24 @@ func (cfr *compactFeeIterator) next() (uint64, error) {
}
// The following functions relate to storing and retrieving fee data from the database
var feeBucket = []byte("fees")
// getBluesFeeData returns the compactFeeData for all of the node's blues,
// used to calculate the fees this blockNode needs to pay
func (node *blockNode) getBluesFeeData(dag *BlockDAG) (map[daghash.Hash]compactFeeData, error) {
bluesFeeData := make(map[daghash.Hash]compactFeeData)
err := dag.db.View(func(dbTx database.Tx) error {
for _, blueBlock := range node.blues {
feeData, err := dbFetchFeeData(dbTx, blueBlock.hash)
if err != nil {
return errors.Errorf("Error getting fee data for block %s: %s", blueBlock.hash, err)
}
bluesFeeData[*blueBlock.hash] = feeData
for _, blueBlock := range node.blues {
feeData, err := dbaccess.FetchFeeData(dbaccess.NoTx(), blueBlock.hash)
if err != nil {
return nil, err
}
return nil
})
if err != nil {
return nil, err
bluesFeeData[*blueBlock.hash] = feeData
}
return bluesFeeData, nil
}
func dbStoreFeeData(dbTx database.Tx, blockHash *daghash.Hash, feeData compactFeeData) error {
feeBucket, err := dbTx.Metadata().CreateBucketIfNotExists(feeBucket)
if err != nil {
return errors.Errorf("Error creating or retrieving fee bucket: %s", err)
}
return feeBucket.Put(blockHash.CloneBytes(), feeData)
}
func dbFetchFeeData(dbTx database.Tx, blockHash *daghash.Hash) (compactFeeData, error) {
feeBucket := dbTx.Metadata().Bucket(feeBucket)
if feeBucket == nil {
return nil, errors.New("Fee bucket does not exist")
}
feeData := feeBucket.Get(blockHash.CloneBytes())
if feeData == nil {
return nil, errors.Errorf("No fee data found for block %s", blockHash)
}
return feeData, nil
}
// The following functions deal with building and validating the coinbase transaction
func (node *blockNode) validateCoinbaseTransaction(dag *BlockDAG, block *util.Block, txsAcceptanceData MultiBlockTxsAcceptanceData) error {


@@ -17,7 +17,6 @@ import (
"time"
"github.com/kaspanet/kaspad/dagconfig"
_ "github.com/kaspanet/kaspad/database/ffldb"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
@@ -73,15 +72,8 @@ func loadUTXOSet(filename string) (UTXOSet, error) {
return nil, err
}
// Serialized utxo entry.
serialized := make([]byte, numBytes)
_, err = io.ReadAtLeast(r, serialized, int(numBytes))
if err != nil {
return nil, err
}
// Deserialize it and add it to the view.
entry, err := deserializeUTXOEntry(serialized)
// Deserialize the UTXO entry and add it to the UTXO set.
entry, err := deserializeUTXOEntry(r)
if err != nil {
return nil, err
}
@@ -102,11 +94,11 @@ func (dag *BlockDAG) TestSetCoinbaseMaturity(maturity uint64) {
// it is not usable with all functions and the tests must take care when making
// use of it.
func newTestDAG(params *dagconfig.Params) *BlockDAG {
index := newBlockIndex(nil, params)
index := newBlockIndex(params)
targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second)
dag := &BlockDAG{
dagParams: params,
timeSource: NewMedianTime(),
timeSource: NewTimeSource(),
targetTimePerBlock: targetTimePerBlock,
difficultyAdjustmentWindowSize: params.DifficultyAdjustmentWindowSize,
TimestampDeviationTolerance: params.TimestampDeviationTolerance,
@@ -211,3 +203,15 @@ func nodeByMsgBlock(t *testing.T, dag *BlockDAG, block *wire.MsgBlock) *blockNod
}
return node
}
type fakeTimeSource struct {
time time.Time
}
func (fts *fakeTimeSource) Now() time.Time {
return time.Unix(fts.time.Unix(), 0)
}
func newFakeTimeSource(fakeTime time.Time) TimeSource {
return &fakeTimeSource{time: fakeTime}
}
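A brief, hypothetical usage note: because fakeTimeSource always reports the same instant (truncated to whole seconds), tests that depend on timestamps become deterministic. Assuming TimeSource is the interface with a Now() method that the DAG consumes, a test snippet could look like:

fakeTime := time.Unix(1593000000, 0)
var timeSource TimeSource = newFakeTimeSource(fakeTime)
// Every call returns the same second, regardless of the wall clock.
fmt.Println(timeSource.Now().Equal(fakeTime)) // prints: true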


@@ -1,584 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/kaspad/txscript"
)
// -----------------------------------------------------------------------------
// A variable length quantity (VLQ) is an encoding that uses an arbitrary number
// of binary octets to represent an arbitrarily large integer. The scheme
// employs a most significant byte (MSB) base-128 encoding where the high bit in
// each byte indicates whether or not the byte is the final one. In addition,
// to ensure there are no redundant encodings, an offset is subtracted every
// time a group of 7 bits is shifted out. Therefore each integer can be
// represented in exactly one way, and each representation stands for exactly
// one integer.
//
// Another nice property of this encoding is that it provides a compact
// representation of values that are typically used to indicate sizes. For
// example, the values 0 - 127 are represented with a single byte, 128 - 16511
// with two bytes, and 16512 - 2113663 with three bytes.
//
// While the encoding allows arbitrarily large integers, it is artificially
// limited in this code to an unsigned 64-bit integer for efficiency purposes.
//
// Example encodings:
// 0 -> [0x00]
// 127 -> [0x7f] * Max 1-byte value
// 128 -> [0x80 0x00]
// 129 -> [0x80 0x01]
// 255 -> [0x80 0x7f]
// 256 -> [0x81 0x00]
// 16511 -> [0xff 0x7f] * Max 2-byte value
// 16512 -> [0x80 0x80 0x00]
// 32895 -> [0x80 0xff 0x7f]
// 2113663 -> [0xff 0xff 0x7f] * Max 3-byte value
// 270549119 -> [0xff 0xff 0xff 0x7f] * Max 4-byte value
// 2^64-1 -> [0x80 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0xfe 0x7f]
//
// References:
// https://en.wikipedia.org/wiki/Variable-length_quantity
// http://www.codecodex.com/wiki/Variable-Length_Integers
// -----------------------------------------------------------------------------
// serializeSizeVLQ returns the number of bytes it would take to serialize the
// passed number as a variable-length quantity according to the format described
// above.
func serializeSizeVLQ(n uint64) int {
size := 1
for ; n > 0x7f; n = (n >> 7) - 1 {
size++
}
return size
}
// putVLQ serializes the provided number to a variable-length quantity according
// to the format described above and returns the number of bytes of the encoded
// value. The result is placed directly into the passed byte slice which must
// be at least large enough to handle the number of bytes returned by the
// serializeSizeVLQ function or it will panic.
func putVLQ(target []byte, n uint64) int {
offset := 0
for ; ; offset++ {
// The high bit is set when another byte follows.
highBitMask := byte(0x80)
if offset == 0 {
highBitMask = 0x00
}
target[offset] = byte(n&0x7f) | highBitMask
if n <= 0x7f {
break
}
n = (n >> 7) - 1
}
// Reverse the bytes so it is MSB-encoded.
for i, j := 0, offset; i < j; i, j = i+1, j-1 {
target[i], target[j] = target[j], target[i]
}
return offset + 1
}
// deserializeVLQ deserializes the provided variable-length quantity according
// to the format described above. It also returns the number of bytes
// deserialized.
func deserializeVLQ(serialized []byte) (uint64, int) {
var n uint64
var size int
for _, val := range serialized {
size++
n = (n << 7) | uint64(val&0x7f)
if val&0x80 != 0x80 {
break
}
n++
}
return n, size
}
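To make the offset-subtracted encoding concrete, here is a short usage sketch of the three VLQ helpers above (only those functions are assumed to be in scope):

// 128 needs two 7-bit groups; the offset subtraction makes its encoding
// [0x80 0x00] rather than [0x81 0x00], so no value has two encodings.
n := uint64(128)
buf := make([]byte, serializeSizeVLQ(n)) // 2 bytes
putVLQ(buf, n)                           // buf == []byte{0x80, 0x00}
decoded, bytesRead := deserializeVLQ(buf)
// decoded == 128, bytesRead == 2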
// -----------------------------------------------------------------------------
// In order to reduce the size of stored scripts, a domain specific compression
// algorithm is used which recognizes standard scripts and stores them using
// fewer bytes than the original script.
//
// The general serialized format is:
//
// <script size or type><script data>
//
// Field Type Size
// script size or type VLQ variable
// script data []byte variable
//
// The specific serialized format for each recognized standard script is:
//
// - Pay-to-pubkey-hash: (21 bytes) - <0><20-byte pubkey hash>
// - Pay-to-script-hash: (21 bytes) - <1><20-byte script hash>
// - Pay-to-pubkey**: (33 bytes) - <2, 3, 4, or 5><32-byte pubkey X value>
// 2, 3 = compressed pubkey with bit 0 specifying the y coordinate to use
// 4, 5 = uncompressed pubkey with bit 0 specifying the y coordinate to use
// ** Only valid public keys starting with 0x02, 0x03, and 0x04 are supported.
//
// Any scripts which are not recognized as one of the aforementioned standard
// scripts are encoded using the general serialized format and encode the script
// size as the sum of the actual size of the script and the number of special
// cases.
// -----------------------------------------------------------------------------
// The following constants specify the special constants used to identify a
// special script type in the domain-specific compressed script encoding.
//
// NOTE: This section specifically does not use iota since these values are
// serialized and must be stable for long-term storage.
const (
// cstPayToPubKeyHash identifies a compressed pay-to-pubkey-hash script.
cstPayToPubKeyHash = 0
// cstPayToScriptHash identifies a compressed pay-to-script-hash script.
cstPayToScriptHash = 1
// cstPayToPubKeyComp2 identifies a compressed pay-to-pubkey script to
// a compressed pubkey. Bit 0 specifies which y-coordinate to use
// to reconstruct the full uncompressed pubkey.
cstPayToPubKeyComp2 = 2
// cstPayToPubKeyComp3 identifies a compressed pay-to-pubkey script to
// a compressed pubkey. Bit 0 specifies which y-coordinate to use
// to reconstruct the full uncompressed pubkey.
cstPayToPubKeyComp3 = 3
// cstPayToPubKeyUncomp4 identifies a compressed pay-to-pubkey script to
// an uncompressed pubkey. Bit 0 specifies which y-coordinate to use
// to reconstruct the full uncompressed pubkey.
cstPayToPubKeyUncomp4 = 4
// cstPayToPubKeyUncomp5 identifies a compressed pay-to-pubkey script to
// an uncompressed pubkey. Bit 0 specifies which y-coordinate to use
// to reconstruct the full uncompressed pubkey.
cstPayToPubKeyUncomp5 = 5
// numSpecialScripts is the number of special scripts recognized by the
// domain-specific script compression algorithm.
numSpecialScripts = 6
)
// isPubKeyHash returns whether or not the passed public key script is a
// standard pay-to-pubkey-hash script along with the pubkey hash it is paying to
// if it is.
func isPubKeyHash(script []byte) (bool, []byte) {
if len(script) == 25 && script[0] == txscript.OpDup &&
script[1] == txscript.OpHash160 &&
script[2] == txscript.OpData20 &&
script[23] == txscript.OpEqualVerify &&
script[24] == txscript.OpCheckSig {
return true, script[3:23]
}
return false, nil
}
// isScriptHash returns whether or not the passed public key script is a
// standard pay-to-script-hash script along with the script hash it is paying to
// if it is.
func isScriptHash(script []byte) (bool, []byte) {
if len(script) == 23 && script[0] == txscript.OpHash160 &&
script[1] == txscript.OpData20 &&
script[22] == txscript.OpEqual {
return true, script[2:22]
}
return false, nil
}
// isPubKey returns whether or not the passed public key script is a standard
// pay-to-pubkey script that pays to a valid compressed or uncompressed public
// key along with the serialized pubkey it is paying to if it is.
//
// NOTE: This function ensures the public key is actually valid since the
// compression algorithm requires valid pubkeys. It does not support hybrid
// pubkeys. This means that even if the script has the correct form for a
// pay-to-pubkey script, this function will only return true when it is paying
// to a valid compressed or uncompressed pubkey.
func isPubKey(script []byte) (bool, []byte) {
// Pay-to-compressed-pubkey script.
if len(script) == 35 && script[0] == txscript.OpData33 &&
script[34] == txscript.OpCheckSig && (script[1] == 0x02 ||
script[1] == 0x03) {
// Ensure the public key is valid.
serializedPubKey := script[1:34]
_, err := ecc.ParsePubKey(serializedPubKey, ecc.S256())
if err == nil {
return true, serializedPubKey
}
}
// Pay-to-uncompressed-pubkey script.
if len(script) == 67 && script[0] == txscript.OpData65 &&
script[66] == txscript.OpCheckSig && script[1] == 0x04 {
// Ensure the public key is valid.
serializedPubKey := script[1:66]
_, err := ecc.ParsePubKey(serializedPubKey, ecc.S256())
if err == nil {
return true, serializedPubKey
}
}
return false, nil
}
// compressedScriptSize returns the number of bytes the passed script would take
// when encoded with the domain specific compression algorithm described above.
func compressedScriptSize(scriptPubKey []byte) int {
// Pay-to-pubkey-hash script.
if valid, _ := isPubKeyHash(scriptPubKey); valid {
return 21
}
// Pay-to-script-hash script.
if valid, _ := isScriptHash(scriptPubKey); valid {
return 21
}
// Pay-to-pubkey (compressed or uncompressed) script.
if valid, _ := isPubKey(scriptPubKey); valid {
return 33
}
// When none of the above special cases apply, encode the script as is
// preceded by the sum of its size and the number of special cases
// encoded as a variable length quantity.
return serializeSizeVLQ(uint64(len(scriptPubKey)+numSpecialScripts)) +
len(scriptPubKey)
}
// decodeCompressedScriptSize treats the passed serialized bytes as a compressed
// script, possibly followed by other data, and returns the number of bytes it
// occupies taking into account the special encoding of the script size by the
// domain specific compression algorithm described above.
func decodeCompressedScriptSize(serialized []byte) int {
scriptSize, bytesRead := deserializeVLQ(serialized)
if bytesRead == 0 {
return 0
}
switch scriptSize {
case cstPayToPubKeyHash:
return 21
case cstPayToScriptHash:
return 21
case cstPayToPubKeyComp2, cstPayToPubKeyComp3, cstPayToPubKeyUncomp4,
cstPayToPubKeyUncomp5:
return 33
}
scriptSize -= numSpecialScripts
scriptSize += uint64(bytesRead)
return int(scriptSize)
}
// putCompressedScript compresses the passed script according to the domain
// specific compression algorithm described above directly into the passed
// target byte slice. The target byte slice must be at least large enough to
// handle the number of bytes returned by the compressedScriptSize function or
// it will panic.
func putCompressedScript(target, scriptPubKey []byte) int {
// Pay-to-pubkey-hash script.
if valid, hash := isPubKeyHash(scriptPubKey); valid {
target[0] = cstPayToPubKeyHash
copy(target[1:21], hash)
return 21
}
// Pay-to-script-hash script.
if valid, hash := isScriptHash(scriptPubKey); valid {
target[0] = cstPayToScriptHash
copy(target[1:21], hash)
return 21
}
// Pay-to-pubkey (compressed or uncompressed) script.
if valid, serializedPubKey := isPubKey(scriptPubKey); valid {
pubKeyFormat := serializedPubKey[0]
switch pubKeyFormat {
case 0x02, 0x03:
target[0] = pubKeyFormat
copy(target[1:33], serializedPubKey[1:33])
return 33
case 0x04:
// Encode the oddness of the serialized pubkey into the
// compressed script type.
target[0] = pubKeyFormat | (serializedPubKey[64] & 0x01)
copy(target[1:33], serializedPubKey[1:33])
return 33
}
}
// When none of the above special cases apply, encode the unmodified
// script preceded by the sum of its size and the number of special
// cases encoded as a variable length quantity.
encodedSize := uint64(len(scriptPubKey) + numSpecialScripts)
vlqSizeLen := putVLQ(target, encodedSize)
copy(target[vlqSizeLen:], scriptPubKey)
return vlqSizeLen + len(scriptPubKey)
}
// decompressScript returns the original script obtained by decompressing the
// passed compressed script according to the domain specific compression
// algorithm described above.
//
// NOTE: The script parameter must already have been proven to be long enough
// to contain the number of bytes returned by decodeCompressedScriptSize or it
// will panic. This is acceptable since it is only an internal function.
func decompressScript(compressedScriptPubKey []byte) []byte {
// In practice this function will not be called with a zero-length or
// nil script since the nil script encoding includes the length, however
// the code below assumes the length exists, so just return nil now if
// the function ever ends up being called with a nil script in the
// future.
if len(compressedScriptPubKey) == 0 {
return nil
}
// Decode the script size and examine it for the special cases.
encodedScriptSize, bytesRead := deserializeVLQ(compressedScriptPubKey)
switch encodedScriptSize {
// Pay-to-pubkey-hash script. The resulting script is:
// <OP_DUP><OP_HASH160><20 byte hash><OP_EQUALVERIFY><OP_CHECKSIG>
case cstPayToPubKeyHash:
scriptPubKey := make([]byte, 25)
scriptPubKey[0] = txscript.OpDup
scriptPubKey[1] = txscript.OpHash160
scriptPubKey[2] = txscript.OpData20
copy(scriptPubKey[3:], compressedScriptPubKey[bytesRead:bytesRead+20])
scriptPubKey[23] = txscript.OpEqualVerify
scriptPubKey[24] = txscript.OpCheckSig
return scriptPubKey
// Pay-to-script-hash script. The resulting script is:
// <OP_HASH160><20 byte script hash><OP_EQUAL>
case cstPayToScriptHash:
scriptPubKey := make([]byte, 23)
scriptPubKey[0] = txscript.OpHash160
scriptPubKey[1] = txscript.OpData20
copy(scriptPubKey[2:], compressedScriptPubKey[bytesRead:bytesRead+20])
scriptPubKey[22] = txscript.OpEqual
return scriptPubKey
// Pay-to-compressed-pubkey script. The resulting script is:
// <OP_DATA_33><33 byte compressed pubkey><OP_CHECKSIG>
case cstPayToPubKeyComp2, cstPayToPubKeyComp3:
scriptPubKey := make([]byte, 35)
scriptPubKey[0] = txscript.OpData33
scriptPubKey[1] = byte(encodedScriptSize)
copy(scriptPubKey[2:], compressedScriptPubKey[bytesRead:bytesRead+32])
scriptPubKey[34] = txscript.OpCheckSig
return scriptPubKey
// Pay-to-uncompressed-pubkey script. The resulting script is:
// <OP_DATA_65><65 byte uncompressed pubkey><OP_CHECKSIG>
case cstPayToPubKeyUncomp4, cstPayToPubKeyUncomp5:
// Change the leading byte to the appropriate compressed pubkey
// identifier (0x02 or 0x03) so it can be decoded as a
// compressed pubkey. This really should never fail since the
// encoding ensures it is valid before compressing to this type.
compressedKey := make([]byte, 33)
compressedKey[0] = byte(encodedScriptSize - 2)
copy(compressedKey[1:], compressedScriptPubKey[1:])
key, err := ecc.ParsePubKey(compressedKey, ecc.S256())
if err != nil {
return nil
}
scriptPubKey := make([]byte, 67)
scriptPubKey[0] = txscript.OpData65
copy(scriptPubKey[1:], key.SerializeUncompressed())
scriptPubKey[66] = txscript.OpCheckSig
return scriptPubKey
}
// When none of the special cases apply, the script was encoded using
// the general format, so reduce the script size by the number of
// special cases and return the unmodified script.
scriptSize := int(encodedScriptSize - numSpecialScripts)
scriptPubKey := make([]byte, scriptSize)
copy(scriptPubKey, compressedScriptPubKey[bytesRead:bytesRead+scriptSize])
return scriptPubKey
}
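A short sketch of the compression roundtrip for a standard pay-to-pubkey-hash script, using the helpers above (the 20-byte hash is a placeholder, and the txscript opcode constants are referenced exactly as in this file):

// A 25-byte P2PKH script compresses to 21 bytes: the type byte
// cstPayToPubKeyHash (0x00) followed by the 20-byte pubkey hash.
pubKeyHash := bytes.Repeat([]byte{0xab}, 20)
scriptPubKey := append([]byte{txscript.OpDup, txscript.OpHash160, txscript.OpData20}, pubKeyHash...)
scriptPubKey = append(scriptPubKey, txscript.OpEqualVerify, txscript.OpCheckSig)

compressed := make([]byte, compressedScriptSize(scriptPubKey)) // 21 bytes
putCompressedScript(compressed, scriptPubKey)
roundTripped := decompressScript(compressed)
// bytes.Equal(roundTripped, scriptPubKey) == true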
// -----------------------------------------------------------------------------
// In order to reduce the size of stored amounts, a domain specific compression
// algorithm is used which relies on there typically being a lot of zeroes at
// the end of the amounts.
//
// While this is simply exchanging one uint64 for another, the resulting value
// for typical amounts has a much smaller magnitude which results in fewer bytes
// when encoded as variable length quantity. For example, consider the amount
// of 0.1 KAS which is 10000000 sompi. Encoding 10000000 as a VLQ would take
// 4 bytes while encoding the compressed value of 8 as a VLQ only takes 1 byte.
//
// Essentially the compression is achieved by splitting the value into an
// exponent in the range [0-9] and a digit in the range [1-9], when possible,
// and encoding them in a way that can be decoded. More specifically, the
// encoding is as follows:
// - 0 is 0
// - Find the exponent, e, as the largest power of 10 that evenly divides the
// value up to a maximum of 9
// - When e < 9, the final digit can't be 0 so store it as d and remove it by
// dividing the value by 10 (call the result n). The encoded value is thus:
// 1 + 10*(9*n + d-1) + e
// - When e==9, the only thing known is the amount is not 0. The encoded value
// is thus:
// 1 + 10*(n-1) + e == 10 + 10*(n-1)
//
// Example encodings:
// (The numbers in parenthesis are the number of bytes when serialized as a VLQ)
// 0 (1) -> 0 (1) * 0.00000000 KAS
// 1000 (2) -> 4 (1) * 0.00001000 KAS
// 10000 (2) -> 5 (1) * 0.00010000 KAS
// 12345678 (4) -> 111111101 (4) * 0.12345678 KAS
// 50000000 (4) -> 48 (1) * 0.50000000 KAS
// 100000000 (4) -> 9 (1) * 1.00000000 KAS
// 500000000 (5) -> 49 (1) * 5.00000000 KAS
// 1000000000 (5) -> 10 (1) * 10.00000000 KAS
// -----------------------------------------------------------------------------
// compressTxOutAmount compresses the passed amount according to the domain
// specific compression algorithm described above.
func compressTxOutAmount(amount uint64) uint64 {
// No need to do any work if it's zero.
if amount == 0 {
return 0
}
// Find the largest power of 10 (max of 9) that evenly divides the
// value.
exponent := uint64(0)
for amount%10 == 0 && exponent < 9 {
amount /= 10
exponent++
}
// The compressed result for exponents less than 9 is:
// 1 + 10*(9*n + d-1) + e
if exponent < 9 {
lastDigit := amount % 10
amount /= 10
return 1 + 10*(9*amount+lastDigit-1) + exponent
}
// The compressed result for an exponent of 9 is:
// 1 + 10*(n-1) + e == 10 + 10*(n-1)
return 10 + 10*(amount-1)
}
// decompressTxOutAmount returns the original amount the passed compressed
// amount represents according to the domain specific compression algorithm
// described above.
func decompressTxOutAmount(amount uint64) uint64 {
// No need to do any work if it's zero.
if amount == 0 {
return 0
}
// The decompressed amount is either of the following two equations:
// x = 1 + 10*(9*n + d - 1) + e
// x = 1 + 10*(n - 1) + 9
amount--
// The decompressed amount is now one of the following two equations:
// x = 10*(9*n + d - 1) + e
// x = 10*(n - 1) + 9
exponent := amount % 10
amount /= 10
// The decompressed amount is now one of the following two equations:
// x = 9*n + d - 1 | where e < 9
// x = n - 1 | where e = 9
n := uint64(0)
if exponent < 9 {
lastDigit := amount%9 + 1
amount /= 9
n = amount*10 + lastDigit
} else {
n = amount + 1
}
// Apply the exponent.
for ; exponent > 0; exponent-- {
n *= 10
}
return n
}
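For example, walking 546 sompi through the scheme above: there are no trailing zeros, so e = 0; the last digit d = 6 is stripped, leaving n = 54, and the encoded value is 1 + 10*(9*54 + 6 - 1) + 0 = 4911. A two-line roundtrip using the functions above:

compressed := compressTxOutAmount(546)        // 4911
original := decompressTxOutAmount(compressed) // 546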
// -----------------------------------------------------------------------------
// Compressed transaction outputs consist of an amount and a public key script
// both compressed using the domain specific compression algorithms previously
// described.
//
// The serialized format is:
//
// <compressed amount><compressed script>
//
// Field Type Size
// compressed amount VLQ variable
// compressed script []byte variable
// -----------------------------------------------------------------------------
// compressedTxOutSize returns the number of bytes the passed transaction output
// fields would take when encoded with the format described above.
func compressedTxOutSize(amount uint64, scriptPubKey []byte) int {
return serializeSizeVLQ(compressTxOutAmount(amount)) +
compressedScriptSize(scriptPubKey)
}
// putCompressedTxOut compresses the passed amount and script according to their
// domain specific compression algorithms and encodes them directly into the
// passed target byte slice with the format described above. The target byte
// slice must be at least large enough to handle the number of bytes returned by
// the compressedTxOutSize function or it will panic.
func putCompressedTxOut(target []byte, amount uint64, scriptPubKey []byte) int {
offset := putVLQ(target, compressTxOutAmount(amount))
offset += putCompressedScript(target[offset:], scriptPubKey)
return offset
}
// decodeCompressedTxOut decodes the passed compressed txout, possibly followed
// by other data, into its uncompressed amount and script and returns them along
// with the number of bytes they occupied prior to decompression.
func decodeCompressedTxOut(serialized []byte) (uint64, []byte, int, error) {
// Deserialize the compressed amount and ensure there are bytes
// remaining for the compressed script.
compressedAmount, bytesRead := deserializeVLQ(serialized)
if bytesRead >= len(serialized) {
return 0, nil, bytesRead, errDeserialize("unexpected end of " +
"data after compressed amount")
}
// Decode the compressed script size and ensure there are enough bytes
// left in the slice for it.
scriptSize := decodeCompressedScriptSize(serialized[bytesRead:])
if len(serialized[bytesRead:]) < scriptSize {
return 0, nil, bytesRead, errDeserialize("unexpected end of " +
"data after script size")
}
// Decompress and return the amount and script.
amount := decompressTxOutAmount(compressedAmount)
script := decompressScript(serialized[bytesRead : bytesRead+scriptSize])
return amount, script, bytesRead + scriptSize, nil
}


@@ -1,436 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"bytes"
"encoding/hex"
"testing"
)
// hexToBytes converts the passed hex string into bytes and will panic if there
// is an error. This is only provided for the hard-coded constants so errors in
// the source code can be detected. It will only (and must only) be called with
// hard-coded values.
func hexToBytes(s string) []byte {
b, err := hex.DecodeString(s)
if err != nil {
panic("invalid hex in source file: " + s)
}
return b
}
// TestVLQ ensures the variable length quantity serialization, deserialization,
// and size calculation works as expected.
func TestVLQ(t *testing.T) {
t.Parallel()
tests := []struct {
val uint64
serialized []byte
}{
{0, hexToBytes("00")},
{1, hexToBytes("01")},
{127, hexToBytes("7f")},
{128, hexToBytes("8000")},
{129, hexToBytes("8001")},
{255, hexToBytes("807f")},
{256, hexToBytes("8100")},
{16383, hexToBytes("fe7f")},
{16384, hexToBytes("ff00")},
{16511, hexToBytes("ff7f")}, // Max 2-byte value
{16512, hexToBytes("808000")},
{16513, hexToBytes("808001")},
{16639, hexToBytes("80807f")},
{32895, hexToBytes("80ff7f")},
{2113663, hexToBytes("ffff7f")}, // Max 3-byte value
{2113664, hexToBytes("80808000")},
{270549119, hexToBytes("ffffff7f")}, // Max 4-byte value
{270549120, hexToBytes("8080808000")},
{2147483647, hexToBytes("86fefefe7f")},
{2147483648, hexToBytes("86fefeff00")},
{4294967295, hexToBytes("8efefefe7f")}, // Max uint32, 5 bytes
// Max uint64, 10 bytes
{18446744073709551615, hexToBytes("80fefefefefefefefe7f")},
}
for _, test := range tests {
// Ensure the function to calculate the serialized size without
// actually serializing the value is calculated properly.
gotSize := serializeSizeVLQ(test.val)
if gotSize != len(test.serialized) {
t.Errorf("serializeSizeVLQ: did not get expected size "+
"for %d - got %d, want %d", test.val, gotSize,
len(test.serialized))
continue
}
// Ensure the value serializes to the expected bytes.
gotBytes := make([]byte, gotSize)
gotBytesWritten := putVLQ(gotBytes, test.val)
if !bytes.Equal(gotBytes, test.serialized) {
t.Errorf("putVLQUnchecked: did not get expected bytes "+
"for %d - got %x, want %x", test.val, gotBytes,
test.serialized)
continue
}
if gotBytesWritten != len(test.serialized) {
t.Errorf("putVLQUnchecked: did not get expected number "+
"of bytes written for %d - got %d, want %d",
test.val, gotBytesWritten, len(test.serialized))
continue
}
// Ensure the serialized bytes deserialize to the expected
// value.
gotVal, gotBytesRead := deserializeVLQ(test.serialized)
if gotVal != test.val {
t.Errorf("deserializeVLQ: did not get expected value "+
"for %x - got %d, want %d", test.serialized,
gotVal, test.val)
continue
}
if gotBytesRead != len(test.serialized) {
t.Errorf("deserializeVLQ: did not get expected number "+
"of bytes read for %d - got %d, want %d",
test.serialized, gotBytesRead,
len(test.serialized))
continue
}
}
}
// TestScriptCompression ensures the domain-specific script compression and
// decompression works as expected.
func TestScriptCompression(t *testing.T) {
t.Parallel()
tests := []struct {
name string
uncompressed []byte
compressed []byte
}{
{
name: "nil",
uncompressed: nil,
compressed: hexToBytes("06"),
},
{
name: "pay-to-pubkey-hash 1",
uncompressed: hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac"),
compressed: hexToBytes("001018853670f9f3b0582c5b9ee8ce93764ac32b93"),
},
{
name: "pay-to-pubkey-hash 2",
uncompressed: hexToBytes("76a914e34cce70c86373273efcc54ce7d2a491bb4a0e8488ac"),
compressed: hexToBytes("00e34cce70c86373273efcc54ce7d2a491bb4a0e84"),
},
{
name: "pay-to-script-hash 1",
uncompressed: hexToBytes("a914da1745e9b549bd0bfa1a569971c77eba30cd5a4b87"),
compressed: hexToBytes("01da1745e9b549bd0bfa1a569971c77eba30cd5a4b"),
},
{
name: "pay-to-script-hash 2",
uncompressed: hexToBytes("a914f815b036d9bbbce5e9f2a00abd1bf3dc91e9551087"),
compressed: hexToBytes("01f815b036d9bbbce5e9f2a00abd1bf3dc91e95510"),
},
{
name: "pay-to-pubkey compressed 0x02",
uncompressed: hexToBytes("2102192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4ac"),
compressed: hexToBytes("02192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
},
{
name: "pay-to-pubkey compressed 0x03",
uncompressed: hexToBytes("2103b0bd634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e65ac"),
compressed: hexToBytes("03b0bd634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e65"),
},
{
name: "pay-to-pubkey uncompressed 0x04 even",
uncompressed: hexToBytes("4104192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b40d45264838c0bd96852662ce6a847b197376830160c6d2eb5e6a4c44d33f453eac"),
compressed: hexToBytes("04192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
},
{
name: "pay-to-pubkey uncompressed 0x04 odd",
uncompressed: hexToBytes("410411db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5cb2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3ac"),
compressed: hexToBytes("0511db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c"),
},
{
name: "pay-to-pubkey invalid pubkey",
uncompressed: hexToBytes("3302aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
compressed: hexToBytes("293302aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
},
{
name: "requires 2 size bytes - data push 200 bytes",
uncompressed: append(hexToBytes("4cc8"), bytes.Repeat([]byte{0x00}, 200)...),
// [0x80, 0x50] = 208 as a variable length quantity
// [0x4c, 0xc8] = OP_PUSHDATA1 200
compressed: append(hexToBytes("80504cc8"), bytes.Repeat([]byte{0x00}, 200)...),
},
}
for _, test := range tests {
// Ensure the function to calculate the serialized size without
// actually serializing the value is calculated properly.
gotSize := compressedScriptSize(test.uncompressed)
if gotSize != len(test.compressed) {
t.Errorf("compressedScriptSize (%s): did not get "+
"expected size - got %d, want %d", test.name,
gotSize, len(test.compressed))
continue
}
// Ensure the script compresses to the expected bytes.
gotCompressed := make([]byte, gotSize)
gotBytesWritten := putCompressedScript(gotCompressed,
test.uncompressed)
if !bytes.Equal(gotCompressed, test.compressed) {
t.Errorf("putCompressedScript (%s): did not get "+
"expected bytes - got %x, want %x", test.name,
gotCompressed, test.compressed)
continue
}
if gotBytesWritten != len(test.compressed) {
t.Errorf("putCompressedScript (%s): did not get "+
"expected number of bytes written - got %d, "+
"want %d", test.name, gotBytesWritten,
len(test.compressed))
continue
}
// Ensure the compressed script size is properly decoded from
// the compressed script.
gotDecodedSize := decodeCompressedScriptSize(test.compressed)
if gotDecodedSize != len(test.compressed) {
t.Errorf("decodeCompressedScriptSize (%s): did not get "+
"expected size - got %d, want %d", test.name,
gotDecodedSize, len(test.compressed))
continue
}
// Ensure the script decompresses to the expected bytes.
gotDecompressed := decompressScript(test.compressed)
if !bytes.Equal(gotDecompressed, test.uncompressed) {
t.Errorf("decompressScript (%s): did not get expected "+
"bytes - got %x, want %x", test.name,
gotDecompressed, test.uncompressed)
continue
}
}
}
// TestScriptCompressionErrors ensures calling various functions related to
// script compression with incorrect data returns the expected results.
func TestScriptCompressionErrors(t *testing.T) {
t.Parallel()
// A nil script must result in a decoded size of 0.
if gotSize := decodeCompressedScriptSize(nil); gotSize != 0 {
t.Fatalf("decodeCompressedScriptSize with nil script did not "+
"return 0 - got %d", gotSize)
}
// A nil script must result in a nil decompressed script.
if gotScript := decompressScript(nil); gotScript != nil {
t.Fatalf("decompressScript with nil script did not return nil "+
"decompressed script - got %x", gotScript)
}
// A compressed script for a pay-to-pubkey (uncompressed) that results
// in an invalid pubkey must result in a nil decompressed script.
compressedScript := hexToBytes("04012d74d0cb94344c9569c2e77901573d8d" +
"7903c3ebec3a957724895dca52c6b4")
if gotScript := decompressScript(compressedScript); gotScript != nil {
t.Fatalf("decompressScript with compressed pay-to-"+
"uncompressed-pubkey that is invalid did not return "+
"nil decompressed script - got %x", gotScript)
}
}
// TestAmountCompression ensures the domain-specific transaction output amount
// compression and decompression works as expected.
func TestAmountCompression(t *testing.T) {
t.Parallel()
tests := []struct {
name string
uncompressed uint64
compressed uint64
}{
{
name: "0 KAS",
uncompressed: 0,
compressed: 0,
},
{
name: "546 Sompi (current network dust value)",
uncompressed: 546,
compressed: 4911,
},
{
name: "0.00001 KAS (typical transaction fee)",
uncompressed: 1000,
compressed: 4,
},
{
name: "0.0001 KAS (typical transaction fee)",
uncompressed: 10000,
compressed: 5,
},
{
name: "0.12345678 KAS",
uncompressed: 12345678,
compressed: 111111101,
},
{
name: "0.5 KAS",
uncompressed: 50000000,
compressed: 48,
},
{
name: "1 KAS",
uncompressed: 100000000,
compressed: 9,
},
{
name: "5 KAS",
uncompressed: 500000000,
compressed: 49,
},
{
name: "21000000 KAS (max minted coins)",
uncompressed: 2100000000000000,
compressed: 21000000,
},
}
for _, test := range tests {
// Ensure the amount compresses to the expected value.
gotCompressed := compressTxOutAmount(test.uncompressed)
if gotCompressed != test.compressed {
t.Errorf("compressTxOutAmount (%s): did not get "+
"expected value - got %d, want %d", test.name,
gotCompressed, test.compressed)
continue
}
// Ensure the value decompresses to the expected value.
gotDecompressed := decompressTxOutAmount(test.compressed)
if gotDecompressed != test.uncompressed {
t.Errorf("decompressTxOutAmount (%s): did not get "+
"expected value - got %d, want %d", test.name,
gotDecompressed, test.uncompressed)
continue
}
}
}
// TestCompressedTxOut ensures the transaction output serialization and
// deserialization works as expected.
func TestCompressedTxOut(t *testing.T) {
t.Parallel()
tests := []struct {
name string
amount uint64
scriptPubKey []byte
compressed []byte
}{
{
name: "pay-to-pubkey-hash dust",
amount: 546,
scriptPubKey: hexToBytes("76a9141018853670f9f3b0582c5b9ee8ce93764ac32b9388ac"),
compressed: hexToBytes("a52f001018853670f9f3b0582c5b9ee8ce93764ac32b93"),
},
{
name: "pay-to-pubkey uncompressed 1 KAS",
amount: 100000000,
scriptPubKey: hexToBytes("4104192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b40d45264838c0bd96852662ce6a847b197376830160c6d2eb5e6a4c44d33f453eac"),
compressed: hexToBytes("0904192d74d0cb94344c9569c2e77901573d8d7903c3ebec3a957724895dca52c6b4"),
},
}
for _, test := range tests {
// Ensure the function to calculate the serialized size without
// actually serializing the txout is calculated properly.
gotSize := compressedTxOutSize(test.amount, test.scriptPubKey)
if gotSize != len(test.compressed) {
t.Errorf("compressedTxOutSize (%s): did not get "+
"expected size - got %d, want %d", test.name,
gotSize, len(test.compressed))
continue
}
// Ensure the txout compresses to the expected value.
gotCompressed := make([]byte, gotSize)
gotBytesWritten := putCompressedTxOut(gotCompressed,
test.amount, test.scriptPubKey)
if !bytes.Equal(gotCompressed, test.compressed) {
t.Errorf("compressTxOut (%s): did not get expected "+
"bytes - got %x, want %x", test.name,
gotCompressed, test.compressed)
continue
}
if gotBytesWritten != len(test.compressed) {
t.Errorf("compressTxOut (%s): did not get expected "+
"number of bytes written - got %d, want %d",
test.name, gotBytesWritten,
len(test.compressed))
continue
}
// Ensure the serialized bytes are decoded back to the expected
// uncompressed values.
gotAmount, gotScript, gotBytesRead, err := decodeCompressedTxOut(
test.compressed)
if err != nil {
t.Errorf("decodeCompressedTxOut (%s): unexpected "+
"error: %v", test.name, err)
continue
}
if gotAmount != test.amount {
t.Errorf("decodeCompressedTxOut (%s): did not get "+
"expected amount - got %d, want %d",
test.name, gotAmount, test.amount)
continue
}
if !bytes.Equal(gotScript, test.scriptPubKey) {
t.Errorf("decodeCompressedTxOut (%s): did not get "+
"expected script - got %x, want %x",
test.name, gotScript, test.scriptPubKey)
continue
}
if gotBytesRead != len(test.compressed) {
t.Errorf("decodeCompressedTxOut (%s): did not get "+
"expected number of bytes read - got %d, want %d",
test.name, gotBytesRead, len(test.compressed))
continue
}
}
}
// TestTxOutCompressionErrors ensures calling various functions related to
// txout compression with incorrect data returns the expected results.
func TestTxOutCompressionErrors(t *testing.T) {
t.Parallel()
// A compressed txout with missing compressed script must error.
compressedTxOut := hexToBytes("00")
_, _, _, err := decodeCompressedTxOut(compressedTxOut)
if !isDeserializeErr(err) {
t.Fatalf("decodeCompressedTxOut with missing compressed script "+
"did not return expected error type - got %T, want "+
"errDeserialize", err)
}
// A compressed txout with short compressed script must error.
compressedTxOut = hexToBytes("0010")
_, _, _, err = decodeCompressedTxOut(compressedTxOut)
if !isDeserializeErr(err) {
t.Fatalf("decodeCompressedTxOut with short compressed script "+
"did not return expected error type - got %T, want "+
"errDeserialize", err)
}
}


@@ -6,6 +6,7 @@ package blockdag
import (
"fmt"
"github.com/kaspanet/kaspad/dbaccess"
"math"
"sort"
"sync"
@@ -15,8 +16,8 @@ import (
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
@@ -59,9 +60,8 @@ type BlockDAG struct {
// The following fields are set when the instance is created and can't
// be changed afterwards, so there is no need to protect them with a
// separate mutex.
db database.DB
dagParams *dagconfig.Params
timeSource MedianTimeSource
timeSource TimeSource
sigCache *txscript.SigCache
indexManager IndexManager
genesis *blockNode
@@ -150,9 +150,9 @@ type BlockDAG struct {
lastFinalityPoint *blockNode
SubnetworkStore *SubnetworkStore
utxoDiffStore *utxoDiffStore
reachabilityStore *reachabilityStore
multisetStore *multisetStore
}
// IsKnownBlock returns whether or not the DAG instance has the block represented
@@ -486,25 +486,24 @@ func (dag *BlockDAG) addBlock(node *blockNode,
if err != nil {
if errors.As(err, &RuleError{}) {
dag.index.SetStatusFlags(node, statusValidateFailed)
} else {
return nil, err
}
} else {
dag.blockCount++
}
// Intentionally ignore errors writing updated node status to DB. If
// it fails to write, it's not the end of the world. If the block is
// invalid, the worst that can happen is we revalidate the block
// after a restart.
if writeErr := dag.index.flushToDB(); writeErr != nil {
log.Warnf("Error flushing block index changes to disk: %s",
writeErr)
}
// If dag.connectBlock returned a rule error, return it here after updating DB
if err != nil {
dbTx, err := dbaccess.NewTx()
if err != nil {
return nil, err
}
defer dbTx.RollbackUnlessClosed()
err = dag.index.flushToDB(dbTx)
if err != nil {
return nil, err
}
err = dbTx.Commit()
if err != nil {
return nil, err
}
}
return nil, err
}
dag.blockCount++
return chainUpdates, nil
}
@@ -571,14 +570,13 @@ func (dag *BlockDAG) connectBlock(node *blockNode,
return nil, err
}
newBlockUTXO, txsAcceptanceData, newBlockFeeData, err := node.verifyAndBuildUTXO(dag, block.Transactions(), fastAdd)
newBlockUTXO, txsAcceptanceData, newBlockFeeData, newBlockMultiSet, err := node.verifyAndBuildUTXO(dag, block.Transactions(), fastAdd)
if err != nil {
newErrString := fmt.Sprintf("error verifying UTXO for %s: %s", node, err)
var ruleErr RuleError
if ok := errors.As(err, &ruleErr); ok {
return nil, ruleError(ruleErr.ErrorCode, newErrString)
return nil, ruleError(ruleErr.ErrorCode, fmt.Sprintf("error verifying UTXO for %s: %s", node, err))
}
return nil, errors.New(newErrString)
return nil, errors.Wrapf(err, "error verifying UTXO for %s", node)
}
err = node.validateCoinbaseTransaction(dag, block, txsAcceptanceData)
@@ -587,7 +585,7 @@ func (dag *BlockDAG) connectBlock(node *blockNode,
}
// Apply all changes to the DAG.
virtualUTXODiff, virtualTxsAcceptanceData, chainUpdates, err := dag.applyDAGChanges(node, newBlockUTXO, selectedParentAnticone)
virtualUTXODiff, chainUpdates, err := dag.applyDAGChanges(node, newBlockUTXO, newBlockMultiSet, selectedParentAnticone)
if err != nil {
// Since all validation logic has already ran, if applyDAGChanges errors out,
// this means we have a problem in the internal structure of the DAG - a problem which is
@@ -596,7 +594,7 @@ func (dag *BlockDAG) connectBlock(node *blockNode,
panic(err)
}
err = dag.saveChangesFromBlock(block, virtualUTXODiff, txsAcceptanceData, virtualTxsAcceptanceData, newBlockFeeData)
err = dag.saveChangesFromBlock(block, virtualUTXODiff, txsAcceptanceData, newBlockFeeData)
if err != nil {
return nil, err
}
@@ -604,77 +602,207 @@ func (dag *BlockDAG) connectBlock(node *blockNode,
return chainUpdates, nil
}
func (dag *BlockDAG) saveChangesFromBlock(block *util.Block, virtualUTXODiff *UTXODiff,
txsAcceptanceData MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData MultiBlockTxsAcceptanceData,
feeData compactFeeData) error {
// Write any block status changes to DB before updating the DAG state.
err := dag.index.flushToDB()
// calcMultiset returns the multiset of the UTXO of the given block with the given transactions.
func (node *blockNode) calcMultiset(dag *BlockDAG, transactions []*util.Tx, acceptanceData MultiBlockTxsAcceptanceData, selectedParentUTXO, pastUTXO UTXOSet) (*secp256k1.MultiSet, error) {
ms, err := node.pastUTXOMultiSet(dag, acceptanceData, selectedParentUTXO)
if err != nil {
return err
return nil, err
}
// Atomically insert info into the database.
err = dag.db.Update(func(dbTx database.Tx) error {
err := dag.utxoDiffStore.flushToDB(dbTx)
for _, tx := range transactions {
ms, err = addTxToMultiset(ms, tx.MsgTx(), pastUTXO, UnacceptedBlueScore)
if err != nil {
return err
return nil, err
}
}
err = dag.reachabilityStore.flushToDB(dbTx)
if err != nil {
return err
}
return ms, nil
}
// Update best block state.
state := &dagState{
TipHashes: dag.TipHashes(),
LastFinalityPoint: dag.lastFinalityPoint.hash,
}
err = dbPutDAGState(dbTx, state)
if err != nil {
return err
}
// acceptedSelectedParentMultiset takes the multiset of the selected
// parent, replaces all the selected parent outputs' blue score with
// the block blue score and returns the result.
func (node *blockNode) acceptedSelectedParentMultiset(dag *BlockDAG,
acceptanceData MultiBlockTxsAcceptanceData) (*secp256k1.MultiSet, error) {
// Update the UTXO set using the diffSet that was melded into the
// full UTXO set.
err = dbPutUTXODiff(dbTx, virtualUTXODiff)
if err != nil {
return err
}
if node.isGenesis() {
return secp256k1.NewMultiset(), nil
}
// Scan all accepted transactions and register any subnetwork registry
// transaction. If any subnetwork registry transaction is not well-formed,
// fail the entire block.
err = registerSubnetworks(dbTx, block.Transactions())
if err != nil {
return err
}
ms, err := dag.multisetStore.multisetByBlockNode(node.selectedParent)
if err != nil {
return nil, err
}
blockID, err := createBlockID(dbTx, block.Hash())
if err != nil {
return err
}
selectedParentAcceptanceData, exists := acceptanceData.FindAcceptanceData(node.selectedParent.hash)
if !exists {
return nil, errors.Errorf("couldn't find selected parent acceptance data for block %s", node)
}
for _, txAcceptanceData := range selectedParentAcceptanceData.TxAcceptanceData {
tx := txAcceptanceData.Tx
msgTx := tx.MsgTx()
isCoinbase := tx.IsCoinBase()
for i, txOut := range msgTx.TxOut {
outpoint := *wire.NewOutpoint(tx.ID(), uint32(i))
// Allow the index manager to call each of the currently active
// optional indexes with the block being connected so they can
// update themselves accordingly.
if dag.indexManager != nil {
err := dag.indexManager.ConnectBlock(dbTx, block, blockID, dag, txsAcceptanceData, virtualTxsAcceptanceData)
unacceptedEntry := NewUTXOEntry(txOut, isCoinbase, UnacceptedBlueScore)
acceptedEntry := NewUTXOEntry(txOut, isCoinbase, node.blueScore)
var err error
ms, err = removeUTXOFromMultiset(ms, unacceptedEntry, &outpoint)
if err != nil {
return err
return nil, err
}
ms, err = addUTXOToMultiset(ms, acceptedEntry, &outpoint)
if err != nil {
return nil, err
}
}
}
// Apply the fee data into the database
return dbStoreFeeData(dbTx, block.Hash(), feeData)
})
return ms, nil
}
func (node *blockNode) pastUTXOMultiSet(dag *BlockDAG, acceptanceData MultiBlockTxsAcceptanceData, selectedParentUTXO UTXOSet) (*secp256k1.MultiSet, error) {
ms, err := node.acceptedSelectedParentMultiset(dag, acceptanceData)
if err != nil {
return nil, err
}
for _, blockAcceptanceData := range acceptanceData {
if blockAcceptanceData.BlockHash.IsEqual(node.selectedParent.hash) {
continue
}
for _, txAcceptanceData := range blockAcceptanceData.TxAcceptanceData {
if !txAcceptanceData.IsAccepted {
continue
}
tx := txAcceptanceData.Tx.MsgTx()
var err error
ms, err = addTxToMultiset(ms, tx, selectedParentUTXO, node.blueScore)
if err != nil {
return nil, err
}
}
}
return ms, nil
}
func addTxToMultiset(ms *secp256k1.MultiSet, tx *wire.MsgTx, pastUTXO UTXOSet, blockBlueScore uint64) (*secp256k1.MultiSet, error) {
isCoinbase := tx.IsCoinBase()
if !isCoinbase {
for _, txIn := range tx.TxIn {
entry, ok := pastUTXO.Get(txIn.PreviousOutpoint)
if !ok {
return nil, errors.Errorf("Couldn't find entry for outpoint %s", txIn.PreviousOutpoint)
}
var err error
ms, err = removeUTXOFromMultiset(ms, entry, &txIn.PreviousOutpoint)
if err != nil {
return nil, err
}
}
}
for i, txOut := range tx.TxOut {
outpoint := *wire.NewOutpoint(tx.TxID(), uint32(i))
entry := NewUTXOEntry(txOut, isCoinbase, blockBlueScore)
var err error
ms, err = addUTXOToMultiset(ms, entry, &outpoint)
if err != nil {
return nil, err
}
}
return ms, nil
}
func (dag *BlockDAG) saveChangesFromBlock(block *util.Block, virtualUTXODiff *UTXODiff,
txsAcceptanceData MultiBlockTxsAcceptanceData, feeData compactFeeData) error {
dbTx, err := dbaccess.NewTx()
if err != nil {
return err
}
defer dbTx.RollbackUnlessClosed()
err = dag.index.flushToDB(dbTx)
if err != nil {
return err
}
err = dag.utxoDiffStore.flushToDB(dbTx)
if err != nil {
return err
}
err = dag.reachabilityStore.flushToDB(dbTx)
if err != nil {
return err
}
err = dag.multisetStore.flushToDB(dbTx)
if err != nil {
return err
}
// Update DAG state.
state := &dagState{
TipHashes: dag.TipHashes(),
LastFinalityPoint: dag.lastFinalityPoint.hash,
LocalSubnetworkID: dag.subnetworkID,
}
err = saveDAGState(dbTx, state)
if err != nil {
return err
}
// Update the UTXO set using the diffSet that was melded into the
// full UTXO set.
err = updateUTXOSet(dbTx, virtualUTXODiff)
if err != nil {
return err
}
// Scan all accepted transactions and register any subnetwork registry
// transaction. If any subnetwork registry transaction is not well-formed,
// fail the entire block.
err = registerSubnetworks(dbTx, block.Transactions())
if err != nil {
return err
}
// Allow the index manager to call each of the currently active
// optional indexes with the block being connected so they can
// update themselves accordingly.
if dag.indexManager != nil {
err := dag.indexManager.ConnectBlock(dbTx, block.Hash(), txsAcceptanceData)
if err != nil {
return err
}
}
// Apply the fee data into the database
err = dbaccess.StoreFeeData(dbTx, block.Hash(), feeData)
if err != nil {
return err
}
err = dbTx.Commit()
if err != nil {
return err
}
dag.index.clearDirtyEntries()
dag.utxoDiffStore.clearDirtyEntries()
dag.reachabilityStore.clearDirtyEntries()
dag.multisetStore.clearNewEntries()
return nil
}
@@ -698,7 +826,7 @@ func (dag *BlockDAG) validateGasLimit(block *util.Block) error {
if !msgTx.SubnetworkID.IsEqual(currentSubnetworkID) {
currentSubnetworkID = &msgTx.SubnetworkID
currentGasUsage = 0
currentSubnetworkGasLimit, err = dag.SubnetworkStore.GasLimit(currentSubnetworkID)
currentSubnetworkGasLimit, err = GasLimit(currentSubnetworkID)
if err != nil {
return errors.Errorf("Error getting gas limit for subnetworkID '%s': %s", currentSubnetworkID, err)
}
@@ -796,9 +924,7 @@ func (dag *BlockDAG) finalizeNodesBelowFinalityPoint(deleteDiffData bool) {
}
}
if deleteDiffData {
err := dag.db.Update(func(dbTx database.Tx) error {
return dag.utxoDiffStore.removeBlocksDiffData(dbTx, blockHashesToDelete)
})
err := dag.utxoDiffStore.removeBlocksDiffData(dbaccess.NoTx(), blockHashesToDelete)
if err != nil {
panic(fmt.Sprintf("Error removing diff data from utxoDiffStore: %s", err))
}
@@ -851,7 +977,7 @@ func (dag *BlockDAG) NextAcceptedIDMerkleRootNoLock() (*daghash.Hash, error) {
//
// This function MUST be called with the DAG read-lock held
func (dag *BlockDAG) TxsAcceptedByVirtual() (MultiBlockTxsAcceptanceData, error) {
_, txsAcceptanceData, err := dag.pastUTXO(&dag.virtual.blockNode)
_, _, txsAcceptanceData, err := dag.pastUTXO(&dag.virtual.blockNode)
return txsAcceptanceData, err
}
@@ -863,7 +989,7 @@ func (dag *BlockDAG) TxsAcceptedByBlockHash(blockHash *daghash.Hash) (MultiBlock
if node == nil {
return nil, errors.Errorf("Couldn't find block %s", blockHash)
}
_, txsAcceptanceData, err := dag.pastUTXO(node)
_, _, txsAcceptanceData, err := dag.pastUTXO(node)
return txsAcceptanceData, err
}
@@ -874,38 +1000,40 @@ func (dag *BlockDAG) TxsAcceptedByBlockHash(blockHash *daghash.Hash) (MultiBlock
// 4. Updates each of the tips' utxoDiff.
// 5. Applies the new virtual's blue score to all the unaccepted UTXOs
// 6. Adds the block to the reachability structures
// 7. Updates the finality point of the DAG (if required).
// 7. Adds the multiset of the block to the multiset store.
// 8. Updates the finality point of the DAG (if required).
//
// It returns the diff in the virtual block's UTXO set.
//
// This function MUST be called with the DAG state lock held (for writes).
func (dag *BlockDAG) applyDAGChanges(node *blockNode, newBlockUTXO UTXOSet, selectedParentAnticone []*blockNode) (
virtualUTXODiff *UTXODiff, virtualTxsAcceptanceData MultiBlockTxsAcceptanceData,
chainUpdates *chainUpdates, err error) {
func (dag *BlockDAG) applyDAGChanges(node *blockNode, newBlockUTXO UTXOSet, newBlockMultiset *secp256k1.MultiSet, selectedParentAnticone []*blockNode) (
virtualUTXODiff *UTXODiff, chainUpdates *chainUpdates, err error) {
// Add the block to the reachability structures
err = dag.updateReachability(node, selectedParentAnticone)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "failed updating reachability")
return nil, nil, errors.Wrap(err, "failed updating reachability")
}
dag.multisetStore.setMultiset(node, newBlockMultiset)
if err = node.updateParents(dag, newBlockUTXO); err != nil {
return nil, nil, nil, errors.Wrapf(err, "failed updating parents of %s", node)
return nil, nil, errors.Wrapf(err, "failed updating parents of %s", node)
}
// Update the virtual block's parents (the DAG tips) to include the new block.
chainUpdates = dag.virtual.AddTip(node)
// Build a UTXO set for the new virtual block
newVirtualUTXO, virtualTxsAcceptanceData, err := dag.pastUTXO(&dag.virtual.blockNode)
newVirtualUTXO, _, _, err := dag.pastUTXO(&dag.virtual.blockNode)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "could not restore past UTXO for virtual")
return nil, nil, errors.Wrap(err, "could not restore past UTXO for virtual")
}
// Apply new utxoDiffs to all the tips
err = updateTipsUTXO(dag, newVirtualUTXO)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "failed updating the tips' UTXO")
return nil, nil, errors.Wrap(err, "failed updating the tips' UTXO")
}
// It is now safe to meld the UTXO set to base.
@@ -913,7 +1041,7 @@ func (dag *BlockDAG) applyDAGChanges(node *blockNode, newBlockUTXO UTXOSet, sele
virtualUTXODiff = diffSet.UTXODiff
err = dag.meldVirtualUTXO(diffSet)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "failed melding the virtual UTXO")
return nil, nil, errors.Wrap(err, "failed melding the virtual UTXO")
}
dag.index.SetStatusFlags(node, statusValid)
@@ -921,7 +1049,7 @@ func (dag *BlockDAG) applyDAGChanges(node *blockNode, newBlockUTXO UTXOSet, sele
// And now we can update the finality point of the DAG (if required)
dag.updateFinalityPoint()
return virtualUTXODiff, virtualTxsAcceptanceData, chainUpdates, nil
return virtualUTXODiff, chainUpdates, nil
}
func (dag *BlockDAG) meldVirtualUTXO(newVirtualUTXODiffSet *DiffUTXOSet) error {
@@ -948,42 +1076,49 @@ func (node *blockNode) diffFromTxs(pastUTXO UTXOSet, transactions []*util.Tx) (*
}
// verifyAndBuildUTXO verifies all transactions in the given block and builds its UTXO
// to save extra traversals it returns the transactions acceptance data and the compactFeeData for the new block
// to save extra traversals it returns the transactions acceptance data, the compactFeeData
// for the new block and its multiset.
func (node *blockNode) verifyAndBuildUTXO(dag *BlockDAG, transactions []*util.Tx, fastAdd bool) (
newBlockUTXO UTXOSet, txsAcceptanceData MultiBlockTxsAcceptanceData, newBlockFeeData compactFeeData, err error) {
newBlockUTXO UTXOSet, txsAcceptanceData MultiBlockTxsAcceptanceData, newBlockFeeData compactFeeData, multiset *secp256k1.MultiSet, err error) {
pastUTXO, txsAcceptanceData, err := dag.pastUTXO(node)
pastUTXO, selectedParentUTXO, txsAcceptanceData, err := dag.pastUTXO(node)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, err
}
err = node.validateAcceptedIDMerkleRoot(dag, txsAcceptanceData)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, err
}
feeData, err := dag.checkConnectToPastUTXO(node, pastUTXO, transactions, fastAdd)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, err
}
diffFromTxs, err := node.diffFromTxs(pastUTXO, transactions)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, err
}
utxo, err := pastUTXO.WithDiff(diffFromTxs)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, err
}
calculatedMultisetHash := utxo.Multiset().Hash()
multiset, err = node.calcMultiset(dag, transactions, txsAcceptanceData, selectedParentUTXO, pastUTXO)
if err != nil {
return nil, nil, nil, nil, err
}
calculatedMultisetHash := daghash.Hash(*multiset.Finalize())
if !calculatedMultisetHash.IsEqual(node.utxoCommitment) {
str := fmt.Sprintf("block %s UTXO commitment is invalid - block "+
"header indicates %s, but calculated value is %s", node.hash,
node.utxoCommitment, calculatedMultisetHash)
return nil, nil, nil, ruleError(ErrBadUTXOCommitment, str)
return nil, nil, nil, nil, ruleError(ErrBadUTXOCommitment, str)
}
return utxo, txsAcceptanceData, feeData, nil
return utxo, txsAcceptanceData, feeData, multiset, nil
}
// TxAcceptanceData stores a transaction together with an indication
@@ -1026,21 +1161,17 @@ func genesisPastUTXO(virtual *virtualBlock) UTXOSet {
return genesisPastUTXO
}
func (node *blockNode) fetchBlueBlocks(db database.DB) ([]*util.Block, error) {
func (node *blockNode) fetchBlueBlocks() ([]*util.Block, error) {
blueBlocks := make([]*util.Block, len(node.blues))
err := db.View(func(dbTx database.Tx) error {
for i, blueBlockNode := range node.blues {
blueBlock, err := dbFetchBlockByNode(dbTx, blueBlockNode)
if err != nil {
return err
}
blueBlocks[i] = blueBlock
for i, blueBlockNode := range node.blues {
blueBlock, err := fetchBlockByHash(dbaccess.NoTx(), blueBlockNode.hash)
if err != nil {
return nil, err
}
return nil
})
return blueBlocks, err
blueBlocks[i] = blueBlock
}
return blueBlocks, nil
}
// applyBlueBlocks adds all transactions in the blue blocks to the selectedParent's UTXO set
@@ -1137,28 +1268,33 @@ func (node *blockNode) updateParentsDiffs(dag *BlockDAG, newBlockUTXO UTXOSet) e
// To save traversals over the blue blocks, it also returns the transaction acceptance data for
// all blue blocks
func (dag *BlockDAG) pastUTXO(node *blockNode) (
pastUTXO UTXOSet, bluesTxsAcceptanceData MultiBlockTxsAcceptanceData, err error) {
pastUTXO, selectedParentUTXO UTXOSet, bluesTxsAcceptanceData MultiBlockTxsAcceptanceData, err error) {
if node.isGenesis() {
return genesisPastUTXO(dag.virtual), MultiBlockTxsAcceptanceData{}, nil
return genesisPastUTXO(dag.virtual), NewFullUTXOSet(), MultiBlockTxsAcceptanceData{}, nil
}
selectedParentUTXO, err := dag.restoreUTXO(node.selectedParent)
selectedParentUTXO, err = dag.restoreUTXO(node.selectedParent)
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
blueBlocks, err := node.fetchBlueBlocks(dag.db)
blueBlocks, err := node.fetchBlueBlocks()
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
selectedParent := blueBlocks[0]
acceptedSelectedParentUTXO, selectedParentAcceptanceData, err := node.acceptSelectedParentTransactions(selectedParent, selectedParentUTXO)
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
return node.applyBlueBlocks(acceptedSelectedParentUTXO, selectedParentAcceptanceData, blueBlocks)
pastUTXO, bluesTxsAcceptanceData, err = node.applyBlueBlocks(acceptedSelectedParentUTXO, selectedParentAcceptanceData, blueBlocks)
if err != nil {
return nil, nil, nil, err
}
return pastUTXO, selectedParentUTXO, bluesTxsAcceptanceData, nil
}
func (node *blockNode) acceptSelectedParentTransactions(selectedParent *util.Block, selectedParentUTXO UTXOSet) (acceptedSelectedParentUTXO UTXOSet, txAcceptanceData []TxAcceptanceData, err error) {
@@ -1214,8 +1350,8 @@ func (dag *BlockDAG) restoreUTXO(node *blockNode) (UTXOSet, error) {
if err != nil {
return nil, err
}
// Use WithDiffInPlace, otherwise copying the diffs again and again creates a polynomial overhead
err = accumulatedDiff.WithDiffInPlace(diff)
// Use withDiffInPlace, otherwise copying the diffs again and again creates a polynomial overhead
err = accumulatedDiff.withDiffInPlace(diff)
if err != nil {
return nil, err
}
@@ -1265,14 +1401,14 @@ func (dag *BlockDAG) isCurrent() bool {
dagTimestamp = selectedTip.timestamp
}
dagTime := time.Unix(dagTimestamp, 0)
return dag.AdjustedTime().Sub(dagTime) <= isDAGCurrentMaxDiff
return dag.Now().Sub(dagTime) <= isDAGCurrentMaxDiff
}
// AdjustedTime returns the adjusted time according to
// dag.timeSource. See MedianTimeSource.AdjustedTime for
// Now returns the adjusted time according to
// dag.timeSource. See TimeSource.Now for
// more details.
func (dag *BlockDAG) AdjustedTime() time.Time {
return dag.timeSource.AdjustedTime()
func (dag *BlockDAG) Now() time.Time {
return dag.timeSource.Now()
}
// IsCurrent returns whether or not the DAG believes it is current. Several
@@ -1393,11 +1529,6 @@ func (dag *BlockDAG) UTXOConfirmations(outpoint *wire.Outpoint) (uint64, bool) {
return confirmations, true
}
// UTXOCommitment returns a commitment to the dag's current UTXOSet
func (dag *BlockDAG) UTXOCommitment() string {
return dag.UTXOSet().UTXOMultiset.Hash().String()
}
// blockConfirmations returns the current confirmations number of the given node
// The confirmations number is defined as follows:
// * If the node is in the selected tip red set -> 0
@@ -1810,8 +1941,23 @@ func (dag *BlockDAG) SubnetworkID() *subnetworkid.SubnetworkID {
return dag.subnetworkID
}
// ForEachHash runs the given fn on every hash that's currently known to
// the DAG.
//
// This function is NOT safe for concurrent access. It is meant to be
// used either on initialization or when the dag lock is held for reads.
func (dag *BlockDAG) ForEachHash(fn func(hash daghash.Hash) error) error {
for hash := range dag.index.index {
err := fn(hash)
if err != nil {
return err
}
}
return nil
}
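A minimal usage sketch (hypothetical caller, assuming the DAG lock is already held for reads) that collects every known hash into a slice:

knownHashes := make([]daghash.Hash, 0)
err := dag.ForEachHash(func(hash daghash.Hash) error {
	knownHashes = append(knownHashes, hash) // hashes arrive in map-iteration (unspecified) order
	return nil
})
if err != nil {
	return err
}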
func (dag *BlockDAG) addDelayedBlock(block *util.Block, delay time.Duration) error {
processTime := dag.AdjustedTime().Add(delay)
processTime := dag.Now().Add(delay)
log.Debugf("Adding block to delayed blocks queue (block hash: %s, process time: %s)", block.Hash().String(), processTime)
delayedBlock := &delayedBlock{
block: block,
@@ -1829,7 +1975,7 @@ func (dag *BlockDAG) processDelayedBlocks() error {
// Check if the delayed block with the earliest process time should be processed
for dag.delayedBlocksQueue.Len() > 0 {
earliestDelayedBlockProcessTime := dag.peekDelayedBlock().processTime
if earliestDelayedBlockProcessTime.After(dag.AdjustedTime()) {
if earliestDelayedBlockProcessTime.After(dag.Now()) {
break
}
delayedBlock := dag.popDelayedBlock()
@@ -1863,25 +2009,16 @@ func (dag *BlockDAG) peekDelayedBlock() *delayedBlock {
// connected to the DAG for the purpose of supporting optional indexes.
type IndexManager interface {
// Init is invoked during DAG initialization in order to allow the index
// manager to initialize itself and any indexes it is managing. The
// channel parameter specifies a channel the caller can close to signal
// that the process should be interrupted. It can be nil if that
// behavior is not desired.
Init(database.DB, *BlockDAG, <-chan struct{}) error
// manager to initialize itself and any indexes it is managing.
Init(*BlockDAG) error
// ConnectBlock is invoked when a new block has been connected to the
// DAG.
ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *BlockDAG, acceptedTxsData MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData MultiBlockTxsAcceptanceData) error
ConnectBlock(dbContext *dbaccess.TxContext, blockHash *daghash.Hash, acceptedTxsData MultiBlockTxsAcceptanceData) error
}
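For illustration, a minimal no-op type satisfying the slimmer interface; the type name is hypothetical and not part of this change:

type noopIndexManager struct{}

// Init has nothing to set up for this illustrative manager.
func (m *noopIndexManager) Init(dag *BlockDAG) error {
	return nil
}

// ConnectBlock is where a real manager would update its index inside the
// provided database transaction; here it simply succeeds.
func (m *noopIndexManager) ConnectBlock(dbContext *dbaccess.TxContext, blockHash *daghash.Hash,
	acceptedTxsData MultiBlockTxsAcceptanceData) error {
	return nil
}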
// Config is a descriptor which specifies the blockDAG instance configuration.
type Config struct {
// DB defines the database which houses the blocks and will be used to
// store all metadata created by this package such as the utxo set.
//
// This field is required.
DB database.DB
// Interrupt specifies a channel the caller can close to signal that
// long running operations, such as catching up indexes or performing
// database migrations, should be interrupted.
@@ -1895,13 +2032,9 @@ type Config struct {
// This field is required.
DAGParams *dagconfig.Params
// TimeSource defines the median time source to use for things such as
// TimeSource defines the time source to use for things such as
// block processing and determining whether or not the DAG is current.
//
// The caller is expected to keep a reference to the time source as well
// and add time samples from other peers on the network so the local
// time is adjusted to be in agreement with other peers.
TimeSource MedianTimeSource
TimeSource TimeSource
// SigCache defines a signature cache to use when validating
// signatures. This is typically most useful when individual
@@ -1929,9 +2062,6 @@ type Config struct {
// New returns a BlockDAG instance using the provided configuration details.
func New(config *Config) (*BlockDAG, error) {
// Enforce required config fields.
if config.DB == nil {
return nil, AssertError("BlockDAG.New database is nil")
}
if config.DAGParams == nil {
return nil, AssertError("BlockDAG.New DAG parameters nil")
}
@@ -1942,9 +2072,8 @@ func New(config *Config) (*BlockDAG, error) {
params := config.DAGParams
targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second)
index := newBlockIndex(config.DB, params)
index := newBlockIndex(params)
dag := &BlockDAG{
db: config.DB,
dagParams: params,
timeSource: config.TimeSource,
sigCache: config.SigCache,
@@ -1961,13 +2090,13 @@ func New(config *Config) (*BlockDAG, error) {
warningCaches: newThresholdCaches(vbNumBits),
deploymentCaches: newThresholdCaches(dagconfig.DefinedDeployments),
blockCount: 0,
SubnetworkStore: newSubnetworkStore(config.DB),
subnetworkID: config.SubnetworkID,
}
dag.virtual = newVirtualBlock(dag, nil)
dag.utxoDiffStore = newUTXODiffStore(dag)
dag.reachabilityStore = newReachabilityStore(dag)
dag.multisetStore = newMultisetStore(dag)
// Initialize the DAG state from the passed database. When the db
// does not yet contain any DAG state, both it and the DAG state
@@ -1976,19 +2105,11 @@ func New(config *Config) (*BlockDAG, error) {
if err != nil {
return nil, err
}
defer func() {
if err != nil {
err := dag.removeDAGState()
if err != nil {
panic(fmt.Sprintf("Couldn't remove the DAG State: %s", err))
}
}
}()
// Initialize and catch up all of the currently active optional indexes
// as needed.
if config.IndexManager != nil {
err = config.IndexManager.Init(dag.db, dag, config.Interrupt)
err = config.IndexManager.Init(dag)
if err != nil {
return nil, err
}

View File

@@ -6,6 +6,7 @@ package blockdag
import (
"fmt"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/pkg/errors"
"os"
"path/filepath"
@@ -13,7 +14,6 @@ import (
"time"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
@@ -40,7 +40,7 @@ func TestBlockCount(t *testing.T) {
}
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestBlockCount", Config{
dag, teardownFunc, err := DAGSetup("TestBlockCount", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
@@ -93,7 +93,7 @@ func TestIsKnownBlock(t *testing.T) {
}
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("haveblock", Config{
dag, teardownFunc, err := DAGSetup("haveblock", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
@@ -204,7 +204,7 @@ func TestIsKnownBlock(t *testing.T) {
{hash: dagconfig.SimnetParams.GenesisHash.String(), want: true},
// Block 3b should be present (as a second child of Block 2).
{hash: "264176fb6072e2362db18f92d3f4b739cff071a206736df7c407c0bf9a1d7fef", want: true},
{hash: "216301e3fc03cf89973b9192b4ecdd732bf3b677cf1ca4f6c340a56f1533fb4f", want: true},
// Block 100000 should be present (as an orphan).
{hash: "65b20b048a074793ebfd1196e49341c8d194dabfc6b44a4fd0c607406e122baf", want: true},
@@ -550,18 +550,17 @@ func TestNew(t *testing.T) {
dbPath := filepath.Join(tempDir, "TestNew")
_ = os.RemoveAll(dbPath)
db, err := database.Create(testDbType, dbPath, blockDataNet)
err := dbaccess.Open(dbPath)
if err != nil {
t.Fatalf("error creating db: %s", err)
}
defer func() {
db.Close()
dbaccess.Close()
os.RemoveAll(dbPath)
}()
config := &Config{
DAGParams: &dagconfig.SimnetParams,
DB: db,
TimeSource: NewMedianTime(),
TimeSource: NewTimeSource(),
SigCache: txscript.NewSigCache(1000),
}
_, err = New(config)
@@ -590,20 +589,19 @@ func TestAcceptingInInit(t *testing.T) {
// Create a test database
dbPath := filepath.Join(tempDir, "TestAcceptingInInit")
_ = os.RemoveAll(dbPath)
db, err := database.Create(testDbType, dbPath, blockDataNet)
err := dbaccess.Open(dbPath)
if err != nil {
t.Fatalf("error creating db: %s", err)
}
defer func() {
db.Close()
dbaccess.Close()
os.RemoveAll(dbPath)
}()
// Create a DAG to add the test block into
config := &Config{
DAGParams: &dagconfig.SimnetParams,
DB: db,
TimeSource: NewMedianTime(),
TimeSource: NewTimeSource(),
SigCache: txscript.NewSigCache(1000),
}
dag, err := New(config)
@@ -625,16 +623,30 @@ func TestAcceptingInInit(t *testing.T) {
testNode.status = statusDataStored
// Manually add the test block to the database
err = db.Update(func(dbTx database.Tx) error {
err := dbStoreBlock(dbTx, testBlock)
if err != nil {
return err
}
return dbStoreBlockNode(dbTx, testNode)
})
dbTx, err := dbaccess.NewTx()
if err != nil {
t.Fatalf("Failed to open database "+
"transaction: %s", err)
}
defer dbTx.RollbackUnlessClosed()
err = storeBlock(dbTx, testBlock)
if err != nil {
t.Fatalf("Failed to store block: %s", err)
}
dbTestNode, err := serializeBlockNode(testNode)
if err != nil {
t.Fatalf("Failed to serialize blockNode: %s", err)
}
key := blockIndexKey(testNode.hash, testNode.blueScore)
err = dbaccess.StoreIndexBlock(dbTx, key, dbTestNode)
if err != nil {
t.Fatalf("Failed to update block index: %s", err)
}
err = dbTx.Commit()
if err != nil {
t.Fatalf("Failed to commit database "+
"transaction: %s", err)
}
// Create a new DAG. We expect this DAG to process the
// test node
@@ -654,7 +666,7 @@ func TestConfirmations(t *testing.T) {
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimnetParams
params.K = 1
dag, teardownFunc, err := DAGSetup("TestConfirmations", Config{
dag, teardownFunc, err := DAGSetup("TestConfirmations", true, Config{
DAGParams: &params,
})
if err != nil {
@@ -757,7 +769,7 @@ func TestAcceptingBlock(t *testing.T) {
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimnetParams
params.K = 3
dag, teardownFunc, err := DAGSetup("TestAcceptingBlock", Config{
dag, teardownFunc, err := DAGSetup("TestAcceptingBlock", true, Config{
DAGParams: &params,
})
if err != nil {
@@ -887,7 +899,7 @@ func TestFinalizeNodesBelowFinalityPoint(t *testing.T) {
func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
params := dagconfig.SimnetParams
params.K = 1
dag, teardownFunc, err := DAGSetup("testFinalizeNodesBelowFinalityPoint", Config{
dag, teardownFunc, err := DAGSetup("testFinalizeNodesBelowFinalityPoint", true, Config{
DAGParams: &params,
})
if err != nil {
@@ -899,13 +911,20 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
blockTime := dag.genesis.Header().Timestamp
flushUTXODiffStore := func() {
err := dag.db.Update(func(dbTx database.Tx) error {
return dag.utxoDiffStore.flushToDB(dbTx)
})
dbTx, err := dbaccess.NewTx()
if err != nil {
t.Fatalf("Failed to open database transaction: %s", err)
}
defer dbTx.RollbackUnlessClosed()
err = dag.utxoDiffStore.flushToDB(dbTx)
if err != nil {
t.Fatalf("Error flushing utxoDiffStore data to DB: %s", err)
}
dag.utxoDiffStore.clearDirtyEntries()
err = dbTx.Commit()
if err != nil {
t.Fatalf("Failed to commit database transaction: %s", err)
}
}
addNode := func(parent *blockNode) *blockNode {
@@ -946,12 +965,22 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
} else if !deleteDiffData && !ok {
t.Errorf("The diff data of node with blue score %d shouldn't have been unloaded if deleteDiffData is %T", node.blueScore, deleteDiffData)
}
if diffData, err := dag.utxoDiffStore.diffDataFromDB(node.hash); err != nil {
_, err := dag.utxoDiffStore.diffDataFromDB(node.hash)
exists := !dbaccess.IsNotFoundError(err)
if exists && err != nil {
t.Errorf("diffDataFromDB: %s", err)
} else if deleteDiffData && diffData != nil {
continue
}
if deleteDiffData && exists {
t.Errorf("The diff data of node with blue score %d should have been deleted from the database if deleteDiffData is %T", node.blueScore, deleteDiffData)
} else if !deleteDiffData && diffData == nil {
continue
}
if !deleteDiffData && !exists {
t.Errorf("The diff data of node with blue score %d shouldn't have been deleted from the database if deleteDiffData is %T", node.blueScore, deleteDiffData)
continue
}
}
@@ -972,7 +1001,7 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
func TestDAGIndexFailedStatus(t *testing.T) {
params := dagconfig.SimnetParams
dag, teardownFunc, err := DAGSetup("TestDAGIndexFailedStatus", Config{
dag, teardownFunc, err := DAGSetup("TestDAGIndexFailedStatus", true, Config{
DAGParams: &params,
})
if err != nil {

View File

@@ -9,63 +9,22 @@ import (
"encoding/binary"
"encoding/json"
"fmt"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/pkg/errors"
"io"
"sync"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/binaryserializer"
"github.com/kaspanet/kaspad/util/buffers"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/wire"
)
const (
// blockHdrSize is the size of a block header. This is simply the
// constant from wire and is only provided here for convenience since
// wire.MaxBlockHeaderPayload is quite long.
blockHdrSize = wire.MaxBlockHeaderPayload
// latestUTXOSetBucketVersion is the current version of the UTXO set
// bucket that is used to track all unspent outputs.
latestUTXOSetBucketVersion = 1
)
var (
// blockIndexBucketName is the name of the database bucket used to house the
// block headers and contextual information.
blockIndexBucketName = []byte("blockheaderidx")
// dagStateKeyName is the name of the db key used to store the DAG
// tip hashes.
dagStateKeyName = []byte("dagstate")
// utxoSetVersionKeyName is the name of the db key used to store the
// version of the utxo set currently in the database.
utxoSetVersionKeyName = []byte("utxosetversion")
// utxoSetBucketName is the name of the database bucket used to house the
// unspent transaction output set.
utxoSetBucketName = []byte("utxoset")
// utxoDiffsBucketName is the name of the database bucket used to house the
// diffs and diff children of blocks.
utxoDiffsBucketName = []byte("utxodiffs")
// reachabilityDataBucketName is the name of the database bucket used to house the
// reachability tree nodes and future covering sets of blocks.
reachabilityDataBucketName = []byte("reachability")
// subnetworksBucketName is the name of the database bucket used to store the
// subnetwork registry.
subnetworksBucketName = []byte("subnetworks")
// localSubnetworkKeyName is the name of the db key used to store the
// node's local subnetwork ID.
localSubnetworkKeyName = []byte("localsubnetworkidkey")
// byteOrder is the preferred byte order used for serializing numeric
// fields for storage in the database.
byteOrder = binary.LittleEndian
@@ -87,167 +46,88 @@ func isNotInDAGErr(err error) bool {
return errors.As(err, &notInDAGErr)
}
// errDeserialize signifies that a problem was encountered when deserializing
// data.
type errDeserialize string
// Error implements the error interface.
func (e errDeserialize) Error() string {
return string(e)
}
// isDeserializeErr returns whether or not the passed error is an errDeserialize
// error.
func isDeserializeErr(err error) bool {
var deserializeErr errDeserialize
return errors.As(err, &deserializeErr)
}
// dbPutVersion uses an existing database transaction to update the provided
// key in the metadata bucket to the given version. It is primarily used to
// track versions on entities such as buckets.
func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error {
var serialized [4]byte
byteOrder.PutUint32(serialized[:], version)
return dbTx.Metadata().Put(key, serialized[:])
}
// -----------------------------------------------------------------------------
// The unspent transaction output (UTXO) set consists of an entry for each
// unspent output using a format that is optimized to reduce space using domain
// specific compression algorithms.
//
// Each entry is keyed by an outpoint as specified below. It is important to
// note that the key encoding uses a VLQ, which employs an MSB encoding so
// iteration of UTXOs when doing byte-wise comparisons will produce them in
// order.
//
// The serialized key format is:
// <hash><output index>
//
// Field Type Size
// hash daghash.Hash daghash.HashSize
// output index VLQ variable
//
// The serialized value format is:
//
// <header code><compressed txout>
//
// Field Type Size
// header code VLQ variable
// compressed txout
// compressed amount VLQ variable
// compressed script []byte variable
//
// The serialized header code format is:
// bit 0 - containing transaction is a coinbase
// bits 1-x - height of the block that contains the unspent txout
//
// Example 1:
// b7c3332bc138e2c9429818f5fed500bcc1746544218772389054dc8047d7cd3f:0
//
// 03320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52
// <><------------------------------------------------------------------>
// | |
// header code compressed txout
//
// - header code: 0x03 (coinbase, height 1)
// - compressed txout:
// - 0x32: VLQ-encoded compressed amount for 5000000000 (50 KAS)
// - 0x04: special script type pay-to-pubkey
// - 0x96...52: x-coordinate of the pubkey
//
// Example 2:
// 4a16969aa4764dd7507fc1de7f0baa4850a246de90c45e59a3207f9a26b5036f:2
//
// 8cf316800900b8025be1b3efc63b0ad48e7f9f10e87544528d58
// <----><------------------------------------------>
// | |
// header code compressed txout
//
// - header code: 0x8cf316 (not coinbase, height 113931)
// - compressed txout:
// - 0x8009: VLQ-encoded compressed amount for 15000000 (0.15 KAS)
// - 0x00: special script type pay-to-pubkey-hash
// - 0xb8...58: pubkey hash
//
// Example 3:
// 1b02d1c8cfef60a189017b9a420c682cf4a0028175f2f563209e4ff61c8c3620:22
//
// a8a2588ba5b9e763011dd46a006572d820e448e12d2bbb38640bc718e6
// <----><-------------------------------------------------->
// | |
// header code compressed txout
//
// - header code: 0xa8a258 (not coinbase, height 338156)
// - compressed txout:
// - 0x8ba5b9e763: VLQ-encoded compressed amount for 366875659 (3.66875659 KAS)
// - 0x01: special script type pay-to-script-hash
// - 0x1d...e6: script hash
// -----------------------------------------------------------------------------
// maxUint32VLQSerializeSize is the maximum number of bytes a max uint32 takes
// to serialize as a VLQ.
var maxUint32VLQSerializeSize = serializeSizeVLQ(1<<32 - 1)
// outpointKeyPool defines a concurrent safe free list of byte slices used to
// outpointKeyPool defines a concurrent safe free list of byte buffers used to
// provide temporary buffers for outpoint database keys.
var outpointKeyPool = sync.Pool{
New: func() interface{} {
b := make([]byte, daghash.HashSize+maxUint32VLQSerializeSize)
return &b // Pointer to slice to avoid boxing alloc.
return &bytes.Buffer{} // Pointer to a buffer to avoid boxing alloc.
},
}
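The intended get/reset/put cycle, sketched (illustrative only) from the usage in updateUTXOSet below; outpoint and dbContext are assumed to be in scope:

w := outpointKeyPool.Get().(*bytes.Buffer)
w.Reset()
if err := serializeOutpoint(w, &outpoint); err != nil {
	return err
}
key := w.Bytes() // valid until the buffer is recycled and reused
if err := dbaccess.RemoveFromUTXOSet(dbContext, key); err != nil {
	return err
}
outpointKeyPool.Put(w)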
// outpointKey returns a key suitable for use as a database key in the UTXO set
// while making use of a free list. A new buffer is allocated if there are not
// already any available on the free list. The returned byte slice should be
// returned to the free list by using the recycleOutpointKey function when the
// caller is done with it _unless_ the slice will need to live for longer than
// the caller can calculate such as when used to write to the database.
func outpointKey(outpoint wire.Outpoint) *[]byte {
// A VLQ employs an MSB encoding, so they are useful not only to reduce
// the amount of storage space, but also so iteration of UTXOs when
// doing byte-wise comparisons will produce them in order.
key := outpointKeyPool.Get().(*[]byte)
idx := uint64(outpoint.Index)
*key = (*key)[:daghash.HashSize+serializeSizeVLQ(idx)]
copy(*key, outpoint.TxID[:])
putVLQ((*key)[daghash.HashSize:], idx)
return key
// outpointIndexByteOrder is the byte order for serializing the outpoint index.
// It uses big endian to ensure that when an outpoint is used as a database key, the
// keys will be iterated in ascending order by the outpoint index.
var outpointIndexByteOrder = binary.BigEndian
func serializeOutpoint(w io.Writer, outpoint *wire.Outpoint) error {
_, err := w.Write(outpoint.TxID[:])
if err != nil {
return err
}
return binaryserializer.PutUint32(w, outpointIndexByteOrder, outpoint.Index)
}
// recycleOutpointKey puts the provided byte slice, which should have been
// obtained via the outpointKey function, back on the free list.
func recycleOutpointKey(key *[]byte) {
outpointKeyPool.Put(key)
var outpointSerializeSize = daghash.TxIDSize + 4
// deserializeOutpoint decodes an outpoint from the passed reader into a new
// wire.Outpoint using a format that is suitable for long-
// term storage. This format is described in detail above.
func deserializeOutpoint(r io.Reader) (*wire.Outpoint, error) {
outpoint := &wire.Outpoint{}
_, err := r.Read(outpoint.TxID[:])
if err != nil {
return nil, err
}
outpoint.Index, err = binaryserializer.Uint32(r, outpointIndexByteOrder)
if err != nil {
return nil, err
}
return outpoint, nil
}
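A round-trip sketch with hypothetical values, showing that serializeOutpoint and deserializeOutpoint are inverses:

var buf bytes.Buffer
original := &wire.Outpoint{TxID: daghash.TxID{0x01}, Index: 7}
if err := serializeOutpoint(&buf, original); err != nil {
	return err
}
restored, err := deserializeOutpoint(&buf)
if err != nil {
	return err
}
// restored.TxID and restored.Index now equal the original's fields.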
// dbPutUTXODiff uses an existing database transaction to update the UTXO set
// in the database based on the provided UTXO view contents and state. In
// particular, only the entries that have been marked as modified are written
// to the database.
func dbPutUTXODiff(dbTx database.Tx, diff *UTXODiff) error {
utxoBucket := dbTx.Metadata().Bucket(utxoSetBucketName)
for outpoint := range diff.toRemove {
key := outpointKey(outpoint)
err := utxoBucket.Delete(*key)
recycleOutpointKey(key)
// updateUTXOSet updates the UTXO set in the database based on the provided
// UTXO diff.
func updateUTXOSet(dbContext dbaccess.Context, virtualUTXODiff *UTXODiff) error {
for outpoint := range virtualUTXODiff.toRemove {
w := outpointKeyPool.Get().(*bytes.Buffer)
w.Reset()
err := serializeOutpoint(w, &outpoint)
if err != nil {
return err
}
key := w.Bytes()
err = dbaccess.RemoveFromUTXOSet(dbContext, key)
if err != nil {
return err
}
outpointKeyPool.Put(w)
}
for outpoint, entry := range diff.toAdd {
// We are preallocating for P2PKH entries because they are the most common ones.
// If we have entries with a compressed script bigger than P2PKH's, the buffer will grow.
bytesToPreallocate := (p2pkhUTXOEntrySerializeSize + outpointSerializeSize) * len(virtualUTXODiff.toAdd)
buff := bytes.NewBuffer(make([]byte, bytesToPreallocate))
for outpoint, entry := range virtualUTXODiff.toAdd {
// Serialize and store the UTXO entry.
serialized := serializeUTXOEntry(entry)
sBuff := buffers.NewSubBuffer(buff)
err := serializeUTXOEntry(sBuff, entry)
if err != nil {
return err
}
serializedEntry := sBuff.Bytes()
key := outpointKey(outpoint)
err := utxoBucket.Put(*key, serialized)
// NOTE: The key is intentionally not recycled here since the
// database interface contract prohibits modifications. It will
// be garbage collected normally when the database is done with
// it.
sBuff = buffers.NewSubBuffer(buff)
err = serializeOutpoint(sBuff, &outpoint)
if err != nil {
return err
}
key := sBuff.Bytes()
err = dbaccess.AddToUTXOSet(dbContext, key, serializedEntry)
if err != nil {
return err
}
@@ -259,6 +139,7 @@ func dbPutUTXODiff(dbTx database.Tx, diff *UTXODiff) error {
type dagState struct {
TipHashes []*daghash.Hash
LastFinalityPoint *daghash.Hash
LocalSubnetworkID *subnetworkid.SubnetworkID
}
// serializeDAGState returns the serialization of the DAG state.
@@ -274,366 +155,220 @@ func deserializeDAGState(serializedData []byte) (*dagState, error) {
var state *dagState
err := json.Unmarshal(serializedData, &state)
if err != nil {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: "corrupt DAG state",
}
return nil, err
}
return state, nil
}
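A hedged round-trip sketch of the JSON-based state; someHash stands in for any *daghash.Hash the caller already has:

state := &dagState{
	TipHashes:         []*daghash.Hash{someHash},
	LastFinalityPoint: someHash,
	LocalSubnetworkID: nil, // nil is serialized as JSON null, as in the tests further below
}
serialized, err := serializeDAGState(state)
if err != nil {
	return err
}
restored, err := deserializeDAGState(serialized)
if err != nil {
	return err
}
// restored carries the same tip hashes, finality point and local subnetwork ID.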
// dbPutDAGState uses an existing database transaction to store the latest
// saveDAGState uses an existing database context to store the latest
// tip hashes of the DAG.
func dbPutDAGState(dbTx database.Tx, state *dagState) error {
serializedData, err := serializeDAGState(state)
func saveDAGState(dbContext dbaccess.Context, state *dagState) error {
serializedDAGState, err := serializeDAGState(state)
if err != nil {
return err
}
return dbTx.Metadata().Put(dagStateKeyName, serializedData)
return dbaccess.StoreDAGState(dbContext, serializedDAGState)
}
// createDAGState initializes both the database and the DAG state to the
// genesis block. This includes creating the necessary buckets, so it
// must only be called on an uninitialized database.
func (dag *BlockDAG) createDAGState() error {
// Create the initial the database DAG state including creating the
// necessary index buckets and inserting the genesis block.
err := dag.db.Update(func(dbTx database.Tx) error {
meta := dbTx.Metadata()
// Create the bucket that houses the block index data.
_, err := meta.CreateBucket(blockIndexBucketName)
if err != nil {
return err
}
// Create the buckets that house the utxo set, the utxo diffs, and their
// version.
_, err = meta.CreateBucket(utxoSetBucketName)
if err != nil {
return err
}
_, err = meta.CreateBucket(utxoDiffsBucketName)
if err != nil {
return err
}
_, err = meta.CreateBucket(reachabilityDataBucketName)
if err != nil {
return err
}
err = dbPutVersion(dbTx, utxoSetVersionKeyName,
latestUTXOSetBucketVersion)
if err != nil {
return err
}
// Create the bucket that houses the registered subnetworks.
_, err = meta.CreateBucket(subnetworksBucketName)
if err != nil {
return err
}
if err := dbPutLocalSubnetworkID(dbTx, dag.subnetworkID); err != nil {
return err
}
if _, err := meta.CreateBucketIfNotExists(idByHashIndexBucketName); err != nil {
return err
}
if _, err := meta.CreateBucketIfNotExists(hashByIDIndexBucketName); err != nil {
return err
}
return nil
// createDAGState initializes the DAG state to the
// genesis block and the node's local subnetwork id.
func (dag *BlockDAG) createDAGState(localSubnetworkID *subnetworkid.SubnetworkID) error {
return saveDAGState(dbaccess.NoTx(), &dagState{
TipHashes: []*daghash.Hash{dag.dagParams.GenesisHash},
LastFinalityPoint: dag.dagParams.GenesisHash,
LocalSubnetworkID: localSubnetworkID,
})
if err != nil {
return err
}
return nil
}
func (dag *BlockDAG) removeDAGState() error {
err := dag.db.Update(func(dbTx database.Tx) error {
meta := dbTx.Metadata()
err := meta.DeleteBucket(blockIndexBucketName)
if err != nil {
return err
}
err = meta.DeleteBucket(utxoSetBucketName)
if err != nil {
return err
}
err = meta.DeleteBucket(utxoDiffsBucketName)
if err != nil {
return err
}
err = meta.DeleteBucket(reachabilityDataBucketName)
if err != nil {
return err
}
err = dbTx.Metadata().Delete(utxoSetVersionKeyName)
if err != nil {
return err
}
err = meta.DeleteBucket(subnetworksBucketName)
if err != nil {
return err
}
err = dbTx.Metadata().Delete(localSubnetworkKeyName)
if err != nil {
return err
}
return nil
})
if err != nil {
return err
}
return nil
}
func dbPutLocalSubnetworkID(dbTx database.Tx, subnetworkID *subnetworkid.SubnetworkID) error {
if subnetworkID == nil {
return dbTx.Metadata().Put(localSubnetworkKeyName, []byte{})
}
return dbTx.Metadata().Put(localSubnetworkKeyName, subnetworkID[:])
}
// initDAGState attempts to load and initialize the DAG state from the
// database. When the db does not yet contain any DAG state, both it and the
// DAG state are initialized to the genesis block.
func (dag *BlockDAG) initDAGState() error {
// Determine the state of the DAG database. We may need to initialize
// everything from scratch or upgrade certain buckets.
var initialized bool
err := dag.db.View(func(dbTx database.Tx) error {
initialized = dbTx.Metadata().Get(dagStateKeyName) != nil
if initialized {
var localSubnetworkID *subnetworkid.SubnetworkID
localSubnetworkIDBytes := dbTx.Metadata().Get(localSubnetworkKeyName)
if len(localSubnetworkIDBytes) != 0 {
localSubnetworkID = &subnetworkid.SubnetworkID{}
localSubnetworkID.SetBytes(localSubnetworkIDBytes)
}
if !localSubnetworkID.IsEqual(dag.subnetworkID) {
return errors.Errorf("Cannot start kaspad with subnetwork ID %s because"+
" its database is already built with subnetwork ID %s. If you"+
" want to switch to a new database, please reset the"+
" database by starting kaspad with --reset-db flag", dag.subnetworkID, localSubnetworkID)
}
}
return nil
})
// Fetch the stored DAG state from the database. If it doesn't exist,
// it means that kaspad is running for the first time.
serializedDAGState, err := dbaccess.FetchDAGState(dbaccess.NoTx())
if dbaccess.IsNotFoundError(err) {
// Initialize the database and the DAG state to the genesis block.
return dag.createDAGState(dag.subnetworkID)
}
if err != nil {
return err
}
if !initialized {
// At this point the database has not already been initialized, so
// initialize both it and the DAG state to the genesis block.
return dag.createDAGState()
dagState, err := deserializeDAGState(serializedDAGState)
if err != nil {
return err
}
if !dagState.LocalSubnetworkID.IsEqual(dag.subnetworkID) {
return errors.Errorf("Cannot start kaspad with subnetwork ID %s because"+
" its database is already built with subnetwork ID %s. If you"+
" want to switch to a new database, please reset the"+
" database by starting kaspad with --reset-db flag", dag.subnetworkID, dagState.LocalSubnetworkID)
}
// Attempt to load the DAG state from the database.
return dag.db.View(func(dbTx database.Tx) error {
// Fetch the stored DAG tipHashes from the database metadata.
// When it doesn't exist, it means the database hasn't been
// initialized for use with the DAG yet, so break out now to allow
// that to happen under a writable database transaction.
serializedData := dbTx.Metadata().Get(dagStateKeyName)
log.Tracef("Serialized DAG tip hashes: %x", serializedData)
state, err := deserializeDAGState(serializedData)
log.Debugf("Loading block index...")
var unprocessedBlockNodes []*blockNode
blockIndexCursor, err := dbaccess.BlockIndexCursor(dbaccess.NoTx())
if err != nil {
return err
}
defer blockIndexCursor.Close()
for blockIndexCursor.Next() {
serializedDBNode, err := blockIndexCursor.Value()
if err != nil {
return err
}
node, err := dag.deserializeBlockNode(serializedDBNode)
if err != nil {
return err
}
// Load all of the headers from the data for the known DAG
// and construct the block index accordingly. Since the
// number of nodes is already known, perform a single alloc
// for them versus a whole bunch of little ones to reduce
// pressure on the GC.
log.Infof("Loading block index...")
// Check to see if this node had been stored in the block DB
// but not yet accepted. If so, add it to a slice to be processed later.
if node.status == statusDataStored {
unprocessedBlockNodes = append(unprocessedBlockNodes, node)
continue
}
blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
var unprocessedBlockNodes []*blockNode
cursor := blockIndexBucket.Cursor()
for ok := cursor.First(); ok; ok = cursor.Next() {
node, err := dag.deserializeBlockNode(cursor.Value())
if err != nil {
return err
}
// Check to see if this node had been stored in the block DB
// but not yet accepted. If so, add it to a slice to be processed later.
if node.status == statusDataStored {
unprocessedBlockNodes = append(unprocessedBlockNodes, node)
continue
}
// If the node is known to be invalid add it as-is to the block
// index and continue.
if node.status.KnownInvalid() {
dag.index.addNode(node)
continue
}
if dag.blockCount == 0 {
if !node.hash.IsEqual(dag.dagParams.GenesisHash) {
return AssertError(fmt.Sprintf("initDAGState: Expected "+
"first entry in block index to be genesis block, "+
"found %s", node.hash))
}
} else {
if len(node.parents) == 0 {
return AssertError(fmt.Sprintf("initDAGState: Could "+
"not find any parent for block %s", node.hash))
}
}
// Add the node to its parents children, connect it,
// and add it to the block index.
node.updateParentsChildren()
// If the node is known to be invalid add it as-is to the block
// index and continue.
if node.status.KnownInvalid() {
dag.index.addNode(node)
dag.blockCount++
continue
}
// Load all of the known UTXO entries and construct the full
// UTXO set accordingly. Since the number of entries is already
// known, perform a single alloc for them versus a whole bunch
// of little ones to reduce pressure on the GC.
log.Infof("Loading UTXO set...")
utxoEntryBucket := dbTx.Metadata().Bucket(utxoSetBucketName)
// Determine how many UTXO entries will be loaded into the index so we can
// allocate the right amount.
var utxoEntryCount int32
cursor = utxoEntryBucket.Cursor()
for ok := cursor.First(); ok; ok = cursor.Next() {
utxoEntryCount++
}
fullUTXOCollection := make(utxoCollection, utxoEntryCount)
for ok := cursor.First(); ok; ok = cursor.Next() {
// Deserialize the outpoint
outpoint, err := deserializeOutpoint(cursor.Key())
if err != nil {
// Ensure any deserialization errors are returned as database
// corruption errors.
if isDeserializeErr(err) {
return database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("corrupt outpoint: %s", err),
}
}
return err
if dag.blockCount == 0 {
if !node.hash.IsEqual(dag.dagParams.GenesisHash) {
return AssertError(fmt.Sprintf("initDAGState: Expected "+
"first entry in block index to be genesis block, "+
"found %s", node.hash))
}
// Deserialize the utxo entry
entry, err := deserializeUTXOEntry(cursor.Value())
if err != nil {
// Ensure any deserialization errors are returned as database
// corruption errors.
if isDeserializeErr(err) {
return database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("corrupt utxo entry: %s", err),
}
}
return err
} else {
if len(node.parents) == 0 {
return AssertError(fmt.Sprintf("initDAGState: block %s "+
"has no parents but it's not the genesis block", node.hash))
}
fullUTXOCollection[*outpoint] = entry
}
// Initialize the reachability store
err = dag.reachabilityStore.init(dbTx)
// Add the node to its parents children, connect it,
// and add it to the block index.
node.updateParentsChildren()
dag.index.addNode(node)
dag.blockCount++
}
log.Debugf("Loading UTXO set...")
fullUTXOCollection := make(utxoCollection)
cursor, err := dbaccess.UTXOSetCursor(dbaccess.NoTx())
if err != nil {
return err
}
defer cursor.Close()
for cursor.Next() {
// Deserialize the outpoint
key, err := cursor.Key()
if err != nil {
return err
}
outpoint, err := deserializeOutpoint(bytes.NewReader(key))
if err != nil {
return err
}
// Apply the loaded utxoCollection to the virtual block.
dag.virtual.utxoSet, err = newFullUTXOSetFromUTXOCollection(fullUTXOCollection)
// Deserialize the utxo entry
value, err := cursor.Value()
if err != nil {
return AssertError(fmt.Sprintf("Error loading UTXOSet: %s", err))
return err
}
entry, err := deserializeUTXOEntry(bytes.NewReader(value))
if err != nil {
return err
}
// Apply the stored tips to the virtual block.
tips := newBlockSet()
for _, tipHash := range state.TipHashes {
tip := dag.index.LookupNode(tipHash)
if tip == nil {
return AssertError(fmt.Sprintf("initDAGState: cannot find "+
"DAG tip %s in block index", state.TipHashes))
}
tips.add(tip)
fullUTXOCollection[*outpoint] = entry
}
log.Debugf("Loading reachability data...")
err = dag.reachabilityStore.init(dbaccess.NoTx())
if err != nil {
return err
}
log.Debugf("Loading multiset data...")
err = dag.multisetStore.init(dbaccess.NoTx())
if err != nil {
return err
}
log.Debugf("Applying the loaded utxoCollection to the virtual block...")
dag.virtual.utxoSet, err = newFullUTXOSetFromUTXOCollection(fullUTXOCollection)
if err != nil {
return AssertError(fmt.Sprintf("Error loading UTXOSet: %s", err))
}
log.Debugf("Applying the stored tips to the virtual block...")
tips := newBlockSet()
for _, tipHash := range dagState.TipHashes {
tip := dag.index.LookupNode(tipHash)
if tip == nil {
return AssertError(fmt.Sprintf("initDAGState: cannot find "+
"DAG tip %s in block index", dagState.TipHashes))
}
dag.virtual.SetTips(tips)
tips.add(tip)
}
dag.virtual.SetTips(tips)
// Set the last finality point
dag.lastFinalityPoint = dag.index.LookupNode(state.LastFinalityPoint)
dag.finalizeNodesBelowFinalityPoint(false)
log.Debugf("Setting the last finality point...")
dag.lastFinalityPoint = dag.index.LookupNode(dagState.LastFinalityPoint)
dag.finalizeNodesBelowFinalityPoint(false)
// Go over any unprocessed blockNodes and process them now.
for _, node := range unprocessedBlockNodes {
// Check to see if the block exists in the block DB. If it
// doesn't, the database has certainly been corrupted.
blockExists, err := dbTx.HasBlock(node.hash)
if err != nil {
return AssertError(fmt.Sprintf("initDAGState: HasBlock "+
"for block %s failed: %s", node.hash, err))
}
if !blockExists {
return AssertError(fmt.Sprintf("initDAGState: block %s "+
"exists in block index but not in block db", node.hash))
}
// Attempt to accept the block.
block, err := dbFetchBlockByNode(dbTx, node)
if err != nil {
return err
}
isOrphan, isDelayed, err := dag.ProcessBlock(block, BFWasStored)
if err != nil {
log.Warnf("Block %s, which was not previously processed, "+
"failed to be accepted to the DAG: %s", node.hash, err)
continue
}
// If the block is an orphan or is delayed then it couldn't have
// possibly been written to the block index in the first place.
if isOrphan {
return AssertError(fmt.Sprintf("Block %s, which was not "+
"previously processed, turned out to be an orphan, which is "+
"impossible.", node.hash))
}
if isDelayed {
return AssertError(fmt.Sprintf("Block %s, which was not "+
"previously processed, turned out to be delayed, which is "+
"impossible.", node.hash))
}
log.Debugf("Processing unprocessed blockNodes...")
for _, node := range unprocessedBlockNodes {
// Check to see if the block exists in the block DB. If it
// doesn't, the database has certainly been corrupted.
blockExists, err := dbaccess.HasBlock(dbaccess.NoTx(), node.hash)
if err != nil {
return AssertError(fmt.Sprintf("initDAGState: HasBlock "+
"for block %s failed: %s", node.hash, err))
}
if !blockExists {
return AssertError(fmt.Sprintf("initDAGState: block %s "+
"exists in block index but not in block db", node.hash))
}
return nil
})
// Attempt to accept the block.
block, err := fetchBlockByHash(dbaccess.NoTx(), node.hash)
if err != nil {
return err
}
isOrphan, isDelayed, err := dag.ProcessBlock(block, BFWasStored)
if err != nil {
log.Warnf("Block %s, which was not previously processed, "+
"failed to be accepted to the DAG: %s", node.hash, err)
continue
}
// If the block is an orphan or is delayed then it couldn't have
// possibly been written to the block index in the first place.
if isOrphan {
return AssertError(fmt.Sprintf("Block %s, which was not "+
"previously processed, turned out to be an orphan, which is "+
"impossible.", node.hash))
}
if isDelayed {
return AssertError(fmt.Sprintf("Block %s, which was not "+
"previously processed, turned out to be delayed, which is "+
"impossible.", node.hash))
}
}
log.Infof("DAG state initialized.")
return nil
}
// deserializeBlockNode parses a value in the block index bucket and returns a block node.
@@ -729,26 +464,26 @@ func (dag *BlockDAG) deserializeBlockNode(blockRow []byte) (*blockNode, error) {
return node, nil
}
// dbFetchBlockByNode uses an existing database transaction to retrieve the
// raw block for the provided node, deserialize it, and return a util.Block
// of it.
func dbFetchBlockByNode(dbTx database.Tx, node *blockNode) (*util.Block, error) {
// Load the raw block bytes from the database.
blockBytes, err := dbTx.FetchBlock(node.hash)
// fetchBlockByHash retrieves the raw block for the provided hash,
// deserializes it, and returns a util.Block of it.
func fetchBlockByHash(dbContext dbaccess.Context, hash *daghash.Hash) (*util.Block, error) {
blockBytes, err := dbaccess.FetchBlock(dbContext, hash)
if err != nil {
return nil, err
}
return util.NewBlockFromBytes(blockBytes)
}
// Create the encapsulated block.
block, err := util.NewBlockFromBytes(blockBytes)
func storeBlock(dbContext *dbaccess.TxContext, block *util.Block) error {
blockBytes, err := block.Bytes()
if err != nil {
return nil, err
return err
}
return block, nil
return dbaccess.StoreBlock(dbContext, block.Hash(), blockBytes)
}
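A usage sketch tying the two helpers together; block is assumed to be a *util.Block already in hand, and error handling is trimmed to the essentials:

dbTx, err := dbaccess.NewTx()
if err != nil {
	return err
}
defer dbTx.RollbackUnlessClosed()
if err := storeBlock(dbTx, block); err != nil {
	return err
}
if err := dbTx.Commit(); err != nil {
	return err
}
// The block can later be read back outside of any explicit transaction.
fetched, err := fetchBlockByHash(dbaccess.NoTx(), block.Hash())
if err != nil {
	return err
}
_ = fetched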
func serializeBlockNode(node *blockNode) ([]byte, error) {
w := bytes.NewBuffer(make([]byte, 0, blockHdrSize+1))
w := bytes.NewBuffer(make([]byte, 0, wire.MaxBlockHeaderPayload+1))
header := node.Header()
err := header.Serialize(w)
if err != nil {
@@ -805,37 +540,11 @@ func serializeBlockNode(node *blockNode) ([]byte, error) {
return w.Bytes(), nil
}
// dbStoreBlockNode stores the block node data into the block
// index bucket. This overwrites the current entry if there exists one.
func dbStoreBlockNode(dbTx database.Tx, node *blockNode) error {
serializedNode, err := serializeBlockNode(node)
if err != nil {
return err
}
// Write block header data to block index bucket.
blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
key := BlockIndexKey(node.hash, node.blueScore)
return blockIndexBucket.Put(key, serializedNode)
}
// dbStoreBlock stores the provided block in the database if it is not already
// there. The full block data is written to ffldb.
func dbStoreBlock(dbTx database.Tx, block *util.Block) error {
hasBlock, err := dbTx.HasBlock(block.Hash())
if err != nil {
return err
}
if hasBlock {
return nil
}
return dbTx.StoreBlock(block)
}
// BlockIndexKey generates the binary key for an entry in the block index
// blockIndexKey generates the binary key for an entry in the block index
// bucket. The key is composed of the block blue score encoded as a big-endian
// 64-bit unsigned int followed by the 32 byte block hash.
// The blue score component is important for iteration order.
func BlockIndexKey(blockHash *daghash.Hash, blueScore uint64) []byte {
func blockIndexKey(blockHash *daghash.Hash, blueScore uint64) []byte {
indexKey := make([]byte, daghash.HashSize+8)
binary.BigEndian.PutUint64(indexKey[0:8], blueScore)
copy(indexKey[8:daghash.HashSize+8], blockHash[:])
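As an illustrative note (hashA and hashB are hypothetical hashes), keys built this way compare by blue score before hash:

keyLow := blockIndexKey(hashA, 100)
keyHigh := blockIndexKey(hashB, 101)
// bytes.Compare(keyLow, keyHigh) < 0, because the big-endian blue score prefix
// makes lower blue scores sort first regardless of the hash bytes.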
@@ -857,13 +566,10 @@ func (dag *BlockDAG) BlockByHash(hash *daghash.Hash) (*util.Block, error) {
return nil, errNotInDAG(str)
}
// Load the block from the database and return it.
var block *util.Block
err := dag.db.View(func(dbTx database.Tx) error {
var err error
block, err = dbFetchBlockByNode(dbTx, node)
return err
})
block, err := fetchBlockByHash(dbaccess.NoTx(), node.hash)
if err != nil {
return nil, err
}
return block, err
}
@@ -888,27 +594,27 @@ func (dag *BlockDAG) BlockHashesFrom(lowHash *daghash.Hash, limit int) ([]*dagha
return nil, err
}
err = dag.index.db.View(func(dbTx database.Tx) error {
blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
lowKey := BlockIndexKey(lowHash, blueScore)
cursor := blockIndexBucket.Cursor()
cursor.Seek(lowKey)
for ok := cursor.Next(); ok; ok = cursor.Next() {
key := cursor.Key()
blockHash, err := blockHashFromBlockIndexKey(key)
if err != nil {
return err
}
blockHashes = append(blockHashes, blockHash)
if len(blockHashes) == limit {
break
}
}
return nil
})
key := blockIndexKey(lowHash, blueScore)
cursor, err := dbaccess.BlockIndexCursorFrom(dbaccess.NoTx(), key)
if dbaccess.IsNotFoundError(err) {
return nil, errors.Wrapf(err, "block %s not in block index", lowHash)
}
if err != nil {
return nil, err
}
defer cursor.Close()
for cursor.Next() && len(blockHashes) < limit {
key, err := cursor.Key()
if err != nil {
return nil, err
}
blockHash, err := blockHashFromBlockIndexKey(key)
if err != nil {
return nil, err
}
blockHashes = append(blockHashes, blockHash)
}
return blockHashes, nil
}

View File

@@ -6,11 +6,11 @@ package blockdag
import (
"bytes"
"encoding/hex"
"github.com/pkg/errors"
"reflect"
"testing"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/daghash"
)
@@ -36,9 +36,21 @@ func TestErrNotInDAG(t *testing.T) {
}
}
// TestUtxoSerialization ensures serializing and deserializing unspent
// hexToBytes converts the passed hex string into bytes and will panic if there
// is an error. This is only provided for the hard-coded constants so errors in
// the source code can be detected. It will only (and must only) be called with
// hard-coded values.
func hexToBytes(s string) []byte {
b, err := hex.DecodeString(s)
if err != nil {
panic("invalid hex in source file: " + s)
}
return b
}
// TestUTXOSerialization ensures serializing and deserializing unspent
// transaction output entries works as expected.
func TestUtxoSerialization(t *testing.T) {
func TestUTXOSerialization(t *testing.T) {
t.Parallel()
tests := []struct {
@@ -54,7 +66,7 @@ func TestUtxoSerialization(t *testing.T) {
blockBlueScore: 1,
packedFlags: tfCoinbase,
},
serialized: hexToBytes("03320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52"),
serialized: hexToBytes("030000000000000000f2052a0100000043410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858eeac"),
},
{
name: "blue score 100001, not coinbase",
@@ -64,13 +76,21 @@ func TestUtxoSerialization(t *testing.T) {
blockBlueScore: 100001,
packedFlags: 0,
},
serialized: hexToBytes("8b99420700ee8bd501094a7d5ca318da2506de35e1cb025ddc"),
serialized: hexToBytes("420d03000000000040420f00000000001976a914ee8bd501094a7d5ca318da2506de35e1cb025ddc88ac"),
},
}
for i, test := range tests {
// Ensure the utxo entry serializes to the expected value.
gotBytes := serializeUTXOEntry(test.entry)
w := &bytes.Buffer{}
err := serializeUTXOEntry(w, test.entry)
if err != nil {
t.Errorf("serializeUTXOEntry #%d (%s) unexpected "+
"error: %v", i, test.name, err)
continue
}
gotBytes := w.Bytes()
if !bytes.Equal(gotBytes, test.serialized) {
t.Errorf("serializeUTXOEntry #%d (%s): mismatched "+
"bytes - got %x, want %x", i, test.name,
@@ -78,8 +98,8 @@ func TestUtxoSerialization(t *testing.T) {
continue
}
// Deserialize to a utxo entry.
utxoEntry, err := deserializeUTXOEntry(test.serialized)
// Deserialize to a utxo entry.
utxoEntry, err := deserializeUTXOEntry(bytes.NewReader(test.serialized))
if err != nil {
t.Errorf("deserializeUTXOEntry #%d (%s) unexpected "+
"error: %v", i, test.name, err)
@@ -124,28 +144,24 @@ func TestUtxoEntryDeserializeErrors(t *testing.T) {
tests := []struct {
name string
serialized []byte
errType error
}{
{
name: "no data after header code",
serialized: hexToBytes("02"),
errType: errDeserialize(""),
},
{
name: "incomplete compressed txout",
serialized: hexToBytes("0232"),
errType: errDeserialize(""),
},
}
for _, test := range tests {
// Ensure the expected error type is returned and the returned
// entry is nil.
entry, err := deserializeUTXOEntry(test.serialized)
if reflect.TypeOf(err) != reflect.TypeOf(test.errType) {
t.Errorf("deserializeUTXOEntry (%s): expected error "+
"type does not match - got %T, want %T",
test.name, err, test.errType)
entry, err := deserializeUTXOEntry(bytes.NewReader(test.serialized))
if err == nil {
t.Errorf("deserializeUTXOEntry (%s): didn't return an error",
test.name)
continue
}
if entry != nil {
@@ -172,7 +188,7 @@ func TestDAGStateSerialization(t *testing.T) {
TipHashes: []*daghash.Hash{newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f")},
LastFinalityPoint: newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"),
},
serialized: []byte("{\"TipHashes\":[[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]}"),
serialized: []byte("{\"TipHashes\":[[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0],\"LocalSubnetworkID\":null}"),
},
{
name: "block 1",
@@ -180,7 +196,7 @@ func TestDAGStateSerialization(t *testing.T) {
TipHashes: []*daghash.Hash{newHashFromStr("00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048")},
LastFinalityPoint: newHashFromStr("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"),
},
serialized: []byte("{\"TipHashes\":[[72,96,235,24,191,27,22,32,227,126,148,144,252,138,66,117,20,65,111,215,81,89,171,134,104,142,154,131,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0]}"),
serialized: []byte("{\"TipHashes\":[[72,96,235,24,191,27,22,32,227,126,148,144,252,138,66,117,20,65,111,215,81,89,171,134,104,142,154,131,0,0,0,0]],\"LastFinalityPoint\":[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,79,147,30,131,101,225,90,8,156,104,214,25,0,0,0,0,0],\"LocalSubnetworkID\":null}"),
},
}
@@ -217,51 +233,6 @@ func TestDAGStateSerialization(t *testing.T) {
}
}
// TestDAGStateDeserializeErrors performs negative tests against
// deserializing the DAG state to ensure error paths work as expected.
func TestDAGStateDeserializeErrors(t *testing.T) {
t.Parallel()
tests := []struct {
name string
serialized []byte
errType error
}{
{
name: "nothing serialized",
serialized: hexToBytes(""),
errType: database.Error{ErrorCode: database.ErrCorruption},
},
{
name: "corrupted data",
serialized: []byte("[[111,226,140,10,182,241,179,114,193,166,162,70,174,99,247,7"),
errType: database.Error{ErrorCode: database.ErrCorruption},
},
}
for _, test := range tests {
// Ensure the expected error type and code is returned.
_, err := deserializeDAGState(test.serialized)
if reflect.TypeOf(err) != reflect.TypeOf(test.errType) {
t.Errorf("deserializeDAGState (%s): expected "+
"error type does not match - got %T, want %T",
test.name, err, test.errType)
continue
}
var dbErr database.Error
if ok := errors.As(err, &dbErr); ok {
tderr := test.errType.(database.Error)
if dbErr.ErrorCode != tderr.ErrorCode {
t.Errorf("deserializeDAGState (%s): "+
"wrong error code got: %v, want: %v",
test.name, dbErr.ErrorCode,
tderr.ErrorCode)
continue
}
}
}
}
// newHashFromStr converts the passed big-endian hex string into a
// daghash.Hash. It only differs from the one available in daghash in that
// it panics in case of an error since it will only (and must only) be

View File

@@ -82,7 +82,7 @@ func TestDifficulty(t *testing.T) {
params := dagconfig.SimnetParams
params.K = 1
params.DifficultyAdjustmentWindowSize = 264
dag, teardownFunc, err := DAGSetup("TestDifficulty", Config{
dag, teardownFunc, err := DAGSetup("TestDifficulty", true, Config{
DAGParams: &params,
})
if err != nil {
@@ -94,7 +94,8 @@ func TestDifficulty(t *testing.T) {
addNode := func(parents blockSet, blockTime time.Time) *blockNode {
bluestParent := parents.bluest()
if blockTime == zeroTime {
blockTime = time.Unix(bluestParent.timestamp+1, 0)
blockTime = time.Unix(bluestParent.timestamp, 0)
blockTime = blockTime.Add(params.TargetTimePerBlock)
}
block, err := PrepareBlockForTest(dag, parents.hashes(), nil)
if err != nil {
@@ -119,7 +120,8 @@ func TestDifficulty(t *testing.T) {
for i := uint64(0); i < dag.difficultyAdjustmentWindowSize; i++ {
tip = addNode(blockSetFromSlice(tip), zeroTime)
if tip.bits != dag.genesis.bits {
t.Fatalf("As long as the bluest parent's blue score is less then the difficulty adjustment window size, the difficulty should be the same as genesis'")
t.Fatalf("As long as the bluest parent's blue score is less then the difficulty adjustment " +
"window size, the difficulty should be the same as genesis'")
}
}
for i := uint64(0); i < dag.difficultyAdjustmentWindowSize+100; i++ {
@@ -140,7 +142,8 @@ func TestDifficulty(t *testing.T) {
}
tip = addNode(blockSetFromSlice(tip), zeroTime)
if compareBits(tip.bits, nodeInThePast.bits) >= 0 {
t.Fatalf("tip.bits should be smaller than nodeInThePast.bits because nodeInThePast increased the block rate, so the difficulty should increase as well")
t.Fatalf("tip.bits should be smaller than nodeInThePast.bits because nodeInThePast increased the " +
"block rate, so the difficulty should increase as well")
}
expectedBits := uint32(0x207f83df)
if tip.bits != expectedBits {
@@ -167,7 +170,9 @@ func TestDifficulty(t *testing.T) {
sameBitsCount = 0
}
}
slowNode := addNode(blockSetFromSlice(tip), time.Unix(tip.timestamp+2, 0))
slowBlockTime := time.Unix(tip.timestamp, 0)
slowBlockTime = slowBlockTime.Add(params.TargetTimePerBlock + time.Second)
slowNode := addNode(blockSetFromSlice(tip), slowBlockTime)
if slowNode.bits != tip.bits {
t.Fatalf("The difficulty should only change when slowNode is in the past of a block bluest parent")
}
@@ -180,7 +185,8 @@ func TestDifficulty(t *testing.T) {
}
tip = addNode(blockSetFromSlice(tip), zeroTime)
if compareBits(tip.bits, slowNode.bits) <= 0 {
t.Fatalf("tip.bits should be smaller than slowNode.bits because slowNode decreased the block rate, so the difficulty should decrease as well")
t.Fatalf("tip.bits should be smaller than slowNode.bits because slowNode decreased the block" +
" rate, so the difficulty should decrease as well")
}
splitNode := addNode(blockSetFromSlice(tip), zeroTime)
@@ -197,7 +203,8 @@ func TestDifficulty(t *testing.T) {
tipWithRedPast := addNode(blockSetFromSlice(redChainTip, blueTip), zeroTime)
tipWithoutRedPast := addNode(blockSetFromSlice(blueTip), zeroTime)
if tipWithoutRedPast.bits != tipWithRedPast.bits {
t.Fatalf("tipWithoutRedPast.bits should be the same as tipWithRedPast.bits because red blocks shouldn't affect the difficulty")
t.Fatalf("tipWithoutRedPast.bits should be the same as tipWithRedPast.bits because red blocks" +
" shouldn't affect the difficulty")
}
}
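A note on the block-time arithmetic used by addNode above: rather than bumping the bluest parent's timestamp by a hard-coded second, the test now converts the parent's Unix timestamp to a time.Time and adds params.TargetTimePerBlock. A standalone sketch of that calculation, with illustrative values only (the real ones come from dagconfig.SimnetParams and the parent node):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Illustrative values; not taken from the changeset.
	parentTimestamp := int64(1593000000)  // bluest parent's Unix timestamp, in seconds
	targetTimePerBlock := 1 * time.Second // params.TargetTimePerBlock (assumed)

	// Same pattern as addNode: convert, then add the target block spacing.
	blockTime := time.Unix(parentTimestamp, 0)
	blockTime = blockTime.Add(targetTimePerBlock)

	fmt.Println(blockTime.Unix() - parentTimestamp) // 1: exactly one target interval later
}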

View File

@@ -4,6 +4,7 @@ import (
"fmt"
"github.com/pkg/errors"
"math"
"strings"
"testing"
"github.com/kaspanet/kaspad/util/subnetworkid"
@@ -40,7 +41,7 @@ func TestFinality(t *testing.T) {
params := dagconfig.SimnetParams
params.K = 1
params.FinalityInterval = 100
dag, teardownFunc, err := blockdag.DAGSetup("TestFinality", blockdag.Config{
dag, teardownFunc, err := blockdag.DAGSetup("TestFinality", true, blockdag.Config{
DAGParams: &params,
})
if err != nil {
@@ -185,7 +186,7 @@ func TestSubnetworkRegistry(t *testing.T) {
params := dagconfig.SimnetParams
params.K = 1
params.BlockCoinbaseMaturity = 0
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", blockdag.Config{
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", true, blockdag.Config{
DAGParams: &params,
})
if err != nil {
@@ -198,7 +199,7 @@ func TestSubnetworkRegistry(t *testing.T) {
if err != nil {
t.Fatalf("could not register network: %s", err)
}
limit, err := dag.SubnetworkStore.GasLimit(subnetworkID)
limit, err := blockdag.GasLimit(subnetworkID)
if err != nil {
t.Fatalf("could not retrieve gas limit: %s", err)
}
@@ -211,7 +212,7 @@ func TestChainedTransactions(t *testing.T) {
params := dagconfig.SimnetParams
params.BlockCoinbaseMaturity = 0
// Create a new database and dag instance to run tests against.
dag, teardownFunc, err := blockdag.DAGSetup("TestChainedTransactions", blockdag.Config{
dag, teardownFunc, err := blockdag.DAGSetup("TestChainedTransactions", true, blockdag.Config{
DAGParams: &params,
})
if err != nil {
@@ -267,11 +268,19 @@ func TestChainedTransactions(t *testing.T) {
}
chainedTx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{chainedTxIn}, []*wire.TxOut{chainedTxOut})
block2, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{tx, chainedTx}, true)
block2, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{tx}, false)
if err != nil {
t.Fatalf("PrepareBlockForTest: %v", err)
}
// Manually add a chained transaction to block2
block2.Transactions = append(block2.Transactions, chainedTx)
block2UtilTxs := make([]*util.Tx, len(block2.Transactions))
for i, tx := range block2.Transactions {
block2UtilTxs[i] = util.NewTx(tx)
}
block2.Header.HashMerkleRoot = blockdag.BuildHashMerkleTreeStore(block2UtilTxs).Root()
// Checks that dag.ProcessBlock fails because we don't allow a transaction to spend another transaction from the same block
isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(block2), blockdag.BFNoPoWCheck)
if err == nil {
@@ -331,7 +340,7 @@ func TestOrderInDiffFromAcceptanceData(t *testing.T) {
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimnetParams
params.K = math.MaxUint8
dag, teardownFunc, err := blockdag.DAGSetup("TestOrderInDiffFromAcceptanceData", blockdag.Config{
dag, teardownFunc, err := blockdag.DAGSetup("TestOrderInDiffFromAcceptanceData", true, blockdag.Config{
DAGParams: &params,
})
if err != nil {
@@ -401,7 +410,7 @@ func TestGasLimit(t *testing.T) {
params := dagconfig.SimnetParams
params.K = 1
params.BlockCoinbaseMaturity = 0
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", blockdag.Config{
dag, teardownFunc, err := blockdag.DAGSetup("TestSubnetworkRegistry", true, blockdag.Config{
DAGParams: &params,
})
if err != nil {
@@ -548,7 +557,7 @@ func TestGasLimit(t *testing.T) {
isOrphan, isDelayed, err = dag.ProcessBlock(util.NewBlock(nonExistentSubnetworkBlock), blockdag.BFNoPoWCheck)
expectedErrStr := fmt.Sprintf("Error getting gas limit for subnetworkID '%s': subnetwork '%s' not found",
nonExistentSubnetwork, nonExistentSubnetwork)
if err.Error() != expectedErrStr {
if !strings.Contains(err.Error(), expectedErrStr) {
t.Fatalf("ProcessBlock expected error \"%v\" but got \"%v\"", expectedErrStr, err)
}
if isDelayed {

View File

@@ -3,7 +3,7 @@ package blockdag
import (
"fmt"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"reflect"
@@ -33,7 +33,7 @@ func TestGHOSTDAG(t *testing.T) {
}{
{
k: 3,
expectedReds: []string{"F", "G", "H", "I", "O", "P"},
expectedReds: []string{"F", "G", "H", "I", "N", "Q"},
dagData: []*testBlockData{
{
parents: []string{"A"},
@@ -166,7 +166,7 @@ func TestGHOSTDAG(t *testing.T) {
id: "T",
expectedScore: 13,
expectedSelectedParent: "S",
expectedBlues: []string{"S", "N", "Q"},
expectedBlues: []string{"S", "O", "P"},
},
},
},
@@ -176,7 +176,7 @@ func TestGHOSTDAG(t *testing.T) {
func() {
resetExtraNonceForTest()
dagParams.K = test.k
dag, teardownFunc, err := DAGSetup(fmt.Sprintf("TestGHOSTDAG%d", i), Config{
dag, teardownFunc, err := DAGSetup(fmt.Sprintf("TestGHOSTDAG%d", i), true, Config{
DAGParams: &dagParams,
})
if err != nil {
@@ -282,7 +282,7 @@ func checkReds(expectedReds []string, reds map[string]bool) bool {
func TestBlueAnticoneSizeErrors(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizeErrors", Config{
dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizeErrors", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
@@ -323,7 +323,7 @@ func TestBlueAnticoneSizeErrors(t *testing.T) {
func TestGHOSTDAGErrors(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestGHOSTDAGErrors", Config{
dag, teardownFunc, err := DAGSetup("TestGHOSTDAGErrors", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
@@ -340,19 +340,21 @@ func TestGHOSTDAGErrors(t *testing.T) {
// Clear the reachability store
dag.reachabilityStore.loaded = map[daghash.Hash]*reachabilityData{}
err = dag.db.Update(func(dbTx database.Tx) error {
bucket := dbTx.Metadata().Bucket(reachabilityDataBucketName)
cursor := bucket.Cursor()
for ok := cursor.First(); ok; ok = cursor.Next() {
err := bucket.Delete(cursor.Key())
if err != nil {
return err
}
}
return nil
})
dbTx, err := dbaccess.NewTx()
if err != nil {
t.Fatalf("TestGHOSTDAGErrors: db.Update failed: %s", err)
t.Fatalf("NewTx: %s", err)
}
defer dbTx.RollbackUnlessClosed()
err = dbaccess.ClearReachabilityData(dbTx)
if err != nil {
t.Fatalf("ClearReachabilityData: %s", err)
}
err = dbTx.Commit()
if err != nil {
t.Fatalf("Commit: %s", err)
}
// Try to rerun GHOSTDAG on the last block. GHOSTDAG uses

View File

@@ -4,29 +4,16 @@ import (
"bytes"
"encoding/gob"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
)
const (
// acceptanceIndexName is the human-readable name for the index.
acceptanceIndexName = "acceptance index"
)
var (
// acceptanceIndexKey is the key of the acceptance index and the db bucket used
// to house it.
acceptanceIndexKey = []byte("acceptanceidx")
)
// AcceptanceIndex implements a txAcceptanceData by block hash index. That is to say,
// it stores a mapping between a block's hash and the set of transactions that the
// block accepts among its blue blocks.
type AcceptanceIndex struct {
db database.DB
dag *blockdag.BlockDAG
}
@@ -43,122 +30,82 @@ func NewAcceptanceIndex() *AcceptanceIndex {
return &AcceptanceIndex{}
}
// DropAcceptanceIndex drops the acceptance index from the provided database if it
// exists.
func DropAcceptanceIndex(db database.DB, interrupt <-chan struct{}) error {
return dropIndex(db, acceptanceIndexKey, acceptanceIndexName, interrupt)
}
// DropAcceptanceIndex drops the acceptance index.
func DropAcceptanceIndex() error {
dbTx, err := dbaccess.NewTx()
if err != nil {
return err
}
defer dbTx.RollbackUnlessClosed()
// Key returns the database key to use for the index as a byte slice.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Key() []byte {
return acceptanceIndexKey
}
err = dbaccess.DropAcceptanceIndex(dbTx)
if err != nil {
return err
}
// Name returns the human-readable name of the index.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Name() string {
return acceptanceIndexName
}
// Create is invoked when the indexer manager determines the index needs
// to be created for the first time. It creates the bucket for the
// acceptance index.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Create(dbTx database.Tx) error {
_, err := dbTx.Metadata().CreateBucket(acceptanceIndexKey)
return err
return dbTx.Commit()
}
// Init initializes the hash-based acceptance index.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Init(db database.DB, dag *blockdag.BlockDAG) error {
idx.db = db
func (idx *AcceptanceIndex) Init(dag *blockdag.BlockDAG) error {
idx.dag = dag
return nil
return idx.recover()
}
// recover attempts to insert any data that's missing from the
// acceptance index.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) recover() error {
dbTx, err := dbaccess.NewTx()
if err != nil {
return err
}
defer dbTx.RollbackUnlessClosed()
err = idx.dag.ForEachHash(func(hash daghash.Hash) error {
exists, err := dbaccess.HasAcceptanceData(dbTx, &hash)
if err != nil {
return err
}
if exists {
return nil
}
txAcceptanceData, err := idx.dag.TxsAcceptedByBlockHash(&hash)
if err != nil {
return err
}
return idx.ConnectBlock(dbTx, &hash, txAcceptanceData)
})
if err != nil {
return err
}
return dbTx.Commit()
}
// ConnectBlock is invoked by the index manager when a new block has been
// connected to the DAG.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) ConnectBlock(dbTx database.Tx, _ *util.Block, blockID uint64, _ *blockdag.BlockDAG,
txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData, _ blockdag.MultiBlockTxsAcceptanceData) error {
return dbPutTxsAcceptanceData(dbTx, blockID, txsAcceptanceData)
}
// TxsAcceptanceData returns the acceptance data of all the transactions that
// were accepted by the block with hash blockHash.
func (idx *AcceptanceIndex) TxsAcceptanceData(blockHash *daghash.Hash) (blockdag.MultiBlockTxsAcceptanceData, error) {
var txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData
err := idx.db.View(func(dbTx database.Tx) error {
var err error
txsAcceptanceData, err = dbFetchTxsAcceptanceDataByHash(dbTx, blockHash)
return err
})
if err != nil {
return nil, err
}
return txsAcceptanceData, nil
}
// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *AcceptanceIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
for blockID := currentBlockID + 1; blockID <= lastKnownBlockID; blockID++ {
hash, err := blockdag.DBFetchBlockHashByID(dbTx, currentBlockID)
if err != nil {
return err
}
txAcceptanceData, err := idx.dag.TxsAcceptedByBlockHash(hash)
if err != nil {
return err
}
err = idx.ConnectBlock(dbTx, nil, blockID, nil, txAcceptanceData, nil)
if err != nil {
return err
}
}
return nil
}
func dbPutTxsAcceptanceData(dbTx database.Tx, blockID uint64,
func (idx *AcceptanceIndex) ConnectBlock(dbContext *dbaccess.TxContext, blockHash *daghash.Hash,
txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
serializedTxsAcceptanceData, err := serializeMultiBlockTxsAcceptanceData(txsAcceptanceData)
if err != nil {
return err
}
bucket := dbTx.Metadata().Bucket(acceptanceIndexKey)
return bucket.Put(blockdag.SerializeBlockID(blockID), serializedTxsAcceptanceData)
return dbaccess.StoreAcceptanceData(dbContext, blockHash, serializedTxsAcceptanceData)
}
func dbFetchTxsAcceptanceDataByHash(dbTx database.Tx,
hash *daghash.Hash) (blockdag.MultiBlockTxsAcceptanceData, error) {
blockID, err := blockdag.DBFetchBlockIDByHash(dbTx, hash)
// TxsAcceptanceData returns the acceptance data of all the transactions that
// were accepted by the block with hash blockHash.
func (idx *AcceptanceIndex) TxsAcceptanceData(blockHash *daghash.Hash) (blockdag.MultiBlockTxsAcceptanceData, error) {
serializedTxsAcceptanceData, err := dbaccess.FetchAcceptanceData(dbaccess.NoTx(), blockHash)
if err != nil {
return nil, err
}
return dbFetchTxsAcceptanceDataByID(dbTx, blockID)
}
func dbFetchTxsAcceptanceDataByID(dbTx database.Tx,
blockID uint64) (blockdag.MultiBlockTxsAcceptanceData, error) {
serializedBlockID := blockdag.SerializeBlockID(blockID)
bucket := dbTx.Metadata().Bucket(acceptanceIndexKey)
serializedTxsAcceptanceData := bucket.Get(serializedBlockID)
if serializedTxsAcceptanceData == nil {
return nil, errors.Errorf("no entry in the accpetance index for block id %d", blockID)
}
return deserializeMultiBlockTxsAcceptanceData(serializedTxsAcceptanceData)
}
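Both DropAcceptanceIndex and recover above follow the same dbaccess transaction shape: open a transaction with NewTx, defer RollbackUnlessClosed so an early return cannot leak it, do the writes, and Commit at the end. A sketch of just that shape (writeAtomically and its storeSomething callback are hypothetical; NewTx, RollbackUnlessClosed and Commit are the calls used above):

package example

import "github.com/kaspanet/kaspad/dbaccess"

// writeAtomically mirrors the transaction handling shown above. The
// storeSomething callback stands in for whatever writes the caller performs.
func writeAtomically(storeSomething func(dbTx *dbaccess.TxContext) error) error {
	dbTx, err := dbaccess.NewTx()
	if err != nil {
		return err
	}
	// Rolls the transaction back on early return; a no-op once Commit has closed it.
	defer dbTx.RollbackUnlessClosed()

	if err := storeSomething(dbTx); err != nil {
		return err
	}
	return dbTx.Commit()
}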

View File

@@ -3,7 +3,7 @@ package indexers
import (
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
@@ -96,7 +96,7 @@ func TestAcceptanceIndexRecover(t *testing.T) {
}
defer os.RemoveAll(db1Path)
db1, err := database.Create("ffldb", db1Path, params.Net)
err = dbaccess.Open(db1Path)
if err != nil {
t.Fatalf("error creating db: %s", err)
}
@@ -104,10 +104,9 @@ func TestAcceptanceIndexRecover(t *testing.T) {
db1Config := blockdag.Config{
IndexManager: db1IndexManager,
DAGParams: params,
DB: db1,
}
db1DAG, teardown, err := blockdag.DAGSetup("", db1Config)
db1DAG, teardown, err := blockdag.DAGSetup("", false, db1Config)
if err != nil {
t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
}
@@ -130,11 +129,6 @@ func TestAcceptanceIndexRecover(t *testing.T) {
}
}
err = db1.FlushCache()
if err != nil {
t.Fatalf("Error flushing database to disk: %s", err)
}
db2Path, err := ioutil.TempDir("", "TestAcceptanceIndexRecover2")
if err != nil {
t.Fatalf("Error creating temporary directory: %s", err)
@@ -166,17 +160,20 @@ func TestAcceptanceIndexRecover(t *testing.T) {
t.Fatalf("Error fetching acceptance data: %s", err)
}
db2, err := database.Open("ffldb", db2Path, params.Net)
err = dbaccess.Close()
if err != nil {
t.Fatalf("Error opening database: %s", err)
t.Fatalf("Error closing the database: %s", err)
}
err = dbaccess.Open(db2Path)
if err != nil {
t.Fatalf("error creating db: %s", err)
}
db2Config := blockdag.Config{
DAGParams: params,
DB: db2,
}
db2DAG, teardown, err := blockdag.DAGSetup("", db2Config)
db2DAG, teardown, err := blockdag.DAGSetup("", false, db2Config)
if err != nil {
t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
}
@@ -199,10 +196,6 @@ func TestAcceptanceIndexRecover(t *testing.T) {
}
}
err = db2.FlushCache()
if err != nil {
t.Fatalf("Error flushing database to disk: %s", err)
}
db3Path, err := ioutil.TempDir("", "TestAcceptanceIndexRecover3")
if err != nil {
t.Fatalf("Error creating temporary directory: %s", err)
@@ -213,9 +206,13 @@ func TestAcceptanceIndexRecover(t *testing.T) {
t.Fatalf("copyDirectory: %s", err)
}
db3, err := database.Open("ffldb", db3Path, params.Net)
err = dbaccess.Close()
if err != nil {
t.Fatalf("Error opening database: %s", err)
t.Fatalf("Error closing the database: %s", err)
}
err = dbaccess.Open(db3Path)
if err != nil {
t.Fatalf("error creating db: %s", err)
}
db3AcceptanceIndex := NewAcceptanceIndex()
@@ -223,10 +220,9 @@ func TestAcceptanceIndexRecover(t *testing.T) {
db3Config := blockdag.Config{
IndexManager: db3IndexManager,
DAGParams: params,
DB: db3,
}
_, teardown, err = blockdag.DAGSetup("", db3Config)
_, teardown, err = blockdag.DAGSetup("", false, db3Config)
if err != nil {
t.Fatalf("TestAcceptanceIndexRecover: Failed to setup DAG instance: %v", err)
}
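TestAcceptanceIndexRecover now drives the global dbaccess handle directly: it closes the current database before opening the copy it wants to recover from. A sketch of that open/work/close cycle in isolation (withDatabase and its temp-dir handling are hypothetical; Open and Close are the dbaccess calls used above):

package example

import (
	"io/ioutil"
	"os"

	"github.com/kaspanet/kaspad/dbaccess"
)

// withDatabase opens a throwaway database, runs work against it, and releases
// the global dbaccess handle afterwards so the next Open does not collide with it.
func withDatabase(work func() error) error {
	path, err := ioutil.TempDir("", "example-db")
	if err != nil {
		return err
	}
	defer os.RemoveAll(path)

	if err := dbaccess.Open(path); err != nil {
		return err
	}
	defer dbaccess.Close()

	return work()
}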

View File

@@ -1,112 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
/*
Package indexers implements optional block DAG indexes.
*/
package indexers
import (
"encoding/binary"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
)
var (
// byteOrder is the preferred byte order used for serializing numeric
// fields for storage in the database.
byteOrder = binary.LittleEndian
// errInterruptRequested indicates that an operation was cancelled due
// to a user-requested interrupt.
errInterruptRequested = errors.New("interrupt requested")
)
// NeedsInputser provides a generic interface for an indexer to specify that it
// requires the ability to look up inputs for a transaction.
type NeedsInputser interface {
NeedsInputs() bool
}
// Indexer provides a generic interface for an indexer that is managed by an
// index manager such as the Manager type provided by this package.
type Indexer interface {
// Key returns the key of the index as a byte slice.
Key() []byte
// Name returns the human-readable name of the index.
Name() string
// Create is invoked when the indexer manager determines the index needs
// to be created for the first time.
Create(dbTx database.Tx) error
// Init is invoked when the index manager is first initializing the
// index. This differs from the Create method in that it is called on
// every load, including the case the index was just created.
Init(db database.DB, dag *blockdag.BlockDAG) error
// ConnectBlock is invoked when the index manager is notified that a new
// block has been connected to the DAG.
ConnectBlock(dbTx database.Tx,
block *util.Block,
blockID uint64,
dag *blockdag.BlockDAG,
acceptedTxsData blockdag.MultiBlockTxsAcceptanceData,
virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error
// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error
}
// AssertError identifies an error that indicates an internal code consistency
// issue and should be treated as a critical and unrecoverable error.
type AssertError string
// Error returns the assertion error as a human-readable string and satisfies
// the error interface.
func (e AssertError) Error() string {
return "assertion failed: " + string(e)
}
// errDeserialize signifies that a problem was encountered when deserializing
// data.
type errDeserialize string
// Error implements the error interface.
func (e errDeserialize) Error() string {
return string(e)
}
// isDeserializeErr returns whether or not the passed error is an errDeserialize
// error.
func isDeserializeErr(err error) bool {
var deserializeErr errDeserialize
return errors.As(err, &deserializeErr)
}
// internalBucket is an abstraction over a database bucket. It is used to make
// the code easier to test since it allows mock objects in the tests to only
// implement these functions instead of everything a database.Bucket supports.
type internalBucket interface {
Get(key []byte) []byte
Put(key []byte, value []byte) error
Delete(key []byte) error
}
// interruptRequested returns true when the provided channel has been closed.
// This simplifies early shutdown slightly since the caller can just use an if
// statement instead of a select.
func interruptRequested(interrupted <-chan struct{}) bool {
select {
case <-interrupted:
return true
default:
}
return false
}

View File

@@ -0,0 +1,28 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
/*
Package indexers implements optional block DAG indexes.
*/
package indexers
import (
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util/daghash"
)
// Indexer provides a generic interface for an indexer that is managed by an
// index manager such as the Manager type provided by this package.
type Indexer interface {
// Init is invoked when the index manager is first initializing the
// index.
Init(dag *blockdag.BlockDAG) error
// ConnectBlock is invoked when the index manager is notified that a new
// block has been connected to the DAG.
ConnectBlock(dbContext *dbaccess.TxContext,
blockHash *daghash.Hash,
acceptedTxsData blockdag.MultiBlockTxsAcceptanceData) error
}
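For reference, the slimmed-down interface above can be satisfied by a very small type. The sketch below is a hypothetical no-op indexer, not part of the changeset; it only demonstrates the two methods an index now has to provide:

package example

import (
	"github.com/kaspanet/kaspad/blockdag"
	"github.com/kaspanet/kaspad/dbaccess"
	"github.com/kaspanet/kaspad/util/daghash"
)

// noopIndex satisfies the Indexer interface shown above without storing anything.
type noopIndex struct {
	dag *blockdag.BlockDAG
}

// Init just remembers the DAG the index was initialized with.
func (idx *noopIndex) Init(dag *blockdag.BlockDAG) error {
	idx.dag = dag
	return nil
}

// ConnectBlock ignores the connected block and its acceptance data.
func (idx *noopIndex) ConnectBlock(dbContext *dbaccess.TxContext, blockHash *daghash.Hash,
	acceptedTxsData blockdag.MultiBlockTxsAcceptanceData) error {
	return nil
}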

View File

@@ -6,190 +6,30 @@ package indexers
import (
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util/daghash"
)
var (
// indexTipsBucketName is the name of the db bucket used to house the
// current tip of each index.
indexTipsBucketName = []byte("idxtips")
indexCurrentBlockIDBucketName = []byte("idxcurrentblockid")
)
// Manager defines an index manager that manages multiple optional indexes and
// implements the blockdag.IndexManager interface so it can be seamlessly
// plugged into normal DAG processing.
type Manager struct {
db database.DB
enabledIndexes []Indexer
}
// Ensure the Manager type implements the blockdag.IndexManager interface.
var _ blockdag.IndexManager = (*Manager)(nil)
// indexDropKey returns the key for an index which indicates it is in the
// process of being dropped.
func indexDropKey(idxKey []byte) []byte {
dropKey := make([]byte, len(idxKey)+1)
dropKey[0] = 'd'
copy(dropKey[1:], idxKey)
return dropKey
}
// maybeFinishDrops determines whether each of the enabled indexes is in the middle
// of being dropped and finishes dropping them when they are. This is necessary
// because dropping an index has to be done in several atomic steps rather than
// one big atomic step due to the massive number of entries.
func (m *Manager) maybeFinishDrops(interrupt <-chan struct{}) error {
indexNeedsDrop := make([]bool, len(m.enabledIndexes))
err := m.db.View(func(dbTx database.Tx) error {
// None of the indexes needs to be dropped if the index tips
// bucket hasn't been created yet.
indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
if indexesBucket == nil {
return nil
}
// Mark the indexer as requiring a drop if one is already in
// progress.
for i, indexer := range m.enabledIndexes {
dropKey := indexDropKey(indexer.Key())
if indexesBucket.Get(dropKey) != nil {
indexNeedsDrop[i] = true
}
}
return nil
})
if err != nil {
return err
}
if interruptRequested(interrupt) {
return errInterruptRequested
}
// Finish dropping any of the enabled indexes that are already in the
// middle of being dropped.
for i, indexer := range m.enabledIndexes {
if !indexNeedsDrop[i] {
continue
}
log.Infof("Resuming %s drop", indexer.Name())
err := dropIndex(m.db, indexer.Key(), indexer.Name(), interrupt)
if err != nil {
return err
}
}
return nil
}
// maybeCreateIndexes determines if each of the enabled indexes have already
// been created and creates them if not.
func (m *Manager) maybeCreateIndexes(dbTx database.Tx) error {
indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
for _, indexer := range m.enabledIndexes {
// Nothing to do if the index tip already exists.
idxKey := indexer.Key()
if indexesBucket.Get(idxKey) != nil {
continue
}
// The tip for the index does not exist, so create it and
// invoke the create callback for the index so it can perform
// any one-time initialization it requires.
if err := indexer.Create(dbTx); err != nil {
return err
}
// TODO (Mike): this is a temporary solution to prevent the node from failing to start
// because it thinks the indexers are not initialized.
// Indexers, however, do not work properly, and a general solution for their operation is required
indexesBucket.Put(idxKey, []byte{0})
}
return nil
}
// Init initializes the enabled indexes. This is called during DAG
// initialization and primarily consists of catching up all indexes to the
// current tips. This is necessary since each index can be disabled
// and re-enabled at any time and attempting to catch-up indexes at the same
// time new blocks are being downloaded would lead to an overall longer time to
// catch up due to the I/O contention.
//
// Init initializes the enabled indexes.
// This is part of the blockdag.IndexManager interface.
func (m *Manager) Init(db database.DB, blockDAG *blockdag.BlockDAG, interrupt <-chan struct{}) error {
// Nothing to do when no indexes are enabled.
if len(m.enabledIndexes) == 0 {
return nil
}
if interruptRequested(interrupt) {
return errInterruptRequested
}
m.db = db
// Finish any drops that were previously interrupted.
if err := m.maybeFinishDrops(interrupt); err != nil {
return err
}
// Create the initial state for the indexes as needed.
err := m.db.Update(func(dbTx database.Tx) error {
// Create the bucket for the current tips as needed.
meta := dbTx.Metadata()
_, err := meta.CreateBucketIfNotExists(indexTipsBucketName)
if err != nil {
return err
}
if _, err := meta.CreateBucketIfNotExists(indexCurrentBlockIDBucketName); err != nil {
return err
}
return m.maybeCreateIndexes(dbTx)
})
if err != nil {
return err
}
// Initialize each of the enabled indexes.
func (m *Manager) Init(dag *blockdag.BlockDAG) error {
for _, indexer := range m.enabledIndexes {
if err := indexer.Init(db, blockDAG); err != nil {
if err := indexer.Init(dag); err != nil {
return err
}
}
return m.recoverIfNeeded()
}
// recoverIfNeeded checks if the node worked for some time
// without one of the current enabled indexes, and if it's
// the case, recovers the missing blocks from the index.
func (m *Manager) recoverIfNeeded() error {
return m.db.Update(func(dbTx database.Tx) error {
lastKnownBlockID := blockdag.DBFetchCurrentBlockID(dbTx)
for _, indexer := range m.enabledIndexes {
serializedCurrentIdxBlockID := dbTx.Metadata().Bucket(indexCurrentBlockIDBucketName).Get(indexer.Key())
currentIdxBlockID := uint64(0)
if serializedCurrentIdxBlockID != nil {
currentIdxBlockID = blockdag.DeserializeBlockID(serializedCurrentIdxBlockID)
}
if lastKnownBlockID > currentIdxBlockID {
err := indexer.Recover(dbTx, currentIdxBlockID, lastKnownBlockID)
if err != nil {
return err
}
}
}
return nil
})
return nil
}
// ConnectBlock must be invoked when a block is added to the DAG. It
@@ -197,32 +37,13 @@ func (m *Manager) recoverIfNeeded() error {
// checks, and invokes each indexer.
//
// This is part of the blockdag.IndexManager interface.
func (m *Manager) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
func (m *Manager) ConnectBlock(dbContext *dbaccess.TxContext, blockHash *daghash.Hash, txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
// Call each of the currently active optional indexes with the block
// being connected so they can update accordingly.
for _, index := range m.enabledIndexes {
// Notify the indexer with the connected block so it can index it.
if err := index.ConnectBlock(dbTx, block, blockID, dag, txsAcceptanceData, virtualTxsAcceptanceData); err != nil {
return err
}
}
// Add the new block ID index entry for the block being connected and
// update the current internal block ID accordingly.
err := m.updateIndexersWithCurrentBlockID(dbTx, block.Hash(), blockID)
if err != nil {
return err
}
return nil
}
func (m *Manager) updateIndexersWithCurrentBlockID(dbTx database.Tx, blockHash *daghash.Hash, blockID uint64) error {
serializedBlockID := blockdag.SerializeBlockID(blockID)
for _, index := range m.enabledIndexes {
err := dbTx.Metadata().Bucket(indexCurrentBlockIDBucketName).Put(index.Key(), serializedBlockID)
if err != nil {
if err := index.ConnectBlock(dbContext, blockHash, txsAcceptanceData); err != nil {
return err
}
}
@@ -238,155 +59,3 @@ func NewManager(enabledIndexes []Indexer) *Manager {
enabledIndexes: enabledIndexes,
}
}
// dropIndex drops the passed index from the database. Since indexes can be
// massive, it deletes the index in multiple database transactions in order to
// keep memory usage to reasonable levels. It also marks the drop in progress
// so the drop can be resumed if it is interrupted, and the drop must complete
// before the index can be used again.
func dropIndex(db database.DB, idxKey []byte, idxName string, interrupt <-chan struct{}) error {
// Nothing to do if the index doesn't already exist.
var needsDelete bool
err := db.View(func(dbTx database.Tx) error {
indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
if indexesBucket != nil && indexesBucket.Get(idxKey) != nil {
needsDelete = true
}
return nil
})
if err != nil {
return err
}
if !needsDelete {
log.Infof("Not dropping %s because it does not exist", idxName)
return nil
}
// Mark that the index is in the process of being dropped so that it
// can be resumed on the next start if interrupted before the process is
// complete.
log.Infof("Dropping all %s entries. This might take a while...",
idxName)
err = db.Update(func(dbTx database.Tx) error {
indexesBucket := dbTx.Metadata().Bucket(indexTipsBucketName)
return indexesBucket.Put(indexDropKey(idxKey), idxKey)
})
if err != nil {
return err
}
// Since the indexes can be so large, attempting to simply delete
// the bucket in a single database transaction would result in massive
// memory usage and likely crash many systems due to ulimits. In order
// to avoid this, use a cursor to delete a maximum number of entries out
// of the bucket at a time. Recurse buckets depth-first to delete any
// sub-buckets.
const maxDeletions = 2000000
var totalDeleted uint64
// Recurse through all buckets in the index, cataloging each for
// later deletion.
var subBuckets [][][]byte
var subBucketClosure func(database.Tx, []byte, [][]byte) error
subBucketClosure = func(dbTx database.Tx,
subBucket []byte, tlBucket [][]byte) error {
// Get full bucket name and append to subBuckets for later
// deletion.
var bucketName [][]byte
if (tlBucket == nil) || (len(tlBucket) == 0) {
bucketName = append(bucketName, subBucket)
} else {
bucketName = append(tlBucket, subBucket)
}
subBuckets = append(subBuckets, bucketName)
// Recurse sub-buckets to append to subBuckets slice.
bucket := dbTx.Metadata()
for _, subBucketName := range bucketName {
bucket = bucket.Bucket(subBucketName)
}
return bucket.ForEachBucket(func(k []byte) error {
return subBucketClosure(dbTx, k, bucketName)
})
}
// Call subBucketClosure with top-level bucket.
err = db.View(func(dbTx database.Tx) error {
return subBucketClosure(dbTx, idxKey, nil)
})
if err != nil {
return nil
}
// Iterate through each sub-bucket in reverse, deepest-first, deleting
// all keys inside them and then dropping the buckets themselves.
for i := range subBuckets {
bucketName := subBuckets[len(subBuckets)-1-i]
// Delete maxDeletions key/value pairs at a time.
for numDeleted := maxDeletions; numDeleted == maxDeletions; {
numDeleted = 0
err := db.Update(func(dbTx database.Tx) error {
subBucket := dbTx.Metadata()
for _, subBucketName := range bucketName {
subBucket = subBucket.Bucket(subBucketName)
}
cursor := subBucket.Cursor()
for ok := cursor.First(); ok; ok = cursor.Next() &&
numDeleted < maxDeletions {
if err := cursor.Delete(); err != nil {
return err
}
numDeleted++
}
return nil
})
if err != nil {
return err
}
if numDeleted > 0 {
totalDeleted += uint64(numDeleted)
log.Infof("Deleted %d keys (%d total) from %s",
numDeleted, totalDeleted, idxName)
}
}
if interruptRequested(interrupt) {
return errInterruptRequested
}
// Drop the bucket itself.
err = db.Update(func(dbTx database.Tx) error {
bucket := dbTx.Metadata()
for j := 0; j < len(bucketName)-1; j++ {
bucket = bucket.Bucket(bucketName[j])
}
return bucket.DeleteBucket(bucketName[len(bucketName)-1])
})
if err != nil {
return err
}
}
// Remove the index tip, index bucket, and in-progress drop flag now
// that all index entries have been removed.
err = db.Update(func(dbTx database.Tx) error {
meta := dbTx.Metadata()
indexesBucket := meta.Bucket(indexTipsBucketName)
if err := indexesBucket.Delete(idxKey); err != nil {
return err
}
if err := meta.Bucket(indexCurrentBlockIDBucketName).Delete(idxKey); err != nil {
return err
}
return indexesBucket.Delete(indexDropKey(idxKey))
})
if err != nil {
return err
}
log.Infof("Dropped %s", idxName)
return nil
}
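With the block-ID bookkeeping and the drop machinery removed, wiring the index manager into a DAG reduces to the few calls below. This is a sketch based only on NewAcceptanceIndex, NewManager and the blockdag.Config usage visible in the tests above; everything else about the surrounding setup is assumed:

package example

import (
	"github.com/kaspanet/kaspad/blockdag"
	"github.com/kaspanet/kaspad/blockdag/indexers"
	"github.com/kaspanet/kaspad/dagconfig"
)

// newConfigWithAcceptanceIndex sketches how the acceptance index is plugged
// into a DAG through the Manager.
func newConfigWithAcceptanceIndex() blockdag.Config {
	acceptanceIndex := indexers.NewAcceptanceIndex()
	indexManager := indexers.NewManager([]indexers.Indexer{acceptanceIndex})

	params := dagconfig.SimnetParams
	return blockdag.Config{
		IndexManager: indexManager,
		DAGParams:    &params,
	}
}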

View File

@@ -1,206 +0,0 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"math"
"sort"
"sync"
"time"
)
const (
// maxAllowedOffsetSeconds is the maximum number of seconds in either
// direction that local clock will be adjusted. When the median time
// of the network is outside of this range, no offset will be applied.
maxAllowedOffsetSecs = 70 * 60 // 1 hour 10 minutes
// similarTimeSecs is the number of seconds in either direction from the
// local clock that is used to determine that it is likely wrong and
// hence to show a warning.
similarTimeSecs = 5 * 60 // 5 minutes
)
var (
// maxMedianTimeEntries is the maximum number of entries allowed in the
// median time data. This is a variable as opposed to a constant so the
// test code can modify it.
maxMedianTimeEntries = 200
)
// MedianTimeSource provides a mechanism to add several time samples which are
// used to determine a median time which is then used as an offset to the local
// clock.
type MedianTimeSource interface {
// AdjustedTime returns the current time adjusted by the median time
// offset as calculated from the time samples added by AddTimeSample.
AdjustedTime() time.Time
// AddTimeSample adds a time sample that is used when determining the
// median time of the added samples.
AddTimeSample(id string, timeVal time.Time)
// Offset returns the number of seconds to adjust the local clock based
// upon the median of the time samples added by AddTimeData.
Offset() time.Duration
}
// int64Sorter implements sort.Interface to allow a slice of 64-bit integers to
// be sorted.
type int64Sorter []int64
// Len returns the number of 64-bit integers in the slice. It is part of the
// sort.Interface implementation.
func (s int64Sorter) Len() int {
return len(s)
}
// Swap swaps the 64-bit integers at the passed indices. It is part of the
// sort.Interface implementation.
func (s int64Sorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
// Less returns whether the 64-bit integer with index i should sort before the
// 64-bit integer with index j. It is part of the sort.Interface
// implementation.
func (s int64Sorter) Less(i, j int) bool {
return s[i] < s[j]
}
// medianTime provides an implementation of the MedianTimeSource interface.
type medianTime struct {
mtx sync.Mutex
knownIDs map[string]struct{}
offsets []int64
offsetSecs int64
invalidTimeChecked bool
}
// Ensure the medianTime type implements the MedianTimeSource interface.
var _ MedianTimeSource = (*medianTime)(nil)
// AdjustedTime returns the current time adjusted by the median time offset as
// calculated from the time samples added by AddTimeSample.
//
// This function is safe for concurrent access and is part of the
// MedianTimeSource interface implementation.
func (m *medianTime) AdjustedTime() time.Time {
m.mtx.Lock()
defer m.mtx.Unlock()
// Limit the adjusted time to 1 second precision.
now := time.Unix(time.Now().Unix(), 0)
return now.Add(time.Duration(m.offsetSecs) * time.Second)
}
// AddTimeSample adds a time sample that is used when determining the median
// time of the added samples.
//
// This function is safe for concurrent access and is part of the
// MedianTimeSource interface implementation.
func (m *medianTime) AddTimeSample(sourceID string, timeVal time.Time) {
m.mtx.Lock()
defer m.mtx.Unlock()
// Don't add time data from the same source.
if _, exists := m.knownIDs[sourceID]; exists {
return
}
m.knownIDs[sourceID] = struct{}{}
// Truncate the provided offset to seconds and append it to the slice
// of offsets while respecting the maximum number of allowed entries by
// replacing the oldest entry with the new entry once the maximum number
// of entries is reached.
now := time.Unix(time.Now().Unix(), 0)
offsetSecs := int64(timeVal.Sub(now).Seconds())
numOffsets := len(m.offsets)
if numOffsets == maxMedianTimeEntries && maxMedianTimeEntries > 0 {
m.offsets = m.offsets[1:]
numOffsets--
}
m.offsets = append(m.offsets, offsetSecs)
numOffsets++
// Sort the offsets so the median can be obtained as needed later.
sortedOffsets := make([]int64, numOffsets)
copy(sortedOffsets, m.offsets)
sort.Sort(int64Sorter(sortedOffsets))
offsetDuration := time.Duration(offsetSecs) * time.Second
log.Debugf("Added time sample of %s (total: %d)", offsetDuration,
numOffsets)
// The median offset is only updated when there are enough offsets and
// the number of offsets is odd so the middle value is the true median.
// Thus, there is nothing to do when those conditions are not met.
if numOffsets < 5 || numOffsets&0x01 != 1 {
return
}
// At this point the number of offsets in the list is odd, so the
// middle value of the sorted offsets is the median.
median := sortedOffsets[numOffsets/2]
// Set the new offset when the median offset is within the allowed
// offset range.
if math.Abs(float64(median)) < maxAllowedOffsetSecs {
m.offsetSecs = median
} else {
// The median offset of all added time data is larger than the
// maximum allowed offset, so don't use an offset. This
// effectively limits how far the local clock can be skewed.
m.offsetSecs = 0
if !m.invalidTimeChecked {
m.invalidTimeChecked = true
// Find if any time samples have a time that is close
// to the local time.
var remoteHasCloseTime bool
for _, offset := range sortedOffsets {
if math.Abs(float64(offset)) < similarTimeSecs {
remoteHasCloseTime = true
break
}
}
// Warn if none of the time samples are close.
if !remoteHasCloseTime {
log.Warnf("Please check your date and time " +
"are correct! kaspad will not work " +
"properly with an invalid time")
}
}
}
medianDuration := time.Duration(m.offsetSecs) * time.Second
log.Debugf("New time offset: %d", medianDuration)
}
// Offset returns the number of seconds to adjust the local clock based upon the
// median of the time samples added by AddTimeData.
//
// This function is safe for concurrent access and is part of the
// MedianTimeSource interface implementation.
func (m *medianTime) Offset() time.Duration {
m.mtx.Lock()
defer m.mtx.Unlock()
return time.Duration(m.offsetSecs) * time.Second
}
// NewMedianTime returns a new instance of concurrency-safe implementation of
// the MedianTimeSource interface. The returned implementation contains the
// rules necessary for proper time handling in the DAG consensus rules and
// expects the time samples to be added from the timestamp field of the version
// message received from remote peers that successfully connect and negotiate.
func NewMedianTime() MedianTimeSource {
return &medianTime{
knownIDs: make(map[string]struct{}),
offsets: make([]int64, 0, maxMedianTimeEntries),
}
}
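The removed implementation above selects an offset as follows: the collected offsets are sorted, the middle value is taken only when there are at least five samples and their count is odd, and the result is applied only when its magnitude stays under maxAllowedOffsetSecs. A standalone sketch of that selection rule (single-shot, so the "keep the previous offset" cases simply return 0 here):

package main

import (
	"fmt"
	"math"
	"sort"
)

const maxAllowedOffsetSecs = 70 * 60 // same bound as in the removed file

// medianOffset mirrors the median-selection rule from the removed medianTime code.
func medianOffset(offsets []int64) int64 {
	n := len(offsets)
	if n < 5 || n&0x01 != 1 {
		return 0 // too few samples, or an even count: offset is left unchanged
	}
	sorted := append([]int64(nil), offsets...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
	median := sorted[n/2]
	if math.Abs(float64(median)) >= maxAllowedOffsetSecs {
		return 0 // median too far from the local clock: ignore it
	}
	return median
}

func main() {
	fmt.Println(medianOffset([]int64{-13, 57, -4, -23, -12})) // -12, matching the removed test's expectation
}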

View File

@@ -1,102 +0,0 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"strconv"
"testing"
"time"
)
// TestMedianTime tests the medianTime implementation.
func TestMedianTime(t *testing.T) {
tests := []struct {
in []int64
wantOffset int64
useDupID bool
}{
// Not enough samples must result in an offset of 0.
{in: []int64{1}, wantOffset: 0},
{in: []int64{1, 2}, wantOffset: 0},
{in: []int64{1, 2, 3}, wantOffset: 0},
{in: []int64{1, 2, 3, 4}, wantOffset: 0},
// Various number of entries. The expected offset is only
// updated on odd number of elements.
{in: []int64{-13, 57, -4, -23, -12}, wantOffset: -12},
{in: []int64{55, -13, 61, -52, 39, 55}, wantOffset: 39},
{in: []int64{-62, -58, -30, -62, 51, -30, 15}, wantOffset: -30},
{in: []int64{29, -47, 39, 54, 42, 41, 8, -33}, wantOffset: 39},
{in: []int64{37, 54, 9, -21, -56, -36, 5, -11, -39}, wantOffset: -11},
{in: []int64{57, -28, 25, -39, 9, 63, -16, 19, -60, 25}, wantOffset: 9},
{in: []int64{-5, -4, -3, -2, -1}, wantOffset: -3, useDupID: true},
// The offset stops being updated once the max number of entries
// has been reached.
{in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52}, wantOffset: 17},
{in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52, 45}, wantOffset: 17},
{in: []int64{-67, 67, -50, 24, 63, 17, 58, -14, 5, -32, -52, 45, 4}, wantOffset: 17},
// Offsets that are too far away from the local time should
// be ignored.
{in: []int64{-4201, 4202, -4203, 4204, -4205}, wantOffset: 0},
// Exercise the condition where the median offset is greater
// than the max allowed adjustment, but there is at least one
// sample that is close enough to the current time to avoid
// triggering a warning about an invalid local clock.
{in: []int64{4201, 4202, 4203, 4204, -299}, wantOffset: 0},
}
// Modify the max number of allowed median time entries for these tests.
maxMedianTimeEntries = 10
defer func() { maxMedianTimeEntries = 200 }()
for i, test := range tests {
filter := NewMedianTime()
for j, offset := range test.in {
id := strconv.Itoa(j)
now := time.Unix(time.Now().Unix(), 0)
tOffset := now.Add(time.Duration(offset) * time.Second)
filter.AddTimeSample(id, tOffset)
// Ensure the duplicate IDs are ignored.
if test.useDupID {
// Modify the offsets to ensure the final median
// would be different if the duplicate is added.
tOffset = tOffset.Add(time.Duration(offset) *
time.Second)
filter.AddTimeSample(id, tOffset)
}
}
// Since it is possible that the time.Now call in AddTimeSample
// and the time.Now calls here in the tests will be off by one
// second, allow a fudge factor to compensate.
gotOffset := filter.Offset()
wantOffset := time.Duration(test.wantOffset) * time.Second
wantOffset2 := time.Duration(test.wantOffset-1) * time.Second
if gotOffset != wantOffset && gotOffset != wantOffset2 {
t.Errorf("Offset #%d: unexpected offset -- got %v, "+
"want %v or %v", i, gotOffset, wantOffset,
wantOffset2)
continue
}
// Since it is possible that the time.Now call in AdjustedTime
// and the time.Now call here in the tests will be off by one
// second, allow a fudge factor to compensate.
adjustedTime := filter.AdjustedTime()
now := time.Unix(time.Now().Unix(), 0)
wantTime := now.Add(filter.Offset())
wantTime2 := now.Add(filter.Offset() - time.Second)
if !adjustedTime.Equal(wantTime) && !adjustedTime.Equal(wantTime2) {
t.Errorf("AdjustedTime #%d: unexpected result -- got %v, "+
"want %v or %v", i, adjustedTime, wantTime,
wantTime2)
continue
}
}
}

View File

@@ -3,8 +3,10 @@ package blockdag
import (
"bytes"
"encoding/binary"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"time"
)
@@ -12,6 +14,8 @@ import (
// BlockForMining returns a block with the given transactions
// that points to the current DAG tips, that is valid from
// all aspects except proof of work.
//
// This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*wire.MsgBlock, error) {
blockTimestamp := dag.NextBlockTime()
requiredDifficulty := dag.NextRequiredDifficulty(blockTimestamp)
@@ -34,18 +38,17 @@ func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*wire.MsgBlock, er
msgBlock.AddTransaction(tx.MsgTx())
}
utxoWithTransactions, err := dag.UTXOSet().WithTransactions(msgBlock.Transactions, UnacceptedBlueScore, false)
multiset, err := dag.NextBlockMultiset(transactions)
if err != nil {
return nil, err
}
utxoCommitment := utxoWithTransactions.Multiset().Hash()
msgBlock.Header = wire.BlockHeader{
Version: nextBlockVersion,
ParentHashes: dag.TipHashes(),
HashMerkleRoot: hashMerkleTree.Root(),
AcceptedIDMerkleRoot: acceptedIDMerkleRoot,
UTXOCommitment: utxoCommitment,
UTXOCommitment: (*daghash.Hash)(multiset.Finalize()),
Timestamp: blockTimestamp,
Bits: requiredDifficulty,
}
@@ -53,6 +56,19 @@ func (dag *BlockDAG) BlockForMining(transactions []*util.Tx) (*wire.MsgBlock, er
return &msgBlock, nil
}
// NextBlockMultiset returns the multiset of an assumed next block
// built on top of the current tips, with the given transactions.
//
// This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) NextBlockMultiset(transactions []*util.Tx) (*secp256k1.MultiSet, error) {
pastUTXO, selectedParentUTXO, txsAcceptanceData, err := dag.pastUTXO(&dag.virtual.blockNode)
if err != nil {
return nil, err
}
return dag.virtual.blockNode.calcMultiset(dag, transactions, txsAcceptanceData, selectedParentUTXO, pastUTXO)
}
// CoinbasePayloadExtraData returns coinbase payload extra data parameter
// which is built from extra nonce and coinbase flags.
func CoinbasePayloadExtraData(extraNonce uint64, coinbaseFlags string) ([]byte, error) {
@@ -101,7 +117,7 @@ func (dag *BlockDAG) NextBlockTime() time.Time {
// timestamp is truncated to a second boundary before comparison since a
// block timestamp does not support a precision greater than one
// second.
newTimestamp := dag.AdjustedTime()
newTimestamp := dag.Now()
minTimestamp := dag.NextBlockMinimumTime()
if newTimestamp.Before(minTimestamp) {
newTimestamp = minTimestamp

blockdag/multisetio.go (new file, 29 lines)
View File

@@ -0,0 +1,29 @@
package blockdag
import (
"encoding/binary"
"github.com/kaspanet/go-secp256k1"
"io"
)
const multisetPointSize = 32
// serializeMultiset serializes an ECMH multiset.
func serializeMultiset(w io.Writer, ms *secp256k1.MultiSet) error {
serialized := ms.Serialize()
err := binary.Write(w, byteOrder, serialized)
if err != nil {
return err
}
return nil
}
// deserializeMultiset deserializes an ECMH multiset.
func deserializeMultiset(r io.Reader) (*secp256k1.MultiSet, error) {
serialized := &secp256k1.SerializedMultiSet{}
err := binary.Read(r, byteOrder, serialized[:])
if err != nil {
return nil, err
}
return secp256k1.DeserializeMultiSet(serialized)
}
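A quick round trip through the two helpers above. This is a sketch written as if it lived in package blockdag; it assumes go-secp256k1 exposes a NewMultiset constructor for an empty multiset (only serializeMultiset and deserializeMultiset are taken from the file above):

package blockdag

import (
	"bytes"
	"testing"

	"github.com/kaspanet/go-secp256k1"
)

// TestMultisetSerializationRoundTrip checks that a multiset survives a
// serialize/deserialize round trip unchanged.
func TestMultisetSerializationRoundTrip(t *testing.T) {
	ms := secp256k1.NewMultiset() // assumed constructor

	w := &bytes.Buffer{}
	if err := serializeMultiset(w, ms); err != nil {
		t.Fatalf("serializeMultiset: %s", err)
	}

	deserialized, err := deserializeMultiset(bytes.NewReader(w.Bytes()))
	if err != nil {
		t.Fatalf("deserializeMultiset: %s", err)
	}
	if !bytes.Equal(deserialized.Serialize()[:], ms.Serialize()[:]) {
		t.Fatalf("multiset changed across a serialize/deserialize round trip")
	}
}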

blockdag/multisetstore.go (new file, 131 lines)
View File

@@ -0,0 +1,131 @@
package blockdag
import (
"bytes"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/locks"
"github.com/pkg/errors"
)
type multisetStore struct {
dag *BlockDAG
new map[daghash.Hash]struct{}
loaded map[daghash.Hash]secp256k1.MultiSet
mtx *locks.PriorityMutex
}
func newMultisetStore(dag *BlockDAG) *multisetStore {
return &multisetStore{
dag: dag,
new: make(map[daghash.Hash]struct{}),
loaded: make(map[daghash.Hash]secp256k1.MultiSet),
}
}
func (store *multisetStore) setMultiset(node *blockNode, ms *secp256k1.MultiSet) {
store.loaded[*node.hash] = *ms
store.addToNewBlocks(node.hash)
}
func (store *multisetStore) addToNewBlocks(blockHash *daghash.Hash) {
store.new[*blockHash] = struct{}{}
}
func multisetNotFoundError(blockHash *daghash.Hash) error {
return errors.Errorf("Couldn't find multiset data for block %s", blockHash)
}
func (store *multisetStore) multisetByBlockNode(node *blockNode) (*secp256k1.MultiSet, error) {
ms, exists := store.multisetByBlockHash(node.hash)
if !exists {
return nil, multisetNotFoundError(node.hash)
}
return ms, nil
}
func (store *multisetStore) multisetByBlockHash(hash *daghash.Hash) (*secp256k1.MultiSet, bool) {
ms, ok := store.loaded[*hash]
return &ms, ok
}
// flushToDB writes all new multiset data to the database.
func (store *multisetStore) flushToDB(dbContext *dbaccess.TxContext) error {
if len(store.new) == 0 {
return nil
}
w := &bytes.Buffer{}
for hash := range store.new {
hash := hash // Copy hash to a new variable to avoid passing the same pointer
w.Reset()
ms, exists := store.loaded[hash]
if !exists {
return multisetNotFoundError(&hash)
}
err := serializeMultiset(w, &ms)
if err != nil {
return err
}
err = store.storeMultiset(dbContext, &hash, w.Bytes())
if err != nil {
return err
}
}
return nil
}
func (store *multisetStore) clearNewEntries() {
store.new = make(map[daghash.Hash]struct{})
}
func (store *multisetStore) init(dbContext dbaccess.Context) error {
cursor, err := dbaccess.MultisetCursor(dbContext)
if err != nil {
return err
}
defer cursor.Close()
for ok := cursor.First(); ok; ok = cursor.Next() {
key, err := cursor.Key()
if err != nil {
return err
}
hash, err := daghash.NewHash(key)
if err != nil {
return err
}
serializedMS, err := cursor.Value()
if err != nil {
return err
}
ms, err := deserializeMultiset(bytes.NewReader(serializedMS))
if err != nil {
return err
}
store.loaded[*hash] = *ms
}
return nil
}
// storeMultiset stores the multiset data to the database.
func (store *multisetStore) storeMultiset(dbContext dbaccess.Context, blockHash *daghash.Hash, serializedMS []byte) error {
exists, err := dbaccess.HasMultiset(dbContext, blockHash)
if err != nil {
return err
}
if exists {
return errors.Errorf("Can't override an existing multiset database entry for block %s", blockHash)
}
return dbaccess.StoreMultiset(dbContext, blockHash, serializedMS)
}

View File

@@ -19,7 +19,7 @@ func TestNotifications(t *testing.T) {
}
// Create a new database and dag instance to run tests against.
dag, teardownFunc, err := DAGSetup("notifications", Config{
dag, teardownFunc, err := DAGSetup("notifications", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {

View File

@@ -264,7 +264,7 @@ func (dag *BlockDAG) maxDelayOfParents(parentHashes []*daghash.Hash) (delay time
for _, parentHash := range parentHashes {
if delayedParent, exists := dag.delayedBlocks[*parentHash]; exists {
isDelayed = true
parentDelay := delayedParent.processTime.Sub(dag.AdjustedTime())
parentDelay := delayedParent.processTime.Sub(dag.Now())
if parentDelay > delay {
delay = parentDelay
}

View File

@@ -11,7 +11,7 @@ import (
)
func TestProcessOrphans(t *testing.T) {
dag, teardownFunc, err := DAGSetup("TestProcessOrphans", Config{
dag, teardownFunc, err := DAGSetup("TestProcessOrphans", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
@@ -72,45 +72,35 @@ func TestProcessOrphans(t *testing.T) {
}
}
type fakeTimeSource struct {
time time.Time
}
func (fts *fakeTimeSource) AdjustedTime() time.Time {
return fts.time
}
func (fts *fakeTimeSource) AddTimeSample(_ string, _ time.Time) {
}
func (fts *fakeTimeSource) Offset() time.Duration {
return 0
}
func TestProcessDelayedBlocks(t *testing.T) {
// We use dag1 so we can build the test blocks with the proper
// block header (UTXO commitment, acceptedIDMerkleroot, etc), and
// then we use dag2 for the actual test.
dag1, teardownFunc, err := DAGSetup("TestProcessDelayedBlocks1", Config{
dag1, teardownFunc, err := DAGSetup("TestProcessDelayedBlocks1", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
t.Fatalf("Failed to setup DAG instance: %v", err)
}
defer teardownFunc()
isDAG1Open := true
defer func() {
if isDAG1Open {
teardownFunc()
}
}()
initialTime := dag1.dagParams.GenesisBlock.Header.Timestamp
// Here we use a fake time source that returns a timestamp
// one hour into the future to make delayedBlock artificially
// valid.
dag1.timeSource = &fakeTimeSource{initialTime.Add(time.Hour)}
dag1.timeSource = newFakeTimeSource(initialTime.Add(time.Hour))
delayedBlock, err := PrepareBlockForTest(dag1, []*daghash.Hash{dag1.dagParams.GenesisBlock.BlockHash()}, nil)
if err != nil {
t.Fatalf("error in PrepareBlockForTest: %s", err)
}
blockDelay := time.Duration(dag1.dagParams.TimestampDeviationTolerance+5) * time.Second
blockDelay := time.Duration(dag1.dagParams.TimestampDeviationTolerance*uint64(dag1.targetTimePerBlock)+5) * time.Second
delayedBlock.Header.Timestamp = initialTime.Add(blockDelay)
isOrphan, isDelayed, err := dag1.ProcessBlock(util.NewBlock(delayedBlock), BFNoPoWCheck)
@@ -131,18 +121,21 @@ func TestProcessDelayedBlocks(t *testing.T) {
t.Fatalf("error in PrepareBlockForTest: %s", err)
}
teardownFunc()
isDAG1Open = false
// Here the actual test begins. We add a delayed block and
// its child and check that they are not added to the DAG,
// and check that they're added only if we add a new block
// after the delayed block timestamp is valid.
dag2, teardownFunc2, err := DAGSetup("TestProcessDelayedBlocks2", Config{
dag2, teardownFunc2, err := DAGSetup("TestProcessDelayedBlocks2", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
t.Fatalf("Failed to setup DAG instance: %v", err)
}
defer teardownFunc2()
dag2.timeSource = &fakeTimeSource{initialTime}
dag2.timeSource = newFakeTimeSource(initialTime)
isOrphan, isDelayed, err = dag2.ProcessBlock(util.NewBlock(delayedBlock), BFNoPoWCheck)
if err != nil {
@@ -209,10 +202,13 @@ func TestProcessDelayedBlocks(t *testing.T) {
}
// We advance the clock to the point where delayedBlock timestamp is valid.
secondsUntilDelayedBlockIsValid := delayedBlock.Header.Timestamp.Unix() - int64(dag2.TimestampDeviationTolerance) - dag2.AdjustedTime().Unix() + 1
dag2.timeSource = &fakeTimeSource{initialTime.Add(time.Duration(secondsUntilDelayedBlockIsValid) * time.Second)}
deviationTolerance := int64(dag2.TimestampDeviationTolerance) * dag2.targetTimePerBlock
secondsUntilDelayedBlockIsValid := delayedBlock.Header.Timestamp.Unix() - deviationTolerance - dag2.Now().Unix() + 1
dag2.timeSource = newFakeTimeSource(initialTime.Add(time.Duration(secondsUntilDelayedBlockIsValid) * time.Second))
blockAfterDelay, err := PrepareBlockForTest(dag2, []*daghash.Hash{dag2.dagParams.GenesisBlock.BlockHash()}, nil)
blockAfterDelay, err := PrepareBlockForTest(dag2,
[]*daghash.Hash{dag2.dagParams.GenesisBlock.BlockHash()},
nil)
if err != nil {
t.Fatalf("error in PrepareBlockForTest: %s", err)
}
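The test above derives how long the clock has to advance before a delayed block becomes valid; the deviation tolerance is now expressed in blocks, so it is multiplied by the target time per block first. A standalone sketch of the same arithmetic with illustrative values (none of the numbers come from the changeset):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Illustrative values; in the test they come from dag2 and delayedBlock.
	now := time.Unix(1593000000, 0)
	targetTimePerBlockSecs := int64(1)        // dag.targetTimePerBlock, in seconds (assumed)
	timestampDeviationTolerance := int64(132) // TimestampDeviationTolerance, in blocks (assumed)
	delayedBlockTimestamp := now.Add(137 * time.Second)

	// Same formula as the test: a block stops being delayed once
	// now >= blockTimestamp - deviationTolerance.
	deviationTolerance := timestampDeviationTolerance * targetTimePerBlockSecs
	secondsUntilValid := delayedBlockTimestamp.Unix() - deviationTolerance - now.Unix() + 1

	fmt.Println(secondsUntilValid) // 6 seconds in this example
}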

View File

@@ -311,7 +311,7 @@ func (rtn *reachabilityTreeNode) countSubtrees(subTreeSizeMap map[*reachabilityT
if len(current.children) == 0 {
// We reached a leaf
subTreeSizeMap[current] = 1
} else if calculatedChildrenCount[current] <= uint64(len(current.children)) {
} else if _, ok := subTreeSizeMap[current]; !ok {
// We haven't yet calculated the subtree size of
// the current node. Add all its children to the
// queue

View File

@@ -609,6 +609,46 @@ func TestReindexIntervalErrors(t *testing.T) {
}
}
func BenchmarkReindexInterval(b *testing.B) {
for i := 0; i < b.N; i++ {
b.StopTimer()
root := newReachabilityTreeNode(&blockNode{})
const subTreeSize = 70000
// We set the interval of the root to subTreeSize*2 because
// its first child gets half of the interval, so a reindex
// from the root should happen after adding subTreeSize
// nodes.
root.setInterval(newReachabilityInterval(0, subTreeSize*2))
currentTreeNode := root
for i := 0; i < subTreeSize; i++ {
childTreeNode := newReachabilityTreeNode(&blockNode{})
_, err := currentTreeNode.addChild(childTreeNode)
if err != nil {
b.Fatalf("addChild: %s", err)
}
currentTreeNode = childTreeNode
}
remainingIntervalBefore := *root.remainingInterval
// After we added subTreeSize nodes, adding the next
// node should lead to a reindex from root.
fullReindexTriggeringNode := newReachabilityTreeNode(&blockNode{})
b.StartTimer()
_, err := currentTreeNode.addChild(fullReindexTriggeringNode)
b.StopTimer()
if err != nil {
b.Fatalf("addChild: %s", err)
}
if *root.remainingInterval == remainingIntervalBefore {
b.Fatal("Expected a reindex from root, but it didn't happen")
}
}
}
func TestFutureCoveringBlockSetString(t *testing.T) {
treeNodeA := newReachabilityTreeNode(&blockNode{})
treeNodeA.setInterval(newReachabilityInterval(123, 456))

View File

@@ -3,6 +3,7 @@ package blockdag
import (
"bytes"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
@@ -82,7 +83,7 @@ func (store *reachabilityStore) reachabilityDataByHash(hash *daghash.Hash) (*rea
}
// flushToDB writes all dirty reachability data to the database.
func (store *reachabilityStore) flushToDB(dbTx database.Tx) error {
func (store *reachabilityStore) flushToDB(dbContext *dbaccess.TxContext) error {
if len(store.dirty) == 0 {
return nil
}
@@ -90,7 +91,7 @@ func (store *reachabilityStore) flushToDB(dbTx database.Tx) error {
for hash := range store.dirty {
hash := hash // Copy hash to a new variable to avoid passing the same pointer
reachabilityData := store.loaded[hash]
err := store.dbStoreReachabilityData(dbTx, &hash, reachabilityData)
err := store.storeReachabilityData(dbContext, &hash, reachabilityData)
if err != nil {
return err
}
@@ -102,22 +103,25 @@ func (store *reachabilityStore) clearDirtyEntries() {
store.dirty = make(map[daghash.Hash]struct{})
}
func (store *reachabilityStore) init(dbTx database.Tx) error {
bucket := dbTx.Metadata().Bucket(reachabilityDataBucketName)
func (store *reachabilityStore) init(dbContext dbaccess.Context) error {
// TODO: (Stas) This is a quick and dirty hack.
// We iterate over the entire bucket twice:
// * First, populate the loaded set with all entries
// * Second, connect the parent/children pointers in each entry
// with other nodes, which are now guaranteed to exist
cursor := bucket.Cursor()
cursor, err := dbaccess.ReachabilityDataCursor(dbContext)
if err != nil {
return err
}
defer cursor.Close()
for ok := cursor.First(); ok; ok = cursor.Next() {
err := store.initReachabilityData(cursor)
if err != nil {
return err
}
}
cursor = bucket.Cursor()
for ok := cursor.First(); ok; ok = cursor.Next() {
err := store.loadReachabilityDataFromCursor(cursor)
if err != nil {
@@ -128,7 +132,12 @@ func (store *reachabilityStore) init(dbTx database.Tx) error {
}
func (store *reachabilityStore) initReachabilityData(cursor database.Cursor) error {
hash, err := daghash.NewHash(cursor.Key())
key, err := cursor.Key()
if err != nil {
return err
}
hash, err := daghash.NewHash(key)
if err != nil {
return err
}
@@ -141,7 +150,12 @@ func (store *reachabilityStore) initReachabilityData(cursor database.Cursor) err
}
func (store *reachabilityStore) loadReachabilityDataFromCursor(cursor database.Cursor) error {
hash, err := daghash.NewHash(cursor.Key())
key, err := cursor.Key()
if err != nil {
return err
}
hash, err := daghash.NewHash(key)
if err != nil {
return err
}
@@ -151,7 +165,12 @@ func (store *reachabilityStore) loadReachabilityDataFromCursor(cursor database.C
return errors.Errorf("cannot find reachability data for block hash: %s", hash)
}
err = store.deserializeReachabilityData(cursor.Value(), reachabilityData)
serializedReachabilityData, err := cursor.Value()
if err != nil {
return err
}
err = store.deserializeReachabilityData(serializedReachabilityData, reachabilityData)
if err != nil {
return err
}
@@ -162,15 +181,15 @@ func (store *reachabilityStore) loadReachabilityDataFromCursor(cursor database.C
return nil
}
// dbStoreReachabilityData stores the reachability data to the database.
// storeReachabilityData stores the reachability data to the database.
// This overwrites the current entry if there exists one.
func (store *reachabilityStore) dbStoreReachabilityData(dbTx database.Tx, hash *daghash.Hash, reachabilityData *reachabilityData) error {
func (store *reachabilityStore) storeReachabilityData(dbContext dbaccess.Context, hash *daghash.Hash, reachabilityData *reachabilityData) error {
serializedReachabilyData, err := store.serializeReachabilityData(reachabilityData)
if err != nil {
return err
}
return dbTx.Metadata().Bucket(reachabilityDataBucketName).Put(hash[:], serializedReachabilyData)
return dbaccess.StoreReachabilityData(dbContext, hash, serializedReachabilyData)
}
func (store *reachabilityStore) serializeReachabilityData(reachabilityData *reachabilityData) ([]byte, error) {
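flushToDB now expects an open *dbaccess.TxContext instead of a database.Tx, so the caller owns the transaction lifecycle. The calling pattern, as used by the updated utxodiffstore test later in this change set, looks roughly like this (sketch; error handling abbreviated):

dbTx, err := dbaccess.NewTx()
if err != nil {
	return err
}
// Roll the transaction back if we bail out before Commit.
defer dbTx.RollbackUnlessClosed()

if err := store.flushToDB(dbTx); err != nil {
	return err
}
return dbTx.Commit()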

View File

@@ -4,31 +4,20 @@ import (
"bytes"
"encoding/binary"
"fmt"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/wire"
)
// SubnetworkStore stores the subnetworks data
type SubnetworkStore struct {
db database.DB
}
func newSubnetworkStore(db database.DB) *SubnetworkStore {
return &SubnetworkStore{
db: db,
}
}
// registerSubnetworks scans a list of transactions, singles out
// subnetwork registry transactions, validates them, and registers a new
// subnetwork based on it.
// This function returns an error if one or more transactions are invalid
func registerSubnetworks(dbTx database.Tx, txs []*util.Tx) error {
func registerSubnetworks(dbContext dbaccess.Context, txs []*util.Tx) error {
subnetworkRegistryTxs := make([]*wire.MsgTx, 0)
for _, tx := range txs {
msgTx := tx.MsgTx()
@@ -50,13 +39,13 @@ func registerSubnetworks(dbTx database.Tx, txs []*util.Tx) error {
if err != nil {
return err
}
sNet, err := dbGetSubnetwork(dbTx, subnetworkID)
exists, err := dbaccess.HasSubnetwork(dbContext, subnetworkID)
if err != nil {
return err
}
if sNet == nil {
if !exists {
createdSubnetwork := newSubnetwork(registryTx)
err := dbRegisterSubnetwork(dbTx, subnetworkID, createdSubnetwork)
err := registerSubnetwork(dbContext, subnetworkID, createdSubnetwork)
if err != nil {
return errors.Errorf("failed registering subnetwork"+
"for tx '%s': %s", registryTx.TxHash(), err)
@@ -85,66 +74,39 @@ func TxToSubnetworkID(tx *wire.MsgTx) (*subnetworkid.SubnetworkID, error) {
return subnetworkid.New(util.Hash160(txHash[:]))
}
// subnetwork returns a registered subnetwork. If the subnetwork does not exist
// this method returns an error.
func (s *SubnetworkStore) subnetwork(subnetworkID *subnetworkid.SubnetworkID) (*subnetwork, error) {
var sNet *subnetwork
var err error
dbErr := s.db.View(func(dbTx database.Tx) error {
sNet, err = dbGetSubnetwork(dbTx, subnetworkID)
return nil
})
if dbErr != nil {
return nil, errors.Errorf("could not retrieve subnetwork '%d': %s", subnetworkID, dbErr)
}
// fetchSubnetwork returns a registered subnetwork.
func fetchSubnetwork(subnetworkID *subnetworkid.SubnetworkID) (*subnetwork, error) {
serializedSubnetwork, err := dbaccess.FetchSubnetworkData(dbaccess.NoTx(), subnetworkID)
if err != nil {
return nil, errors.Errorf("could not retrieve subnetwork '%d': %s", subnetworkID, err)
return nil, err
}
return sNet, nil
subnet, err := deserializeSubnetwork(serializedSubnetwork)
if err != nil {
return nil, err
}
return subnet, nil
}
// GasLimit returns the gas limit of a registered subnetwork. If the subnetwork does not
// exist this method returns an error.
func (s *SubnetworkStore) GasLimit(subnetworkID *subnetworkid.SubnetworkID) (uint64, error) {
sNet, err := s.subnetwork(subnetworkID)
func GasLimit(subnetworkID *subnetworkid.SubnetworkID) (uint64, error) {
sNet, err := fetchSubnetwork(subnetworkID)
if err != nil {
return 0, err
}
if sNet == nil {
return 0, errors.Errorf("subnetwork '%s' not found", subnetworkID)
}
return sNet.gasLimit, nil
}
// dbRegisterSubnetwork stores mappings from ID of the subnetwork to the subnetwork data.
func dbRegisterSubnetwork(dbTx database.Tx, subnetworkID *subnetworkid.SubnetworkID, network *subnetwork) error {
// Serialize the subnetwork
func registerSubnetwork(dbContext dbaccess.Context, subnetworkID *subnetworkid.SubnetworkID, network *subnetwork) error {
serializedSubnetwork, err := serializeSubnetwork(network)
if err != nil {
return errors.Errorf("failed to serialize sub-netowrk '%s': %s", subnetworkID, err)
}
// Store the subnetwork
subnetworksBucket := dbTx.Metadata().Bucket(subnetworksBucketName)
err = subnetworksBucket.Put(subnetworkID[:], serializedSubnetwork)
if err != nil {
return errors.Errorf("failed to write sub-netowrk '%s': %s", subnetworkID, err)
}
return nil
}
// dbGetSubnetwork returns the subnetwork associated with subnetworkID or nil if the subnetwork was not found.
func dbGetSubnetwork(dbTx database.Tx, subnetworkID *subnetworkid.SubnetworkID) (*subnetwork, error) {
bucket := dbTx.Metadata().Bucket(subnetworksBucketName)
serializedSubnetwork := bucket.Get(subnetworkID[:])
if serializedSubnetwork == nil {
return nil, nil
}
return deserializeSubnetwork(serializedSubnetwork)
return dbaccess.StoreSubnetwork(dbContext, subnetworkID, serializedSubnetwork)
}
type subnetwork struct {
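With SubnetworkStore gone, gas-limit lookups become a package-level call backed by dbaccess. A sketch of the caller side, where subnetworkID stands in for a previously registered *subnetworkid.SubnetworkID:

gasLimit, err := blockdag.GasLimit(subnetworkID)
if err != nil {
	// Either the subnetwork was never registered or the database lookup failed.
	return err
}
fmt.Printf("gas limit for subnetwork %s: %d\n", subnetworkID, gasLimit)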

View File

@@ -5,9 +5,11 @@ package blockdag
import (
"compress/bzip2"
"encoding/binary"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
@@ -16,34 +18,11 @@ import (
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/database"
_ "github.com/kaspanet/kaspad/database/ffldb" // blank import ffldb so that its init() function runs before tests
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
const (
// testDbType is the database backend type to use for the tests.
testDbType = "ffldb"
// blockDataNet is the expected network in the test block data.
blockDataNet = wire.Mainnet
)
// isSupportedDbType returns whether or not the passed database type is
// currently supported.
func isSupportedDbType(dbType string) bool {
supportedDrivers := database.SupportedDrivers()
for _, driver := range supportedDrivers {
if dbType == driver {
return true
}
}
return false
}
// FileExists returns whether or not the named file or directory exists.
func FileExists(name string) bool {
if _, err := os.Stat(name); err != nil {
@@ -57,11 +36,10 @@ func FileExists(name string) bool {
// DAGSetup is used to create a new db and DAG instance with the genesis
// block already inserted. In addition to the new DAG instance, it returns
// a teardown function the caller should invoke when done testing to clean up.
func DAGSetup(dbName string, config Config) (*BlockDAG, func(), error) {
if !isSupportedDbType(testDbType) {
return nil, nil, errors.Errorf("unsupported db type %s", testDbType)
}
// The openDb parameter instructs DAGSetup whether or not to also open the
// database. Setting it to false is useful in tests that handle database
// opening/closing by themselves.
func DAGSetup(dbName string, openDb bool, config Config) (*BlockDAG, func(), error) {
var teardown func()
// To make sure that the teardown function is not called before all goroutines have finished running -
@@ -76,13 +54,16 @@ func DAGSetup(dbName string, config Config) (*BlockDAG, func(), error) {
})
}
if config.DB == nil {
tmpDir := os.TempDir()
if openDb {
var err error
tmpDir, err := ioutil.TempDir("", "DAGSetup")
if err != nil {
return nil, nil, errors.Errorf("error creating temp dir: %s", err)
}
dbPath := filepath.Join(tmpDir, dbName)
_ = os.RemoveAll(dbPath)
var err error
config.DB, err = database.Create(testDbType, dbPath, blockDataNet)
err = dbaccess.Open(dbPath)
if err != nil {
return nil, nil, errors.Errorf("error creating db: %s", err)
}
@@ -92,18 +73,17 @@ func DAGSetup(dbName string, config Config) (*BlockDAG, func(), error) {
teardown = func() {
spawnWaitGroup.Wait()
spawn = realSpawn
config.DB.Close()
dbaccess.Close()
os.RemoveAll(dbPath)
}
} else {
teardown = func() {
spawnWaitGroup.Wait()
spawn = realSpawn
config.DB.Close()
}
}
config.TimeSource = NewMedianTime()
config.TimeSource = NewTimeSource()
config.SigCache = txscript.NewSigCache(1000)
// Create the DAG instance.
@@ -173,7 +153,7 @@ func GetVirtualFromParentsForTest(dag *BlockDAG, parentHashes []*daghash.Hash) (
}
virtual := newVirtualBlock(dag, parents)
pastUTXO, _, err := dag.pastUTXO(&virtual.blockNode)
pastUTXO, _, _, err := dag.pastUTXO(&virtual.blockNode)
if err != nil {
return nil, err
}
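DAGSetup callers now pass the openDb flag explicitly. A typical call, mirroring the updated tests below (the test name is a placeholder):

dag, teardownFunc, err := DAGSetup("TestExample", true, Config{
	DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
	t.Fatalf("Failed to setup DAG instance: %v", err)
}
defer teardownFunc()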

View File

@@ -1,14 +0,0 @@
package blockdag
import (
"testing"
)
func TestIsSupportedDbType(t *testing.T) {
if !isSupportedDbType("ffldb") {
t.Errorf("ffldb should be a supported DB driver")
}
if isSupportedDbType("madeUpDb") {
t.Errorf("madeUpDb should not be a supported DB driver")
}
}

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

blockdag/timesource.go Normal file (25 lines)
View File

@@ -0,0 +1,25 @@
package blockdag
import (
"time"
)
// TimeSource is the interface to access time.
type TimeSource interface {
// Now returns the current time.
Now() time.Time
}
// timeSource provides an implementation of the TimeSource interface
// that simply returns the current local time.
type timeSource struct{}
// Now returns the current local time, with one second precision.
func (m *timeSource) Now() time.Time {
return time.Unix(time.Now().Unix(), 0)
}
// NewTimeSource returns a new instance of a TimeSource
func NewTimeSource() TimeSource {
return &timeSource{}
}
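NewMedianTime is gone from this path; the DAG config now takes any TimeSource, so production code wires in the local clock while tests can substitute a fixed one. A short sketch of the wiring, matching what DAGSetup does in this change set:

// Production wiring: the plain local-clock source.
config.TimeSource = NewTimeSource()
now := config.TimeSource.Now() // local time, truncated to whole seconds
_ = now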

View File

@@ -2,31 +2,26 @@ package blockdag
import (
"bytes"
"github.com/golang/groupcache/lru"
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/wire"
)
const ecmhCacheSize = 4_000_000
var (
utxoToECMHCache = lru.New(ecmhCacheSize)
)
func utxoMultiset(entry *UTXOEntry, outpoint *wire.Outpoint) (*ecc.Multiset, error) {
func addUTXOToMultiset(ms *secp256k1.MultiSet, entry *UTXOEntry, outpoint *wire.Outpoint) (*secp256k1.MultiSet, error) {
w := &bytes.Buffer{}
err := serializeUTXO(w, entry, outpoint)
if err != nil {
return nil, err
}
serializedUTXO := w.Bytes()
utxoHash := daghash.DoubleHashH(serializedUTXO)
if cachedMSPoint, ok := utxoToECMHCache.Get(utxoHash); ok {
return cachedMSPoint.(*ecc.Multiset), nil
}
msPoint := ecc.NewMultiset(ecc.S256()).Add(serializedUTXO)
utxoToECMHCache.Add(utxoHash, msPoint)
return msPoint, nil
ms.Add(w.Bytes())
return ms, nil
}
func removeUTXOFromMultiset(ms *secp256k1.MultiSet, entry *UTXOEntry, outpoint *wire.Outpoint) (*secp256k1.MultiSet, error) {
w := &bytes.Buffer{}
err := serializeUTXO(w, entry, outpoint)
if err != nil {
return nil, err
}
ms.Remove(w.Bytes())
return ms, nil
}
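addUTXOToMultiset and removeUTXOFromMultiset now mutate a go-secp256k1 MultiSet directly, with the ECMH cache removed. Building a multiset over a whole collection follows the same pattern newFullUTXOSetFromUTXOCollection uses later in this diff (sketch):

ms := secp256k1.NewMultiset()
for outpoint, entry := range collection {
	outpoint := outpoint // take the address of a copy, not of the loop variable
	var err error
	ms, err = addUTXOToMultiset(ms, entry, &outpoint)
	if err != nil {
		return err
	}
}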

View File

@@ -2,14 +2,11 @@ package blockdag
import (
"bytes"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/locks"
"github.com/pkg/errors"
)
var multisetPointSize = 32
type blockUTXODiffData struct {
diff *UTXODiff
diffChild *blockNode
@@ -35,12 +32,11 @@ func (diffStore *utxoDiffStore) setBlockDiff(node *blockNode, diff *UTXODiff) er
diffStore.mtx.HighPriorityWriteLock()
defer diffStore.mtx.HighPriorityWriteUnlock()
// load the diff data from DB to diffStore.loaded
_, exists, err := diffStore.diffDataByHash(node.hash)
if err != nil {
return err
}
if !exists {
_, err := diffStore.diffDataByHash(node.hash)
if dbaccess.IsNotFoundError(err) {
diffStore.loaded[*node.hash] = &blockUTXODiffData{}
} else if err != nil {
return err
}
diffStore.loaded[*node.hash].diff = diff
@@ -52,22 +48,19 @@ func (diffStore *utxoDiffStore) setBlockDiffChild(node *blockNode, diffChild *bl
diffStore.mtx.HighPriorityWriteLock()
defer diffStore.mtx.HighPriorityWriteUnlock()
// load the diff data from DB to diffStore.loaded
_, exists, err := diffStore.diffDataByHash(node.hash)
_, err := diffStore.diffDataByHash(node.hash)
if err != nil {
return err
}
if !exists {
return diffNotFoundError(node)
}
diffStore.loaded[*node.hash].diffChild = diffChild
diffStore.setBlockAsDirty(node.hash)
return nil
}
func (diffStore *utxoDiffStore) removeBlocksDiffData(dbTx database.Tx, blockHashes []*daghash.Hash) error {
func (diffStore *utxoDiffStore) removeBlocksDiffData(dbContext dbaccess.Context, blockHashes []*daghash.Hash) error {
for _, hash := range blockHashes {
err := diffStore.removeBlockDiffData(dbTx, hash)
err := diffStore.removeBlockDiffData(dbContext, hash)
if err != nil {
return err
}
@@ -75,11 +68,11 @@ func (diffStore *utxoDiffStore) removeBlocksDiffData(dbTx database.Tx, blockHash
return nil
}
func (diffStore *utxoDiffStore) removeBlockDiffData(dbTx database.Tx, blockHash *daghash.Hash) error {
func (diffStore *utxoDiffStore) removeBlockDiffData(dbContext dbaccess.Context, blockHash *daghash.Hash) error {
diffStore.mtx.LowPriorityWriteLock()
defer diffStore.mtx.LowPriorityWriteUnlock()
delete(diffStore.loaded, *blockHash)
err := dbRemoveDiffData(dbTx, blockHash)
err := dbaccess.RemoveDiffData(dbContext, blockHash)
if err != nil {
return err
}
@@ -90,72 +83,49 @@ func (diffStore *utxoDiffStore) setBlockAsDirty(blockHash *daghash.Hash) {
diffStore.dirty[*blockHash] = struct{}{}
}
func (diffStore *utxoDiffStore) diffDataByHash(hash *daghash.Hash) (*blockUTXODiffData, bool, error) {
func (diffStore *utxoDiffStore) diffDataByHash(hash *daghash.Hash) (*blockUTXODiffData, error) {
if diffData, ok := diffStore.loaded[*hash]; ok {
return diffData, true, nil
return diffData, nil
}
diffData, err := diffStore.diffDataFromDB(hash)
if err != nil {
return nil, false, err
return nil, err
}
exists := diffData != nil
if exists {
diffStore.loaded[*hash] = diffData
}
return diffData, exists, nil
}
func diffNotFoundError(node *blockNode) error {
return errors.Errorf("Couldn't find diff data for block %s", node.hash)
diffStore.loaded[*hash] = diffData
return diffData, nil
}
func (diffStore *utxoDiffStore) diffByNode(node *blockNode) (*UTXODiff, error) {
diffStore.mtx.HighPriorityReadLock()
defer diffStore.mtx.HighPriorityReadUnlock()
diffData, exists, err := diffStore.diffDataByHash(node.hash)
diffData, err := diffStore.diffDataByHash(node.hash)
if err != nil {
return nil, err
}
if !exists {
return nil, diffNotFoundError(node)
}
return diffData.diff, nil
}
func (diffStore *utxoDiffStore) diffChildByNode(node *blockNode) (*blockNode, error) {
diffStore.mtx.HighPriorityReadLock()
defer diffStore.mtx.HighPriorityReadUnlock()
diffData, exists, err := diffStore.diffDataByHash(node.hash)
diffData, err := diffStore.diffDataByHash(node.hash)
if err != nil {
return nil, err
}
if !exists {
return nil, diffNotFoundError(node)
}
return diffData.diffChild, nil
}
func (diffStore *utxoDiffStore) diffDataFromDB(hash *daghash.Hash) (*blockUTXODiffData, error) {
var diffData *blockUTXODiffData
err := diffStore.dag.db.View(func(dbTx database.Tx) error {
bucket := dbTx.Metadata().Bucket(utxoDiffsBucketName)
serializedBlockDiffData := bucket.Get(hash[:])
if serializedBlockDiffData != nil {
var err error
diffData, err = diffStore.deserializeBlockUTXODiffData(serializedBlockDiffData)
return err
}
return nil
})
serializedBlockDiffData, err := dbaccess.FetchUTXODiffData(dbaccess.NoTx(), hash)
if err != nil {
return nil, err
}
return diffData, nil
return diffStore.deserializeBlockUTXODiffData(serializedBlockDiffData)
}
// flushToDB writes all dirty diff data to the database. If all writes
// succeed, this clears the dirty set.
func (diffStore *utxoDiffStore) flushToDB(dbTx database.Tx) error {
// flushToDB writes all dirty diff data to the database.
func (diffStore *utxoDiffStore) flushToDB(dbContext *dbaccess.TxContext) error {
diffStore.mtx.HighPriorityWriteLock()
defer diffStore.mtx.HighPriorityWriteUnlock()
if len(diffStore.dirty) == 0 {
@@ -169,7 +139,7 @@ func (diffStore *utxoDiffStore) flushToDB(dbTx database.Tx) error {
hash := hash // Copy hash to a new variable to avoid passing the same pointer
buffer.Reset()
diffData := diffStore.loaded[hash]
err := dbStoreDiffData(dbTx, buffer, &hash, diffData)
err := storeDiffData(dbContext, buffer, &hash, diffData)
if err != nil {
return err
}
@@ -181,28 +151,18 @@ func (diffStore *utxoDiffStore) clearDirtyEntries() {
diffStore.dirty = make(map[daghash.Hash]struct{})
}
// dbStoreDiffData stores the UTXO diff data to the database.
// storeDiffData stores the UTXO diff data to the database.
// This overwrites the current entry if there exists one.
func dbStoreDiffData(dbTx database.Tx, writeBuffer *bytes.Buffer, hash *daghash.Hash, diffData *blockUTXODiffData) error {
// To avoid a ton of allocs, use the given writeBuffer
func storeDiffData(dbContext dbaccess.Context, w *bytes.Buffer, hash *daghash.Hash, diffData *blockUTXODiffData) error {
// To avoid a ton of allocs, use the io.Writer
// instead of allocating one. We expect the buffer to
// already be initialized and, in most cases, to already
// be large enough to accommodate the serialized data
// without growing.
err := serializeBlockUTXODiffData(writeBuffer, diffData)
err := serializeBlockUTXODiffData(w, diffData)
if err != nil {
return err
}
// Bucket.Put doesn't copy on its own, so we manually
// copy here. We do so because we expect the buffer
// to be reused once we're done with it.
serializedDiffData := make([]byte, writeBuffer.Len())
copy(serializedDiffData, writeBuffer.Bytes())
return dbTx.Metadata().Bucket(utxoDiffsBucketName).Put(hash[:], serializedDiffData)
}
func dbRemoveDiffData(dbTx database.Tx, hash *daghash.Hash) error {
return dbTx.Metadata().Bucket(utxoDiffsBucketName).Delete(hash[:])
return dbaccess.StoreUTXODiffData(dbContext, hash, w.Bytes())
}
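diffDataByHash no longer returns a separate exists flag; missing data now surfaces as a dbaccess not-found error. Callers distinguish it from real failures the way setBlockDiff above does (sketch):

diffData, err := diffStore.diffDataByHash(hash)
if dbaccess.IsNotFoundError(err) {
	// No diff data has been stored for this block yet.
	diffData = &blockUTXODiffData{}
} else if err != nil {
	return err
}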

View File

@@ -1,9 +1,8 @@
package blockdag
import (
"fmt"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/dbaccess"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"reflect"
@@ -12,7 +11,7 @@ import (
func TestUTXODiffStore(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestUTXODiffStore", Config{
dag, teardownFunc, err := DAGSetup("TestUTXODiffStore", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
@@ -31,9 +30,12 @@ func TestUTXODiffStore(t *testing.T) {
// Check that an error is returned when asking for non existing node
nonExistingNode := createNode()
_, err = dag.utxoDiffStore.diffByNode(nonExistingNode)
expectedErrString := fmt.Sprintf("Couldn't find diff data for block %s", nonExistingNode.hash)
if err == nil || err.Error() != expectedErrString {
t.Errorf("diffByNode: expected error %s but got %s", expectedErrString, err)
if !dbaccess.IsNotFoundError(err) {
if err != nil {
t.Errorf("diffByNode: %s", err)
} else {
t.Errorf("diffByNode: unexpectedly found diff data")
}
}
// Add node's diff data to the utxoDiffStore and check if it's checked correctly.
@@ -63,12 +65,19 @@ func TestUTXODiffStore(t *testing.T) {
// Flush changes to db, delete them from the dag.utxoDiffStore.loaded
// map, and check if the diff data is re-fetched from the database.
err = dag.db.Update(func(dbTx database.Tx) error {
return dag.utxoDiffStore.flushToDB(dbTx)
})
dbTx, err := dbaccess.NewTx()
if err != nil {
t.Fatalf("Failed to open database transaction: %s", err)
}
defer dbTx.RollbackUnlessClosed()
err = dag.utxoDiffStore.flushToDB(dbTx)
if err != nil {
t.Fatalf("Error flushing utxoDiffStore data to DB: %s", err)
}
err = dbTx.Commit()
if err != nil {
t.Fatalf("Failed to commit database transaction: %s", err)
}
delete(dag.utxoDiffStore.loaded, *node.hash)
if storeDiff, err := dag.utxoDiffStore.diffByNode(node); err != nil {

View File

@@ -2,15 +2,11 @@ package blockdag
import (
"bytes"
"encoding/binary"
"fmt"
"github.com/pkg/errors"
"io"
"math/big"
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/kaspad/util/binaryserializer"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
"io"
)
// serializeBlockUTXODiffData serializes diff data in the following format:
@@ -54,40 +50,26 @@ func utxoEntryHeaderCode(entry *UTXOEntry) uint64 {
return headerCode
}
func (diffStore *utxoDiffStore) deserializeBlockUTXODiffData(serializedDiffDataBytes []byte) (*blockUTXODiffData, error) {
func (diffStore *utxoDiffStore) deserializeBlockUTXODiffData(serializedDiffData []byte) (*blockUTXODiffData, error) {
diffData := &blockUTXODiffData{}
serializedDiffData := bytes.NewBuffer(serializedDiffDataBytes)
r := bytes.NewBuffer(serializedDiffData)
var hasDiffChild bool
err := wire.ReadElement(serializedDiffData, &hasDiffChild)
err := wire.ReadElement(r, &hasDiffChild)
if err != nil {
return nil, err
}
if hasDiffChild {
hash := &daghash.Hash{}
err := wire.ReadElement(serializedDiffData, hash)
err := wire.ReadElement(r, hash)
if err != nil {
return nil, err
}
diffData.diffChild = diffStore.dag.index.LookupNode(hash)
}
diffData.diff = &UTXODiff{
useMultiset: true,
}
diffData.diff.toAdd, err = deserializeDiffEntries(serializedDiffData)
if err != nil {
return nil, err
}
diffData.diff.toRemove, err = deserializeDiffEntries(serializedDiffData)
if err != nil {
return nil, err
}
diffData.diff.diffMultiset, err = deserializeMultiset(serializedDiffData)
diffData.diff, err = deserializeUTXODiff(r)
if err != nil {
return nil, err
}
@@ -95,38 +77,31 @@ func (diffStore *utxoDiffStore) deserializeBlockUTXODiffData(serializedDiffDataB
return diffData, nil
}
func deserializeDiffEntries(r io.Reader) (utxoCollection, error) {
func deserializeUTXODiff(r io.Reader) (*UTXODiff, error) {
diff := &UTXODiff{}
var err error
diff.toAdd, err = deserializeUTXOCollection(r)
if err != nil {
return nil, err
}
diff.toRemove, err = deserializeUTXOCollection(r)
if err != nil {
return nil, err
}
return diff, nil
}
func deserializeUTXOCollection(r io.Reader) (utxoCollection, error) {
count, err := wire.ReadVarInt(r)
if err != nil {
return nil, err
}
collection := utxoCollection{}
for i := uint64(0); i < count; i++ {
outpointSize, err := wire.ReadVarInt(r)
if err != nil {
return nil, err
}
serializedOutpoint := make([]byte, outpointSize)
err = binary.Read(r, byteOrder, serializedOutpoint)
if err != nil {
return nil, err
}
outpoint, err := deserializeOutpoint(serializedOutpoint)
if err != nil {
return nil, err
}
utxoEntrySize, err := wire.ReadVarInt(r)
if err != nil {
return nil, err
}
serializedEntry := make([]byte, utxoEntrySize)
err = binary.Read(r, byteOrder, serializedEntry)
if err != nil {
return nil, err
}
utxoEntry, err := deserializeUTXOEntry(serializedEntry)
utxoEntry, outpoint, err := deserializeUTXO(r)
if err != nil {
return nil, err
}
@@ -135,31 +110,22 @@ func deserializeDiffEntries(r io.Reader) (utxoCollection, error) {
return collection, nil
}
// deserializeMultiset deserializes an ECMH multiset.
// See serializeMultiset for more details.
func deserializeMultiset(r io.Reader) (*ecc.Multiset, error) {
xBytes := make([]byte, multisetPointSize)
yBytes := make([]byte, multisetPointSize)
err := binary.Read(r, byteOrder, xBytes)
func deserializeUTXO(r io.Reader) (*UTXOEntry, *wire.Outpoint, error) {
outpoint, err := deserializeOutpoint(r)
if err != nil {
return nil, err
return nil, nil, err
}
err = binary.Read(r, byteOrder, yBytes)
utxoEntry, err := deserializeUTXOEntry(r)
if err != nil {
return nil, err
return nil, nil, err
}
var x, y big.Int
x.SetBytes(xBytes)
y.SetBytes(yBytes)
return ecc.NewMultisetFromPoint(ecc.S256(), &x, &y), nil
return utxoEntry, outpoint, nil
}
// serializeUTXODiff serializes UTXODiff by serializing
// UTXODiff.toAdd, UTXODiff.toRemove and UTXODiff.Multiset one after the other.
func serializeUTXODiff(w io.Writer, diff *UTXODiff) error {
if !diff.useMultiset {
return errors.New("Cannot serialize a UTXO diff without a multiset")
}
err := serializeUTXOCollection(w, diff.toAdd)
if err != nil {
return err
@@ -169,10 +135,7 @@ func serializeUTXODiff(w io.Writer, diff *UTXODiff) error {
if err != nil {
return err
}
err = serializeMultiset(w, diff.diffMultiset)
if err != nil {
return err
}
return nil
}
@@ -193,121 +156,95 @@ func serializeUTXOCollection(w io.Writer, collection utxoCollection) error {
return nil
}
// serializeMultiset serializes an ECMH multiset. The serialization
// is done by taking the (x,y) coordinates of the multiset point and
// padding each one of them to 32 bytes (they'll be 32 bytes in most
// cases anyway, unless one of the coordinates is zero) and writing
// them one after the other.
func serializeMultiset(w io.Writer, ms *ecc.Multiset) error {
x, y := ms.Point()
xBytes := make([]byte, multisetPointSize)
copy(xBytes, x.Bytes())
yBytes := make([]byte, multisetPointSize)
copy(yBytes, y.Bytes())
err := binary.Write(w, byteOrder, xBytes)
if err != nil {
return err
}
err = binary.Write(w, byteOrder, yBytes)
if err != nil {
return err
}
return nil
}
// serializeUTXO serializes a utxo entry-outpoint pair
func serializeUTXO(w io.Writer, entry *UTXOEntry, outpoint *wire.Outpoint) error {
serializedOutpoint := *outpointKey(*outpoint)
err := wire.WriteVarInt(w, uint64(len(serializedOutpoint)))
err := serializeOutpoint(w, outpoint)
if err != nil {
return err
}
err = binary.Write(w, byteOrder, serializedOutpoint)
if err != nil {
return err
}
serializedUTXOEntry := serializeUTXOEntry(entry)
err = wire.WriteVarInt(w, uint64(len(serializedUTXOEntry)))
if err != nil {
return err
}
err = binary.Write(w, byteOrder, serializedUTXOEntry)
err = serializeUTXOEntry(w, entry)
if err != nil {
return err
}
return nil
}
// serializeUTXOEntry returns the entry serialized to a format that is suitable
// for long-term storage. The format is described in detail above.
func serializeUTXOEntry(entry *UTXOEntry) []byte {
// p2pkhUTXOEntrySerializeSize is the serialized size for a P2PKH UTXO entry.
// 8 bytes (header code) + 8 bytes (amount) + varint for script pub key length of 25 (for P2PKH) + 25 bytes for P2PKH script.
var p2pkhUTXOEntrySerializeSize = 8 + 8 + wire.VarIntSerializeSize(25) + 25
// serializeUTXOEntry encodes the entry to the given io.Writer.
// The serialization format is described in detail above.
func serializeUTXOEntry(w io.Writer, entry *UTXOEntry) error {
// Encode the header code.
headerCode := utxoEntryHeaderCode(entry)
// Calculate the size needed to serialize the entry.
size := serializeSizeVLQ(headerCode) +
compressedTxOutSize(uint64(entry.Amount()), entry.ScriptPubKey())
// Serialize the header code followed by the compressed unspent
// transaction output.
serialized := make([]byte, size)
offset := putVLQ(serialized, headerCode)
offset += putCompressedTxOut(serialized[offset:], uint64(entry.Amount()),
entry.ScriptPubKey())
return serialized
}
// deserializeOutpoint decodes an outpoint from the passed serialized byte
// slice into a new wire.Outpoint using a format that is suitable for long-
// term storage. this format is described in detail above.
func deserializeOutpoint(serialized []byte) (*wire.Outpoint, error) {
if len(serialized) <= daghash.HashSize {
return nil, errDeserialize("unexpected end of data")
err := binaryserializer.PutUint64(w, byteOrder, headerCode)
if err != nil {
return err
}
txID := daghash.TxID{}
txID.SetBytes(serialized[:daghash.HashSize])
index, _ := deserializeVLQ(serialized[daghash.HashSize:])
return wire.NewOutpoint(&txID, uint32(index)), nil
err = binaryserializer.PutUint64(w, byteOrder, entry.Amount())
if err != nil {
return err
}
err = wire.WriteVarInt(w, uint64(len(entry.ScriptPubKey())))
if err != nil {
return err
}
_, err = w.Write(entry.ScriptPubKey())
if err != nil {
return errors.WithStack(err)
}
return nil
}
// deserializeUTXOEntry decodes a UTXO entry from the passed serialized byte
// slice into a new UTXOEntry using a format that is suitable for long-term
// storage. The format is described in detail above.
func deserializeUTXOEntry(serialized []byte) (*UTXOEntry, error) {
// deserializeUTXOEntry decodes a UTXO entry from the passed reader
// into a new UTXOEntry, according to the format that is described
// in detail above.
func deserializeUTXOEntry(r io.Reader) (*UTXOEntry, error) {
// Deserialize the header code.
code, offset := deserializeVLQ(serialized)
if offset >= len(serialized) {
return nil, errDeserialize("unexpected end of data after header")
headerCode, err := binaryserializer.Uint64(r, byteOrder)
if err != nil {
return nil, err
}
// Decode the header code.
//
// Bit 0 indicates whether the containing transaction is a coinbase.
// Bits 1-x encode blue score of the containing transaction.
isCoinbase := code&0x01 != 0
blockBlueScore := code >> 1
// Decode the compressed unspent transaction output.
amount, scriptPubKey, _, err := decodeCompressedTxOut(serialized[offset:])
if err != nil {
return nil, errDeserialize(fmt.Sprintf("unable to decode "+
"UTXO: %s", err))
}
isCoinbase := headerCode&0x01 != 0
blockBlueScore := headerCode >> 1
entry := &UTXOEntry{
amount: amount,
scriptPubKey: scriptPubKey,
blockBlueScore: blockBlueScore,
packedFlags: 0,
}
if isCoinbase {
entry.packedFlags |= tfCoinbase
}
entry.amount, err = binaryserializer.Uint64(r, byteOrder)
if err != nil {
return nil, err
}
scriptPubKeyLen, err := wire.ReadVarInt(r)
if err != nil {
return nil, err
}
entry.scriptPubKey = make([]byte, scriptPubKeyLen)
_, err = r.Read(entry.scriptPubKey)
if err != nil {
return nil, errors.WithStack(err)
}
return entry, nil
}
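serializeUTXOEntry and deserializeUTXOEntry now stream through io.Writer/io.Reader instead of working on byte slices. A round trip through a bytes.Buffer, which satisfies both interfaces (sketch; entry is any *UTXOEntry):

var buf bytes.Buffer
if err := serializeUTXOEntry(&buf, entry); err != nil {
	return err
}
deserializedEntry, err := deserializeUTXOEntry(&buf)
if err != nil {
	return err
}
// deserializedEntry should now mirror the original entry's amount,
// script public key and blue score.
_ = deserializedEntry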

View File

@@ -8,7 +8,7 @@ import (
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/wire"
)
@@ -153,29 +153,16 @@ func (uc utxoCollection) clone() utxoCollection {
// UTXODiff represents a diff between two UTXO Sets.
type UTXODiff struct {
toAdd utxoCollection
toRemove utxoCollection
diffMultiset *ecc.Multiset
useMultiset bool
toAdd utxoCollection
toRemove utxoCollection
}
// NewUTXODiffWithoutMultiset creates a new, empty utxoDiff
// NewUTXODiff creates a new, empty utxoDiff
// without a multiset.
func NewUTXODiffWithoutMultiset() *UTXODiff {
return &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
useMultiset: false,
}
}
// NewUTXODiff creates a new, empty utxoDiff.
func NewUTXODiff() *UTXODiff {
return &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
useMultiset: true,
diffMultiset: ecc.NewMultiset(ecc.S256()),
toAdd: utxoCollection{},
toRemove: utxoCollection{},
}
}
@@ -209,9 +196,8 @@ func NewUTXODiff() *UTXODiff {
// diffFrom results in the UTXO being added to toAdd
func (d *UTXODiff) diffFrom(other *UTXODiff) (*UTXODiff, error) {
result := UTXODiff{
toAdd: make(utxoCollection, len(d.toRemove)+len(other.toAdd)),
toRemove: make(utxoCollection, len(d.toAdd)+len(other.toRemove)),
useMultiset: d.useMultiset,
toAdd: make(utxoCollection, len(d.toRemove)+len(other.toAdd)),
toRemove: make(utxoCollection, len(d.toAdd)+len(other.toRemove)),
}
// Note that the following cases are not accounted for, as they are impossible
@@ -293,17 +279,12 @@ func (d *UTXODiff) diffFrom(other *UTXODiff) (*UTXODiff, error) {
}
}
if d.useMultiset {
// Create a new diffMultiset as the subtraction of the two diffs.
result.diffMultiset = other.diffMultiset.Subtract(d.diffMultiset)
}
return &result, nil
}
// WithDiffInPlace applies provided diff to this diff in-place, that would be the result if
// withDiffInPlace applies provided diff to this diff in-place, that would be the result if
// first d, and then diff were applied to the same base
func (d *UTXODiff) WithDiffInPlace(diff *UTXODiff) error {
func (d *UTXODiff) withDiffInPlace(diff *UTXODiff) error {
for outpoint, entryToRemove := range diff.toRemove {
if d.toAdd.containsWithBlueScore(outpoint, entryToRemove.blockBlueScore) {
// If already exists in toAdd with the same blueScore - remove from toAdd
@@ -313,7 +294,7 @@ func (d *UTXODiff) WithDiffInPlace(diff *UTXODiff) error {
if d.toRemove.contains(outpoint) {
// If already exists - this is an error
return ruleError(ErrWithDiff, fmt.Sprintf(
"WithDiffInPlace: outpoint %s both in d.toRemove and in diff.toRemove", outpoint))
"withDiffInPlace: outpoint %s both in d.toRemove and in diff.toRemove", outpoint))
}
// If not exists neither in toAdd nor in toRemove - add to toRemove
@@ -325,7 +306,7 @@ func (d *UTXODiff) WithDiffInPlace(diff *UTXODiff) error {
// If already exists in toRemove with the same blueScore - remove from toRemove
if d.toAdd.contains(outpoint) && !diff.toRemove.contains(outpoint) {
return ruleError(ErrWithDiff, fmt.Sprintf(
"WithDiffInPlace: outpoint %s both in d.toAdd and in diff.toAdd with no "+
"withDiffInPlace: outpoint %s both in d.toAdd and in diff.toAdd with no "+
"corresponding entry in diff.toRemove", outpoint))
}
d.toRemove.remove(outpoint)
@@ -336,129 +317,34 @@ func (d *UTXODiff) WithDiffInPlace(diff *UTXODiff) error {
!diff.toRemove.containsWithBlueScore(outpoint, existingEntry.blockBlueScore)) {
// If already exists - this is an error
return ruleError(ErrWithDiff, fmt.Sprintf(
"WithDiffInPlace: outpoint %s both in d.toAdd and in diff.toAdd", outpoint))
"withDiffInPlace: outpoint %s both in d.toAdd and in diff.toAdd", outpoint))
}
// If not exists neither in toAdd nor in toRemove, or exists in toRemove with different blueScore - add to toAdd
d.toAdd.add(outpoint, entryToAdd)
}
// Apply diff.diffMultiset to d.diffMultiset
if d.useMultiset {
d.diffMultiset = d.diffMultiset.Union(diff.diffMultiset)
}
return nil
}
// WithDiff applies provided diff to this diff, creating a new utxoDiff, that would be the result if
// first d, and then diff were applied to the same base
//
// WithDiff follows a set of rules represented by the following 3 by 3 table:
//
// | | this | |
// ---------+-----------+-----------+-----------+-----------
// | | toAdd | toRemove | None
// ---------+-----------+-----------+-----------+-----------
// other | toAdd | X | - | toAdd
// ---------+-----------+-----------+-----------+-----------
// | toRemove | - | X | toRemove
// ---------+-----------+-----------+-----------+-----------
// | None | toAdd | toRemove | -
//
// Key:
// - Don't add anything to the result
// X Return an error
// toAdd Add the UTXO into the toAdd collection of the result
// toRemove Add the UTXO into the toRemove collection of the result
//
// Examples:
// 1. This diff contains a UTXO in toAdd, and the other diff contains it in toRemove
// WithDiff results in nothing being added
// 2. This diff contains a UTXO in toRemove, and the other diff does not contain it
// WithDiff results in the UTXO being added to toRemove
// first d, and then diff were applied to some base
func (d *UTXODiff) WithDiff(diff *UTXODiff) (*UTXODiff, error) {
result := UTXODiff{
toAdd: make(utxoCollection, len(d.toAdd)+len(diff.toAdd)),
toRemove: make(utxoCollection, len(d.toRemove)+len(diff.toRemove)),
useMultiset: d.useMultiset,
clone := d.clone()
err := clone.withDiffInPlace(diff)
if err != nil {
return nil, err
}
// All transactions in d.toAdd:
// If they are not in diff.toRemove - should be added in result.toAdd
// If they are in diff.toAdd - should throw an error
// Otherwise - should be ignored
for outpoint, utxoEntry := range d.toAdd {
if !diff.toRemove.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
result.toAdd.add(outpoint, utxoEntry)
}
if diffEntry, ok := diff.toAdd.get(outpoint); ok {
// An exception is made for entries with unequal blue scores
// as long as the appropriate entry exists in either d.toRemove
// or diff.toRemove.
// These are just "updates" to accepted blue score
if diffEntry.blockBlueScore != utxoEntry.blockBlueScore &&
diff.toRemove.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
continue
}
return nil, ruleError(ErrWithDiff, fmt.Sprintf("WithDiff: outpoint %s both in d.toAdd and in other.toAdd", outpoint))
}
}
// All transactions in d.toRemove:
// If they are not in diff.toAdd - should be added in result.toRemove
// If they are in diff.toRemove - should throw an error
// Otherwise - should be ignored
for outpoint, utxoEntry := range d.toRemove {
if !diff.toAdd.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
result.toRemove.add(outpoint, utxoEntry)
}
if diffEntry, ok := diff.toRemove.get(outpoint); ok {
// An exception is made for entries with unequal blue scores
// as long as the appropriate entry exists in either d.toAdd
// or diff.toAdd.
// These are just "updates" to accepted blue score
if diffEntry.blockBlueScore != utxoEntry.blockBlueScore &&
d.toAdd.containsWithBlueScore(outpoint, diffEntry.blockBlueScore) {
continue
}
return nil, ruleError(ErrWithDiff, "WithDiff: outpoint both in d.toRemove and in other.toRemove")
}
}
// All transactions in diff.toAdd:
// If they are not in d.toRemove - should be added in result.toAdd
for outpoint, utxoEntry := range diff.toAdd {
if !d.toRemove.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
result.toAdd.add(outpoint, utxoEntry)
}
}
// All transactions in diff.toRemove:
// If they are not in d.toAdd - should be added in result.toRemove
for outpoint, utxoEntry := range diff.toRemove {
if !d.toAdd.containsWithBlueScore(outpoint, utxoEntry.blockBlueScore) {
result.toRemove.add(outpoint, utxoEntry)
}
}
// Apply diff.diffMultiset to d.diffMultiset
if d.useMultiset {
result.diffMultiset = d.diffMultiset.Union(diff.diffMultiset)
}
return &result, nil
return clone, nil
}
// clone returns a clone of this utxoDiff
func (d *UTXODiff) clone() *UTXODiff {
clone := &UTXODiff{
toAdd: d.toAdd.clone(),
toRemove: d.toRemove.clone(),
useMultiset: d.useMultiset,
}
if d.useMultiset {
clone.diffMultiset = d.diffMultiset.Clone()
toAdd: d.toAdd.clone(),
toRemove: d.toRemove.clone(),
}
return clone
}
@@ -475,14 +361,6 @@ func (d *UTXODiff) AddEntry(outpoint wire.Outpoint, entry *UTXOEntry) error {
} else {
d.toAdd.add(outpoint, entry)
}
if d.useMultiset {
newMs, err := addUTXOToMultiset(d.diffMultiset, entry, &outpoint)
if err != nil {
return err
}
d.diffMultiset = newMs
}
return nil
}
@@ -498,21 +376,10 @@ func (d *UTXODiff) RemoveEntry(outpoint wire.Outpoint, entry *UTXOEntry) error {
} else {
d.toRemove.add(outpoint, entry)
}
if d.useMultiset {
newMs, err := removeUTXOFromMultiset(d.diffMultiset, entry, &outpoint)
if err != nil {
return err
}
d.diffMultiset = newMs
}
return nil
}
func (d UTXODiff) String() string {
if d.useMultiset {
return fmt.Sprintf("toAdd: %s; toRemove: %s, Multiset-Hash: %s", d.toAdd, d.toRemove, d.diffMultiset.Hash())
}
return fmt.Sprintf("toAdd: %s; toRemove: %s", d.toAdd, d.toRemove)
}
@@ -537,8 +404,6 @@ type UTXOSet interface {
AddTx(tx *wire.MsgTx, blockBlueScore uint64) (ok bool, err error)
clone() UTXOSet
Get(outpoint wire.Outpoint) (*UTXOEntry, bool)
Multiset() *ecc.Multiset
WithTransactions(transactions []*wire.MsgTx, blockBlueScore uint64, ignoreDoubleSpends bool) (UTXOSet, error)
}
// diffFromTx is a common implementation for diffFromTx, that works
@@ -608,21 +473,19 @@ func diffFromAcceptedTx(u UTXOSet, tx *wire.MsgTx, acceptingBlueScore uint64) (*
// FullUTXOSet represents a full list of transaction outputs and their values
type FullUTXOSet struct {
utxoCollection
UTXOMultiset *ecc.Multiset
}
// NewFullUTXOSet creates a new utxoSet with full list of transaction outputs and their values
func NewFullUTXOSet() *FullUTXOSet {
return &FullUTXOSet{
utxoCollection: utxoCollection{},
UTXOMultiset: ecc.NewMultiset(ecc.S256()),
}
}
// newFullUTXOSetFromUTXOCollection converts a utxoCollection to a FullUTXOSet
func newFullUTXOSetFromUTXOCollection(collection utxoCollection) (*FullUTXOSet, error) {
var err error
multiset := ecc.NewMultiset(ecc.S256())
multiset := secp256k1.NewMultiset()
for outpoint, utxoEntry := range collection {
multiset, err = addUTXOToMultiset(multiset, utxoEntry, &outpoint)
if err != nil {
@@ -631,7 +494,6 @@ func newFullUTXOSetFromUTXOCollection(collection utxoCollection) (*FullUTXOSet,
}
return &FullUTXOSet{
utxoCollection: collection,
UTXOMultiset: multiset,
}, nil
}
@@ -668,22 +530,14 @@ func (fus *FullUTXOSet) AddTx(tx *wire.MsgTx, blueScore uint64) (isAccepted bool
}
for _, txIn := range tx.TxIn {
outpoint := *wire.NewOutpoint(&txIn.PreviousOutpoint.TxID, txIn.PreviousOutpoint.Index)
err := fus.removeAndUpdateMultiset(outpoint)
if err != nil {
return false, err
}
fus.remove(txIn.PreviousOutpoint)
}
}
for i, txOut := range tx.TxOut {
outpoint := *wire.NewOutpoint(tx.TxID(), uint32(i))
entry := NewUTXOEntry(txOut, isCoinbase, blueScore)
err := fus.addAndUpdateMultiset(outpoint, entry)
if err != nil {
return false, err
}
fus.add(outpoint, entry)
}
return true, nil
@@ -712,7 +566,7 @@ func (fus *FullUTXOSet) diffFromAcceptedTx(tx *wire.MsgTx, acceptingBlueScore ui
// clone returns a clone of this utxoSet
func (fus *FullUTXOSet) clone() UTXOSet {
return &FullUTXOSet{utxoCollection: fus.utxoCollection.clone(), UTXOMultiset: fus.UTXOMultiset.Clone()}
return &FullUTXOSet{utxoCollection: fus.utxoCollection.clone()}
}
// Get returns the UTXOEntry associated with the given Outpoint, and a boolean indicating if such entry was found
@@ -721,55 +575,6 @@ func (fus *FullUTXOSet) Get(outpoint wire.Outpoint) (*UTXOEntry, bool) {
return utxoEntry, ok
}
// Multiset returns the ecmh-Multiset of this utxoSet
func (fus *FullUTXOSet) Multiset() *ecc.Multiset {
return fus.UTXOMultiset
}
// addAndUpdateMultiset adds a UTXOEntry to this utxoSet and updates its multiset accordingly
func (fus *FullUTXOSet) addAndUpdateMultiset(outpoint wire.Outpoint, entry *UTXOEntry) error {
fus.add(outpoint, entry)
newMs, err := addUTXOToMultiset(fus.UTXOMultiset, entry, &outpoint)
if err != nil {
return err
}
fus.UTXOMultiset = newMs
return nil
}
// removeAndUpdateMultiset removes a UTXOEntry from this utxoSet and updates its multiset accordingly
func (fus *FullUTXOSet) removeAndUpdateMultiset(outpoint wire.Outpoint) error {
entry, ok := fus.Get(outpoint)
if !ok {
return errors.Errorf("Couldn't find outpoint %s", outpoint)
}
fus.remove(outpoint)
var err error
newMs, err := removeUTXOFromMultiset(fus.UTXOMultiset, entry, &outpoint)
if err != nil {
return err
}
fus.UTXOMultiset = newMs
return nil
}
// WithTransactions returns a new UTXO Set with the added transactions.
//
// This function MUST be called with the DAG lock held.
func (fus *FullUTXOSet) WithTransactions(transactions []*wire.MsgTx, blockBlueScore uint64, ignoreDoubleSpends bool) (UTXOSet, error) {
diffSet := NewDiffUTXOSet(fus, NewUTXODiff())
for _, tx := range transactions {
isAccepted, err := diffSet.AddTx(tx, blockBlueScore)
if err != nil {
return nil, err
}
if !ignoreDoubleSpends && !isAccepted {
return nil, errors.Errorf("Transaction %s is not valid with the current UTXO set", tx.TxID())
}
}
return UTXOSet(diffSet), nil
}
// DiffUTXOSet represents a utxoSet with a base fullUTXOSet and a UTXODiff
type DiffUTXOSet struct {
base *FullUTXOSet
@@ -830,12 +635,11 @@ func (dus *DiffUTXOSet) AddTx(tx *wire.MsgTx, blockBlueScore uint64) (bool, erro
func (dus *DiffUTXOSet) appendTx(tx *wire.MsgTx, blockBlueScore uint64, isCoinbase bool) error {
if !isCoinbase {
for _, txIn := range tx.TxIn {
outpoint := *wire.NewOutpoint(&txIn.PreviousOutpoint.TxID, txIn.PreviousOutpoint.Index)
entry, ok := dus.Get(outpoint)
entry, ok := dus.Get(txIn.PreviousOutpoint)
if !ok {
return errors.Errorf("Couldn't find entry for outpoint %s", outpoint)
return errors.Errorf("Couldn't find entry for outpoint %s", txIn.PreviousOutpoint)
}
err := dus.UTXODiff.RemoveEntry(outpoint, entry)
err := dus.UTXODiff.RemoveEntry(txIn.PreviousOutpoint, entry)
if err != nil {
return err
}
@@ -881,16 +685,7 @@ func (dus *DiffUTXOSet) meldToBase() error {
for outpoint, utxoEntry := range dus.UTXODiff.toAdd {
dus.base.add(outpoint, utxoEntry)
}
if dus.UTXODiff.useMultiset {
dus.base.UTXOMultiset = dus.base.UTXOMultiset.Union(dus.UTXODiff.diffMultiset)
}
if dus.UTXODiff.useMultiset {
dus.UTXODiff = NewUTXODiff()
} else {
dus.UTXODiff = NewUTXODiffWithoutMultiset()
}
dus.UTXODiff = NewUTXODiff()
return nil
}
@@ -905,7 +700,7 @@ func (dus *DiffUTXOSet) diffFromAcceptedTx(tx *wire.MsgTx, acceptingBlueScore ui
}
func (dus *DiffUTXOSet) String() string {
return fmt.Sprintf("{Base: %s, To Add: %s, To Remove: %s, Multiset-Hash:%s}", dus.base, dus.UTXODiff.toAdd, dus.UTXODiff.toRemove, dus.Multiset().Hash())
return fmt.Sprintf("{Base: %s, To Add: %s, To Remove: %s}", dus.base, dus.UTXODiff.toAdd, dus.UTXODiff.toRemove)
}
// clone returns a clone of this UTXO Set
@@ -930,42 +725,3 @@ func (dus *DiffUTXOSet) Get(outpoint wire.Outpoint) (*UTXOEntry, bool) {
txOut, ok := dus.UTXODiff.toAdd.get(outpoint)
return txOut, ok
}
// Multiset returns the ecmh-Multiset of this utxoSet
func (dus *DiffUTXOSet) Multiset() *ecc.Multiset {
return dus.base.UTXOMultiset.Union(dus.UTXODiff.diffMultiset)
}
// WithTransactions returns a new UTXO Set with the added transactions.
//
// If dus.UTXODiff.useMultiset is true, this function MUST be
// called with the DAG lock held.
func (dus *DiffUTXOSet) WithTransactions(transactions []*wire.MsgTx, blockBlueScore uint64, ignoreDoubleSpends bool) (UTXOSet, error) {
diffSet := NewDiffUTXOSet(dus.base, dus.UTXODiff.clone())
for _, tx := range transactions {
isAccepted, err := diffSet.AddTx(tx, blockBlueScore)
if err != nil {
return nil, err
}
if !ignoreDoubleSpends && !isAccepted {
return nil, errors.Errorf("Transaction %s is not valid with the current UTXO set", tx.TxID())
}
}
return UTXOSet(diffSet), nil
}
func addUTXOToMultiset(ms *ecc.Multiset, entry *UTXOEntry, outpoint *wire.Outpoint) (*ecc.Multiset, error) {
utxoMS, err := utxoMultiset(entry, outpoint)
if err != nil {
return nil, err
}
return ms.Union(utxoMS), nil
}
func removeUTXOFromMultiset(ms *ecc.Multiset, entry *UTXOEntry, outpoint *wire.Outpoint) (*ecc.Multiset, error) {
utxoMS, err := utxoMultiset(entry, outpoint)
if err != nil {
return nil, err
}
return ms.Subtract(utxoMS), nil
}
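WithDiff is now a thin wrapper that clones the receiver and applies withDiffInPlace, so the two can never drift apart. Inside the package, folding a chain of diffs can therefore be done either immutably or in place (sketch; diffs is a hypothetical slice of *UTXODiff):

accumulated := NewUTXODiff()
for _, diff := range diffs {
	// In-place variant; use accumulated.WithDiff(diff) instead to get a
	// fresh *UTXODiff at every step.
	if err := accumulated.withDiffInPlace(diff); err != nil {
		return err
	}
}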

View File

@@ -7,7 +7,6 @@ import (
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
@@ -80,49 +79,40 @@ func TestUTXODiff(t *testing.T) {
utxoEntry0 := NewUTXOEntry(&wire.TxOut{ScriptPubKey: []byte{}, Value: 10}, true, 0)
utxoEntry1 := NewUTXOEntry(&wire.TxOut{ScriptPubKey: []byte{}, Value: 20}, false, 1)
for i := 0; i < 2; i++ {
withMultiset := i == 0
// Test utxoDiff creation
var diff *UTXODiff
if withMultiset {
diff = NewUTXODiff()
} else {
diff = NewUTXODiffWithoutMultiset()
}
if len(diff.toAdd) != 0 || len(diff.toRemove) != 0 {
t.Errorf("new diff is not empty")
}
// Test utxoDiff creation
err := diff.AddEntry(outpoint0, utxoEntry0)
if err != nil {
t.Fatalf("error adding entry to utxo diff: %s", err)
}
diff := NewUTXODiff()
err = diff.RemoveEntry(outpoint1, utxoEntry1)
if err != nil {
t.Fatalf("error adding entry to utxo diff: %s", err)
}
if len(diff.toAdd) != 0 || len(diff.toRemove) != 0 {
t.Errorf("new diff is not empty")
}
// Test utxoDiff cloning
clonedDiff := diff.clone()
if clonedDiff == diff {
t.Errorf("cloned diff is reference-equal to the original")
}
if !reflect.DeepEqual(clonedDiff, diff) {
t.Errorf("cloned diff not equal to the original"+
"Original: \"%v\", cloned: \"%v\".", diff, clonedDiff)
}
err := diff.AddEntry(outpoint0, utxoEntry0)
if err != nil {
t.Fatalf("error adding entry to utxo diff: %s", err)
}
// Test utxoDiff string representation
expectedDiffString := "toAdd: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ]; toRemove: [ (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20, blueScore: 1 ]"
if withMultiset {
expectedDiffString = "toAdd: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ]; toRemove: [ (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20, blueScore: 1 ], Multiset-Hash: 7cb61e48005b0c817211d04589d719bff87d86a6a6ce2454515f57265382ded7"
}
diffString := clonedDiff.String()
if diffString != expectedDiffString {
t.Errorf("unexpected diff string. "+
"Expected: \"%s\", got: \"%s\".", expectedDiffString, diffString)
}
err = diff.RemoveEntry(outpoint1, utxoEntry1)
if err != nil {
t.Fatalf("error adding entry to utxo diff: %s", err)
}
// Test utxoDiff cloning
clonedDiff := diff.clone()
if clonedDiff == diff {
t.Errorf("cloned diff is reference-equal to the original")
}
if !reflect.DeepEqual(clonedDiff, diff) {
t.Errorf("cloned diff not equal to the original"+
"Original: \"%v\", cloned: \"%v\".", diff, clonedDiff)
}
// Test utxoDiff string representation
expectedDiffString := "toAdd: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ]; toRemove: [ (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20, blueScore: 1 ]"
diffString := clonedDiff.String()
if diffString != expectedDiffString {
t.Errorf("unexpected diff string. "+
"Expected: \"%s\", got: \"%s\".", expectedDiffString, diffString)
}
}
@@ -137,7 +127,7 @@ func TestUTXODiffRules(t *testing.T) {
// For each of the following test cases, we will:
// this.diffFrom(other) and compare it to expectedDiffFromResult
// this.WithDiff(other) and compare it to expectedWithDiffResult
// this.WithDiffInPlace(other) and compare it to expectedWithDiffResult
// this.withDiffInPlace(other) and compare it to expectedWithDiffResult
//
// Note: an expected nil result means that we expect the respective operation to fail
// See the following spreadsheet for a summary of all test-cases:
@@ -542,157 +532,101 @@ func TestUTXODiffRules(t *testing.T) {
}
for _, test := range tests {
this := addMultisetToDiff(t, test.this)
other := addMultisetToDiff(t, test.other)
expectedDiffFromResult := addMultisetToDiff(t, test.expectedDiffFromResult)
expectedWithDiffResult := addMultisetToDiff(t, test.expectedWithDiffResult)
// diffFrom from this to other
diffResult, err := this.diffFrom(other)
// diffFrom from test.this to test.other
diffResult, err := test.this.diffFrom(test.other)
// Test whether diffFrom returned an error
isDiffFromOk := err == nil
expectedIsDiffFromOk := expectedDiffFromResult != nil
expectedIsDiffFromOk := test.expectedDiffFromResult != nil
if isDiffFromOk != expectedIsDiffFromOk {
t.Errorf("unexpected diffFrom error in test \"%s\". "+
"Expected: \"%t\", got: \"%t\".", test.name, expectedIsDiffFromOk, isDiffFromOk)
}
// If not error, test the diffFrom result
if isDiffFromOk && !expectedDiffFromResult.equal(diffResult) {
if isDiffFromOk && !test.expectedDiffFromResult.equal(diffResult) {
t.Errorf("unexpected diffFrom result in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, expectedDiffFromResult, diffResult)
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedDiffFromResult, diffResult)
}
// Make sure that WithDiff after diffFrom results in the original other
// Make sure that WithDiff after diffFrom results in the original test.other
if isDiffFromOk {
otherResult, err := this.WithDiff(diffResult)
otherResult, err := test.this.WithDiff(diffResult)
if err != nil {
t.Errorf("WithDiff unexpectedly failed in test \"%s\": %s", test.name, err)
}
if !other.equal(otherResult) {
if !test.other.equal(otherResult) {
t.Errorf("unexpected WithDiff result in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, other, otherResult)
"Expected: \"%v\", got: \"%v\".", test.name, test.other, otherResult)
}
}
// WithDiff from this to other
withDiffResult, err := this.WithDiff(other)
// WithDiff from test.this to test.other
withDiffResult, err := test.this.WithDiff(test.other)
// Test whether WithDiff returned an error
isWithDiffOk := err == nil
expectedIsWithDiffOk := expectedWithDiffResult != nil
expectedIsWithDiffOk := test.expectedWithDiffResult != nil
if isWithDiffOk != expectedIsWithDiffOk {
t.Errorf("unexpected WithDiff error in test \"%s\". "+
"Expected: \"%t\", got: \"%t\".", test.name, expectedIsWithDiffOk, isWithDiffOk)
}
// If not error, test the WithDiff result
if isWithDiffOk && !withDiffResult.equal(expectedWithDiffResult) {
if isWithDiffOk && !withDiffResult.equal(test.expectedWithDiffResult) {
t.Errorf("unexpected WithDiff result in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, expectedWithDiffResult, withDiffResult)
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedWithDiffResult, withDiffResult)
}
// Repeat WithDiff check this time using WithDiffInPlace
thisClone := this.clone()
err = thisClone.WithDiffInPlace(other)
// Repeat WithDiff check test.this time using withDiffInPlace
thisClone := test.this.clone()
err = thisClone.withDiffInPlace(test.other)
// Test whether WithDiffInPlace returned an error
// Test whether withDiffInPlace returned an error
isWithDiffInPlaceOk := err == nil
expectedIsWithDiffInPlaceOk := expectedWithDiffResult != nil
expectedIsWithDiffInPlaceOk := test.expectedWithDiffResult != nil
if isWithDiffInPlaceOk != expectedIsWithDiffInPlaceOk {
t.Errorf("unexpected WithDiffInPlace error in test \"%s\". "+
t.Errorf("unexpected withDiffInPlace error in test \"%s\". "+
"Expected: \"%t\", got: \"%t\".", test.name, expectedIsWithDiffInPlaceOk, isWithDiffInPlaceOk)
}
// If not error, test the WithDiffInPlace result
if isWithDiffInPlaceOk && !thisClone.equal(expectedWithDiffResult) {
t.Errorf("unexpected WithDiffInPlace result in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, expectedWithDiffResult, thisClone)
// If not error, test the withDiffInPlace result
if isWithDiffInPlaceOk && !thisClone.equal(test.expectedWithDiffResult) {
t.Errorf("unexpected withDiffInPlace result in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedWithDiffResult, thisClone)
}
// Make sure that diffFrom after WithDiff results in the original other
// Make sure that diffFrom after WithDiff results in the original test.other
if isWithDiffOk {
otherResult, err := this.diffFrom(withDiffResult)
otherResult, err := test.this.diffFrom(withDiffResult)
if err != nil {
t.Errorf("diffFrom unexpectedly failed in test \"%s\": %s", test.name, err)
}
if !other.equal(otherResult) {
if !test.other.equal(otherResult) {
t.Errorf("unexpected diffFrom result in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, other, otherResult)
"Expected: \"%v\", got: \"%v\".", test.name, test.other, otherResult)
}
}
}
}
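Restated compactly, the two round-trip checks in the loop above verify the following invariants, assuming diffResult earlier in the loop was produced by test.this.diffFrom(test.other) and that both operations succeed (a sketch of the intent, not code from the diff):
// Round-trip invariants exercised by the loop above (equality via the equal helper):
//   test.this.WithDiff(test.this.diffFrom(test.other)) equals test.other
//   test.this.diffFrom(test.this.WithDiff(test.other)) equals test.other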
func areMultisetsEqual(a *ecc.Multiset, b *ecc.Multiset) bool {
aX, aY := a.Point()
bX, bY := b.Point()
return aX.Cmp(bX) == 0 && aY.Cmp(bY) == 0
}
func (d *UTXODiff) equal(other *UTXODiff) bool {
if d == nil || other == nil {
return d == other
}
return reflect.DeepEqual(d.toAdd, other.toAdd) &&
reflect.DeepEqual(d.toRemove, other.toRemove) &&
areMultisetsEqual(d.diffMultiset, other.diffMultiset)
reflect.DeepEqual(d.toRemove, other.toRemove)
}
func (fus *FullUTXOSet) equal(other *FullUTXOSet) bool {
return reflect.DeepEqual(fus.utxoCollection, other.utxoCollection) &&
areMultisetsEqual(fus.UTXOMultiset, other.UTXOMultiset)
return reflect.DeepEqual(fus.utxoCollection, other.utxoCollection)
}
func (dus *DiffUTXOSet) equal(other *DiffUTXOSet) bool {
return dus.base.equal(other.base) && dus.UTXODiff.equal(other.UTXODiff)
}
func addMultisetToDiff(t *testing.T, diff *UTXODiff) *UTXODiff {
if diff == nil {
return nil
}
diffWithMs := NewUTXODiff()
for outpoint, entry := range diff.toAdd {
err := diffWithMs.AddEntry(outpoint, entry)
if err != nil {
t.Fatalf("Error with diffWithMs.AddEntry: %s", err)
}
}
for outpoint, entry := range diff.toRemove {
err := diffWithMs.RemoveEntry(outpoint, entry)
if err != nil {
t.Fatalf("Error with diffWithMs.removeEntry: %s", err)
}
}
return diffWithMs
}
func addMultisetToFullUTXOSet(t *testing.T, fus *FullUTXOSet) *FullUTXOSet {
if fus == nil {
return nil
}
fusWithMs := NewFullUTXOSet()
for outpoint, entry := range fus.utxoCollection {
err := fusWithMs.addAndUpdateMultiset(outpoint, entry)
if err != nil {
t.Fatalf("Error with diffWithMs.AddEntry: %s", err)
}
}
return fusWithMs
}
func addMultisetToDiffUTXOSet(t *testing.T, diffSet *DiffUTXOSet) *DiffUTXOSet {
if diffSet == nil {
return nil
}
diffWithMs := addMultisetToDiff(t, diffSet.UTXODiff)
baseWithMs := addMultisetToFullUTXOSet(t, diffSet.base)
return NewDiffUTXOSet(baseWithMs, diffWithMs)
}
// TestFullUTXOSet makes sure that fullUTXOSet is working as expected.
func TestFullUTXOSet(t *testing.T) {
txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
@@ -703,10 +637,10 @@ func TestFullUTXOSet(t *testing.T) {
txOut1 := &wire.TxOut{ScriptPubKey: []byte{}, Value: 20}
utxoEntry0 := NewUTXOEntry(txOut0, true, 0)
utxoEntry1 := NewUTXOEntry(txOut1, false, 1)
diff := addMultisetToDiff(t, &UTXODiff{
diff := &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry0},
toRemove: utxoCollection{outpoint1: utxoEntry1},
})
}
// Test fullUTXOSet creation
emptySet := NewFullUTXOSet()
@@ -735,7 +669,7 @@ func TestFullUTXOSet(t *testing.T) {
} else if isAccepted {
t.Errorf("addTx unexpectedly succeeded")
}
emptySet = addMultisetToFullUTXOSet(t, &FullUTXOSet{utxoCollection: utxoCollection{outpoint0: utxoEntry0}})
emptySet = &FullUTXOSet{utxoCollection: utxoCollection{outpoint0: utxoEntry0}}
if isAccepted, err := emptySet.AddTx(transaction0, 0); err != nil {
t.Errorf("addTx unexpectedly failed. Error: %s", err)
} else if !isAccepted {
@@ -767,10 +701,10 @@ func TestDiffUTXOSet(t *testing.T) {
txOut1 := &wire.TxOut{ScriptPubKey: []byte{}, Value: 20}
utxoEntry0 := NewUTXOEntry(txOut0, true, 0)
utxoEntry1 := NewUTXOEntry(txOut1, false, 1)
diff := addMultisetToDiff(t, &UTXODiff{
diff := &UTXODiff{
toAdd: utxoCollection{outpoint0: utxoEntry0},
toRemove: utxoCollection{outpoint1: utxoEntry1},
})
}
// Test diffUTXOSet creation
emptySet := NewDiffUTXOSet(NewFullUTXOSet(), NewUTXODiff())
@@ -828,7 +762,7 @@ func TestDiffUTXOSet(t *testing.T) {
toRemove: utxoCollection{},
},
},
expectedString: "{Base: [ ], To Add: [ ], To Remove: [ ], Multiset-Hash:0000000000000000000000000000000000000000000000000000000000000000}",
expectedString: "{Base: [ ], To Add: [ ], To Remove: [ ]}",
expectedCollection: utxoCollection{},
},
{
@@ -847,7 +781,7 @@ func TestDiffUTXOSet(t *testing.T) {
toRemove: utxoCollection{},
},
},
expectedString: "{Base: [ ], To Add: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Remove: [ ], Multiset-Hash:da4768bd0359c3426268d6707c1fc17a68c45ef1ea734331b07568418234487f}",
expectedString: "{Base: [ ], To Add: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Remove: [ ]}",
expectedCollection: utxoCollection{outpoint0: utxoEntry0},
},
{
@@ -860,7 +794,7 @@ func TestDiffUTXOSet(t *testing.T) {
},
},
expectedMeldSet: nil,
expectedString: "{Base: [ ], To Add: [ ], To Remove: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], Multiset-Hash:046242cb1bb1e6d3fd91d0f181e1b2d4a597ac57fa2584fc3c2eb0e0f46c9369}",
expectedString: "{Base: [ ], To Add: [ ], To Remove: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ]}",
expectedCollection: utxoCollection{},
expectedMeldToBaseError: "Couldn't remove outpoint 0000000000000000000000000000000000000000000000000000000000000000:0 because it doesn't exist in the DiffUTXOSet base",
},
@@ -885,7 +819,7 @@ func TestDiffUTXOSet(t *testing.T) {
toRemove: utxoCollection{},
},
},
expectedString: "{Base: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Add: [ (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20, blueScore: 1 ], To Remove: [ ], Multiset-Hash:556cc61fd4d7e74d7807ca2298c5320375a6a20310a18920e54667220924baff}",
expectedString: "{Base: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Add: [ (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20, blueScore: 1 ], To Remove: [ ]}",
expectedCollection: utxoCollection{
outpoint0: utxoEntry0,
outpoint1: utxoEntry1,
@@ -909,24 +843,21 @@ func TestDiffUTXOSet(t *testing.T) {
toRemove: utxoCollection{},
},
},
expectedString: "{Base: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Add: [ ], To Remove: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], Multiset-Hash:0000000000000000000000000000000000000000000000000000000000000000}",
expectedString: "{Base: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ], To Add: [ ], To Remove: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, blueScore: 0 ]}",
expectedCollection: utxoCollection{},
},
}
for _, test := range tests {
diffSet := addMultisetToDiffUTXOSet(t, test.diffSet)
expectedMeldSet := addMultisetToDiffUTXOSet(t, test.expectedMeldSet)
// Test string representation
setString := diffSet.String()
setString := test.diffSet.String()
if setString != test.expectedString {
t.Errorf("unexpected string in test \"%s\". "+
"Expected: \"%s\", got: \"%s\".", test.name, test.expectedString, setString)
}
// Test meldToBase
meldSet := diffSet.clone().(*DiffUTXOSet)
meldSet := test.diffSet.clone().(*DiffUTXOSet)
err := meldSet.meldToBase()
errString := ""
if err != nil {
@@ -938,27 +869,27 @@ func TestDiffUTXOSet(t *testing.T) {
if err != nil {
continue
}
if !meldSet.equal(expectedMeldSet) {
if !meldSet.equal(test.expectedMeldSet) {
t.Errorf("unexpected melded set in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, expectedMeldSet, meldSet)
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedMeldSet, meldSet)
}
// Test collection
setCollection, err := diffSet.collection()
setCollection, err := test.diffSet.collection()
if err != nil {
t.Errorf("Error getting diffSet collection: %s", err)
t.Errorf("Error getting test.diffSet collection: %s", err)
} else if !reflect.DeepEqual(setCollection, test.expectedCollection) {
t.Errorf("unexpected set collection in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedCollection, setCollection)
}
// Test cloning
clonedSet := diffSet.clone().(*DiffUTXOSet)
if !reflect.DeepEqual(clonedSet, diffSet) {
clonedSet := test.diffSet.clone().(*DiffUTXOSet)
if !reflect.DeepEqual(clonedSet, test.diffSet) {
t.Errorf("unexpected set clone in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, diffSet, clonedSet)
"Expected: \"%v\", got: \"%v\".", test.name, test.diffSet, clonedSet)
}
if clonedSet == diffSet {
if clonedSet == test.diffSet {
t.Errorf("cloned set is reference-equal to the original")
}
}
@@ -1159,10 +1090,7 @@ func TestDiffUTXOSet_addTx(t *testing.T) {
testLoop:
for _, test := range tests {
startSet := addMultisetToDiffUTXOSet(t, test.startSet)
expectedSet := addMultisetToDiffUTXOSet(t, test.expectedSet)
diffSet := startSet.clone()
diffSet := test.startSet.clone()
// Apply all transactions to diffSet, in order, with the initial block height startHeight
for i, transaction := range test.toAdd {
@@ -1174,18 +1102,18 @@ testLoop:
}
}
// Make sure that the result diffSet equals to the expectedSet
if !diffSet.(*DiffUTXOSet).equal(expectedSet) {
// Make sure that the result diffSet equals to test.expectedSet
if !diffSet.(*DiffUTXOSet).equal(test.expectedSet) {
t.Errorf("unexpected diffSet in test \"%s\". "+
"Expected: \"%v\", got: \"%v\".", test.name, expectedSet, diffSet)
"Expected: \"%v\", got: \"%v\".", test.name, test.expectedSet, diffSet)
}
}
}
func TestDiffFromTx(t *testing.T) {
fus := addMultisetToFullUTXOSet(t, &FullUTXOSet{
fus := &FullUTXOSet{
utxoCollection: utxoCollection{},
})
}
txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
txIn0 := &wire.TxIn{SignatureScript: []byte{}, PreviousOutpoint: wire.Outpoint{TxID: *txID0, Index: math.MaxUint32}, Sequence: 0}
@@ -1241,10 +1169,10 @@ func TestDiffFromTx(t *testing.T) {
}
//Test that we get an error if the outpoint is inside diffUTXOSet's toRemove
diff2 := addMultisetToDiff(t, &UTXODiff{
diff2 := &UTXODiff{
toAdd: utxoCollection{},
toRemove: utxoCollection{},
})
}
dus := NewDiffUTXOSet(fus, diff2)
if isAccepted, err := dus.AddTx(tx, 2); err != nil {
t.Fatalf("AddTx unexpectedly failed. Error: %s", err)
@@ -1321,7 +1249,6 @@ func TestUTXOSetAddEntry(t *testing.T) {
}
for _, test := range tests {
expectedUTXODiff := addMultisetToDiff(t, test.expectedUTXODiff)
err := utxoDiff.AddEntry(*test.outpointToAdd, test.utxoEntryToAdd)
errString := ""
if err != nil {
@@ -1330,9 +1257,9 @@ func TestUTXOSetAddEntry(t *testing.T) {
if errString != test.expectedError {
t.Fatalf("utxoDiff.AddEntry: unexpected err in test \"%s\". Expected: %s but got: %s", test.name, test.expectedError, err)
}
if err == nil && !utxoDiff.equal(expectedUTXODiff) {
if err == nil && !utxoDiff.equal(test.expectedUTXODiff) {
t.Fatalf("utxoDiff.AddEntry: unexpected utxoDiff in test \"%s\". "+
"Expected: %v, got: %v", test.name, expectedUTXODiff, utxoDiff)
"Expected: %v, got: %v", test.name, test.expectedUTXODiff, utxoDiff)
}
}
}

View File

@@ -435,7 +435,7 @@ func (dag *BlockDAG) checkBlockHeaderSanity(header *wire.BlockHeader, flags Beha
// the duration of time that should be waited before the block becomes valid.
// This check needs to be last as it does not return an error but rather marks the
// header as delayed (and valid).
maxTimestamp := dag.AdjustedTime().Add(time.Second *
maxTimestamp := dag.Now().Add(time.Second *
time.Duration(int64(dag.TimestampDeviationTolerance)*dag.targetTimePerBlock))
if header.Timestamp.After(maxTimestamp) {
return header.Timestamp.Sub(maxTimestamp), nil
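For illustration, here is a small runnable sketch of the delayed-block rule this hunk encodes. The parameter values are hypothetical (kaspad's actual network parameters are not shown in this excerpt); the point is that a header stamped past the allowed window is marked as delayed rather than rejected, and the returned delay is the excess over maxTimestamp.
package main
import (
    "fmt"
    "time"
)
func main() {
    // Hypothetical values for illustration only.
    const deviationTolerance = 132 // blocks
    const targetTimePerBlock = 1   // seconds per block
    now := time.Now()
    maxTimestamp := now.Add(time.Second *
        time.Duration(int64(deviationTolerance)*targetTimePerBlock))
    // A header stamped 10 seconds past the allowed window is delayed,
    // not rejected: the delay is the excess over maxTimestamp.
    headerTimestamp := maxTimestamp.Add(10 * time.Second)
    if headerTimestamp.After(maxTimestamp) {
        fmt.Println("delay:", headerTimestamp.Sub(maxTimestamp)) // prints: delay: 10s
    }
}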

View File

@@ -69,7 +69,7 @@ func TestSequenceLocksActive(t *testing.T) {
// ensure it fails.
func TestCheckConnectBlockTemplate(t *testing.T) {
// Create a new database and DAG instance to run tests against.
dag, teardownFunc, err := DAGSetup("checkconnectblocktemplate", Config{
dag, teardownFunc, err := DAGSetup("checkconnectblocktemplate", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
@@ -161,7 +161,7 @@ func TestCheckConnectBlockTemplate(t *testing.T) {
// as expected.
func TestCheckBlockSanity(t *testing.T) {
// Create a new database and dag instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestCheckBlockSanity", Config{
dag, teardownFunc, err := DAGSetup("TestCheckBlockSanity", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {
@@ -169,6 +169,7 @@ func TestCheckBlockSanity(t *testing.T) {
return
}
defer teardownFunc()
dag.timeSource = newFakeTimeSource(time.Now())
block := util.NewBlock(&Block100000)
if len(block.Transactions()) < 3 {
@@ -191,7 +192,8 @@ func TestCheckBlockSanity(t *testing.T) {
if !errors.As(err, &ruleErr) {
t.Errorf("CheckBlockSanity: wrong error returned, expect RuleError, got %T", err)
} else if ruleErr.ErrorCode != ErrTransactionsNotSorted {
t.Errorf("CheckBlockSanity: wrong error returned, expect ErrTransactionsNotSorted, got %v, err %s", ruleErr.ErrorCode, err)
t.Errorf("CheckBlockSanity: wrong error returned, expect ErrTransactionsNotSorted, got"+
" %v, err %s", ruleErr.ErrorCode, err)
}
if delay != 0 {
t.Errorf("CheckBlockSanity: unexpected return %s delay", delay)
@@ -492,8 +494,8 @@ func TestCheckBlockSanity(t *testing.T) {
blockInTheFuture := Block100000
expectedDelay := 10 * time.Second
now := time.Unix(time.Now().Unix(), 0)
blockInTheFuture.Header.Timestamp = now.Add(time.Duration(dag.TimestampDeviationTolerance)*time.Second + expectedDelay)
deviationTolerance := time.Duration(dag.TimestampDeviationTolerance*uint64(dag.targetTimePerBlock)) * time.Second
blockInTheFuture.Header.Timestamp = dag.Now().Add(deviationTolerance + expectedDelay)
delay, err = dag.checkBlockSanity(util.NewBlock(&blockInTheFuture), BFNoPoWCheck)
if err != nil {
t.Errorf("CheckBlockSanity: %v", err)
@@ -559,7 +561,7 @@ func TestPastMedianTime(t *testing.T) {
func TestValidateParents(t *testing.T) {
// Create a new database and dag instance to run tests against.
dag, teardownFunc, err := DAGSetup("TestCheckBlockSanity", Config{
dag, teardownFunc, err := DAGSetup("TestCheckBlockSanity", true, Config{
DAGParams: &dagconfig.SimnetParams,
})
if err != nil {

View File

@@ -35,7 +35,7 @@ func TestVirtualBlock(t *testing.T) {
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimnetParams
params.K = 1
dag, teardownFunc, err := DAGSetup("TestVirtualBlock", Config{
dag, teardownFunc, err := DAGSetup("TestVirtualBlock", true, Config{
DAGParams: &params,
})
if err != nil {
@@ -134,7 +134,7 @@ func TestSelectedPath(t *testing.T) {
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimnetParams
params.K = 1
dag, teardownFunc, err := DAGSetup("TestSelectedPath", Config{
dag, teardownFunc, err := DAGSetup("TestSelectedPath", true, Config{
DAGParams: &params,
})
if err != nil {
@@ -222,7 +222,7 @@ func TestChainUpdates(t *testing.T) {
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimnetParams
params.K = 1
dag, teardownFunc, err := DAGSetup("TestChainUpdates", Config{
dag, teardownFunc, err := DAGSetup("TestChainUpdates", true, Config{
DAGParams: &params,
})
if err != nil {

View File

@@ -5,12 +5,9 @@
package main
import (
"github.com/pkg/errors"
"os"
"path/filepath"
"runtime"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/limits"
"github.com/kaspanet/kaspad/logs"
"github.com/kaspanet/kaspad/util/panics"
@@ -27,39 +24,6 @@ var (
spawn func(func())
)
// loadBlockDB opens the block database and returns a handle to it.
func loadBlockDB() (database.DB, error) {
// The database name is based on the database type.
dbName := blockDBNamePrefix + "_" + cfg.DBType
dbPath := filepath.Join(cfg.DataDir, dbName)
log.Infof("Loading block database from '%s'", dbPath)
db, err := database.Open(cfg.DBType, dbPath, ActiveConfig().NetParams().Net)
if err != nil {
// Return the error if it's not because the database doesn't
// exist.
var dbErr database.Error
if ok := errors.As(err, &dbErr); !ok || dbErr.ErrorCode !=
database.ErrDbDoesNotExist {
return nil, err
}
// Create the db if it does not exist.
err = os.MkdirAll(cfg.DataDir, 0700)
if err != nil {
return nil, err
}
db, err = database.Create(cfg.DBType, dbPath, ActiveConfig().NetParams().Net)
if err != nil {
return nil, err
}
}
log.Info("Block database loaded")
return db, nil
}
// realMain is the real main function for the utility. It is necessary to work
// around the fact that deferred functions do not run when os.Exit() is called.
func realMain() error {
@@ -76,14 +40,6 @@ func realMain() error {
log = backendLogger.Logger("MAIN")
spawn = panics.GoroutineWrapperFunc(log)
// Load the block database.
db, err := loadBlockDB()
if err != nil {
log.Errorf("Failed to load database: %s", err)
return err
}
defer db.Close()
fi, err := os.Open(cfg.InFile)
if err != nil {
log.Errorf("Failed to open file %s: %s", cfg.InFile, err)
@@ -94,7 +50,7 @@ func realMain() error {
// Create a block importer for the database and input file and start it.
// The done channel returned from start will contain an error if
// anything went wrong.
importer, err := newBlockImporter(db, fi)
importer, err := newBlockImporter(fi)
if err != nil {
log.Errorf("Failed create block importer: %s", err)
return err

View File

@@ -6,20 +6,15 @@ package main
import (
"fmt"
flags "github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
"os"
"path/filepath"
"strings"
flags "github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/database"
_ "github.com/kaspanet/kaspad/database/ffldb"
"github.com/kaspanet/kaspad/util"
)
const (
defaultDBType = "ffldb"
defaultDataFile = "bootstrap.dat"
defaultProgress = 10
)
@@ -27,7 +22,6 @@ const (
var (
kaspadHomeDir = util.AppDataDir("kaspad", false)
defaultDataDir = filepath.Join(kaspadHomeDir, "data")
knownDbTypes = database.SupportedDrivers()
activeConfig *ConfigFlags
)
@@ -41,7 +35,6 @@ func ActiveConfig() *ConfigFlags {
// See loadConfig for details on the configuration load process.
type ConfigFlags struct {
DataDir string `short:"b" long:"datadir" description:"Location of the kaspad data directory"`
DBType string `long:"dbtype" description:"Database backend to use for the Block DAG"`
InFile string `short:"i" long:"infile" description:"File containing the block(s)"`
Progress int `short:"p" long:"progress" description:"Show a progress message each time this number of seconds have passed -- Use 0 to disable progress announcements"`
AcceptanceIndex bool `long:"acceptanceindex" description:"Maintain a full hash-based acceptance index which makes the getChainFromBlock RPC available"`
@@ -58,23 +51,11 @@ func fileExists(name string) bool {
return true
}
// validDbType returns whether or not dbType is a supported database type.
func validDbType(dbType string) bool {
for _, knownType := range knownDbTypes {
if dbType == knownType {
return true
}
}
return false
}
// loadConfig initializes and parses the config using command line options.
func loadConfig() (*ConfigFlags, []string, error) {
// Default config.
activeConfig = &ConfigFlags{
DataDir: defaultDataDir,
DBType: defaultDBType,
InFile: defaultDataFile,
Progress: defaultProgress,
}
@@ -95,16 +76,6 @@ func loadConfig() (*ConfigFlags, []string, error) {
return nil, nil, err
}
// Validate database type.
if !validDbType(activeConfig.DBType) {
str := "%s: The specified database type [%s] is invalid -- " +
"supported types %s"
err := errors.Errorf(str, "loadConfig", activeConfig.DBType, strings.Join(knownDbTypes, ", "))
fmt.Fprintln(os.Stderr, err)
parser.WriteHelp(os.Stderr)
return nil, nil, err
}
// Append the network type to the data directory so it is "namespaced"
// per network. In addition to the block database, there are other
// pieces of data that are saved to disk such as address manager state.

View File

@@ -13,7 +13,6 @@ import (
"time"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
)
@@ -28,7 +27,6 @@ type importResults struct {
// blockImporter houses information about an ongoing import from a block data
// file to the block database.
type blockImporter struct {
db database.DB
dag *blockdag.BlockDAG
r io.ReadSeeker
processQueue chan []byte
@@ -287,7 +285,7 @@ func (bi *blockImporter) Import() chan *importResults {
// newBlockImporter returns a new importer for the provided file reader seeker
// and database.
func newBlockImporter(db database.DB, r io.ReadSeeker) (*blockImporter, error) {
func newBlockImporter(r io.ReadSeeker) (*blockImporter, error) {
// Create the acceptance index if needed.
var indexes []indexers.Indexer
if cfg.AcceptanceIndex {
@@ -302,9 +300,8 @@ func newBlockImporter(db database.DB, r io.ReadSeeker) (*blockImporter, error) {
}
dag, err := blockdag.New(&blockdag.Config{
DB: db,
DAGParams: ActiveConfig().NetParams(),
TimeSource: blockdag.NewMedianTime(),
TimeSource: blockdag.NewTimeSource(),
IndexManager: indexManager,
})
if err != nil {
@@ -312,7 +309,6 @@ func newBlockImporter(db database.DB, r io.ReadSeeker) (*blockImporter, error) {
}
return &blockImporter{
db: db,
r: r,
processQueue: make(chan []byte, 2),
doneChan: make(chan bool),

View File

@@ -2,11 +2,13 @@ package main
import (
"fmt"
"github.com/kaspanet/kaspad/config"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
@@ -37,6 +39,7 @@ type configFlags struct {
Verbose bool `long:"verbose" short:"v" description:"Enable logging of RPC requests"`
NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine. If omitted, will mine until the process is interrupted."`
BlockDelay uint64 `long:"block-delay" description:"Delay for block submission (in milliseconds). This is used only for testing purposes."`
Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
config.NetworkFlags
}
@@ -78,6 +81,13 @@ func parseConfig() (*configFlags, error) {
return nil, errors.New("--rpccert should be omitted if --notls is used")
}
if cfg.Profile != "" {
profilePort, err := strconv.Atoi(cfg.Profile)
if err != nil || profilePort < 1024 || profilePort > 65535 {
return nil, errors.New("The profile port must be between 1024 and 65535")
}
}
initLog(defaultLogFile, defaultErrLogFile)
return cfg, nil

View File

@@ -1,5 +1,5 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.13-alpine AS build
FROM golang:1.14-alpine AS build
RUN mkdir -p /go/src/github.com/kaspanet/kaspad
@@ -20,7 +20,7 @@ WORKDIR /go/src/github.com/kaspanet/kaspad/cmd/kaspaminer
RUN GOFMT_RESULT=`go fmt ./...`; echo $GOFMT_RESULT; test -z "$GOFMT_RESULT"
RUN go vet ./...
RUN golint -set_exit_status ./...
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o kaspaminer .
RUN GOOS=linux go build -a -installsuffix cgo -o kaspaminer .
# --- multistage docker build: stage #2: runtime image
FROM alpine

View File

@@ -2,13 +2,17 @@ package main
import (
"fmt"
"github.com/kaspanet/kaspad/version"
"os"
"github.com/kaspanet/kaspad/version"
"github.com/pkg/errors"
_ "net/http/pprof"
"github.com/kaspanet/kaspad/signal"
"github.com/kaspanet/kaspad/util/panics"
"github.com/kaspanet/kaspad/util/profiling"
)
func main() {
@@ -28,6 +32,11 @@ func main() {
enableRPCLogging()
}
// Enable http profiling server if requested.
if cfg.Profile != "" {
profiling.Start(cfg.Profile, log)
}
client, err := connectToServer(cfg)
if err != nil {
panic(errors.Wrap(err, "Error connecting to the RPC server"))

View File

@@ -4,7 +4,7 @@ import (
"bytes"
"encoding/hex"
"fmt"
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
@@ -28,7 +28,11 @@ func main() {
printErrorAndExit(err, "Failed to decode transaction")
}
scriptPubKey, err := createScriptPubKey(privateKey.PubKey())
pubkey, err := privateKey.SchnorrPublicKey()
if err != nil {
printErrorAndExit(err, "Failed to generate a public key")
}
scriptPubKey, err := createScriptPubKey(pubkey)
if err != nil {
printErrorAndExit(err, "Failed to create scriptPubKey")
}
@@ -46,10 +50,12 @@ func main() {
fmt.Printf("Signed Transaction (hex): %s\n\n", serializedTransaction)
}
func parsePrivateKey(privateKeyHex string) (*ecc.PrivateKey, error) {
func parsePrivateKey(privateKeyHex string) (*secp256k1.PrivateKey, error) {
privateKeyBytes, err := hex.DecodeString(privateKeyHex)
privateKey, _ := ecc.PrivKeyFromBytes(ecc.S256(), privateKeyBytes)
return privateKey, err
if err != nil {
return nil, errors.Errorf("'%s' isn't a valid hex. err: '%s' ", privateKeyHex, err)
}
return secp256k1.DeserializePrivateKeyFromSlice(privateKeyBytes)
}
func parseTransaction(transactionHex string) (*wire.MsgTx, error) {
@@ -62,8 +68,12 @@ func parseTransaction(transactionHex string) (*wire.MsgTx, error) {
return &transaction, err
}
func createScriptPubKey(publicKey *ecc.PublicKey) ([]byte, error) {
p2pkhAddress, err := util.NewAddressPubKeyHashFromPublicKey(publicKey.SerializeCompressed(), ActiveConfig().NetParams().Prefix)
func createScriptPubKey(publicKey *secp256k1.SchnorrPublicKey) ([]byte, error) {
serializedKey, err := publicKey.SerializeCompressed()
if err != nil {
return nil, err
}
p2pkhAddress, err := util.NewAddressPubKeyHashFromPublicKey(serializedKey, ActiveConfig().NetParams().Prefix)
if err != nil {
return nil, err
}
@@ -71,7 +81,7 @@ func createScriptPubKey(publicKey *ecc.PublicKey) ([]byte, error) {
return scriptPubKey, err
}
func signTransaction(transaction *wire.MsgTx, privateKey *ecc.PrivateKey, scriptPubKey []byte) error {
func signTransaction(transaction *wire.MsgTx, privateKey *secp256k1.PrivateKey, scriptPubKey []byte) error {
for i, transactionInput := range transaction.TxIn {
signatureScript, err := txscript.SignatureScript(transaction, i, scriptPubKey, txscript.SigHashAll, privateKey, true)
if err != nil {
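For reference, the key-handling flow introduced in this hunk can be consolidated into a single helper. This is a minimal sketch, not code from the diff; it assumes the github.com/kaspanet/go-secp256k1 API exactly as it appears above (DeserializePrivateKeyFromSlice, SchnorrPublicKey, SerializeCompressed).
package example
import (
    "encoding/hex"
    "github.com/kaspanet/go-secp256k1"
)
// schnorrPubKeyBytesFromHex decodes a hex-encoded private key, derives its
// Schnorr public key, and returns the compressed serialization.
func schnorrPubKeyBytesFromHex(privateKeyHex string) ([]byte, error) {
    privateKeyBytes, err := hex.DecodeString(privateKeyHex)
    if err != nil {
        return nil, err
    }
    privateKey, err := secp256k1.DeserializePrivateKeyFromSlice(privateKeyBytes)
    if err != nil {
        return nil, err
    }
    publicKey, err := privateKey.SchnorrPublicKey()
    if err != nil {
        return nil, err
    }
    return publicKey.SerializeCompressed()
}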

View File

@@ -22,7 +22,6 @@ import (
"github.com/btcsuite/go-socks/socks"
"github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/logger"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/network"
@@ -46,7 +45,6 @@ const (
defaultMaxRPCClients = 10
defaultMaxRPCWebsockets = 25
defaultMaxRPCConcurrentReqs = 20
defaultDbType = "ffldb"
defaultBlockMaxMass = 10000000
blockMaxMassMin = 1000
blockMaxMassMax = 10000000
@@ -65,7 +63,6 @@ var (
defaultConfigFile = filepath.Join(DefaultHomeDir, defaultConfigFilename)
defaultDataDir = filepath.Join(DefaultHomeDir, defaultDataDirname)
knownDbTypes = database.SupportedDrivers()
defaultRPCKeyFile = filepath.Join(DefaultHomeDir, "rpc.key")
defaultRPCCertFile = filepath.Join(DefaultHomeDir, "rpc.cert")
defaultLogDir = filepath.Join(DefaultHomeDir, defaultLogDirname)
@@ -168,17 +165,6 @@ func cleanAndExpandPath(path string) string {
return filepath.Clean(os.ExpandEnv(path))
}
// validDbType returns whether or not dbType is a supported database type.
func validDbType(dbType string) bool {
for _, knownType := range knownDbTypes {
if dbType == knownType {
return true
}
}
return false
}
// newConfigParser returns a new command line flags parser.
func newConfigParser(cfgFlags *Flags, so *serviceOptions, options flags.Options) *flags.Parser {
parser := flags.NewParser(cfgFlags, options)
@@ -235,7 +221,6 @@ func loadConfig() (*Config, []string, error) {
RPCMaxConcurrentReqs: defaultMaxRPCConcurrentReqs,
DataDir: defaultDataDir,
LogDir: defaultLogDir,
DbType: defaultDbType,
RPCKey: defaultRPCKeyFile,
RPCCert: defaultRPCCertFile,
BlockMaxMass: defaultBlockMaxMass,
@@ -424,16 +409,6 @@ func loadConfig() (*Config, []string, error) {
return nil, nil, err
}
// Validate database type.
if !validDbType(activeConfig.DbType) {
str := "%s: The specified database type [%s] is invalid -- " +
"supported types %s"
err := errors.Errorf(str, funcName, activeConfig.DbType, knownDbTypes)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// Validate profile port number
if activeConfig.Profile != "" {
profilePort, err := strconv.Atoi(activeConfig.Profile)

View File

@@ -13,7 +13,6 @@ import (
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/hdkeychain"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
@@ -177,13 +176,6 @@ type Params struct {
// Address encoding magics
PrivateKeyID byte // First byte of a WIF private key
// BIP32 hierarchical deterministic extended key magics
HDKeyIDPair hdkeychain.HDKeyIDPair
// BIP44 coin type used in the hierarchical deterministic path for
// address generation.
HDCoinType uint32
}
// NormalizeRPCServerAddress returns addr with the current network default
@@ -238,13 +230,6 @@ var MainnetParams = Params{
// Address encoding magics
PrivateKeyID: 0x80, // starts with 5 (uncompressed) or K (compressed)
// BIP32 hierarchical deterministic extended key magics
HDKeyIDPair: hdkeychain.HDKeyPairMainnet,
// BIP44 coin type used in the hierarchical deterministic path for
// address generation.
HDCoinType: 0,
}
// RegressionNetParams defines the network parameters for the regression test
@@ -295,13 +280,6 @@ var RegressionNetParams = Params{
// Address encoding magics
PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed)
// BIP32 hierarchical deterministic extended key magics
HDKeyIDPair: hdkeychain.HDKeyPairRegressionNet,
// BIP44 coin type used in the hierarchical deterministic path for
// address generation.
HDCoinType: 1,
}
// TestnetParams defines the network parameters for the test Kaspa network.
@@ -350,13 +328,6 @@ var TestnetParams = Params{
// Address encoding magics
PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed)
// BIP32 hierarchical deterministic extended key magics
HDKeyIDPair: hdkeychain.HDKeyPairTestnet,
// BIP44 coin type used in the hierarchical deterministic path for
// address generation.
HDCoinType: 1,
}
// SimnetParams defines the network parameters for the simulation test Kaspa
@@ -409,13 +380,6 @@ var SimnetParams = Params{
PrivateKeyID: 0x64, // starts with 4 (uncompressed) or F (compressed)
// Human-readable part for Bech32 encoded addresses
Prefix: util.Bech32PrefixKaspaSim,
// BIP32 hierarchical deterministic extended key magics
HDKeyIDPair: hdkeychain.HDKeyPairSimnet,
// BIP44 coin type used in the hierarchical deterministic path for
// address generation.
HDCoinType: 115, // ASCII for s
}
// DevnetParams defines the network parameters for the development Kaspa network.
@@ -464,13 +428,6 @@ var DevnetParams = Params{
// Address encoding magics
PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed)
// BIP32 hierarchical deterministic extended key magics
HDKeyIDPair: hdkeychain.HDKeyPairDevnet,
// BIP44 coin type used in the hierarchical deterministic path for
// address generation.
HDCoinType: 1,
}
var (

View File

@@ -1,9 +1,6 @@
package dagconfig_test
import (
"bytes"
"github.com/kaspanet/kaspad/util/hdkeychain"
"reflect"
"testing"
. "github.com/kaspanet/kaspad/dagconfig"
@@ -15,10 +12,6 @@ import (
var mockNetParams = Params{
Name: "mocknet",
Net: 1<<32 - 1,
HDKeyIDPair: hdkeychain.HDKeyIDPair{
PrivateKeyID: [4]byte{0x01, 0x02, 0x03, 0x04},
PublicKeyID: [4]byte{0x05, 0x06, 0x07, 0x08},
},
}
func TestRegister(t *testing.T) {
@@ -27,16 +20,10 @@ func TestRegister(t *testing.T) {
params *Params
err error
}
type hdTest struct {
priv []byte
want []byte
err error
}
tests := []struct {
name string
register []registerTest
hdMagics []hdTest
}{
{
name: "default networks",
@@ -62,40 +49,6 @@ func TestRegister(t *testing.T) {
err: ErrDuplicateNet,
},
},
hdMagics: []hdTest{
{
priv: MainnetParams.HDKeyIDPair.PrivateKeyID[:],
want: MainnetParams.HDKeyIDPair.PublicKeyID[:],
err: nil,
},
{
priv: TestnetParams.HDKeyIDPair.PrivateKeyID[:],
want: TestnetParams.HDKeyIDPair.PublicKeyID[:],
err: nil,
},
{
priv: RegressionNetParams.HDKeyIDPair.PrivateKeyID[:],
want: RegressionNetParams.HDKeyIDPair.PublicKeyID[:],
err: nil,
},
{
priv: SimnetParams.HDKeyIDPair.PrivateKeyID[:],
want: SimnetParams.HDKeyIDPair.PublicKeyID[:],
err: nil,
},
{
priv: mockNetParams.HDKeyIDPair.PrivateKeyID[:],
err: hdkeychain.ErrUnknownHDKeyID,
},
{
priv: []byte{0xff, 0xff, 0xff, 0xff},
err: hdkeychain.ErrUnknownHDKeyID,
},
{
priv: []byte{0xff},
err: hdkeychain.ErrUnknownHDKeyID,
},
},
},
{
name: "register mocknet",
@@ -106,13 +59,6 @@ func TestRegister(t *testing.T) {
err: nil,
},
},
hdMagics: []hdTest{
{
priv: mockNetParams.HDKeyIDPair.PrivateKeyID[:],
want: mockNetParams.HDKeyIDPair.PublicKeyID[:],
err: nil,
},
},
},
{
name: "more duplicates",
@@ -143,41 +89,6 @@ func TestRegister(t *testing.T) {
err: ErrDuplicateNet,
},
},
hdMagics: []hdTest{
{
priv: MainnetParams.HDKeyIDPair.PrivateKeyID[:],
want: MainnetParams.HDKeyIDPair.PublicKeyID[:],
err: nil,
},
{
priv: TestnetParams.HDKeyIDPair.PrivateKeyID[:],
want: TestnetParams.HDKeyIDPair.PublicKeyID[:],
err: nil,
},
{
priv: RegressionNetParams.HDKeyIDPair.PrivateKeyID[:],
want: RegressionNetParams.HDKeyIDPair.PublicKeyID[:],
err: nil,
},
{
priv: SimnetParams.HDKeyIDPair.PrivateKeyID[:],
want: SimnetParams.HDKeyIDPair.PublicKeyID[:],
err: nil,
},
{
priv: mockNetParams.HDKeyIDPair.PrivateKeyID[:],
want: mockNetParams.HDKeyIDPair.PublicKeyID[:],
err: nil,
},
{
priv: []byte{0xff, 0xff, 0xff, 0xff},
err: hdkeychain.ErrUnknownHDKeyID,
},
{
priv: []byte{0xff},
err: hdkeychain.ErrUnknownHDKeyID,
},
},
},
}
@@ -185,25 +96,10 @@ func TestRegister(t *testing.T) {
for _, regtest := range test.register {
err := Register(regtest.params)
// HDKeyIDPairs must be registered separately
hdkeychain.RegisterHDKeyIDPair(regtest.params.HDKeyIDPair)
if err != regtest.err {
t.Errorf("%s:%s: Registered network with unexpected error: got %v expected %v",
test.name, regtest.name, err, regtest.err)
}
}
for i, magTest := range test.hdMagics {
pubKey, err := hdkeychain.HDPrivateKeyToPublicKeyID(magTest.priv[:])
if !reflect.DeepEqual(err, magTest.err) {
t.Errorf("%s: HD magic %d mismatched error: got %v expected %v ",
test.name, i, err, magTest.err)
continue
}
if magTest.err == nil && !bytes.Equal(pubKey, magTest.want[:]) {
t.Errorf("%s: HD magic %d private and public mismatch: got %v expected %v ",
test.name, i, pubKey, magTest.want[:])
}
}
}
}

View File

@@ -4,29 +4,35 @@ database
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad/database)
Package database provides a block and metadata storage database.
Package database provides a database for kaspad.
Please note that this package is intended to enable kaspad to support different
database backends and is not something that a client can directly access as only
one entity can have the database open at a time (for most database backends),
and that entity will be kaspad.
Overview
--------
This package provides a database layer to store and retrieve data in a simple
and efficient manner.
When a client wants programmatic access to the data provided by kaspad, they'll
likely want to use the [rpcclient](https://github.com/kaspanet/kaspad/tree/master/rpcclient)
package which makes use of the [JSON-RPC API](https://github.com/kaspanet/kaspad/tree/master/docs/json_rpc_api.md).
The current backend is ffldb, which makes use of leveldb, flat files, and strict
checksums in key areas to ensure data integrity.
The default backend, ffldb, has a strong focus on speed, efficiency, and
robustness. It makes use of leveldb for the metadata, flat files for block
storage, and strict checksums in key areas to ensure data integrity.
Implementors of additional backends are required to implement the following interfaces:
## Feature Overview
DataAccessor
------------
This defines the common interface by which data gets accessed in a generic kaspad
database. Both the Database and the Transaction interfaces (see below) implement it.
- Key/value metadata store
- Kaspa block storage
- Efficient retrieval of block headers and regions (transactions, scripts, etc)
- Read-only and read-write transactions with both manual and managed modes
- Nested buckets
- Iteration support including cursors with seek capability
- Supports registration of backend databases
- Comprehensive test coverage
Database
--------
This defines the interface of a database that can begin transactions and close itself.
Transaction
-----------
This defines the interface of a generic kaspad database transaction.
Note: Transactions provide data consistency over the state of the database as it was
when the transaction started. There is NO guarantee that if one puts data into the
transaction then it will be available to get within the same transaction.
Cursor
------
This iterates over database entries given some bucket.
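To illustrate the consistency note above, here is a minimal sketch against the interfaces this README describes. It assumes an already-open database.Database handle, uses a made-up bucket and key, and omits Commit/Rollback handling because the Transaction interface is defined outside this excerpt.
package example
import "github.com/kaspanet/kaspad/database"
// putThenGet demonstrates that a value Put inside a transaction is not
// guaranteed to be visible to a Get within the same transaction.
func putThenGet(db database.Database) error {
    tx, err := db.Begin()
    if err != nil {
        return err
    }
    key := database.MakeBucket([]byte("example")).Key([]byte("some-key")) // made-up key
    if err := tx.Put(key, []byte("some-value")); err != nil {
        return err
    }
    // The transaction reflects the database state as of Begin, so this Get
    // may return ErrNotFound even though the Put above succeeded.
    _, err = tx.Get(key)
    return err
}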

51
database/bucket.go Normal file
View File

@@ -0,0 +1,51 @@
package database
import "bytes"
var separator = []byte("/")
// Bucket is a helper type meant to combine buckets,
// sub-buckets, and keys into a single full key-value
// database key.
type Bucket struct {
path [][]byte
}
// MakeBucket creates a new Bucket using the given path
// of buckets.
func MakeBucket(path ...[]byte) *Bucket {
return &Bucket{path: path}
}
// Bucket returns the sub-bucket of the current bucket
// defined by bucketBytes.
func (b *Bucket) Bucket(bucketBytes []byte) *Bucket {
newPath := make([][]byte, len(b.path)+1)
copy(newPath, b.path)
copy(newPath[len(b.path):], [][]byte{bucketBytes})
return MakeBucket(newPath...)
}
// Key returns the key inside of the current bucket.
func (b *Bucket) Key(key []byte) []byte {
bucketPath := b.Path()
fullKeyLength := len(bucketPath) + len(key)
fullKey := make([]byte, fullKeyLength)
copy(fullKey, bucketPath)
copy(fullKey[len(bucketPath):], key)
return fullKey
}
// Path returns the full path of the current bucket.
func (b *Bucket) Path() []byte {
bucketPath := bytes.Join(b.path, separator)
bucketPathWithFinalSeparator := make([]byte, len(bucketPath)+len(separator))
copy(bucketPathWithFinalSeparator, bucketPath)
copy(bucketPathWithFinalSeparator[len(bucketPath):], separator)
return bucketPathWithFinalSeparator
}

69
database/bucket_test.go Normal file
View File

@@ -0,0 +1,69 @@
package database
import (
"reflect"
"testing"
)
func TestBucketPath(t *testing.T) {
tests := []struct {
bucketByteSlices [][]byte
expectedPath []byte
}{
{
bucketByteSlices: [][]byte{[]byte("hello")},
expectedPath: []byte("hello/"),
},
{
bucketByteSlices: [][]byte{[]byte("hello"), []byte("world")},
expectedPath: []byte("hello/world/"),
},
}
for _, test := range tests {
// Build a result using the MakeBucket function alone
resultKey := MakeBucket(test.bucketByteSlices...).Path()
if !reflect.DeepEqual(resultKey, test.expectedPath) {
t.Errorf("TestBucketPath: got wrong path using MakeBucket. "+
"Want: %s, got: %s", string(test.expectedPath), string(resultKey))
}
// Build a result using sub-Bucket calls
bucket := MakeBucket()
for _, bucketBytes := range test.bucketByteSlices {
bucket = bucket.Bucket(bucketBytes)
}
resultKey = bucket.Path()
if !reflect.DeepEqual(resultKey, test.expectedPath) {
t.Errorf("TestBucketPath: got wrong path using sub-Bucket "+
"calls. Want: %s, got: %s", string(test.expectedPath), string(resultKey))
}
}
}
func TestBucketKey(t *testing.T) {
tests := []struct {
bucketByteSlices [][]byte
key []byte
expectedKey []byte
}{
{
bucketByteSlices: [][]byte{[]byte("hello")},
key: []byte("test"),
expectedKey: []byte("hello/test"),
},
{
bucketByteSlices: [][]byte{[]byte("hello"), []byte("world")},
key: []byte("test"),
expectedKey: []byte("hello/world/test"),
},
}
for _, test := range tests {
resultKey := MakeBucket(test.bucketByteSlices...).Key(test.key)
if !reflect.DeepEqual(resultKey, test.expectedKey) {
t.Errorf("TestBucketKey: got wrong key. Want: %s, got: %s",
string(test.expectedKey), string(resultKey))
}
}
}

View File

@@ -1,62 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"encoding/hex"
"github.com/pkg/errors"
"time"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/daghash"
)
// fetchBlockCmd defines the configuration options for the fetchblock command.
type fetchBlockCmd struct{}
var (
// fetchBlockCfg defines the configuration options for the command.
fetchBlockCfg = fetchBlockCmd{}
)
// Execute is the main entry point for the command. It's invoked by the parser.
func (cmd *fetchBlockCmd) Execute(args []string) error {
// Setup the global config options and ensure they are valid.
if err := setupGlobalConfig(); err != nil {
return err
}
if len(args) < 1 {
return errors.New("required block hash parameter not specified")
}
blockHash, err := daghash.NewHashFromStr(args[0])
if err != nil {
return err
}
// Load the block database.
db, err := loadBlockDB()
if err != nil {
return err
}
defer db.Close()
return db.View(func(dbTx database.Tx) error {
log.Infof("Fetching block %s", blockHash)
startTime := time.Now()
blockBytes, err := dbTx.FetchBlock(blockHash)
if err != nil {
return err
}
log.Infof("Loaded block in %s", time.Since(startTime))
log.Infof("Block Hex: %s", hex.EncodeToString(blockBytes))
return nil
})
}
// Usage overrides the usage display for the command.
func (cmd *fetchBlockCmd) Usage() string {
return "<block-hash>"
}

View File

@@ -1,90 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"encoding/hex"
"github.com/pkg/errors"
"strconv"
"time"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/daghash"
)
// blockRegionCmd defines the configuration options for the fetchblockregion
// command.
type blockRegionCmd struct{}
var (
// blockRegionCfg defines the configuration options for the command.
blockRegionCfg = blockRegionCmd{}
)
// Execute is the main entry point for the command. It's invoked by the parser.
func (cmd *blockRegionCmd) Execute(args []string) error {
// Setup the global config options and ensure they are valid.
if err := setupGlobalConfig(); err != nil {
return err
}
// Ensure expected arguments.
if len(args) < 1 {
return errors.New("required block hash parameter not specified")
}
if len(args) < 2 {
return errors.New("required start offset parameter not " +
"specified")
}
if len(args) < 3 {
return errors.New("required region length parameter not " +
"specified")
}
// Parse arguments.
blockHash, err := daghash.NewHashFromStr(args[0])
if err != nil {
return err
}
startOffset, err := strconv.ParseUint(args[1], 10, 32)
if err != nil {
return err
}
regionLen, err := strconv.ParseUint(args[2], 10, 32)
if err != nil {
return err
}
// Load the block database.
db, err := loadBlockDB()
if err != nil {
return err
}
defer db.Close()
return db.View(func(dbTx database.Tx) error {
log.Infof("Fetching block region %s<%d:%d>", blockHash,
startOffset, startOffset+regionLen-1)
region := database.BlockRegion{
Hash: blockHash,
Offset: uint32(startOffset),
Len: uint32(regionLen),
}
startTime := time.Now()
regionBytes, err := dbTx.FetchBlockRegion(&region)
if err != nil {
return err
}
log.Infof("Loaded block region in %s", time.Since(startTime))
log.Infof("Double Hash: %s", daghash.DoubleHashH(regionBytes))
log.Infof("Region Hex: %s", hex.EncodeToString(regionBytes))
return nil
})
}
// Usage overrides the usage display for the command.
func (cmd *blockRegionCmd) Usage() string {
return "<block-hash> <start-offset> <length-of-region>"
}

View File

@@ -1,111 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"github.com/pkg/errors"
"os"
"path/filepath"
"strings"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
_ "github.com/kaspanet/kaspad/database/ffldb"
"github.com/kaspanet/kaspad/util"
)
var (
kaspadHomeDir = util.AppDataDir("kaspad", false)
knownDbTypes = database.SupportedDrivers()
activeNetParams = &dagconfig.MainnetParams
// Default global config.
cfg = &config{
DataDir: filepath.Join(kaspadHomeDir, "data"),
DbType: "ffldb",
}
)
// config defines the global configuration options.
type config struct {
DataDir string `short:"b" long:"datadir" description:"Location of the kaspad data directory"`
DbType string `long:"dbtype" description:"Database backend to use for the Block DAG"`
Testnet bool `long:"testnet" description:"Use the test network"`
RegressionTest bool `long:"regtest" description:"Use the regression test network"`
Simnet bool `long:"simnet" description:"Use the simulation test network"`
Devnet bool `long:"devnet" description:"Use the development test network"`
}
// fileExists reports whether the named file or directory exists.
func fileExists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}
// validDbType returns whether or not dbType is a supported database type.
func validDbType(dbType string) bool {
for _, knownType := range knownDbTypes {
if dbType == knownType {
return true
}
}
return false
}
// setupGlobalConfig examine the global configuration options for any conditions
// which are invalid as well as performs any addition setup necessary after the
// initial parse.
func setupGlobalConfig() error {
// Multiple networks can't be selected simultaneously.
// Count number of network flags passed; assign active network params
// while we're at it
numNets := 0
if cfg.Testnet {
numNets++
activeNetParams = &dagconfig.TestnetParams
}
if cfg.RegressionTest {
numNets++
activeNetParams = &dagconfig.RegressionNetParams
}
if cfg.Simnet {
numNets++
activeNetParams = &dagconfig.SimnetParams
}
if cfg.Devnet {
numNets++
activeNetParams = &dagconfig.DevnetParams
}
if numNets > 1 {
return errors.New("The testnet, regtest, simnet and devnet params " +
"can't be used together -- choose one of the four")
}
if numNets == 0 {
return errors.New("Mainnet has not launched yet, use --testnet to run in testnet mode")
}
// Validate database type.
if !validDbType(cfg.DbType) {
str := "The specified database type [%s] is invalid -- " +
"supported types: %s"
return errors.Errorf(str, cfg.DbType, strings.Join(knownDbTypes, ", "))
}
// Append the network type to the data directory so it is "namespaced"
// per network. In addition to the block database, there are other
// pieces of data that are saved to disk such as address manager state.
// All data is specific to a network, so namespacing the data directory
// means each individual piece of serialized data does not have to
// worry about changing names per network and such.
cfg.DataDir = filepath.Join(cfg.DataDir, activeNetParams.Name)
return nil
}

View File

@@ -1,113 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"github.com/kaspanet/kaspad/util/panics"
"github.com/pkg/errors"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/logger"
"github.com/kaspanet/kaspad/logs"
)
const (
// blockDbNamePrefix is the prefix for the kaspad block database.
blockDbNamePrefix = "blocks"
)
var (
log *logs.Logger
spawn func(func())
shutdownChannel = make(chan error)
)
// loadBlockDB opens the block database and returns a handle to it.
func loadBlockDB() (database.DB, error) {
// The database name is based on the database type.
dbName := blockDbNamePrefix + "_" + cfg.DbType
dbPath := filepath.Join(cfg.DataDir, dbName)
log.Infof("Loading block database from '%s'", dbPath)
db, err := database.Open(cfg.DbType, dbPath, activeNetParams.Net)
if err != nil {
// Return the error if it's not because the database doesn't
// exist.
var dbErr database.Error
if ok := errors.As(err, &dbErr); !ok || dbErr.ErrorCode !=
database.ErrDbDoesNotExist {
return nil, err
}
// Create the db if it does not exist.
err = os.MkdirAll(cfg.DataDir, 0700)
if err != nil {
return nil, err
}
db, err = database.Create(cfg.DbType, dbPath, activeNetParams.Net)
if err != nil {
return nil, err
}
}
log.Info("Block database loaded")
return db, nil
}
// realMain is the real main function for the utility. It is necessary to work
// around the fact that deferred functions do not run when os.Exit() is called.
func realMain() error {
// Setup logging.
backendLogger := logs.NewBackend()
defer os.Stdout.Sync()
log = backendLogger.Logger("MAIN")
spawn = panics.GoroutineWrapperFunc(log)
dbLog, _ := logger.Get(logger.SubsystemTags.KSDB)
dbLog.SetLevel(logs.LevelDebug)
// Setup the parser options and commands.
appName := filepath.Base(os.Args[0])
appName = strings.TrimSuffix(appName, filepath.Ext(appName))
parserFlags := flags.Options(flags.HelpFlag | flags.PassDoubleDash)
parser := flags.NewNamedParser(appName, parserFlags)
parser.AddGroup("Global Options", "", cfg)
parser.AddCommand("fetchblock",
"Fetch the specific block hash from the database", "",
&fetchBlockCfg)
parser.AddCommand("fetchblockregion",
"Fetch the specified block region from the database", "",
&blockRegionCfg)
// Parse command line and invoke the Execute function for the specified
// command.
if _, err := parser.Parse(); err != nil {
var flagsErr *flags.Error
if ok := errors.As(err, &flagsErr); ok && flagsErr.Type == flags.ErrHelp {
parser.WriteHelp(os.Stderr)
} else {
log.Error(err)
}
return err
}
return nil
}
func main() {
// Use all processor cores.
runtime.GOMAXPROCS(runtime.NumCPU())
// Work around defer not working after os.Exit()
if err := realMain(); err != nil {
os.Exit(1)
}
}

View File

@@ -1,82 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"os"
"os/signal"
)
// interruptChannel is used to receive SIGINT (Ctrl+C) signals.
var interruptChannel chan os.Signal
// addHandlerChannel is used to add an interrupt handler to the list of handlers
// to be invoked on SIGINT (Ctrl+C) signals.
var addHandlerChannel = make(chan func())
// mainInterruptHandler listens for SIGINT (Ctrl+C) signals on the
// interruptChannel and invokes the registered interruptCallbacks accordingly.
// It also listens for callback registration. It must be run as a goroutine.
func mainInterruptHandler() {
// interruptCallbacks is a list of callbacks to invoke when a
// SIGINT (Ctrl+C) is received.
var interruptCallbacks []func()
// isShutdown is a flag which is used to indicate whether or not
// the shutdown signal has already been received and hence any future
// attempts to add a new interrupt handler should invoke them
// immediately.
var isShutdown bool
for {
select {
case <-interruptChannel:
// Ignore more than one shutdown signal.
if isShutdown {
log.Infof("Received SIGINT (Ctrl+C). " +
"Already shutting down...")
continue
}
isShutdown = true
log.Infof("Received SIGINT (Ctrl+C). Shutting down...")
// Run handlers in LIFO order.
for i := range interruptCallbacks {
idx := len(interruptCallbacks) - 1 - i
callback := interruptCallbacks[idx]
callback()
}
// Signal the main goroutine to shutdown.
spawn(func() {
shutdownChannel <- nil
})
case handler := <-addHandlerChannel:
// The shutdown signal has already been received, so
// just invoke and new handlers immediately.
if isShutdown {
handler()
}
interruptCallbacks = append(interruptCallbacks, handler)
}
}
}
// addInterruptHandler adds a handler to call when a SIGINT (Ctrl+C) is
// received.
func addInterruptHandler(handler func()) {
// Create the channel and start the main interrupt handler which invokes
// all other callbacks and exits if not already done.
if interruptChannel == nil {
interruptChannel = make(chan os.Signal, 1)
signal.Notify(interruptChannel, os.Interrupt)
spawn(mainInterruptHandler)
}
addHandlerChannel <- handler
}

31
database/cursor.go Normal file
View File

@@ -0,0 +1,31 @@
package database
// Cursor iterates over database entries given some bucket.
type Cursor interface {
// Next moves the iterator to the next key/value pair. It returns false if
// the iterator is exhausted or if the cursor is closed.
Next() bool
// First moves the iterator to the first key/value pair. It returns false if
// such a pair does not exist or if the cursor is closed.
First() bool
// Seek moves the iterator to the first key/value pair whose key is greater
// than or equal to the given key. It returns ErrNotFound if such a pair does
// not exist.
Seek(key []byte) error
// Key returns the key of the current key/value pair, or ErrNotFound if done.
// Note that the key is trimmed to not include the prefix the cursor was opened
// with. The caller should not modify the contents of the returned slice, and
// its contents may change on the next call to Next.
Key() ([]byte, error)
// Value returns the value of the current key/value pair, or ErrNotFound if done.
// The caller should not modify the contents of the returned slice, and its
// contents may change on the next call to Next.
Value() ([]byte, error)
// Close releases associated resources.
Close() error
}
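A minimal usage sketch for this interface, assuming an open DataAccessor (a Database or a Transaction); the bucket name is made up, and using the bucket's full path as the cursor prefix is an assumption of this sketch.
package example
import "github.com/kaspanet/kaspad/database"
// collectBucketKeys iterates a bucket with a cursor and returns its keys.
func collectBucketKeys(accessor database.DataAccessor) ([][]byte, error) {
    bucket := database.MakeBucket([]byte("example"))
    cursor, err := accessor.Cursor(bucket.Path())
    if err != nil {
        return nil, err
    }
    defer cursor.Close()
    var keys [][]byte
    for found := cursor.First(); found; found = cursor.Next() {
        key, err := cursor.Key()
        if err != nil {
            return nil, err
        }
        keys = append(keys, key)
    }
    return keys, nil
}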

36
database/dataaccessor.go Normal file
View File

@@ -0,0 +1,36 @@
package database
// DataAccessor defines the common interface by which data gets
// accessed in a generic kaspad database.
type DataAccessor interface {
// Put sets the value for the given key. It overwrites
// any previous value for that key.
Put(key []byte, value []byte) error
// Get gets the value for the given key. It returns
// ErrNotFound if the given key does not exist.
Get(key []byte) ([]byte, error)
// Has returns true if the database contains the
// given key.
Has(key []byte) (bool, error)
// Delete deletes the value for the given key. Will not
// return an error if the key doesn't exist.
Delete(key []byte) error
// AppendToStore appends the given data to the store
// defined by storeName. This function returns a serialized
// location handle that's meant to be stored and later used
// when querying the data that has just now been inserted.
AppendToStore(storeName string, data []byte) ([]byte, error)
// RetrieveFromStore retrieves data from the store defined by
// storeName using the given serialized location handle. It
// returns ErrNotFound if the location does not exist. See
// AppendToStore for further details.
RetrieveFromStore(storeName string, location []byte) ([]byte, error)
// Cursor begins a new cursor over the given bucket.
Cursor(bucket []byte) (Cursor, error)
}
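A minimal sketch of the flat-data store methods above, assuming an open accessor and a made-up store name. The location handle returned by AppendToStore would normally be persisted (for example with Put) so the data can be found again later.
package example
import "github.com/kaspanet/kaspad/database"
// appendAndRetrieve appends data to a store and immediately reads it back
// using the returned location handle.
func appendAndRetrieve(accessor database.DataAccessor, data []byte) ([]byte, error) {
    location, err := accessor.AppendToStore("example-store", data)
    if err != nil {
        return nil, err
    }
    return accessor.RetrieveFromStore("example-store", location)
}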

19
database/database.go Normal file
View File

@@ -0,0 +1,19 @@
package database
// Database defines the interface of a database that can begin
// transactions and close itself.
//
// Important: This is not part of the DataAccessor interface
// because the Transaction interface includes it. Were we to
// merge Database with DataAccessor, implementors of the
// Transaction interface would be forced to implement methods
// such as Begin and Close, which is undesirable.
type Database interface {
DataAccessor
// Begin begins a new database transaction.
Begin() (Transaction, error)
// Close closes the database.
Close() error
}
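Because Database embeds DataAccessor and Transaction implements it as well, the same helper can run either directly against the database or inside a transaction started with Begin. A minimal sketch follows; Commit/Rollback handling is omitted because the Transaction interface is defined outside this excerpt.
package example
import "github.com/kaspanet/kaspad/database"
// hasKey works against any DataAccessor.
func hasKey(accessor database.DataAccessor, key []byte) (bool, error) {
    return accessor.Has(key)
}
// hasKeyInTransaction runs the same helper inside a transaction.
func hasKeyInTransaction(db database.Database, key []byte) (bool, error) {
    tx, err := db.Begin()
    if err != nil {
        return false, err
    }
    return hasKey(tx, key)
}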

View File

@@ -1,85 +1,34 @@
/*
Package database provides a block and metadata storage database.
Package database provides a database for kaspad.
Overview
This package provides a database layer to store and retrieve this data in a
simple and efficient manner.
This package provides a database layer to store and retrieve data in a simple
and efficient manner.
The default backend, ffldb, has a strong focus on speed, efficiency, and
robustness. It makes use leveldb for the metadata, flat files for block
storage, and strict checksums in key areas to ensure data integrity.
The current backend is ffldb, which makes use of leveldb, flat files, and strict
checksums in key areas to ensure data integrity.
A quick overview of the features database provides are as follows:
Implementors of additional backends are required to implement the following interfaces:
- Key/value metadata store
- Kaspa block storage
- Efficient retrieval of block headers and regions (transactions, scripts, etc)
- Read-only and read-write transactions with both manual and managed modes
- Nested buckets
- Supports registration of backend databases
- Comprehensive test coverage
DataAccessor
This defines the common interface by which data gets accessed in a generic kaspad
database. Both the Database and the Transaction interfaces (see below) implement it.
Database
The main entry point is the DB interface. It exposes functionality for
transactional-based access and storage of metadata and block data. It is
obtained via the Create and Open functions which take a database type string
that identifies the specific database driver (backend) to use as well as
arguments specific to the specified driver.
This defines the interface of a database that can begin transactions and close itself.
The interface provides facilities for obtaining transactions (the Tx interface)
that are the basis of all database reads and writes. Unlike some database
interfaces that support reading and writing without transactions, this interface
requires transactions even when only reading or writing a single key.
Transaction
The Begin function provides an unmanaged transaction while the View and Update
functions provide a managed transaction. These are described in more detail
below.
This defines the interface of a generic kaspad database transaction.
Note: transactions provide data consistency over the state of the database as it was
when the transaction started. There is NO guarantee that if one puts data into the
transaction then it will be available to get within the same transaction.
Transactions
Cursor
The Tx interface provides facilities for rolling back or committing changes that
took place while the transaction was active. It also provides the root metadata
bucket under which all keys, values, and nested buckets are stored. A
transaction can either be read-only or read-write and managed or unmanaged.
Managed versus Unmanaged Transactions
A managed transaction is one where the caller provides a function to execute
within the context of the transaction and the commit or rollback is handled
automatically depending on whether or not the provided function returns an
error. Attempting to manually call Rollback or Commit on the managed
transaction will result in a panic.
An unmanaged transaction, on the other hand, requires the caller to manually
call Commit or Rollback when they are finished with it. Leaving transactions
open for long periods of time can have several adverse effects, so it is
recommended that managed transactions are used instead.
Buckets
The Bucket interface provides the ability to manipulate key/value pairs and
nested buckets as well as iterate through them.
The Get, Put, and Delete functions work with key/value pairs, while the Bucket,
CreateBucket, CreateBucketIfNotExists, and DeleteBucket functions work with
buckets. The ForEach function allows the caller to provide a function to be
called with each key/value pair and nested bucket in the current bucket.
Metadata Bucket
As discussed above, all of the functions which are used to manipulate key/value
pairs and nested buckets exist on the Bucket interface. The root metadata
bucket is the upper-most bucket in which data is stored and is created at the
same time as the database. Use the Metadata function on the Tx interface
to retrieve it.
Nested Buckets
The CreateBucket and CreateBucketIfNotExists functions on the Bucket interface
provide the ability to create an arbitrary number of nested buckets. It is
a good idea to avoid a lot of buckets with little data in them as it could lead
to poor page utilization depending on the specific driver in use.
This iterates over database entries given some bucket.
*/
package database

View File

@@ -1,84 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package database
import (
"fmt"
)
// Driver defines a structure for backend drivers to use when they registered
// themselves as a backend which implements the DB interface.
type Driver struct {
// DbType is the identifier used to uniquely identify a specific
// database driver. There can be only one driver with the same name.
DbType string
// Create is the function that will be invoked with all user-specified
// arguments to create the database. This function must return
// ErrDbExists if the database already exists.
Create func(args ...interface{}) (DB, error)
// Open is the function that will be invoked with all user-specified
// arguments to open the database. This function must return
// ErrDbDoesNotExist if the database has not already been created.
Open func(args ...interface{}) (DB, error)
}
// driverList holds all of the registered database backends.
var drivers = make(map[string]*Driver)
// RegisterDriver adds a backend database driver to available interfaces.
// ErrDbTypeRegistered will be returned if the database type for the driver has
// already been registered.
func RegisterDriver(driver Driver) error {
if _, exists := drivers[driver.DbType]; exists {
str := fmt.Sprintf("driver %q is already registered",
driver.DbType)
return makeError(ErrDbTypeRegistered, str, nil)
}
drivers[driver.DbType] = &driver
return nil
}
// SupportedDrivers returns a slice of strings that represent the database
// drivers that have been registered and are therefore supported.
func SupportedDrivers() []string {
supportedDBs := make([]string, 0, len(drivers))
for _, drv := range drivers {
supportedDBs = append(supportedDBs, drv.DbType)
}
return supportedDBs
}
// Create initializes and opens a database for the specified type. The
// arguments are specific to the database type driver. See the documentation
// for the database driver for further details.
//
// ErrDbUnknownType will be returned if the database type is not registered.
func Create(dbType string, args ...interface{}) (DB, error) {
drv, exists := drivers[dbType]
if !exists {
str := fmt.Sprintf("driver %q is not registered", dbType)
return nil, makeError(ErrDbUnknownType, str, nil)
}
return drv.Create(args...)
}
// Open opens an existing database for the specified type. The arguments are
// specific to the database type driver. See the documentation for the database
// driver for further details.
//
// ErrDbUnknownType will be returned if the database type is not registered.
func Open(dbType string, args ...interface{}) (DB, error) {
drv, exists := drivers[dbType]
if !exists {
str := fmt.Sprintf("driver %q is not registered", dbType)
return nil, makeError(ErrDbUnknownType, str, nil)
}
return drv.Open(args...)
}
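
In the old API being removed here, a backend typically registered itself from an init function, which is why callers blank-import the driver package (as in the tests below). A hedged sketch of that pattern follows, with a made-up "memdb" driver name used purely for illustration.

```Go
package memdb // A hypothetical driver package, for illustration only.

import "github.com/kaspanet/kaspad/database"

func init() {
	driver := database.Driver{
		DbType: "memdb",
		Create: func(args ...interface{}) (database.DB, error) {
			// A real driver would create its backing storage here.
			return nil, nil
		},
		Open: func(args ...interface{}) (database.DB, error) {
			// A real driver would open existing storage here.
			return nil, nil
		},
	}
	if err := database.RegisterDriver(driver); err != nil {
		panic(err)
	}
}
```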

View File

@@ -1,128 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package database_test
import (
"github.com/pkg/errors"
"testing"
"github.com/kaspanet/kaspad/database"
_ "github.com/kaspanet/kaspad/database/ffldb"
)
// checkDbError ensures the passed error is a database.Error with an error code
// that matches the passed error code.
func checkDbError(t *testing.T, testName string, gotErr error, wantErrCode database.ErrorCode) bool {
dbErr, ok := gotErr.(database.Error)
if !ok {
t.Errorf("%s: unexpected error type - got %T, want %T",
testName, gotErr, database.Error{})
return false
}
if dbErr.ErrorCode != wantErrCode {
t.Errorf("%s: unexpected error code - got %s (%s), want %s",
testName, dbErr.ErrorCode, dbErr.Description,
wantErrCode)
return false
}
return true
}
// TestAddDuplicateDriver ensures that adding a duplicate driver does not
// overwrite an existing one.
func TestAddDuplicateDriver(t *testing.T) {
supportedDrivers := database.SupportedDrivers()
if len(supportedDrivers) == 0 {
t.Errorf("no backends to test")
return
}
dbType := supportedDrivers[0]
// bogusCreateDB is a function which acts as a bogus create and open
// driver function and intentionally returns a failure that can be
// detected if the interface allows a duplicate driver to overwrite an
// existing one.
bogusCreateDB := func(args ...interface{}) (database.DB, error) {
return nil, errors.Errorf("duplicate driver allowed for database "+
"type [%v]", dbType)
}
// Create a driver that tries to replace an existing one. Set its
// create and open functions to a function that causes a test failure if
// they are invoked.
driver := database.Driver{
DbType: dbType,
Create: bogusCreateDB,
Open: bogusCreateDB,
}
testName := "duplicate driver registration"
err := database.RegisterDriver(driver)
if !checkDbError(t, testName, err, database.ErrDbTypeRegistered) {
return
}
}
// TestCreateOpenFail ensures that errors which occur while opening or closing
// a database are handled properly.
func TestCreateOpenFail(t *testing.T) {
// bogusCreateDB is a function which acts as a bogus create and open
// driver function that intentionally returns a failure which can be
// detected.
dbType := "createopenfail"
openError := errors.Errorf("failed to create or open database for "+
"database type [%v]", dbType)
bogusCreateDB := func(args ...interface{}) (database.DB, error) {
return nil, openError
}
// Create and add driver that intentionally fails when created or opened
// to ensure errors on database open and create are handled properly.
driver := database.Driver{
DbType: dbType,
Create: bogusCreateDB,
Open: bogusCreateDB,
}
database.RegisterDriver(driver)
// Ensure creating a database with the new type fails with the expected
// error.
_, err := database.Create(dbType)
if err != openError {
t.Errorf("expected error not received - got: %v, want %v", err,
openError)
return
}
// Ensure opening a database with the new type fails with the expected
// error.
_, err = database.Open(dbType)
if err != openError {
t.Errorf("expected error not received - got: %v, want %v", err,
openError)
return
}
}
// TestCreateOpenUnsupported ensures that attempting to create or open an
// unsupported database type is handled properly.
func TestCreateOpenUnsupported(t *testing.T) {
// Ensure creating a database with an unsupported type fails with the
// expected error.
testName := "create with unsupported database type"
dbType := "unsupported"
_, err := database.Create(dbType)
if !checkDbError(t, testName, err, database.ErrDbUnknownType) {
return
}
// Ensure opening a database with an unsupported type fails with the
// expected error.
testName = "open with unsupported database type"
_, err = database.Open(dbType)
if !checkDbError(t, testName, err, database.ErrDbUnknownType) {
return
}
}

View File

@@ -1,211 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package database
import (
"fmt"
"github.com/pkg/errors"
)
// ErrorCode identifies a kind of error.
type ErrorCode int
// These constants are used to identify a specific database Error.
const (
// **************************************
// Errors related to driver registration.
// **************************************
// ErrDbTypeRegistered indicates two different database drivers
// attempt to register with the same database type.
ErrDbTypeRegistered ErrorCode = iota
// *************************************
// Errors related to database functions.
// *************************************
// ErrDbUnknownType indicates there is no driver registered for
// the specified database type.
ErrDbUnknownType
// ErrDbDoesNotExist indicates open is called for a database that
// does not exist.
ErrDbDoesNotExist
// ErrDbExists indicates create is called for a database that
// already exists.
ErrDbExists
// ErrDbNotOpen indicates a database instance is accessed before
// it is opened or after it is closed.
ErrDbNotOpen
// ErrDbAlreadyOpen indicates open was called on a database that
// is already open.
ErrDbAlreadyOpen
// ErrInvalid indicates the specified database is not valid.
ErrInvalid
// ErrCorruption indicates a checksum failure occurred which invariably
// means the database is corrupt.
ErrCorruption
// ****************************************
// Errors related to database transactions.
// ****************************************
// ErrTxClosed indicates an attempt was made to commit or rollback a
// transaction that has already had one of those operations performed.
ErrTxClosed
// ErrTxNotWritable indicates an operation that requires write access to
// the database was attempted against a read-only transaction.
ErrTxNotWritable
// **************************************
// Errors related to metadata operations.
// **************************************
// ErrBucketNotFound indicates an attempt to access a bucket that has
// not been created yet.
ErrBucketNotFound
// ErrBucketExists indicates an attempt to create a bucket that already
// exists.
ErrBucketExists
// ErrBucketNameRequired indicates an attempt to create a bucket with a
// blank name.
ErrBucketNameRequired
// ErrKeyRequired indicates an attempt to insert a zero-length key.
ErrKeyRequired
// ErrKeyTooLarge indicates an attempt to insert a key that is larger
// than the max allowed key size. The max key size depends on the
// specific backend driver being used. As a general rule, key sizes
// should be relatively small, so this should rarely be an issue.
ErrKeyTooLarge
// ErrValueTooLarge indicates an attempt to insert a value that is larger
// than the max allowed value size. The max value size depends on the
// specific backend driver being used.
ErrValueTooLarge
// ErrIncompatibleValue indicates the value in question is invalid for
// the specific requested operation. For example, trying to create or
// delete a bucket with an existing non-bucket key, attempting to create
// or delete a non-bucket key with an existing bucket key, or trying to
// delete a value via a cursor when it points to a nested bucket.
ErrIncompatibleValue
// ***************************************
// Errors related to block I/O operations.
// ***************************************
// ErrBlockNotFound indicates a block with the provided hash does not
// exist in the database.
ErrBlockNotFound
// ErrBlockExists indicates a block with the provided hash already
// exists in the database.
ErrBlockExists
// ErrBlockRegionInvalid indicates a region that exceeds the bounds of
// the specified block was requested. When the hash provided by the
// region does not correspond to an existing block, the error will be
// ErrBlockNotFound instead.
ErrBlockRegionInvalid
// ***********************************
// Support for driver-specific errors.
// ***********************************
// ErrDriverSpecific indicates the Err field is a driver-specific error.
// This provides a mechanism for drivers to plug-in their own custom
// errors for any situations which aren't already covered by the error
// codes provided by this package.
ErrDriverSpecific
// numErrorCodes is the maximum error code number used in tests.
numErrorCodes
)
// Map of ErrorCode values back to their constant names for pretty printing.
var errorCodeStrings = map[ErrorCode]string{
ErrDbTypeRegistered: "ErrDbTypeRegistered",
ErrDbUnknownType: "ErrDbUnknownType",
ErrDbDoesNotExist: "ErrDbDoesNotExist",
ErrDbExists: "ErrDbExists",
ErrDbNotOpen: "ErrDbNotOpen",
ErrDbAlreadyOpen: "ErrDbAlreadyOpen",
ErrInvalid: "ErrInvalid",
ErrCorruption: "ErrCorruption",
ErrTxClosed: "ErrTxClosed",
ErrTxNotWritable: "ErrTxNotWritable",
ErrBucketNotFound: "ErrBucketNotFound",
ErrBucketExists: "ErrBucketExists",
ErrBucketNameRequired: "ErrBucketNameRequired",
ErrKeyRequired: "ErrKeyRequired",
ErrKeyTooLarge: "ErrKeyTooLarge",
ErrValueTooLarge: "ErrValueTooLarge",
ErrIncompatibleValue: "ErrIncompatibleValue",
ErrBlockNotFound: "ErrBlockNotFound",
ErrBlockExists: "ErrBlockExists",
ErrBlockRegionInvalid: "ErrBlockRegionInvalid",
ErrDriverSpecific: "ErrDriverSpecific",
}
// String returns the ErrorCode as a human-readable name.
func (e ErrorCode) String() string {
if s := errorCodeStrings[e]; s != "" {
return s
}
return fmt.Sprintf("Unknown ErrorCode (%d)", int(e))
}
// Error provides a single type for errors that can happen during database
// operation. It is used to indicate several types of failures including errors
// with caller requests such as specifying invalid block regions or attempting
// to access data against closed database transactions, driver errors, errors
// retrieving data, and errors communicating with database servers.
//
// The caller can use type assertions to determine if an error is an Error and
// access the ErrorCode field to ascertain the specific reason for the failure.
//
// The ErrDriverSpecific error code will also have the Err field set with the
// underlying error. Depending on the backend driver, the Err field might be
// set to the underlying error for other error codes as well.
type Error struct {
ErrorCode ErrorCode // Describes the kind of error
Description string // Human readable description of the issue
Err error // Underlying error
}
// Error satisfies the error interface and prints human-readable errors.
func (e Error) Error() string {
if e.Err != nil {
return e.Description + ": " + e.Err.Error()
}
return e.Description
}
// makeError creates an Error given a set of arguments. The error code must
// be one of the error codes provided by this package.
func makeError(c ErrorCode, desc string, err error) Error {
return Error{ErrorCode: c, Description: desc, Err: err}
}
// IsErrorCode returns whether or not the provided error is a database Error
// with the provided error code.
func IsErrorCode(err error, c ErrorCode) bool {
var errError Error
if ok := errors.As(err, &errError); ok {
return errError.ErrorCode == c
}
return false
}
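
A short sketch of inspecting errors from the old API with IsErrorCode, using the ErrBlockNotFound code defined above. The err value stands in for an error returned by some database call; the function name is hypothetical.

```Go
package example

import "github.com/kaspanet/kaspad/database"

// classifyFetchError reports whether a block is simply missing, as opposed
// to a real failure. err stands in for an error from some database call.
func classifyFetchError(err error) (missing bool, failure error) {
	if err == nil {
		return false, nil
	}
	if database.IsErrorCode(err, database.ErrBlockNotFound) {
		return true, nil
	}
	return false, err
}
```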

View File

@@ -1,118 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package database
import (
"github.com/pkg/errors"
"testing"
)
// TestErrorCodeStringer tests the stringized output for the ErrorCode type.
func TestErrorCodeStringer(t *testing.T) {
tests := []struct {
in ErrorCode
want string
}{
{ErrDbTypeRegistered, "ErrDbTypeRegistered"},
{ErrDbUnknownType, "ErrDbUnknownType"},
{ErrDbDoesNotExist, "ErrDbDoesNotExist"},
{ErrDbExists, "ErrDbExists"},
{ErrDbNotOpen, "ErrDbNotOpen"},
{ErrDbAlreadyOpen, "ErrDbAlreadyOpen"},
{ErrInvalid, "ErrInvalid"},
{ErrCorruption, "ErrCorruption"},
{ErrTxClosed, "ErrTxClosed"},
{ErrTxNotWritable, "ErrTxNotWritable"},
{ErrBucketNotFound, "ErrBucketNotFound"},
{ErrBucketExists, "ErrBucketExists"},
{ErrBucketNameRequired, "ErrBucketNameRequired"},
{ErrKeyRequired, "ErrKeyRequired"},
{ErrKeyTooLarge, "ErrKeyTooLarge"},
{ErrValueTooLarge, "ErrValueTooLarge"},
{ErrIncompatibleValue, "ErrIncompatibleValue"},
{ErrBlockNotFound, "ErrBlockNotFound"},
{ErrBlockExists, "ErrBlockExists"},
{ErrBlockRegionInvalid, "ErrBlockRegionInvalid"},
{ErrDriverSpecific, "ErrDriverSpecific"},
{0xffff, "Unknown ErrorCode (65535)"},
}
// Detect additional error codes that don't have the stringer added.
if len(tests)-1 != int(TstNumErrorCodes) {
t.Errorf("It appears an error code was added without adding " +
"an associated stringer test")
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
result := test.in.String()
if result != test.want {
t.Errorf("String #%d\ngot: %s\nwant: %s", i, result,
test.want)
continue
}
}
}
// TestError tests the error output for the Error type.
func TestError(t *testing.T) {
t.Parallel()
tests := []struct {
in Error
want string
}{
{
Error{Description: "some error"},
"some error",
},
{
Error{Description: "human-readable error"},
"human-readable error",
},
{
Error{
ErrorCode: ErrDriverSpecific,
Description: "some error",
Err: errors.New("driver-specific error"),
},
"some error: driver-specific error",
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
result := test.in.Error()
if result != test.want {
t.Errorf("Error #%d\n got: %s want: %s", i, result,
test.want)
continue
}
}
}
func TestIsErrorCode(t *testing.T) {
dummyError := errors.New("")
tests := []struct {
err error
code ErrorCode
expectedResult bool
}{
{makeError(ErrBucketExists, "", dummyError), ErrBucketExists, true},
{makeError(ErrBucketExists, "", dummyError), ErrBlockExists, false},
{dummyError, ErrBlockExists, false},
{nil, ErrBlockExists, false},
}
for i, test := range tests {
actualResult := IsErrorCode(test.err, test.code)
if test.expectedResult != actualResult {
t.Errorf("TestIsErrorCode: %d: Expected: %t, but got: %t",
i, test.expectedResult, actualResult)
}
}
}

12
database/errors.go Normal file
View File

@@ -0,0 +1,12 @@
package database
import "errors"
// ErrNotFound denotes that the requested item was not
// found in the database.
var ErrNotFound = errors.New("not found")
// IsNotFoundError checks whether an error is an ErrNotFound.
func IsNotFoundError(err error) bool {
return errors.Is(err, ErrNotFound)
}
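
A minimal sketch of the intended usage in the new API: a missing key is detected with IsNotFoundError rather than treated as a hard failure. The helper name and default-value behavior are assumptions for the example.

```Go
package example

import "github.com/kaspanet/kaspad/database"

// getOrDefault returns defaultValue when the key is not present, and
// propagates any other error.
func getOrDefault(accessor database.DataAccessor, key, defaultValue []byte) ([]byte, error) {
	value, err := accessor.Get(key)
	if database.IsNotFoundError(err) {
		return defaultValue, nil
	}
	if err != nil {
		return nil, err
	}
	return value, nil
}
```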

View File

@@ -1,180 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package database_test
import (
"bytes"
"fmt"
"github.com/pkg/errors"
"os"
"path/filepath"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
_ "github.com/kaspanet/kaspad/database/ffldb"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
)
// This example demonstrates creating a new database.
func ExampleCreate() {
// This example assumes the ffldb driver is imported.
//
// import (
// "github.com/kaspanet/kaspad/database"
// _ "github.com/kaspanet/kaspad/database/ffldb"
// )
// Create a database and schedule it to be closed and removed on exit.
// Typically you wouldn't want to remove the database right away like
// this, nor put it in the temp directory, but it's done here to ensure
// the example cleans up after itself.
dbPath := filepath.Join(os.TempDir(), "examplecreate")
db, err := database.Create("ffldb", dbPath, wire.Mainnet)
if err != nil {
fmt.Println(err)
return
}
defer os.RemoveAll(dbPath)
defer db.Close()
// Output:
}
// This example demonstrates creating a new database and using a managed
// read-write transaction to store and retrieve metadata.
func Example_basicUsage() {
// This example assumes the ffldb driver is imported.
//
// import (
// "github.com/kaspanet/kaspad/database"
// _ "github.com/kaspanet/kaspad/database/ffldb"
// )
// Create a database and schedule it to be closed and removed on exit.
// Typically you wouldn't want to remove the database right away like
// this, nor put it in the temp directory, but it's done here to ensure
// the example cleans up after itself.
dbPath := filepath.Join(os.TempDir(), "exampleusage")
// ensure that DB does not exist before test starts
os.RemoveAll(dbPath)
db, err := database.Create("ffldb", dbPath, wire.Mainnet)
if err != nil {
fmt.Println(err)
return
}
defer os.RemoveAll(dbPath)
defer db.Close()
// Use the Update function of the database to perform a managed
// read-write transaction. The transaction will automatically be rolled
// back if the supplied inner function returns a non-nil error.
err = db.Update(func(dbTx database.Tx) error {
// Store a key/value pair directly in the metadata bucket.
// Typically a nested bucket would be used for a given feature,
// but this example is using the metadata bucket directly for
// simplicity.
key := []byte("mykey")
value := []byte("myvalue")
if err := dbTx.Metadata().Put(key, value); err != nil {
return err
}
// Read the key back and ensure it matches.
if !bytes.Equal(dbTx.Metadata().Get(key), value) {
return errors.Errorf("unexpected value for key '%s'", key)
}
// Create a new nested bucket under the metadata bucket.
nestedBucketKey := []byte("mybucket")
nestedBucket, err := dbTx.Metadata().CreateBucket(nestedBucketKey)
if err != nil {
return err
}
// The key from above that was set in the metadata bucket does
// not exist in this new nested bucket.
if nestedBucket.Get(key) != nil {
return errors.Errorf("key '%s' is not expected nil", key)
}
return nil
})
if err != nil {
fmt.Println(err)
return
}
// Output:
}
// This example demonstrates creating a new database, using a managed read-write
// transaction to store a block, and using a managed read-only transaction to
// fetch the block.
func Example_blockStorageAndRetrieval() {
// This example assumes the ffldb driver is imported.
//
// import (
// "github.com/kaspanet/kaspad/database"
// _ "github.com/kaspanet/kaspad/database/ffldb"
// )
// Create a database and schedule it to be closed and removed on exit.
// Typically you wouldn't want to remove the database right away like
// this, nor put it in the temp directory, but it's done here to ensure
// the example cleans up after itself.
dbPath := filepath.Join(os.TempDir(), "exampleblkstorage")
db, err := database.Create("ffldb", dbPath, wire.Mainnet)
if err != nil {
fmt.Println(err)
return
}
defer os.RemoveAll(dbPath)
defer db.Close()
// Use the Update function of the database to perform a managed
// read-write transaction and store a genesis block in the database as
// an example.
err = db.Update(func(dbTx database.Tx) error {
genesisBlock := dagconfig.MainnetParams.GenesisBlock
return dbTx.StoreBlock(util.NewBlock(genesisBlock))
})
if err != nil {
fmt.Println(err)
return
}
// Use the View function of the database to perform a managed read-only
// transaction and fetch the block stored above.
var loadedBlockBytes []byte
err = db.View(func(dbTx database.Tx) error {
genesisHash := dagconfig.MainnetParams.GenesisHash
blockBytes, err := dbTx.FetchBlock(genesisHash)
if err != nil {
return err
}
// As documented, all data fetched from the database is only
// valid during a database transaction in order to support
// zero-copy backends. Thus, make a copy of the data so it
// can be used outside of the transaction.
loadedBlockBytes = make([]byte, len(blockBytes))
copy(loadedBlockBytes, blockBytes)
return nil
})
if err != nil {
fmt.Println(err)
return
}
// Typically at this point, the block could be deserialized via the
// wire.MsgBlock.Deserialize function or used in its serialized form
// depending on need. However, for this example, just display the
// number of serialized bytes to show it was loaded as expected.
fmt.Printf("Serialized block size: %d bytes\n", len(loadedBlockBytes))
// Output:
// Serialized block size: 280 bytes
}

View File

@@ -1,17 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
/*
This test file is part of the database package rather than the
database_test package so it can bridge access to the internals to properly test
cases which are either not possible or can't reliably be tested via the public
interface. The functions, constants, and variables are only exported while the
tests are being run.
*/
package database
// TstNumErrorCodes makes the internal numErrorCodes parameter available to the
// test package.
const TstNumErrorCodes = numErrorCodes

View File

@@ -1,34 +0,0 @@
ffldb
=====
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
[![GoDoc](https://godoc.org/github.com/kaspanet/kaspad/database/ffldb?status.png)](http://godoc.org/github.com/kaspanet/kaspad/database/ffldb)
=======
Package ffldb implements a driver for the database package that uses leveldb for
the backing metadata and flat files for block storage.
This driver is the recommended driver for use with kaspad. It makes use of leveldb
for the metadata, flat files for block storage, and checksums in key areas to
ensure data integrity.
## Usage
This package is a driver to the database package and provides the database type
of "ffldb". The parameters the Open and Create functions take are the
database path as a string and the block network.
```Go
db, err := database.Open("ffldb", "path/to/database", wire.Mainnet)
if err != nil {
// Handle error
}
```
```Go
db, err := database.Create("ffldb", "path/to/database", wire.Mainnet)
if err != nil {
// Handle error
}
```

View File

@@ -1,97 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package ffldb
import (
"os"
"path/filepath"
"testing"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
)
// BenchmarkBlockHeader benchmarks how long it takes to load the mainnet genesis
// block header.
func BenchmarkBlockHeader(b *testing.B) {
// Start by creating a new database and populating it with the mainnet
// genesis block.
dbPath := filepath.Join(os.TempDir(), "ffldb-benchblkhdr")
_ = os.RemoveAll(dbPath)
db, err := database.Create("ffldb", dbPath, blockDataNet)
if err != nil {
b.Fatal(err)
}
defer os.RemoveAll(dbPath)
defer db.Close()
err = db.Update(func(dbTx database.Tx) error {
block := util.NewBlock(dagconfig.MainnetParams.GenesisBlock)
return dbTx.StoreBlock(block)
})
if err != nil {
b.Fatal(err)
}
b.ReportAllocs()
b.ResetTimer()
err = db.View(func(dbTx database.Tx) error {
blockHash := dagconfig.MainnetParams.GenesisHash
for i := 0; i < b.N; i++ {
_, err := dbTx.FetchBlockHeader(blockHash)
if err != nil {
return err
}
}
return nil
})
if err != nil {
b.Fatal(err)
}
// Don't benchmark teardown.
b.StopTimer()
}
// BenchmarkBlock benchmarks how long it takes to load the mainnet genesis
// block.
func BenchmarkBlock(b *testing.B) {
// Start by creating a new database and populating it with the mainnet
// genesis block.
dbPath := filepath.Join(os.TempDir(), "ffldb-benchblk")
_ = os.RemoveAll(dbPath)
db, err := database.Create("ffldb", dbPath, blockDataNet)
if err != nil {
b.Fatal(err)
}
defer os.RemoveAll(dbPath)
defer db.Close()
err = db.Update(func(dbTx database.Tx) error {
block := util.NewBlock(dagconfig.MainnetParams.GenesisBlock)
return dbTx.StoreBlock(block)
})
if err != nil {
b.Fatal(err)
}
b.ReportAllocs()
b.ResetTimer()
err = db.View(func(dbTx database.Tx) error {
blockHash := dagconfig.MainnetParams.GenesisHash
for i := 0; i < b.N; i++ {
_, err := dbTx.FetchBlock(blockHash)
if err != nil {
return err
}
}
return nil
})
if err != nil {
b.Fatal(err)
}
// Don't benchmark teardown.
b.StopTimer()
}

View File

@@ -1,765 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
// This file contains the implementation functions for reading, writing, and
// otherwise working with the flat files that house the actual blocks.
package ffldb
import (
"container/list"
"encoding/binary"
"fmt"
"github.com/pkg/errors"
"hash/crc32"
"io"
"os"
"path/filepath"
"sync"
"syscall"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
const (
// maxOpenFiles is the max number of open files to maintain in the
// open blocks cache. Note that this does not include the current
// write file, so there will typically be one more than this value open.
maxOpenFiles = 25
// maxBlockFileSize is the maximum size for each file used to store
// blocks.
//
// NOTE: The current code uses uint32 for all offsets, so this value
// must be less than 2^32 (4 GiB). This is also why it's a typed
// constant.
maxBlockFileSize uint32 = 512 * 1024 * 1024 // 512 MiB
)
var (
// castagnoli houses the Castagnoli polynomial used for CRC-32 checksums.
castagnoli = crc32.MakeTable(crc32.Castagnoli)
)
// filer is an interface which acts very similar to a *os.File and is typically
// implemented by it. It exists so the test code can provide mock files for
// properly testing corruption and file system issues.
type filer interface {
io.Closer
io.WriterAt
io.ReaderAt
Truncate(size int64) error
Sync() error
}
// lockableFile represents a block file on disk that has been opened for either
// read or read/write access. It also contains a read-write mutex to support
// multiple concurrent readers.
type lockableFile struct {
sync.RWMutex
file filer
}
// writeCursor represents the current file and offset of the block file on disk
// for performing all writes. It also contains a read-write mutex to support
// multiple concurrent readers which can reuse the file handle.
type writeCursor struct {
sync.RWMutex
// curFile is the current block file that will be appended to when
// writing new blocks.
curFile *lockableFile
// curFileNum is the current block file number and is used to allow
// readers to use the same open file handle.
curFileNum uint32
// curOffset is the offset in the current write block file where the
// next new block will be written.
curOffset uint32
}
// blockStore houses information used to handle reading and writing blocks (and
// part of blocks) into flat files with support for multiple concurrent readers.
type blockStore struct {
// network is the specific network to use in the flat files for each
// block.
network wire.KaspaNet
// basePath is the base path used for the flat block files and metadata.
basePath string
// maxBlockFileSize is the maximum size for each file used to store
// blocks. It is defined on the store so the whitebox tests can
// override the value.
maxBlockFileSize uint32
// maxOpenFiles is the max number of open files to maintain in the
// open blocks cache. Note that this does not include the current
// write file, so there will typically be one more than this value open.
// It is defined on the store so the whitebox tests can override the value.
maxOpenFiles int
// The following fields are related to the flat files which hold the
// actual blocks. The number of open files is limited by maxOpenFiles.
//
// obfMutex protects concurrent access to the openBlockFiles map. It is
// a RWMutex so multiple readers can simultaneously access open files.
//
// openBlockFiles houses the open file handles for existing block files
// which have been opened read-only along with an individual RWMutex.
// This scheme allows multiple concurrent readers to the same file while
// preventing the file from being closed out from under them.
//
// lruMutex protects concurrent access to the least recently used list
// and lookup map.
//
// openBlocksLRU tracks how the open files are referenced by pushing the
// most recently used files to the front of the list thereby trickling
// the least recently used files to the end of the list. When a file needs
// to be closed due to exceeding the max number of allowed open
// files, the one at the end of the list is closed.
//
// fileNumToLRUElem is a mapping between a specific block file number
// and the associated list element on the least recently used list.
//
// Thus, with the combination of these fields, the database supports
// concurrent non-blocking reads across multiple and individual files
// along with intelligently limiting the number of open file handles by
// closing the least recently used files as needed.
//
// NOTE: The locking order used throughout is well-defined and MUST be
// followed. Failure to do so could lead to deadlocks. In particular,
// the locking order is as follows:
// 1) obfMutex
// 2) lruMutex
// 3) writeCursor mutex
// 4) specific file mutexes
//
// None of the mutexes are required to be locked at the same time, and
// often aren't. However, if they are to be locked simultaneously, they
// MUST be locked in the order previously specified.
//
// Due to the high performance and multi-read concurrency requirements,
// write locks should only be held for the minimum time necessary.
obfMutex sync.RWMutex
lruMutex sync.Mutex
openBlocksLRU *list.List // Contains uint32 block file numbers.
fileNumToLRUElem map[uint32]*list.Element
openBlockFiles map[uint32]*lockableFile
// writeCursor houses the state for the current file and location that
// new blocks are written to.
writeCursor *writeCursor
// These functions are set to openFile, openWriteFile, and deleteFile by
// default, but are exposed here to allow the whitebox tests to replace
// them when working with mock files.
openFileFunc func(fileNum uint32) (*lockableFile, error)
openWriteFileFunc func(fileNum uint32) (filer, error)
deleteFileFunc func(fileNum uint32) error
}
// blockLocation identifies a particular block file and location.
type blockLocation struct {
blockFileNum uint32
fileOffset uint32
blockLen uint32
}
// deserializeBlockLoc deserializes the passed serialized block location
// information. This is data stored into the block index metadata for each
// block. The serialized data passed to this function MUST be at least
// blockLocSize bytes or it will panic. The error check is avoided here because
// this information will always be coming from the block index which includes a
// checksum to detect corruption. Thus it is safe to use this unchecked here.
func deserializeBlockLoc(serializedLoc []byte) blockLocation {
// The serialized block location format is:
//
// [0:4] Block file (4 bytes)
// [4:8] File offset (4 bytes)
// [8:12] Block length (4 bytes)
return blockLocation{
blockFileNum: byteOrder.Uint32(serializedLoc[0:4]),
fileOffset: byteOrder.Uint32(serializedLoc[4:8]),
blockLen: byteOrder.Uint32(serializedLoc[8:12]),
}
}
// serializeBlockLoc returns the serialization of the passed block location.
// This is data to be stored into the block index metadata for each block.
func serializeBlockLoc(loc blockLocation) []byte {
// The serialized block location format is:
//
// [0:4] Block file (4 bytes)
// [4:8] File offset (4 bytes)
// [8:12] Block length (4 bytes)
var serializedData [12]byte
byteOrder.PutUint32(serializedData[0:4], loc.blockFileNum)
byteOrder.PutUint32(serializedData[4:8], loc.fileOffset)
byteOrder.PutUint32(serializedData[8:12], loc.blockLen)
return serializedData[:]
}
// blockFilePath returns the file path for the provided block file number.
func blockFilePath(dbPath string, fileNum uint32) string {
// Choose 9 digits of precision for the filenames. 9 digits provide
// 10^9 files @ 512MiB each, for a total of ~476.84PiB.
fileName := fmt.Sprintf("%09d.fdb", fileNum)
return filepath.Join(dbPath, fileName)
}
// openWriteFile returns a file handle for the passed flat file number in
// read/write mode. The file will be created if needed. It is typically used
// for the current file that will have all new data appended. Unlike openFile,
// this function does not keep track of the open file and it is not subject to
// the maxOpenFiles limit.
func (s *blockStore) openWriteFile(fileNum uint32) (filer, error) {
// The current block file needs to be read-write so it is possible to
// append to it. Also, it shouldn't be tracked by the least recently used
// file list.
filePath := blockFilePath(s.basePath, fileNum)
file, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE, 0666)
if err != nil {
str := fmt.Sprintf("failed to open file %q: %s", filePath, err)
return nil, makeDbErr(database.ErrDriverSpecific, str, err)
}
return file, nil
}
// openFile returns a read-only file handle for the passed flat file number.
// The function also keeps track of the open files, performs least recently
// used tracking, and limits the number of open files to maxOpenFiles by closing
// the least recently used file as needed.
//
// This function MUST be called with the overall files mutex (s.obfMutex) locked
// for WRITES.
func (s *blockStore) openFile(fileNum uint32) (*lockableFile, error) {
// Open the appropriate file as read-only.
filePath := blockFilePath(s.basePath, fileNum)
file, err := os.Open(filePath)
if err != nil {
return nil, makeDbErr(database.ErrDriverSpecific, err.Error(),
err)
}
blockFile := &lockableFile{file: file}
// Close the least recently used file if opening this one exceeds the max
// allowed open files. This is not done until after the file is opened so
// that, if the open fails, there is no need to close any files.
//
// A write lock is required on the LRU list here to protect against
// modifications happening as already open files are read from and
// shuffled to the front of the list.
//
// Also, add the file that was just opened to the front of the least
// recently used list to indicate it is the most recently used file and
// therefore should be closed last.
s.lruMutex.Lock()
lruList := s.openBlocksLRU
if lruList.Len() >= s.maxOpenFiles {
lruFileNum := lruList.Remove(lruList.Back()).(uint32)
oldBlockFile := s.openBlockFiles[lruFileNum]
// Close the old file under the write lock for the file in case
// any readers are currently reading from it so it's not closed
// out from under them.
oldBlockFile.Lock()
_ = oldBlockFile.file.Close()
oldBlockFile.Unlock()
delete(s.openBlockFiles, lruFileNum)
delete(s.fileNumToLRUElem, lruFileNum)
}
s.fileNumToLRUElem[fileNum] = lruList.PushFront(fileNum)
s.lruMutex.Unlock()
// Store a reference to it in the open block files map.
s.openBlockFiles[fileNum] = blockFile
return blockFile, nil
}
// deleteFile removes the block file for the passed flat file number. The file
// must already be closed and it is the responsibility of the caller to do any
// other state cleanup necessary.
func (s *blockStore) deleteFile(fileNum uint32) error {
filePath := blockFilePath(s.basePath, fileNum)
if err := os.Remove(filePath); err != nil {
return makeDbErr(database.ErrDriverSpecific, err.Error(), err)
}
return nil
}
// blockFile attempts to return an existing file handle for the passed flat file
// number if it is already open as well as marking it as most recently used. It
// will also open the file when it's not already open subject to the rules
// described in openFile.
//
// NOTE: The returned block file will already have the read lock acquired and
// the caller MUST call .RUnlock() to release it once it has finished all read
// operations. This is necessary because otherwise it would be possible for a
// separate goroutine to close the file after it is returned from here, but
// before the caller has acquired a read lock.
func (s *blockStore) blockFile(fileNum uint32) (*lockableFile, error) {
// When the requested block file is open for writes, return it.
wc := s.writeCursor
wc.RLock()
if fileNum == wc.curFileNum && wc.curFile.file != nil {
obf := wc.curFile
obf.RLock()
wc.RUnlock()
return obf, nil
}
wc.RUnlock()
// Try to return an open file under the overall files read lock.
s.obfMutex.RLock()
if obf, ok := s.openBlockFiles[fileNum]; ok {
s.lruMutex.Lock()
s.openBlocksLRU.MoveToFront(s.fileNumToLRUElem[fileNum])
s.lruMutex.Unlock()
obf.RLock()
s.obfMutex.RUnlock()
return obf, nil
}
s.obfMutex.RUnlock()
// Since the file isn't open already, need to check the open block files
// map again under write lock in case multiple readers got here and a
// separate one is already opening the file.
s.obfMutex.Lock()
if obf, ok := s.openBlockFiles[fileNum]; ok {
obf.RLock()
s.obfMutex.Unlock()
return obf, nil
}
// The file isn't open, so open it while potentially closing the least
// recently used one as needed.
obf, err := s.openFileFunc(fileNum)
if err != nil {
s.obfMutex.Unlock()
return nil, err
}
obf.RLock()
s.obfMutex.Unlock()
return obf, nil
}
// writeData is a helper function for writeBlock which writes the provided data
// at the current write offset and updates the write cursor accordingly. The
// field name parameter is only used when there is an error to provide a nicer
// error message.
//
// The write cursor will be advanced the number of bytes actually written in the
// event of failure.
//
// NOTE: This function MUST be called with the write cursor current file lock
// held and must only be called during a write transaction so it is effectively
// locked for writes. Also, the write cursor current file must NOT be nil.
func (s *blockStore) writeData(data []byte, fieldName string) error {
wc := s.writeCursor
n, err := wc.curFile.file.WriteAt(data, int64(wc.curOffset))
wc.curOffset += uint32(n)
if err != nil {
var pathErr *os.PathError
if ok := errors.As(err, &pathErr); ok && pathErr.Err == syscall.ENOSPC {
log.Errorf("No space left on the hard disk, exiting...")
os.Exit(1)
}
str := fmt.Sprintf("failed to write %s to file %d at "+
"offset %d: %s", fieldName, wc.curFileNum,
wc.curOffset-uint32(n), err)
return makeDbErr(database.ErrDriverSpecific, str, err)
}
return nil
}
// writeBlock appends the specified raw block bytes to the store's write cursor
// location and increments it accordingly. When the block would exceed the max
// file size for the current flat file, this function will close the current
// file, create the next file, update the write cursor, and write the block to
// the new file.
//
// The write cursor will also be advanced the number of bytes actually written
// in the event of failure.
//
// Format: <network><block length><serialized block><checksum>
func (s *blockStore) writeBlock(rawBlock []byte) (blockLocation, error) {
// Compute how many bytes will be written.
// 4 bytes for block network + 4 bytes for block length +
// length of raw block + 4 bytes for checksum.
blockLen := uint32(len(rawBlock))
fullLen := blockLen + 12
// Move to the next block file if adding the new block would exceed the
// max allowed size for the current block file. Also detect overflow
// to be paranoid: even though it isn't possible currently, the numbers
// might change in the future to make it possible.
//
// NOTE: The writeCursor.offset field isn't protected by the mutex
// since it's only read/changed during this function which can only be
// called during a write transaction, of which there can be only one at
// a time.
wc := s.writeCursor
finalOffset := wc.curOffset + fullLen
if finalOffset < wc.curOffset || finalOffset > s.maxBlockFileSize {
// This is done under the write cursor lock since the curFileNum
// field is accessed elsewhere by readers.
//
// Close the current write file to force a read-only reopen
// with LRU tracking. The close is done under the write lock
// for the file to prevent it from being closed out from under
// any readers currently reading from it.
wc.Lock()
wc.curFile.Lock()
if wc.curFile.file != nil {
_ = wc.curFile.file.Close()
wc.curFile.file = nil
}
wc.curFile.Unlock()
// Start writes into next file.
wc.curFileNum++
wc.curOffset = 0
wc.Unlock()
}
// All writes are done under the write lock for the file to ensure any
// readers are finished and blocked first.
wc.curFile.Lock()
defer wc.curFile.Unlock()
// Open the current file if needed. This will typically only be the
// case when moving to the next file to write to or on initial database
// load. However, it might also be the case if rollbacks happened after
// file writes started during a transaction commit.
if wc.curFile.file == nil {
file, err := s.openWriteFileFunc(wc.curFileNum)
if err != nil {
return blockLocation{}, err
}
wc.curFile.file = file
}
// Kaspa network.
origOffset := wc.curOffset
hasher := crc32.New(castagnoli)
var scratch [4]byte
byteOrder.PutUint32(scratch[:], uint32(s.network))
if err := s.writeData(scratch[:], "network"); err != nil {
return blockLocation{}, err
}
_, _ = hasher.Write(scratch[:])
// Block length.
byteOrder.PutUint32(scratch[:], blockLen)
if err := s.writeData(scratch[:], "block length"); err != nil {
return blockLocation{}, err
}
_, _ = hasher.Write(scratch[:])
// Serialized block.
if err := s.writeData(rawBlock[:], "block"); err != nil {
return blockLocation{}, err
}
_, _ = hasher.Write(rawBlock)
// Castagnoli CRC-32 as a checksum of all the previous.
if err := s.writeData(hasher.Sum(nil), "checksum"); err != nil {
return blockLocation{}, err
}
loc := blockLocation{
blockFileNum: wc.curFileNum,
fileOffset: origOffset,
blockLen: fullLen,
}
return loc, nil
}
// readBlock reads the specified block record and returns the serialized block.
// It ensures the integrity of the block data by checking that the serialized
// network matches the current network associated with the block store and
// comparing the calculated checksum against the one stored in the flat file.
// This function also automatically handles all file management such as opening
// and closing files as necessary to stay within the maximum allowed open files
// limit.
//
// Returns ErrDriverSpecific if the data fails to read for any reason and
// ErrCorruption if the checksum of the read data doesn't match the checksum
// read from the file.
//
// Format: <network><block length><serialized block><checksum>
func (s *blockStore) readBlock(hash *daghash.Hash, loc blockLocation) ([]byte, error) {
// Get the referenced block file handle opening the file as needed. The
// function also handles closing files as needed to avoid going over the
// max allowed open files.
blockFile, err := s.blockFile(loc.blockFileNum)
if err != nil {
return nil, err
}
serializedData := make([]byte, loc.blockLen)
n, err := blockFile.file.ReadAt(serializedData, int64(loc.fileOffset))
blockFile.RUnlock()
if err != nil {
str := fmt.Sprintf("failed to read block %s from file %d, "+
"offset %d: %s", hash, loc.blockFileNum, loc.fileOffset,
err)
return nil, makeDbErr(database.ErrDriverSpecific, str, err)
}
// Calculate the checksum of the read data and ensure it matches the
// serialized checksum. This will detect any data corruption in the
// flat file without having to do much more expensive merkle root
// calculations on the loaded block.
serializedChecksum := binary.BigEndian.Uint32(serializedData[n-4:])
calculatedChecksum := crc32.Checksum(serializedData[:n-4], castagnoli)
if serializedChecksum != calculatedChecksum {
str := fmt.Sprintf("block data for block %s checksum "+
"does not match - got %x, want %x", hash,
calculatedChecksum, serializedChecksum)
return nil, makeDbErr(database.ErrCorruption, str, nil)
}
// The network associated with the block must match the current active
// network, otherwise somebody probably put the block files for the
// wrong network in the directory.
serializedNet := byteOrder.Uint32(serializedData[:4])
if serializedNet != uint32(s.network) {
str := fmt.Sprintf("block data for block %s is for the "+
"wrong network - got %d, want %d", hash, serializedNet,
uint32(s.network))
return nil, makeDbErr(database.ErrDriverSpecific, str, nil)
}
// The raw block excludes the network, length of the block, and
// checksum.
return serializedData[8 : n-4], nil
}
// readBlockRegion reads the specified amount of data at the provided offset for
// a given block location. The offset is relative to the start of the
// serialized block (as opposed to the beginning of the block record). This
// function automatically handles all file management such as opening and
// closing files as necessary to stay within the maximum allowed open files
// limit.
//
// Returns ErrDriverSpecific if the data fails to read for any reason.
func (s *blockStore) readBlockRegion(loc blockLocation, offset, numBytes uint32) ([]byte, error) {
// Get the referenced block file handle opening the file as needed. The
// function also handles closing files as needed to avoid going over the
// max allowed open files.
blockFile, err := s.blockFile(loc.blockFileNum)
if err != nil {
return nil, err
}
// Regions are offsets into the actual block, however the serialized
// data for a block includes an initial 4 bytes for network + 4 bytes
// for block length. Thus, add 8 bytes to adjust.
readOffset := loc.fileOffset + 8 + offset
serializedData := make([]byte, numBytes)
_, err = blockFile.file.ReadAt(serializedData, int64(readOffset))
blockFile.RUnlock()
if err != nil {
str := fmt.Sprintf("failed to read region from block file %d, "+
"offset %d, len %d: %s", loc.blockFileNum, readOffset,
numBytes, err)
return nil, makeDbErr(database.ErrDriverSpecific, str, err)
}
return serializedData, nil
}
// syncBlocks performs a file system sync on the flat file associated with the
// store's current write cursor. It is safe to call even when there is not a
// current write file in which case it will have no effect.
//
// This is used when flushing cached metadata updates to disk to ensure all the
// block data is fully written before updating the metadata. This ensures the
// metadata and block data can be properly reconciled in failure scenarios.
func (s *blockStore) syncBlocks() error {
wc := s.writeCursor
wc.RLock()
defer wc.RUnlock()
// Nothing to do if there is no current file associated with the write
// cursor.
wc.curFile.RLock()
defer wc.curFile.RUnlock()
if wc.curFile.file == nil {
return nil
}
// Sync the file to disk.
if err := wc.curFile.file.Sync(); err != nil {
str := fmt.Sprintf("failed to sync file %d: %s", wc.curFileNum,
err)
return makeDbErr(database.ErrDriverSpecific, str, err)
}
return nil
}
// handleRollback rolls the block files on disk back to the provided file number
// and offset. This involves potentially deleting and truncating the files that
// were partially written.
//
// There are effectively two scenarios to consider here:
// 1) Transient write failures from which recovery is possible
// 2) More permanent failures such as hard disk death and/or removal
//
// In either case, the write cursor will be repositioned to the old block file
// offset regardless of any other errors that occur while attempting to undo
// writes.
//
// For the first scenario, this will lead to any data which failed to be undone
// being overwritten and thus behaves as desired as the system continues to run.
//
// For the second scenario, the metadata which stores the current write cursor
// position within the block files will not have been updated yet and thus if
// the system eventually recovers (perhaps the hard drive is reconnected), it
// will also lead to any data which failed to be undone being overwritten and
// thus behaves as desired.
//
// Therefore, any errors are simply logged at a warning level rather than being
// returned since there is nothing more that could be done about it anyways.
func (s *blockStore) handleRollback(oldBlockFileNum, oldBlockOffset uint32) {
// Grab the write cursor mutex since it is modified throughout this
// function.
wc := s.writeCursor
wc.Lock()
defer wc.Unlock()
// Nothing to do if the rollback point is the same as the current write
// cursor.
if wc.curFileNum == oldBlockFileNum && wc.curOffset == oldBlockOffset {
return
}
// Regardless of any failures that happen below, reposition the write
// cursor to the old block file and offset.
defer func() {
wc.curFileNum = oldBlockFileNum
wc.curOffset = oldBlockOffset
}()
log.Debugf("ROLLBACK: Rolling back to file %d, offset %d",
oldBlockFileNum, oldBlockOffset)
// Close the current write file if it needs to be deleted. Then delete
// all files that are newer than the provided rollback file while
// also moving the write cursor file backwards accordingly.
if wc.curFileNum > oldBlockFileNum {
wc.curFile.Lock()
if wc.curFile.file != nil {
_ = wc.curFile.file.Close()
wc.curFile.file = nil
}
wc.curFile.Unlock()
}
for ; wc.curFileNum > oldBlockFileNum; wc.curFileNum-- {
if err := s.deleteFileFunc(wc.curFileNum); err != nil {
log.Warnf("ROLLBACK: Failed to delete block file "+
"number %d: %s", wc.curFileNum, err)
return
}
}
// Open the file for the current write cursor if needed.
wc.curFile.Lock()
if wc.curFile.file == nil {
obf, err := s.openWriteFileFunc(wc.curFileNum)
if err != nil {
wc.curFile.Unlock()
log.Warnf("ROLLBACK: %s", err)
return
}
wc.curFile.file = obf
}
// Truncate the file to the provided rollback offset.
if err := wc.curFile.file.Truncate(int64(oldBlockOffset)); err != nil {
wc.curFile.Unlock()
log.Warnf("ROLLBACK: Failed to truncate file %d: %s",
wc.curFileNum, err)
return
}
// Sync the file to disk.
err := wc.curFile.file.Sync()
wc.curFile.Unlock()
if err != nil {
log.Warnf("ROLLBACK: Failed to sync file %d: %s",
wc.curFileNum, err)
return
}
}
// scanBlockFiles searches the database directory for all flat block files to
// find the end of the most recent file. This position is considered the
// current write cursor which is also stored in the metadata. Thus, it is used
// to detect unexpected shutdowns in the middle of writes so the block files
// can be reconciled.
func scanBlockFiles(dbPath string) (int, uint32) {
lastFile := -1
fileLen := uint32(0)
for i := 0; ; i++ {
filePath := blockFilePath(dbPath, uint32(i))
st, err := os.Stat(filePath)
if err != nil {
break
}
lastFile = i
fileLen = uint32(st.Size())
}
log.Tracef("Scan found latest block file #%d with length %d", lastFile,
fileLen)
return lastFile, fileLen
}
// newBlockStore returns a new block store with the current block file number
// and offset set and all fields initialized.
func newBlockStore(basePath string, network wire.KaspaNet) *blockStore {
// Look for the end of the latest block file to determine what the
// write cursor position is from the viewpoint of the block files on
// disk.
fileNum, fileOff := scanBlockFiles(basePath)
if fileNum == -1 {
fileNum = 0
fileOff = 0
}
store := &blockStore{
network: network,
basePath: basePath,
maxBlockFileSize: maxBlockFileSize,
maxOpenFiles: maxOpenFiles,
openBlockFiles: make(map[uint32]*lockableFile),
openBlocksLRU: list.New(),
fileNumToLRUElem: make(map[uint32]*list.Element),
writeCursor: &writeCursor{
curFile: &lockableFile{},
curFileNum: uint32(fileNum),
curOffset: fileOff,
},
}
store.openFileFunc = store.openFile
store.openWriteFileFunc = store.openWriteFile
store.deleteFileFunc = store.deleteFile
return store
}
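
To see the 12-byte block location layout used by serializeBlockLoc and deserializeBlockLoc above in isolation, here is a self-contained round-trip sketch. The package-level byteOrder is defined elsewhere in ffldb; little-endian is an assumption made for this example.

```Go
package main

import (
	"encoding/binary"
	"fmt"
)

// byteOrder is assumed little-endian for this standalone sketch.
var byteOrder = binary.LittleEndian

type blockLocation struct {
	blockFileNum uint32
	fileOffset   uint32
	blockLen     uint32
}

// serializeBlockLoc mirrors the 12-byte format: [0:4] file, [4:8] offset,
// [8:12] block length.
func serializeBlockLoc(loc blockLocation) []byte {
	var serialized [12]byte
	byteOrder.PutUint32(serialized[0:4], loc.blockFileNum)
	byteOrder.PutUint32(serialized[4:8], loc.fileOffset)
	byteOrder.PutUint32(serialized[8:12], loc.blockLen)
	return serialized[:]
}

func deserializeBlockLoc(serialized []byte) blockLocation {
	return blockLocation{
		blockFileNum: byteOrder.Uint32(serialized[0:4]),
		fileOffset:   byteOrder.Uint32(serialized[4:8]),
		blockLen:     byteOrder.Uint32(serialized[8:12]),
	}
}

func main() {
	loc := blockLocation{blockFileNum: 3, fileOffset: 1024, blockLen: 280}
	roundTripped := deserializeBlockLoc(serializeBlockLoc(loc))
	fmt.Println(roundTripped == loc) // true
}
```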

View File

@@ -1,108 +0,0 @@
package ffldb
import (
"os"
"testing"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
func TestDeleteFile(t *testing.T) {
testBlock := util.NewBlock(wire.NewMsgBlock(
wire.NewBlockHeader(1, []*daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, 0, 0)))
tests := []struct {
fileNum uint32
expectedErr bool
}{
{0, false},
{1, true},
}
for _, test := range tests {
func() {
pdb := newTestDb("TestDeleteFile", t)
defer func() {
if !pdb.closed {
pdb.Close()
}
}()
err := pdb.Update(func(dbTx database.Tx) error {
return dbTx.StoreBlock(testBlock)
})
if err != nil {
t.Fatalf("TestDeleteFile: Error storing block: %s", err)
}
err = pdb.Close()
if err != nil {
t.Fatalf("TestDeleteFile: Error closing file before deletion: %s", err)
}
err = pdb.store.deleteFile(test.fileNum)
if (err != nil) != test.expectedErr {
t.Errorf("TestDeleteFile: %d: Expected error status: %t, but got: %t",
test.fileNum, test.expectedErr, (err != nil))
}
if err == nil {
filePath := blockFilePath(pdb.store.basePath, test.fileNum)
if _, err := os.Stat(filePath); !os.IsNotExist(err) {
t.Errorf("TestDeleteFile: %d: File %s still exists", test.fileNum, filePath)
}
}
}()
}
}
// TestHandleRollbackErrors tests all error-cases in *blockStore.handleRollback().
// The non-error-cases are tested in the more general tests.
// Since handleRollback just logs errors, this test simply causes all error-cases to be hit,
// and makes sure no panic occurs, as well as ensures the writeCursor was updated correctly.
func TestHandleRollbackErrors(t *testing.T) {
testBlock := util.NewBlock(wire.NewMsgBlock(
wire.NewBlockHeader(1, []*daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, 0, 0)))
testBlockSize := uint32(testBlock.MsgBlock().SerializeSize())
tests := []struct {
name string
fileNum uint32
offset uint32
}{
// offset should be the size of the block plus 12 bytes of overhead: 4 bytes each for the block network, data length, and checksum
{"Nothing to rollback", 1, testBlockSize + 12},
}
for _, test := range tests {
func() {
pdb := newTestDb("TestHandleRollbackErrors", t)
defer pdb.Close()
// Set maxBlockFileSize to testBlockSize so that writeCursor.curFileNum increments
pdb.store.maxBlockFileSize = testBlockSize
err := pdb.Update(func(dbTx database.Tx) error {
return dbTx.StoreBlock(testBlock)
})
if err != nil {
t.Fatalf("TestHandleRollbackErrors: %s: Error adding test block to database: %s", test.name, err)
}
pdb.store.handleRollback(test.fileNum, test.offset)
if pdb.store.writeCursor.curFileNum != test.fileNum {
t.Errorf("TestHandleRollbackErrors: %s: Expected fileNum: %d, but got: %d",
test.name, test.fileNum, pdb.store.writeCursor.curFileNum)
}
if pdb.store.writeCursor.curOffset != test.offset {
t.Errorf("TestHandleRollbackErrors: %s: offset fileNum: %d, but got: %d",
test.name, test.offset, pdb.store.writeCursor.curOffset)
}
}()
}
}

View File

@@ -1,43 +0,0 @@
package ffldb
import (
"os"
"path"
"path/filepath"
"testing"
"github.com/kaspanet/kaspad/wire"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/opt"
)
func newTestDb(testName string, t *testing.T) *db {
dbPath := path.Join(os.TempDir(), "db_test", testName)
err := os.RemoveAll(dbPath)
if err != nil && !os.IsNotExist(err) {
t.Fatalf("%s: Error deleting database folder before starting: %s", testName, err)
}
network := wire.Simnet
opts := opt.Options{
ErrorIfExist: true,
Strict: opt.DefaultStrict,
Compression: opt.NoCompression,
Filter: filter.NewBloomFilter(10),
}
metadataDbPath := filepath.Join(dbPath, metadataDbName)
ldb, err := leveldb.OpenFile(metadataDbPath, &opts)
if err != nil {
t.Errorf("%s: Error opening metadataDbPath: %s", testName, err)
}
err = initDB(ldb)
if err != nil {
t.Errorf("%s: Error initializing metadata Db: %s", testName, err)
}
store := newBlockStore(dbPath, network)
cache := newDbCache(ldb, store, defaultCacheSize, defaultFlushSecs)
return &db{store: store, cache: cache}
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,658 +0,0 @@
package ffldb
import (
"bytes"
"testing"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
// TestCursorDeleteErrors tests all error-cases in *cursor.Delete().
// The non-error-cases are tested in the more general tests.
func TestCursorDeleteErrors(t *testing.T) {
pdb := newTestDb("TestCursorDeleteErrors", t)
nestedBucket := []byte("nestedBucket")
key := []byte("key")
value := []byte("value")
err := pdb.Update(func(dbTx database.Tx) error {
metadata := dbTx.Metadata()
_, err := metadata.CreateBucket(nestedBucket)
if err != nil {
return err
}
return metadata.Put(key, value)
})
if err != nil {
t.Fatalf("TestCursorDeleteErrors: Error setting up test-database: %s", err)
}
// Check for error when attempted to delete a bucket
err = pdb.Update(func(dbTx database.Tx) error {
cursor := dbTx.Metadata().Cursor()
found := false
for ok := cursor.First(); ok; ok = cursor.Next() {
if bytes.Equal(cursor.Key(), nestedBucket) {
found = true
break
}
}
if !found {
t.Errorf("TestCursorDeleteErrors: Key '%s' not found", string(nestedBucket))
}
err := cursor.Delete()
if !database.IsErrorCode(err, database.ErrIncompatibleValue) {
t.Errorf("TestCursorDeleteErrors: Expected error of type ErrIncompatibleValue, "+
"when deleting bucket, but got %v", err)
}
return nil
})
if err != nil {
t.Fatalf("TestCursorDeleteErrors: Unexpected error from pdb.Update "+
"when attempting to delete bucket: %s", err)
}
// Check for error when transaction is not writable
err = pdb.View(func(dbTx database.Tx) error {
cursor := dbTx.Metadata().Cursor()
if !cursor.First() {
t.Fatal("TestCursorDeleteErrors: Nothing in cursor when testing for delete in " +
"non-writable transaction")
}
err := cursor.Delete()
if !database.IsErrorCode(err, database.ErrTxNotWritable) {
t.Errorf("TestCursorDeleteErrors: Expected error of type ErrTxNotWritable "+
"when calling .Delete() on non-writable transaction, but got '%v' instead", err)
}
return nil
})
if err != nil {
t.Fatalf("TestCursorDeleteErrors: Unexpected error from pdb.Update "+
"when attempting to delete on non-writable transaction: %s", err)
}
// Check for error when cursor was exhausted
err = pdb.Update(func(dbTx database.Tx) error {
cursor := dbTx.Metadata().Cursor()
for ok := cursor.First(); ok; ok = cursor.Next() {
}
err := cursor.Delete()
if !database.IsErrorCode(err, database.ErrIncompatibleValue) {
t.Errorf("TestCursorDeleteErrors: Expected error of type ErrIncompatibleValue "+
"when calling .Delete() on exhausted cursor, but got '%v' instead", err)
}
return nil
})
if err != nil {
t.Fatalf("TestCursorDeleteErrors: Unexpected error from pdb.Update "+
"when attempting to delete on exhausted cursor: %s", err)
}
// Check for error when transaction is closed
tx, err := pdb.Begin(true)
if err != nil {
t.Fatalf("TestCursorDeleteErrors: Error in pdb.Begin(): %s", err)
}
cursor := tx.Metadata().Cursor()
err = tx.Commit()
if err != nil {
t.Fatalf("TestCursorDeleteErrors: Error in tx.Commit(): %s", err)
}
err = cursor.Delete()
if !database.IsErrorCode(err, database.ErrTxClosed) {
t.Errorf("TestCursorDeleteErrors: Expected error of type ErrTxClosed "+
"when calling .Delete() on with closed transaction, but got '%s' instead", err)
}
}
func TestSkipPendingUpdates(t *testing.T) {
pdb := newTestDb("TestSkipPendingUpdates", t)
defer pdb.Close()
value := []byte("value")
// Add numbered prefixes to keys so that they are in expected order, and before any other keys
firstKey := []byte("1 - first")
toDeleteKey := []byte("2 - toDelete")
toUpdateKey := []byte("3 - toUpdate")
secondKey := []byte("4 - second")
// create initial metadata for test
err := pdb.Update(func(dbTx database.Tx) error {
metadata := dbTx.Metadata()
if err := metadata.Put(firstKey, value); err != nil {
return err
}
if err := metadata.Put(toDeleteKey, value); err != nil {
return err
}
if err := metadata.Put(toUpdateKey, value); err != nil {
return err
}
if err := metadata.Put(secondKey, value); err != nil {
return err
}
return nil
})
if err != nil {
t.Fatalf("TestSkipPendingUpdates: Error adding to metadata: %s", err)
}
// test skips
err = pdb.Update(func(dbTx database.Tx) error {
metadata := dbTx.Metadata()
if err := metadata.Delete(toDeleteKey); err != nil {
return err
}
if err := metadata.Put(toUpdateKey, value); err != nil {
return err
}
cursor := metadata.Cursor().(*cursor)
dbIter := cursor.dbIter
// Check that first is ok
dbIter.First()
expectedKey := bucketizedKey(metadataBucketID, firstKey)
if !bytes.Equal(dbIter.Key(), expectedKey) {
t.Errorf("TestSkipPendingUpdates: 1: key expected to be %v but is %v", expectedKey, dbIter.Key())
}
// Go to the next key, which is toDelete
dbIter.Next()
expectedKey = bucketizedKey(metadataBucketID, toDeleteKey)
if !bytes.Equal(dbIter.Key(), expectedKey) {
t.Errorf("TestSkipPendingUpdates: 2: key expected to be %s but is %s", expectedKey, dbIter.Key())
}
// at this point toDeleteKey and toUpdateKey should be skipped
cursor.skipPendingUpdates(true)
expectedKey = bucketizedKey(metadataBucketID, secondKey)
if !bytes.Equal(dbIter.Key(), expectedKey) {
t.Errorf("TestSkipPendingUpdates: 3: key expected to be %s but is %s", expectedKey, dbIter.Key())
}
// now traverse backwards - should get toUpdate
dbIter.Prev()
expectedKey = bucketizedKey(metadataBucketID, toUpdateKey)
if !bytes.Equal(dbIter.Key(), expectedKey) {
t.Errorf("TestSkipPendingUpdates: 4: key expected to be %s but is %s", expectedKey, dbIter.Key())
}
// at this point toUpdateKey and toDeleteKey should be skipped
cursor.skipPendingUpdates(false)
expectedKey = bucketizedKey(metadataBucketID, firstKey)
if !bytes.Equal(dbIter.Key(), expectedKey) {
t.Errorf("TestSkipPendingUpdates: 5: key expected to be %s but is %s", expectedKey, dbIter.Key())
}
return nil
})
if err != nil {
t.Fatalf("TestSkipPendingUpdates: Error running main part of test: %s", err)
}
}
// TestCursor tests various edge-cases in cursor that were not hit by the more general tests
func TestCursor(t *testing.T) {
pdb := newTestDb("TestCursor", t)
defer pdb.Close()
value := []byte("value")
// Add numbered prefixes to keys so that they are in expected order, and before any other keys
firstKey := []byte("1 - first")
toDeleteKey := []byte("2 - toDelete")
toUpdateKey := []byte("3 - toUpdate")
secondKey := []byte("4 - second")
// create initial metadata for test
err := pdb.Update(func(dbTx database.Tx) error {
metadata := dbTx.Metadata()
if err := metadata.Put(firstKey, value); err != nil {
return err
}
if err := metadata.Put(toDeleteKey, value); err != nil {
return err
}
if err := metadata.Put(toUpdateKey, value); err != nil {
return err
}
if err := metadata.Put(secondKey, value); err != nil {
return err
}
return nil
})
if err != nil {
t.Fatalf("Error adding to metadata: %s", err)
}
// run the actual tests
err = pdb.Update(func(dbTx database.Tx) error {
metadata := dbTx.Metadata()
if err := metadata.Delete(toDeleteKey); err != nil {
return err
}
if err := metadata.Put(toUpdateKey, value); err != nil {
return err
}
cursor := metadata.Cursor().(*cursor)
// Check prev when currentIter == nil
if ok := cursor.Prev(); ok {
t.Error("1: .Prev() should have returned false, but have returned true")
}
// Same thing for .Next()
for ok := cursor.First(); ok; ok = cursor.Next() {
}
if ok := cursor.Next(); ok {
t.Error("2: .Next() should have returned false, but have returned true")
}
// Check that Key(), rawKey(), Value(), and rawValue() all return nil when currentIter == nil
if key := cursor.Key(); key != nil {
t.Errorf("3: .Key() should have returned nil, but have returned '%s' instead", key)
}
if key := cursor.rawKey(); key != nil {
t.Errorf("4: .rawKey() should have returned nil, but have returned '%s' instead", key)
}
if value := cursor.Value(); value != nil {
t.Errorf("5: .Value() should have returned nil, but have returned '%s' instead", value)
}
if value := cursor.rawValue(); value != nil {
t.Errorf("6: .rawValue() should have returned nil, but have returned '%s' instead", value)
}
// Check rawValue in normal operation
cursor.First()
if rawValue := cursor.rawValue(); !bytes.Equal(rawValue, value) {
t.Errorf("7: rawValue should have returned '%s' but have returned '%s' instead", value, rawValue)
}
return nil
})
if err != nil {
t.Fatalf("Error running the actual tests: %s", err)
}
}
// TestCreateBucketErrors tests all error-cases in *bucket.CreateBucket().
// The non-error-cases are tested in the more general tests.
func TestCreateBucketErrors(t *testing.T) {
testKey := []byte("key")
tests := []struct {
name string
key []byte
isWritable bool
isClosed bool
expectedErr database.ErrorCode
}{
{"empty key", []byte{}, true, false, database.ErrBucketNameRequired},
{"transaction is closed", testKey, true, true, database.ErrTxClosed},
{"transaction is not writable", testKey, false, false, database.ErrTxNotWritable},
{"key already exists", blockIdxBucketName, true, false, database.ErrBucketExists},
}
for _, test := range tests {
func() {
pdb := newTestDb("TestCreateBucketErrors", t)
defer pdb.Close()
tx, err := pdb.Begin(test.isWritable)
if err != nil {
t.Fatalf("TestCreateBucketErrors: %s: error from pdb.Begin: %s", test.name, err)
}
defer tx.Commit()
if test.isClosed {
err = tx.Commit()
if err != nil {
t.Fatalf("TestCreateBucketErrors: %s: error from tx.Commit: %s", test.name, err)
}
}
metadata := tx.Metadata()
_, err = metadata.CreateBucket(test.key)
if !database.IsErrorCode(err, test.expectedErr) {
t.Errorf("TestCreateBucketErrors: %s: Expected error of type %d "+
"but got '%v'", test.name, test.expectedErr, err)
}
}()
}
}
// TestPutErrors tests all error-cases in *bucket.Put().
// The non-error-cases are tested in the more general tests.
func TestPutErrors(t *testing.T) {
testKey := []byte("key")
testValue := []byte("value")
tests := []struct {
name string
key []byte
isWritable bool
isClosed bool
expectedErr database.ErrorCode
}{
{"empty key", []byte{}, true, false, database.ErrKeyRequired},
{"transaction is closed", testKey, true, true, database.ErrTxClosed},
{"transaction is not writable", testKey, false, false, database.ErrTxNotWritable},
}
for _, test := range tests {
func() {
pdb := newTestDb("TestPutErrors", t)
defer pdb.Close()
tx, err := pdb.Begin(test.isWritable)
if err != nil {
t.Fatalf("TestPutErrors: %s: error from pdb.Begin: %s", test.name, err)
}
defer tx.Commit()
if test.isClosed {
err = tx.Commit()
if err != nil {
t.Fatalf("TestPutErrors: %s: error from tx.Commit: %s", test.name, err)
}
}
metadata := tx.Metadata()
err = metadata.Put(test.key, testValue)
if !database.IsErrorCode(err, test.expectedErr) {
t.Errorf("TestPutErrors: %s: Expected error of type %d "+
"but got '%v'", test.name, test.expectedErr, err)
}
}()
}
}
// TestGetErrors tests all error-cases in *bucket.Get().
// The non-error-cases are tested in the more general tests.
func TestGetErrors(t *testing.T) {
testKey := []byte("key")
tests := []struct {
name string
key []byte
isClosed bool
}{
{"empty key", []byte{}, false},
{"transaction is closed", testKey, true},
}
for _, test := range tests {
func() {
pdb := newTestDb("TestGetErrors", t)
defer pdb.Close()
tx, err := pdb.Begin(false)
if err != nil {
t.Fatalf("TestGetErrors: %s: error from pdb.Begin: %s", test.name, err)
}
defer tx.Rollback()
if test.isClosed {
err = tx.Rollback()
if err != nil {
t.Fatalf("TestGetErrors: %s: error from tx.Commit: %s", test.name, err)
}
}
metadata := tx.Metadata()
if result := metadata.Get(test.key); result != nil {
t.Errorf("TestGetErrors: %s: Expected to return nil, but got %v", test.name, result)
}
}()
}
}
// TestDeleteErrors tests all error-cases in *bucket.Delete().
// The non-error-cases are tested in the more general tests.
func TestDeleteErrors(t *testing.T) {
testKey := []byte("key")
tests := []struct {
name string
key []byte
isWritable bool
isClosed bool
expectedErr database.ErrorCode
}{
{"empty key", []byte{}, true, false, database.ErrKeyRequired},
{"transaction is closed", testKey, true, true, database.ErrTxClosed},
{"transaction is not writable", testKey, false, false, database.ErrTxNotWritable},
}
for _, test := range tests {
func() {
pdb := newTestDb("TestDeleteErrors", t)
defer pdb.Close()
tx, err := pdb.Begin(test.isWritable)
if err != nil {
t.Fatalf("TestDeleteErrors: %s: error from pdb.Begin: %s", test.name, err)
}
defer tx.Commit()
if test.isClosed {
err = tx.Commit()
if err != nil {
t.Fatalf("TestDeleteErrors: %s: error from tx.Commit: %s", test.name, err)
}
}
metadata := tx.Metadata()
err = metadata.Delete(test.key)
if !database.IsErrorCode(err, test.expectedErr) {
t.Errorf("TestDeleteErrors: %s: Expected error of type %d "+
"but got '%v'", test.name, test.expectedErr, err)
}
}()
}
}
func TestForEachBucket(t *testing.T) {
pdb := newTestDb("TestForEachBucket", t)
// set-up test
testKey := []byte("key")
testValue := []byte("value")
bucketKeys := [][]byte{{1}, {2}, {3}}
err := pdb.Update(func(dbTx database.Tx) error {
metadata := dbTx.Metadata()
for _, bucketKey := range bucketKeys {
bucket, err := metadata.CreateBucket(bucketKey)
if err != nil {
return err
}
err = bucket.Put(testKey, testValue)
if err != nil {
return err
}
}
return nil
})
if err != nil {
t.Fatalf("TestForEachBucket: Error setting up test-database: %s", err)
}
// actual test
err = pdb.View(func(dbTx database.Tx) error {
i := 0
metadata := dbTx.Metadata()
err := metadata.ForEachBucket(func(bucketKey []byte) error {
if i >= len(bucketKeys) { // in case there are any other buckets in metadata
return nil
}
expectedBucketKey := bucketKeys[i]
if !bytes.Equal(expectedBucketKey, bucketKey) {
t.Errorf("TestForEachBucket: %d: Expected bucket key: %v, but got: %v",
i, expectedBucketKey, bucketKey)
return nil
}
bucket := metadata.Bucket(bucketKey)
if bucket == nil {
t.Errorf("TestForEachBucket: %d: Bucket is nil", i)
return nil
}
value := bucket.Get(testKey)
if !bytes.Equal(testValue, value) {
t.Errorf("TestForEachBucket: %d: Expected value: %s, but got: %s",
i, testValue, value)
return nil
}
i++
return nil
})
return err
})
if err != nil {
t.Fatalf("TestForEachBucket: Error running actual tests: %s", err)
}
}
// TestStoreBlockErrors tests all error-cases in *tx.StoreBlock().
// The non-error-cases are tested in the more general tests.
func TestStoreBlockErrors(t *testing.T) {
testBlock := util.NewBlock(wire.NewMsgBlock(wire.NewBlockHeader(1, []*daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, 0, 0)))
tests := []struct {
name string
isWritable bool
isClosed bool
expectedErr database.ErrorCode
}{
{"transaction is closed", true, true, database.ErrTxClosed},
{"transaction is not writable", false, false, database.ErrTxNotWritable},
}
for _, test := range tests {
func() {
pdb := newTestDb("TestStoreBlockErrors", t)
defer pdb.Close()
tx, err := pdb.Begin(test.isWritable)
if err != nil {
t.Fatalf("TestStoreBlockErrors: %s: error from pdb.Begin: %s", test.name, err)
}
defer tx.Commit()
if test.isClosed {
err = tx.Commit()
if err != nil {
t.Fatalf("TestStoreBlockErrors: %s: error from tx.Commit: %s", test.name, err)
}
}
err = tx.StoreBlock(testBlock)
if !database.IsErrorCode(err, test.expectedErr) {
t.Errorf("TestStoreBlockErrors: %s: Expected error of type %d "+
"but got '%v'", test.name, test.expectedErr, err)
}
}()
}
}
// TestDeleteDoubleNestedBucket tests what happens when bucket.DeleteBucket()
// is invoked on a bucket that contains a nested bucket.
func TestDeleteDoubleNestedBucket(t *testing.T) {
pdb := newTestDb("TestDeleteDoubleNestedBucket", t)
defer pdb.Close()
firstKey := []byte("first")
secondKey := []byte("second")
key := []byte("key")
value := []byte("value")
var rawKey, rawSecondKey []byte
// Test setup
err := pdb.Update(func(dbTx database.Tx) error {
metadata := dbTx.Metadata()
firstBucket, err := metadata.CreateBucket(firstKey)
if err != nil {
return errors.Errorf("Error creating first bucket: %s", err)
}
secondBucket, err := firstBucket.CreateBucket(secondKey)
if err != nil {
return errors.Errorf("Error creating second bucket: %s", err)
}
if err := secondBucket.Put(key, value); err != nil {
return errors.Errorf("Error putting key into second bucket: %s", err)
}
// extract rawKey from cursor and make sure it's in raw database
c := secondBucket.Cursor()
for ok := c.First(); ok && !bytes.Equal(c.Key(), key); ok = c.Next() {
}
if !bytes.Equal(c.Key(), key) {
return errors.Errorf("Couldn't find key to extract rawKey")
}
rawKey = c.(*cursor).rawKey()
if dbTx.(*transaction).fetchKey(rawKey) == nil {
return errors.Errorf("rawKey not found")
}
// extract rawSecondKey from cursor and make sure it's in raw database
c = firstBucket.Cursor()
for ok := c.First(); ok && !bytes.Equal(c.Key(), secondKey); ok = c.Next() {
}
if !bytes.Equal(c.Key(), secondKey) {
return errors.Errorf("Couldn't find secondKey to extract rawSecondKey")
}
rawSecondKey = c.(*cursor).rawKey()
if dbTx.(*transaction).fetchKey(rawSecondKey) == nil {
return errors.Errorf("rawSecondKey not found for some reason")
}
return nil
})
if err != nil {
t.Fatalf("TestDeleteDoubleNestedBucket: Error in test setup pdb.Update: %s", err)
}
// Actual test
err = pdb.Update(func(dbTx database.Tx) error {
metadata := dbTx.Metadata()
err := metadata.DeleteBucket(firstKey)
if err != nil {
return err
}
if dbTx.(*transaction).fetchKey(rawSecondKey) != nil {
t.Error("TestDeleteDoubleNestedBucket: secondBucket was not deleted")
}
if dbTx.(*transaction).fetchKey(rawKey) != nil {
t.Error("TestDeleteDoubleNestedBucket: value inside secondBucket was not deleted")
}
return nil
})
if err != nil {
t.Fatalf("TestDeleteDoubleNestedBucket: Error in actual test pdb.Update: %s", err)
}
}

View File

@@ -1,647 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package ffldb
import (
"bytes"
"fmt"
"sync"
"time"
"github.com/kaspanet/kaspad/database/internal/treap"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/util"
)
const (
// defaultCacheSize is the default size for the database cache.
defaultCacheSize = 100 * 1024 * 1024 // 100 MB
// defaultFlushSecs is the default number of seconds to use as a
// threshold in between database cache flushes when the cache size has
// not been exceeded.
defaultFlushSecs = 300 // 5 minutes
)
// ldbCacheIter wraps a treap iterator to provide the additional functionality
// needed to satisfy the leveldb iterator.Iterator interface.
type ldbCacheIter struct {
*treap.Iterator
}
// Enforce ldbCacheIterator implements the leveldb iterator.Iterator interface.
var _ iterator.Iterator = (*ldbCacheIter)(nil)
// Error is only provided to satisfy the iterator interface as there are no
// errors for this memory-only structure.
//
// This is part of the leveldb iterator.Iterator interface implementation.
func (iter *ldbCacheIter) Error() error {
return nil
}
// SetReleaser is only provided to satisfy the iterator interface as there is no
// need to override it.
//
// This is part of the leveldb iterator.Iterator interface implementation.
func (iter *ldbCacheIter) SetReleaser(releaser util.Releaser) {
}
// Release is only provided to satisfy the iterator interface.
//
// This is part of the leveldb iterator.Iterator interface implementation.
func (iter *ldbCacheIter) Release() {
}
// newLdbCacheIter creates a new treap iterator for the given slice against the
// pending keys for the passed cache snapshot and returns it wrapped in an
// ldbCacheIter so it can be used as a leveldb iterator.
func newLdbCacheIter(snap *dbCacheSnapshot, slice *util.Range) *ldbCacheIter {
iter := snap.pendingKeys.Iterator(slice.Start, slice.Limit)
return &ldbCacheIter{Iterator: iter}
}
// dbCacheIterator defines an iterator over the key/value pairs in the database
// cache and underlying database.
type dbCacheIterator struct {
cacheSnapshot *dbCacheSnapshot
dbIter iterator.Iterator
cacheIter iterator.Iterator
currentIter iterator.Iterator
released bool
}
// Enforce dbCacheIterator implements the leveldb iterator.Iterator interface.
var _ iterator.Iterator = (*dbCacheIterator)(nil)
// skipPendingUpdates skips any keys at the current database iterator position
// that are being updated by the cache. The forwards flag indicates the
// direction the iterator is moving.
func (iter *dbCacheIterator) skipPendingUpdates(forwards bool) {
for iter.dbIter.Valid() {
var skip bool
key := iter.dbIter.Key()
if iter.cacheSnapshot.pendingRemove.Has(key) {
skip = true
} else if iter.cacheSnapshot.pendingKeys.Has(key) {
skip = true
}
if !skip {
break
}
if forwards {
iter.dbIter.Next()
} else {
iter.dbIter.Prev()
}
}
}
// chooseIterator first skips any entries in the database iterator that are
// being updated by the cache and sets the current iterator to the appropriate
// iterator depending on their validity and the order they compare in while taking
// into account the direction flag. When the iterator is being moved forwards
// and both iterators are valid, the iterator with the smaller key is chosen and
// vice versa when the iterator is being moved backwards.
func (iter *dbCacheIterator) chooseIterator(forwards bool) bool {
// Skip any keys at the current database iterator position that are
// being updated by the cache.
iter.skipPendingUpdates(forwards)
// When both iterators are exhausted, the iterator is exhausted too.
if !iter.dbIter.Valid() && !iter.cacheIter.Valid() {
iter.currentIter = nil
return false
}
// Choose the database iterator when the cache iterator is exhausted.
if !iter.cacheIter.Valid() {
iter.currentIter = iter.dbIter
return true
}
// Choose the cache iterator when the database iterator is exhausted.
if !iter.dbIter.Valid() {
iter.currentIter = iter.cacheIter
return true
}
// Both iterators are valid, so choose the iterator with either the
// smaller or larger key depending on the forwards flag.
compare := bytes.Compare(iter.dbIter.Key(), iter.cacheIter.Key())
if (forwards && compare > 0) || (!forwards && compare < 0) {
iter.currentIter = iter.cacheIter
} else {
iter.currentIter = iter.dbIter
}
return true
}
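To make the merge rules concrete: if the backing database holds keys "a" and "c" while the cache has a pending put for "b" and a pending remove for "c", a forward scan yields "a" and then "b". A hedged helper sketch (mergedScan is hypothetical, not part of this file):
// mergedScan walks a snapshot forward and collects the merged view of
// cached and persisted keys, skipping keys that are pending removal.
func mergedScan(snap *dbCacheSnapshot) [][]byte {
	iter := snap.NewIterator(&util.Range{})
	defer iter.Release()
	var keys [][]byte
	for ok := iter.First(); ok; ok = iter.Next() {
		// Copy the key since the iterator may reuse its buffer.
		keys = append(keys, append([]byte(nil), iter.Key()...))
	}
	return keys
}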
// First positions the iterator at the first key/value pair and returns whether
// or not the pair exists.
//
// This is part of the leveldb iterator.Iterator interface implementation.
func (iter *dbCacheIterator) First() bool {
// Seek to the first key in both the database and cache iterators and
// choose the iterator that is both valid and has the smaller key.
iter.dbIter.First()
iter.cacheIter.First()
return iter.chooseIterator(true)
}
// Last positions the iterator at the last key/value pair and returns whether or
// not the pair exists.
//
// This is part of the leveldb iterator.Iterator interface implementation.
func (iter *dbCacheIterator) Last() bool {
// Seek to the last key in both the database and cache iterators and
// choose the iterator that is both valid and has the larger key.
iter.dbIter.Last()
iter.cacheIter.Last()
return iter.chooseIterator(false)
}
// Next moves the iterator one key/value pair forward and returns whether or not
// the pair exists.
//
// This is part of the leveldb iterator.Iterator interface implementation.
func (iter *dbCacheIterator) Next() bool {
// Nothing to return if cursor is exhausted.
if iter.currentIter == nil {
return false
}
// Move the current iterator to the next entry and choose the iterator
// that is both valid and has the smaller key.
iter.currentIter.Next()
return iter.chooseIterator(true)
}
// Prev moves the iterator one key/value pair backward and returns whether or
// not the pair exists.
//
// This is part of the leveldb iterator.Iterator interface implementation.
func (iter *dbCacheIterator) Prev() bool {
// Nothing to return if cursor is exhausted.
if iter.currentIter == nil {
return false
}
// Move the current iterator to the previous entry and choose the
// iterator that is both valid and has the larger key.
iter.currentIter.Prev()
return iter.chooseIterator(false)
}
// Seek positions the iterator at the first key/value pair that is greater than
// or equal to the passed seek key. Returns false if no suitable key was found.
//
// This is part of the leveldb iterator.Iterator interface implementation.
func (iter *dbCacheIterator) Seek(key []byte) bool {
// Seek to the provided key in both the database and cache iterators
// then choose the iterator that is both valid and has the larger key.
iter.dbIter.Seek(key)
iter.cacheIter.Seek(key)
return iter.chooseIterator(true)
}
// Valid indicates whether the iterator is positioned at a valid key/value pair.
// It will be considered invalid when the iterator is newly created or exhausted.
//
// This is part of the leveldb iterator.Iterator interface implementation.
func (iter *dbCacheIterator) Valid() bool {
return iter.currentIter != nil
}
// Key returns the current key the iterator is pointing to.
//
// This is part of the leveldb iterator.Iterator interface implementation.
func (iter *dbCacheIterator) Key() []byte {
// Nothing to return if iterator is exhausted.
if iter.currentIter == nil {
return nil
}
return iter.currentIter.Key()
}
// Value returns the current value the iterator is pointing to.
//
// This is part of the leveldb iterator.Iterator interface implementation.
func (iter *dbCacheIterator) Value() []byte {
// Nothing to return if iterator is exhausted.
if iter.currentIter == nil {
return nil
}
return iter.currentIter.Value()
}
// SetReleaser is only provided to satisfy the iterator interface as there is no
// need to override it.
//
// This is part of the leveldb iterator.Iterator interface implementation.
func (iter *dbCacheIterator) SetReleaser(releaser util.Releaser) {
}
// Release releases the iterator by removing the underlying treap iterator from
// the list of active iterators against the pending keys treap.
//
// This is part of the leveldb iterator.Iterator interface implementation.
func (iter *dbCacheIterator) Release() {
if !iter.released {
iter.dbIter.Release()
iter.cacheIter.Release()
iter.currentIter = nil
iter.released = true
}
}
// Error is only provided to satisfy the iterator interface as there are no
// errors for this memory-only structure.
//
// This is part of the leveldb iterator.Iterator interface implementation.
func (iter *dbCacheIterator) Error() error {
return nil
}
// dbCacheSnapshot defines a snapshot of the database cache and underlying
// database at a particular point in time.
type dbCacheSnapshot struct {
dbSnapshot *leveldb.Snapshot
pendingKeys *treap.Immutable
pendingRemove *treap.Immutable
}
// Has returns whether or not the passed key exists.
func (snap *dbCacheSnapshot) Has(key []byte) bool {
// Check the cached entries first.
if snap.pendingRemove.Has(key) {
return false
}
if snap.pendingKeys.Has(key) {
return true
}
// Consult the database.
hasKey, _ := snap.dbSnapshot.Has(key, nil)
return hasKey
}
// Get returns the value for the passed key. The function will return nil when
// the key does not exist.
func (snap *dbCacheSnapshot) Get(key []byte) []byte {
// Check the cached entries first.
if snap.pendingRemove.Has(key) {
return nil
}
if value := snap.pendingKeys.Get(key); value != nil {
return value
}
// Consult the database.
value, err := snap.dbSnapshot.Get(key, nil)
if err != nil {
return nil
}
return value
}
// Release releases the snapshot.
func (snap *dbCacheSnapshot) Release() {
snap.dbSnapshot.Release()
snap.pendingKeys = nil
snap.pendingRemove = nil
}
// NewIterator returns a new iterator for the snapshot. The newly returned
// iterator is not pointing to a valid item until a call to one of the methods
// to position it is made.
//
// The slice parameter allows the iterator to be limited to a range of keys.
// The start key is inclusive and the limit key is exclusive. Either or both
// can be nil if the functionality is not desired.
func (snap *dbCacheSnapshot) NewIterator(slice *util.Range) *dbCacheIterator {
return &dbCacheIterator{
dbIter: snap.dbSnapshot.NewIterator(slice, nil),
cacheIter: newLdbCacheIter(snap, slice),
cacheSnapshot: snap,
}
}
// dbCache provides a database cache layer backed by an underlying database. It
// allows a maximum cache size and flush interval to be specified such that the
// cache is flushed to the database when the cache size exceeds the maximum
// configured value or it has been longer than the configured interval since the
// last flush. This effectively provides transaction batching so that callers
// can commit transactions at will without incurring large performance hits due
// to frequent disk syncs.
type dbCache struct {
// ldb is the underlying leveldb DB for metadata.
ldb *leveldb.DB
// store is used to sync blocks to flat files.
store *blockStore
// The following fields are related to flushing the cache to persistent
// storage. Note that all flushing is performed in an opportunistic
// fashion. This means that it is only flushed during a transaction or
// when the database cache is closed.
//
// maxSize is the maximum size threshold the cache can grow to before
// it is flushed.
//
// flushInterval is the threshold interval of time that is allowed to
// pass before the cache is flushed.
//
// lastFlush is the time the cache was last flushed. It is used in
// conjunction with the current time and the flush interval.
//
// NOTE: These flush related fields are protected by the database write
// lock.
maxSize uint64
flushInterval time.Duration
lastFlush time.Time
// The following fields hold the keys that need to be stored or deleted
// from the underlying database once the cache is full, enough time has
// passed, or when the database is shutting down. Note that these are
// stored using immutable treaps to support O(1) MVCC snapshots against
// the cached data. The cacheLock is used to protect concurrent access
// for cache updates and snapshots.
cacheLock sync.RWMutex
cachedKeys *treap.Immutable
cachedRemove *treap.Immutable
}
// Snapshot returns a snapshot of the database cache and underlying database at
// a particular point in time.
//
// The snapshot must be released after use by calling Release.
func (c *dbCache) Snapshot() (*dbCacheSnapshot, error) {
dbSnapshot, err := c.ldb.GetSnapshot()
if err != nil {
str := "failed to open transaction"
return nil, convertErr(str, err)
}
// Since the cached keys to be added and removed use an immutable treap,
// a snapshot is simply obtaining the root of the tree under the lock
// which is used to atomically swap the root.
c.cacheLock.RLock()
cacheSnapshot := &dbCacheSnapshot{
dbSnapshot: dbSnapshot,
pendingKeys: c.cachedKeys,
pendingRemove: c.cachedRemove,
}
c.cacheLock.RUnlock()
return cacheSnapshot, nil
}
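A hedged sketch of the read path this enables; hasMetadataKey is a hypothetical helper, not part of this file:
// hasMetadataKey reports whether key is visible through a point-in-time
// snapshot of the cache and the underlying leveldb database.
func hasMetadataKey(c *dbCache, key []byte) (bool, error) {
	snap, err := c.Snapshot()
	if err != nil {
		return false, err
	}
	defer snap.Release()
	return snap.Has(key), nil
}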
// updateDB invokes the passed function in the context of a managed leveldb
// transaction. Any errors returned from the user-supplied function will cause
// the transaction to be rolled back and are returned from this function.
// Otherwise, the transaction is committed when the user-supplied function
// returns a nil error.
func (c *dbCache) updateDB(fn func(ldbTx *leveldb.Transaction) error) error {
// Start a leveldb transaction.
ldbTx, err := c.ldb.OpenTransaction()
if err != nil {
return convertErr("failed to open ldb transaction", err)
}
if err := fn(ldbTx); err != nil {
ldbTx.Discard()
return err
}
// Commit the leveldb transaction and convert any errors as needed.
if err := ldbTx.Commit(); err != nil {
return convertErr("failed to commit leveldb transaction", err)
}
return nil
}
// TreapForEacher is an interface which allows iteration of a treap in ascending
// order using a user-supplied callback for each key/value pair. It mainly
// exists so both mutable and immutable treaps can be atomically committed to
// the database with the same function.
type TreapForEacher interface {
ForEach(func(k, v []byte) bool)
}
// commitTreaps atomically commits all of the passed pending add/update/remove
// updates to the underlying database.
func (c *dbCache) commitTreaps(pendingKeys, pendingRemove TreapForEacher) error {
// Perform all leveldb updates using an atomic transaction.
return c.updateDB(func(ldbTx *leveldb.Transaction) error {
var innerErr error
pendingKeys.ForEach(func(k, v []byte) bool {
if dbErr := ldbTx.Put(k, v, nil); dbErr != nil {
str := fmt.Sprintf("failed to put key %q to "+
"ldb transaction", k)
innerErr = convertErr(str, dbErr)
return false
}
return true
})
if innerErr != nil {
return innerErr
}
pendingRemove.ForEach(func(k, v []byte) bool {
if dbErr := ldbTx.Delete(k, nil); dbErr != nil {
str := fmt.Sprintf("failed to delete "+
"key %q from ldb transaction",
k)
innerErr = convertErr(str, dbErr)
return false
}
return true
})
return innerErr
})
}
// flush flushes the database cache to persistent storage. This involves syncing
// the block store and replaying all transactions that have been applied to the
// cache to the underlying database.
//
// This function MUST be called with the database write lock held.
func (c *dbCache) flush() error {
c.lastFlush = time.Now()
// Sync the current write file associated with the block store. This is
// necessary before writing the metadata to prevent the case where the
// metadata contains information about a block which actually hasn't
// been written yet in unexpected shutdown scenarios.
if err := c.store.syncBlocks(); err != nil {
return err
}
// Since the cached keys to be added and removed use an immutable treap,
// a snapshot is simply obtaining the root of the tree under the lock
// which is used to atomically swap the root.
c.cacheLock.RLock()
cachedKeys := c.cachedKeys
cachedRemove := c.cachedRemove
c.cacheLock.RUnlock()
// Nothing to do if there is no data to flush.
if cachedKeys.Len() == 0 && cachedRemove.Len() == 0 {
return nil
}
// Perform all leveldb updates using an atomic transaction.
if err := c.commitTreaps(cachedKeys, cachedRemove); err != nil {
return err
}
// Clear the cache since it has been flushed.
c.cacheLock.Lock()
c.cachedKeys = treap.NewImmutable()
c.cachedRemove = treap.NewImmutable()
c.cacheLock.Unlock()
return nil
}
// needsFlush returns whether or not the database cache needs to be flushed to
// persistent storage based on its current size, whether or not adding all of
// the entries in the passed database transaction would cause it to exceed the
// configured limit, and how much time has elapsed since the last time the cache
// was flushed.
//
// This function MUST be called with the database write lock held.
func (c *dbCache) needsFlush(tx *transaction) bool {
// A flush is needed when more time has elapsed than the configured
// flush interval.
if time.Since(c.lastFlush) >= c.flushInterval {
return true
}
// A flush is needed when the size of the database cache exceeds the
// specified max cache size. The total calculated size is multiplied by
// 1.5 here to account for additional memory consumption that will be
// needed during the flush as well as old nodes in the cache that are
// referenced by the snapshot used by the transaction.
snap := tx.snapshot
totalSize := snap.pendingKeys.Size() + snap.pendingRemove.Size()
totalSize = uint64(float64(totalSize) * 1.5)
return totalSize > c.maxSize
}
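For a sense of scale with the defaults above: a transaction carrying 70 MiB of pending puts and removes is padded to 70 MiB * 1.5 = 105 MiB, which exceeds the 100 MiB defaultCacheSize, so commitTx flushes the cache and writes that transaction directly to leveldb (the 70 MiB figure is illustrative only).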
// commitTx atomically adds all of the pending keys to add and remove into the
// database cache. When adding the pending keys would cause the size of the
// cache to exceed the max cache size, or the time since the last flush exceeds
// the configured flush interval, the cache will be flushed to the underlying
// persistent database.
//
// This is an atomic operation with respect to the cache in that either all of
// the pending keys to add and remove in the transaction will be applied or none
// of them will.
//
// The database cache itself might be flushed to the underlying persistent
// database even if the transaction fails to apply, but it will only be the
// state of the cache without the transaction applied.
//
// This function MUST be called during a database write transaction which in
// turn implies the database write lock will be held.
func (c *dbCache) commitTx(tx *transaction) error {
// Flush the cache and write the current transaction directly to the
// database if a flush is needed.
if c.needsFlush(tx) {
if err := c.flush(); err != nil {
return err
}
// Perform all leveldb updates using an atomic transaction.
err := c.commitTreaps(tx.pendingKeys, tx.pendingRemove)
if err != nil {
return err
}
// Clear the transaction entries since they have been committed.
tx.pendingKeys = nil
tx.pendingRemove = nil
return nil
}
// At this point a database flush is not needed, so atomically commit
// the transaction to the cache.
// Since the cached keys to be added and removed use an immutable treap,
// a snapshot is simply obtaining the root of the tree under the lock
// which is used to atomically swap the root.
c.cacheLock.RLock()
newCachedKeys := c.cachedKeys
newCachedRemove := c.cachedRemove
c.cacheLock.RUnlock()
// Apply every key to add in the database transaction to the cache.
tx.pendingKeys.ForEach(func(k, v []byte) bool {
newCachedRemove = newCachedRemove.Delete(k)
newCachedKeys = newCachedKeys.Put(k, v)
return true
})
tx.pendingKeys = nil
// Apply every key to remove in the database transaction to the cache.
tx.pendingRemove.ForEach(func(k, v []byte) bool {
newCachedKeys = newCachedKeys.Delete(k)
newCachedRemove = newCachedRemove.Put(k, nil)
return true
})
tx.pendingRemove = nil
// Atomically replace the immutable treaps which hold the cached keys to
// add and delete.
c.cacheLock.Lock()
c.cachedKeys = newCachedKeys
c.cachedRemove = newCachedRemove
c.cacheLock.Unlock()
return nil
}
// Close cleanly shuts down the database cache by syncing all data and closing
// the underlying leveldb database.
//
// This function MUST be called with the database write lock held.
func (c *dbCache) Close() error {
// Flush any outstanding cached entries to disk.
if err := c.flush(); err != nil {
// Even if there is an error while flushing, attempt to close
// the underlying database. The error is ignored since it would
// mask the flush error.
_ = c.ldb.Close()
return err
}
// Close the underlying leveldb database.
if err := c.ldb.Close(); err != nil {
str := "failed to close underlying leveldb database"
return convertErr(str, err)
}
return nil
}
// newDbCache returns a new database cache instance backed by the provided
// leveldb instance. The cache will be flushed to leveldb when the max size
// exceeds the provided value or it has been longer than the provided interval
// since the last flush.
func newDbCache(ldb *leveldb.DB, store *blockStore, maxSize uint64, flushIntervalSecs uint32) *dbCache {
return &dbCache{
ldb: ldb,
store: store,
maxSize: maxSize,
flushInterval: time.Second * time.Duration(flushIntervalSecs),
lastFlush: time.Now(),
cachedKeys: treap.NewImmutable(),
cachedRemove: treap.NewImmutable(),
}
}
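Construction mirrors the newTestDb helper earlier in this diff; a hedged fragment assuming an already-open *leveldb.DB named ldb and a block store path:
store := newBlockStore(dbPath, network)
cache := newDbCache(ldb, store, defaultCacheSize, defaultFlushSecs)
defer cache.Close()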

View File

@@ -1,136 +0,0 @@
package ffldb
import (
"bytes"
"testing"
"github.com/kaspanet/kaspad/database"
ldbutil "github.com/syndtr/goleveldb/leveldb/util"
)
func TestExhaustedDbCacheIterator(t *testing.T) {
db := newTestDb("TestExhaustedDbCacheIterator", t)
defer db.Close()
snapshot, err := db.cache.Snapshot()
if err != nil {
t.Fatalf("TestExhaustedDbCacheIterator: Error creating cache snapshot: %s", err)
}
iterator := snapshot.NewIterator(&ldbutil.Range{})
if next := iterator.Next(); next != false {
t.Errorf("TestExhaustedDbCacheIterator: Expected .Next() = false, but got %v", next)
}
if prev := iterator.Prev(); prev != false {
t.Errorf("TestExhaustedDbCacheIterator: Expected .Prev() = false, but got %v", prev)
}
if key := iterator.Key(); key != nil {
t.Errorf("TestExhaustedDbCacheIterator: Expected .Key() = nil, but got %v", key)
}
if value := iterator.Value(); value != nil {
t.Errorf("TestExhaustedDbCacheIterator: Expected .Value() = nil, but got %v", value)
}
}
// TestLDBIteratorImplPlaceholders hits functions that exist only to implement the leveldb
// iterator.Iterator interface, but serve no other purpose.
func TestLDBIteratorImplPlaceholders(t *testing.T) {
db := newTestDb("TestIteratorImplPlaceholders", t)
defer db.Close()
snapshot, err := db.cache.Snapshot()
if err != nil {
t.Fatalf("TestLDBIteratorImplPlaceholders: Error creating cache snapshot: %s", err)
}
iterator := newLdbCacheIter(snapshot, &ldbutil.Range{})
if err = iterator.Error(); err != nil {
t.Errorf("TestLDBIteratorImplPlaceholders: Expected .Error() = nil, but got %v", err)
}
// Call SetReleaser to achieve coverage of it. Actually does nothing
iterator.SetReleaser(nil)
}
func TestSkipPendingUpdatesCache(t *testing.T) {
pdb := newTestDb("TestSkipPendingUpdatesCache", t)
defer pdb.Close()
value := []byte("value")
// Add numbered prefixes to keys so that they are in expected order, and before any other keys
firstKey := []byte("1 - first")
toDeleteKey := []byte("2 - toDelete")
toUpdateKey := []byte("3 - toUpdate")
secondKey := []byte("4 - second")
// create initial metadata for test
err := pdb.Update(func(dbTx database.Tx) error {
metadata := dbTx.Metadata()
if err := metadata.Put(firstKey, value); err != nil {
return err
}
if err := metadata.Put(toDeleteKey, value); err != nil {
return err
}
if err := metadata.Put(toUpdateKey, value); err != nil {
return err
}
if err := metadata.Put(secondKey, value); err != nil {
return err
}
return nil
})
if err != nil {
t.Fatalf("Error adding to metadata: %s", err)
}
err = pdb.cache.flush()
if err != nil {
t.Fatalf("Error flushing cache: %s", err)
}
// test skips
err = pdb.Update(func(dbTx database.Tx) error {
snapshot, err := pdb.cache.Snapshot()
if err != nil {
t.Fatalf("TestSkipPendingUpdatesCache: Error getting snapshot: %s", err)
}
iterator := snapshot.NewIterator(&ldbutil.Range{})
snapshot.pendingRemove = snapshot.pendingRemove.Put(bucketizedKey(metadataBucketID, toDeleteKey), value)
snapshot.pendingKeys = snapshot.pendingKeys.Put(bucketizedKey(metadataBucketID, toUpdateKey), value)
// Check that first is ok
iterator.First()
expectedKey := bucketizedKey(metadataBucketID, firstKey)
actualKey := iterator.Key()
if !bytes.Equal(actualKey, expectedKey) {
t.Errorf("TestSkipPendingUpdatesCache: 1: key expected to be %v but is %v", expectedKey, actualKey)
}
// Go to the next key, which is second, toDelete and toUpdate will be skipped
iterator.Next()
expectedKey = bucketizedKey(metadataBucketID, secondKey)
actualKey = iterator.Key()
if !bytes.Equal(actualKey, expectedKey) {
t.Errorf("TestSkipPendingUpdatesCache: 2: key expected to be %s but is %s", expectedKey, actualKey)
}
// now traverse backwards - should get first, toUpdate and toDelete will be skipped
iterator.Prev()
expectedKey = bucketizedKey(metadataBucketID, firstKey)
actualKey = iterator.Key()
if !bytes.Equal(actualKey, expectedKey) {
t.Errorf("TestSkipPendingUpdatesCache: 4: key expected to be %s but is %s", expectedKey, actualKey)
}
return nil
})
if err != nil {
t.Fatalf("TestSkipPendingUpdatesCache: Error running main part of test: %s", err)
}
}

View File

@@ -1,25 +0,0 @@
/*
Package ffldb implements a driver for the database package that uses leveldb
for the backing metadata and flat files for block storage.
This driver is the recommended driver for use with kaspad. It makes use of leveldb
for the metadata, flat files for block storage, and checksums in key areas to
ensure data integrity.
Usage
This package is a driver to the database package and provides the database type
of "ffldb". The parameters the Open and Create functions take are the
database path as a string and the block network:
db, err := database.Open("ffldb", "path/to/database", wire.Mainnet)
if err != nil {
// Handle error
}
db, err := database.Create("ffldb", "path/to/database", wire.Mainnet)
if err != nil {
// Handle error
}
*/
package ffldb

View File

@@ -1,60 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package ffldb
import (
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
)
const (
dbType = "ffldb"
)
// parseArgs parses the arguments from the database Open/Create methods.
func parseArgs(funcName string, args ...interface{}) (string, wire.KaspaNet, error) {
if len(args) != 2 {
return "", 0, errors.Errorf("invalid arguments to %s.%s -- "+
"expected database path and block network", dbType,
funcName)
}
dbPath, ok := args[0].(string)
if !ok {
return "", 0, errors.Errorf("first argument to %s.%s is invalid -- "+
"expected database path string", dbType, funcName)
}
network, ok := args[1].(wire.KaspaNet)
if !ok {
return "", 0, errors.Errorf("second argument to %s.%s is invalid -- "+
"expected block network", dbType, funcName)
}
return dbPath, network, nil
}
// openDBDriver is the callback provided during driver registration that opens
// an existing database for use.
func openDBDriver(args ...interface{}) (database.DB, error) {
dbPath, network, err := parseArgs("Open", args...)
if err != nil {
return nil, err
}
return openDB(dbPath, network, false)
}
// createDBDriver is the callback provided during driver registration that
// creates, initializes, and opens a database for use.
func createDBDriver(args ...interface{}) (database.DB, error) {
dbPath, network, err := parseArgs("Create", args...)
if err != nil {
return nil, err
}
return openDB(dbPath, network, true)
}
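The registration step itself is not shown in this hunk; in a btcd-derived driver it typically looks roughly like the sketch below. The database.Driver field names and database.RegisterDriver are assumptions here, not confirmed by this diff:
func init() {
	driver := database.Driver{
		DbType: dbType,
		Create: createDBDriver,
		Open:   openDBDriver,
	}
	if err := database.RegisterDriver(driver); err != nil {
		panic(err)
	}
}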

View File

@@ -1,290 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package ffldb_test
import (
"github.com/pkg/errors"
"os"
"path/filepath"
"reflect"
"runtime"
"testing"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/database/ffldb"
"github.com/kaspanet/kaspad/util"
)
// dbType is the database type name for this driver.
const dbType = "ffldb"
// TestCreateOpenFail ensures that errors related to creating and opening a
// database are handled properly.
func TestCreateOpenFail(t *testing.T) {
t.Parallel()
// Ensure that attempting to open a database that doesn't exist returns
// the expected error.
wantErrCode := database.ErrDbDoesNotExist
_, err := database.Open(dbType, "noexist", blockDataNet)
if !checkDbError(t, "Open", err, wantErrCode) {
return
}
// Ensure that attempting to open a database with the wrong number of
// parameters returns the expected error.
wantErr := errors.Errorf("invalid arguments to %s.Open -- expected "+
"database path and block network", dbType)
_, err = database.Open(dbType, 1, 2, 3)
if err.Error() != wantErr.Error() {
t.Errorf("Open: did not receive expected error - got %v, "+
"want %v", err, wantErr)
return
}
// Ensure that attempting to open a database with an invalid type for
// the first parameter returns the expected error.
wantErr = errors.Errorf("first argument to %s.Open is invalid -- "+
"expected database path string", dbType)
_, err = database.Open(dbType, 1, blockDataNet)
if err.Error() != wantErr.Error() {
t.Errorf("Open: did not receive expected error - got %v, "+
"want %v", err, wantErr)
return
}
// Ensure that attempting to open a database with an invalid type for
// the second parameter returns the expected error.
wantErr = errors.Errorf("second argument to %s.Open is invalid -- "+
"expected block network", dbType)
_, err = database.Open(dbType, "noexist", "invalid")
if err.Error() != wantErr.Error() {
t.Errorf("Open: did not receive expected error - got %v, "+
"want %v", err, wantErr)
return
}
// Ensure that attempting to create a database with the wrong number of
// parameters returns the expected error.
wantErr = errors.Errorf("invalid arguments to %s.Create -- expected "+
"database path and block network", dbType)
_, err = database.Create(dbType, 1, 2, 3)
if err.Error() != wantErr.Error() {
t.Errorf("Create: did not receive expected error - got %v, "+
"want %v", err, wantErr)
return
}
// Ensure that attempting to create a database with an invalid type for
// the first parameter returns the expected error.
wantErr = errors.Errorf("first argument to %s.Create is invalid -- "+
"expected database path string", dbType)
_, err = database.Create(dbType, 1, blockDataNet)
if err.Error() != wantErr.Error() {
t.Errorf("Create: did not receive expected error - got %v, "+
"want %v", err, wantErr)
return
}
// Ensure that attempting to create a database with an invalid type for
// the second parameter returns the expected error.
wantErr = errors.Errorf("second argument to %s.Create is invalid -- "+
"expected block network", dbType)
_, err = database.Create(dbType, "noexist", "invalid")
if err.Error() != wantErr.Error() {
t.Errorf("Create: did not receive expected error - got %v, "+
"want %v", err, wantErr)
return
}
// Ensure operations against a closed database return the expected
// error.
dbPath := filepath.Join(os.TempDir(), "ffldb-createfail")
_ = os.RemoveAll(dbPath)
db, err := database.Create(dbType, dbPath, blockDataNet)
if err != nil {
t.Errorf("Create: unexpected error: %v", err)
return
}
defer os.RemoveAll(dbPath)
db.Close()
wantErrCode = database.ErrDbNotOpen
err = db.View(func(dbTx database.Tx) error {
return nil
})
if !checkDbError(t, "View", err, wantErrCode) {
return
}
wantErrCode = database.ErrDbNotOpen
err = db.Update(func(dbTx database.Tx) error {
return nil
})
if !checkDbError(t, "Update", err, wantErrCode) {
return
}
wantErrCode = database.ErrDbNotOpen
_, err = db.Begin(false)
if !checkDbError(t, "Begin(false)", err, wantErrCode) {
return
}
wantErrCode = database.ErrDbNotOpen
_, err = db.Begin(true)
if !checkDbError(t, "Begin(true)", err, wantErrCode) {
return
}
wantErrCode = database.ErrDbNotOpen
err = db.Close()
if !checkDbError(t, "Close", err, wantErrCode) {
return
}
}
// TestPersistence ensures that values stored are still valid after closing and
// reopening the database.
func TestPersistence(t *testing.T) {
t.Parallel()
// Create a new database to run tests against.
dbPath := filepath.Join(os.TempDir(), "ffldb-persistencetest")
_ = os.RemoveAll(dbPath)
db, err := database.Create(dbType, dbPath, blockDataNet)
if err != nil {
t.Errorf("Failed to create test database (%s) %v", dbType, err)
return
}
defer os.RemoveAll(dbPath)
defer db.Close()
// Create a bucket, put some values into it, and store a block so they
// can be tested for existence on re-open.
bucket1Key := []byte("bucket1")
storeValues := map[string]string{
"b1key1": "foo1",
"b1key2": "foo2",
"b1key3": "foo3",
}
genesisBlock := util.NewBlock(dagconfig.MainnetParams.GenesisBlock)
genesisHash := dagconfig.MainnetParams.GenesisHash
err = db.Update(func(dbTx database.Tx) error {
metadataBucket := dbTx.Metadata()
if metadataBucket == nil {
return errors.Errorf("Metadata: unexpected nil bucket")
}
bucket1, err := metadataBucket.CreateBucket(bucket1Key)
if err != nil {
return errors.Errorf("CreateBucket: unexpected error: %v",
err)
}
for k, v := range storeValues {
err := bucket1.Put([]byte(k), []byte(v))
if err != nil {
return errors.Errorf("Put: unexpected error: %v",
err)
}
}
if err := dbTx.StoreBlock(genesisBlock); err != nil {
return errors.Errorf("StoreBlock: unexpected error: %v",
err)
}
return nil
})
if err != nil {
t.Errorf("Update: unexpected error: %v", err)
return
}
// Close and reopen the database to ensure the values persist.
db.Close()
db, err = database.Open(dbType, dbPath, blockDataNet)
if err != nil {
t.Errorf("Failed to open test database (%s) %v", dbType, err)
return
}
defer db.Close()
// Ensure the values previously stored in the bucket still exist
// and are correct.
err = db.View(func(dbTx database.Tx) error {
metadataBucket := dbTx.Metadata()
if metadataBucket == nil {
return errors.Errorf("Metadata: unexpected nil bucket")
}
bucket1 := metadataBucket.Bucket(bucket1Key)
if bucket1 == nil {
return errors.Errorf("Bucket1: unexpected nil bucket")
}
for k, v := range storeValues {
gotVal := bucket1.Get([]byte(k))
if !reflect.DeepEqual(gotVal, []byte(v)) {
return errors.Errorf("Get: key '%s' does not "+
"match expected value - got %s, want %s",
k, gotVal, v)
}
}
genesisBlockBytes, _ := genesisBlock.Bytes()
gotBytes, err := dbTx.FetchBlock(genesisHash)
if err != nil {
return errors.Errorf("FetchBlock: unexpected error: %v",
err)
}
if !reflect.DeepEqual(gotBytes, genesisBlockBytes) {
return errors.Errorf("FetchBlock: stored block mismatch")
}
return nil
})
if err != nil {
t.Errorf("View: unexpected error: %v", err)
return
}
}
// TestInterface performs all interfaces tests for this database driver.
func TestInterface(t *testing.T) {
t.Parallel()
// Create a new database to run tests against.
dbPath := filepath.Join(os.TempDir(), "ffldb-interfacetest")
_ = os.RemoveAll(dbPath)
db, err := database.Create(dbType, dbPath, blockDataNet)
if err != nil {
t.Errorf("Failed to create test database (%s) %v", dbType, err)
return
}
defer os.RemoveAll(dbPath)
defer db.Close()
// Ensure the driver type is the expected value.
gotDbType := db.Type()
if gotDbType != dbType {
t.Errorf("Type: unepxected driver type - got %v, want %v",
gotDbType, dbType)
return
}
// Run all of the interface tests against the database.
runtime.GOMAXPROCS(runtime.NumCPU())
// Change the maximum file size to a small value to force multiple flat
// files with the test data set.
// Change maximum open files to small value to force shifts in the LRU
// mechanism
ffldb.TstRunWithMaxBlockFileSizeAndMaxOpenFiles(db, 2048, 10, func() {
testInterface(t, db)
})
}

View File

@@ -1,29 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
/*
This test file is part of the ffldb package rather than the ffldb_test
package so it can bridge access to the internals to properly test cases which
are either not possible or can't reliably be tested via the public interface.
The functions are only exported while the tests are being run.
*/
package ffldb
import "github.com/kaspanet/kaspad/database"
// TstRunWithMaxBlockFileSizeAndMaxOpenFiles runs the passed function with the
// maximum allowed file size and the maximum number of open files for the
// database set to the provided values. The values will be set back to their
// original values upon completion.
func TstRunWithMaxBlockFileSizeAndMaxOpenFiles(idb database.DB, size uint32, maxOpenFiles int, fn func()) {
ffldb := idb.(*db)
origSize := ffldb.store.maxBlockFileSize
origMaxOpenFiles := ffldb.store.maxOpenFiles
ffldb.store.maxBlockFileSize = size
ffldb.store.maxOpenFiles = maxOpenFiles
fn()
ffldb.store.maxBlockFileSize = origSize
ffldb.store.maxOpenFiles = origMaxOpenFiles
}

View File

@@ -0,0 +1,229 @@
package ff
import (
"container/list"
"encoding/binary"
"fmt"
"github.com/pkg/errors"
"hash/crc32"
"os"
"path/filepath"
"sync"
)
const (
// maxOpenFiles is the max number of open files to maintain in each store's
// cache. Note that this does not include the current/write file, so there
// will typically be one more than this value open.
maxOpenFiles = 25
// maxFileSize is the maximum size for each file used to store data.
//
// NOTE: The current code uses uint32 for all offsets, so this value
// must be less than 2^32 (4 GiB). This is also why it's a typed
// constant.
maxFileSize uint32 = 512 * 1024 * 1024 // 512 MiB
)
var (
// byteOrder is the preferred byte order used throughout the flat files.
// Big endian is sometimes used instead to allow for byte-sortable
// integer values.
byteOrder = binary.LittleEndian
// crc32ByteOrder is the byte order used for CRC-32 checksums.
crc32ByteOrder = binary.BigEndian
// crc32ChecksumLength is the length in bytes of a CRC-32 checksum.
crc32ChecksumLength = 4
// dataLengthLength is the length in bytes of the "data length" section
// of a serialized entry in a flat file store.
dataLengthLength = 4
// castagnoli houses the Castagnoli polynomial used for CRC-32 checksums.
castagnoli = crc32.MakeTable(crc32.Castagnoli)
)
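// Illustrative sketch (not part of this change): how a CRC-32 checksum over
// some serialized data might be computed and encoded using the Castagnoli
// table and byte order defined above. The actual record layout used by
// write() and read() is not shown in this diff, and the helper name below
// is hypothetical.
func exampleChecksumBytes(data []byte) []byte {
	checksum := crc32.Checksum(data, castagnoli)
	serializedChecksum := make([]byte, crc32ChecksumLength)
	crc32ByteOrder.PutUint32(serializedChecksum, checksum)
	return serializedChecksum
}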
// flatFileStore houses information used to handle reading and writing data
// into flat files with support for multiple concurrent readers.
type flatFileStore struct {
// basePath is the base path used for the flat files.
basePath string
// storeName is the name of this flat-file store.
storeName string
// The following fields are related to the flat files which hold the
// actual data. The number of open files is limited by maxOpenFiles.
//
// openFilesMutex protects concurrent access to the openFiles map. It
// is a RWMutex so multiple readers can simultaneously access open
// files.
//
// openFiles houses the open file handles for existing files which have
// been opened read-only along with an individual RWMutex. This scheme
// allows multiple concurrent readers to the same file while preventing
// the file from being closed out from under them.
//
// lruMutex protects concurrent access to the least recently used list
// and lookup map.
//
// openFilesLRU tracks how the open files are referenced by pushing the
// most recently used files to the front of the list, thereby trickling
// the least recently used files to the end of the list. When a file
// needs to be closed due to exceeding the maximum number of allowed
// open files, the one at the end of the list is closed. (A minimal
// sketch of this bookkeeping follows this file listing.)
//
// fileNumberToLRUElement is a mapping between a specific file number and
// the associated list element on the least recently used list.
//
// Thus, with the combination of these fields, the database supports
// concurrent non-blocking reads across multiple and individual files
// along with intelligently limiting the number of open file handles by
// closing the least recently used files as needed.
//
// NOTE: The locking order used throughout is well-defined and MUST be
// followed. Failure to do so could lead to deadlocks. In particular,
// the locking order is as follows:
// 1) openFilesMutex
// 2) lruMutex
// 3) writeCursor mutex
// 4) specific file mutexes
//
// None of the mutexes are required to be locked at the same time, and
// often aren't. However, if they are to be locked simultaneously, they
// MUST be locked in the order previously specified.
//
// Due to the high performance and multi-read concurrency requirements,
// write locks should only be held for the minimum time necessary.
openFilesMutex sync.RWMutex
openFiles map[uint32]*lockableFile
lruMutex sync.Mutex
openFilesLRU *list.List // Contains uint32 file numbers.
fileNumberToLRUElement map[uint32]*list.Element
// writeCursor houses the state for the current file and location that
// new data is written to.
writeCursor *writeCursor
// isClosed is true when the store is closed. Any operations on a closed
// store will fail.
isClosed bool
}
// writeCursor represents the current file and offset of the flat file on disk
// for performing all writes. It also contains a read-write mutex to support
// multiple concurrent readers which can reuse the file handle.
type writeCursor struct {
sync.RWMutex
// currentFile is the current file that will be appended to when writing
// new data.
currentFile *lockableFile
// currentFileNumber is the current file number and is used to allow
// readers to use the same open file handle.
currentFileNumber uint32
// currentOffset is the offset in the current file where the next new
// data will be written.
currentOffset uint32
}
// openFlatFileStore returns a new flat file store with the current file number
// and offset set and all fields initialized.
func openFlatFileStore(basePath string, storeName string) (*flatFileStore, error) {
// Look for the end of the latest file to determine what the write cursor
// position is from the viewpoint of the flat files on disk.
fileNumber, fileOffset, err := findCurrentLocation(basePath, storeName)
if err != nil {
return nil, err
}
store := &flatFileStore{
basePath: basePath,
storeName: storeName,
openFiles: make(map[uint32]*lockableFile),
openFilesLRU: list.New(),
fileNumberToLRUElement: make(map[uint32]*list.Element),
writeCursor: &writeCursor{
currentFile: &lockableFile{},
currentFileNumber: fileNumber,
currentOffset: fileOffset,
},
isClosed: false,
}
return store, nil
}
func (s *flatFileStore) Close() error {
if s.isClosed {
return errors.Errorf("cannot close a closed store %s",
s.storeName)
}
s.isClosed = true
// Close the write cursor. We lock the write cursor here
// to let any ongoing write finish before closing.
s.writeCursor.Lock()
defer s.writeCursor.Unlock()
err := s.writeCursor.currentFile.Close()
if err != nil {
return err
}
// Close all open files
for _, openFile := range s.openFiles {
err := openFile.Close()
if err != nil {
return err
}
}
return nil
}
func (s *flatFileStore) currentLocation() *flatFileLocation {
return &flatFileLocation{
fileNumber: s.writeCursor.currentFileNumber,
fileOffset: s.writeCursor.currentOffset,
dataLength: 0,
}
}
// findCurrentLocation searches the database directory for all flat files for a given
// store to find the end of the most recent file. This position is considered
// the current write cursor.
func findCurrentLocation(dbPath string, storeName string) (fileNumber uint32, fileLength uint32, err error) {
currentFileNumber := uint32(0)
currentFileLength := uint32(0)
for {
currentFilePath := flatFilePath(dbPath, storeName, currentFileNumber)
stat, err := os.Stat(currentFilePath)
if err != nil {
if !os.IsNotExist(err) {
return 0, 0, errors.WithStack(err)
}
if currentFileNumber > 0 {
fileNumber = currentFileNumber - 1
}
fileLength = currentFileLength
break
}
currentFileLength = uint32(stat.Size())
currentFileNumber++
}
log.Tracef("Scan for store '%s' found latest file #%d with length %d",
storeName, fileNumber, fileLength)
return fileNumber, fileLength, nil
}
// flatFilePath returns the file path for the provided store's flat file number.
func flatFilePath(dbPath string, storeName string, fileNumber uint32) string {
// Choose 9 digits of precision for the filenames. 9 digits provide
// 10^9 files at 512 MiB each, for a total of ~476.84 PiB.
fileName := fmt.Sprintf("%s-%09d.fdb", storeName, fileNumber)
return filepath.Join(dbPath, fileName)
}
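As a side note, the open-file eviction logic implied by the openFiles, openFilesLRU and fileNumberToLRUElement fields documented in the flatFileStore comments above is not shown in this diff. The following standalone sketch (hypothetical names, not kaspad code) illustrates the described behaviour: touching a file number moves it to the front of the list, and once more than the allowed number of files would be open, the entry at the back of the list is evicted.

package main

import (
	"container/list"
	"fmt"
)

type lruCache struct {
	maxOpen  int
	order    *list.List               // front = most recently used file numbers
	elements map[uint32]*list.Element // file number -> list element
}

func newLRUCache(maxOpen int) *lruCache {
	return &lruCache{
		maxOpen:  maxOpen,
		order:    list.New(),
		elements: make(map[uint32]*list.Element),
	}
}

// touch marks fileNumber as most recently used, inserting it if needed.
// It returns the file number that was evicted, if any.
func (c *lruCache) touch(fileNumber uint32) (evicted uint32, didEvict bool) {
	if element, ok := c.elements[fileNumber]; ok {
		c.order.MoveToFront(element)
		return 0, false
	}
	if c.order.Len() >= c.maxOpen {
		back := c.order.Back()
		evicted = back.Value.(uint32)
		c.order.Remove(back)
		delete(c.elements, evicted)
		didEvict = true
	}
	c.elements[fileNumber] = c.order.PushFront(fileNumber)
	return evicted, didEvict
}

func main() {
	cache := newLRUCache(2)
	cache.touch(0)
	cache.touch(1)
	cache.touch(0)                      // file 0 becomes most recently used
	evicted, didEvict := cache.touch(2) // exceeds maxOpen, so file 1 is evicted
	fmt.Println(evicted, didEvict)      // prints: 1 true
}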

View File

@@ -0,0 +1,74 @@
package ff
import (
"io/ioutil"
"reflect"
"testing"
)
func TestFlatFileStoreSanity(t *testing.T) {
// Open a test store
path, err := ioutil.TempDir("", "TestFlatFileStoreSanity")
if err != nil {
t.Fatalf("TestFlatFileStoreSanity: TempDir unexpectedly "+
"failed: %s", err)
}
name := "test"
store, err := openFlatFileStore(path, name)
if err != nil {
t.Fatalf("TestFlatFileStoreSanity: openFlatFileStore "+
"unexpectedly failed: %s", err)
}
// Write something to the store
writeData := []byte("Hello world!")
location, err := store.write(writeData)
if err != nil {
t.Fatalf("TestFlatFileStoreSanity: Write returned "+
"unexpected error: %s", err)
}
// Read from the location previously written to
readData, err := store.read(location)
if err != nil {
t.Fatalf("TestFlatFileStoreSanity: read returned "+
"unexpected error: %s", err)
}
// Make sure that the written data and the read data are equal
if !reflect.DeepEqual(readData, writeData) {
t.Fatalf("TestFlatFileStoreSanity: read data and "+
"write data are not equal. Wrote: %s, read: %s",
string(writeData), string(readData))
}
}
func TestFlatFilePath(t *testing.T) {
tests := []struct {
dbPath string
storeName string
fileNumber uint32
expectedPath string
}{
{
dbPath: "path",
storeName: "store",
fileNumber: 0,
expectedPath: "path/store-000000000.fdb",
},
{
dbPath: "path/to/database",
storeName: "blocks",
fileNumber: 123456789,
expectedPath: "path/to/database/blocks-123456789.fdb",
},
}
for _, test := range tests {
path := flatFilePath(test.dbPath, test.storeName, test.fileNumber)
if path != test.expectedPath {
t.Errorf("TestFlatFilePath: unexpected path. Want: %s, got: %s",
test.expectedPath, path)
}
}
}

View File

@@ -0,0 +1,103 @@
package ff
// FlatFileDB is a flat-file database. It supports opening
// multiple flat-file stores. See flatFileStore for further
// details.
type FlatFileDB struct {
path string
flatFileStores map[string]*flatFileStore
}
// NewFlatFileDB opens the flat-file database defined by
// the given path.
func NewFlatFileDB(path string) *FlatFileDB {
return &FlatFileDB{
path: path,
flatFileStores: make(map[string]*flatFileStore),
}
}
// Close closes the flat-file database.
func (ffdb *FlatFileDB) Close() error {
for _, store := range ffdb.flatFileStores {
err := store.Close()
if err != nil {
return err
}
}
return nil
}
// Write appends the specified data bytes to the specified store.
// It returns a serialized location handle that's meant to be
// stored and later used to query the data that has just been
// inserted.
// See flatFileStore.write() for further details.
func (ffdb *FlatFileDB) Write(storeName string, data []byte) ([]byte, error) {
store, err := ffdb.store(storeName)
if err != nil {
return nil, err
}
location, err := store.write(data)
if err != nil {
return nil, err
}
return serializeLocation(location), nil
}
// Read reads data from the specified flat file store at the
// location specified by the given serialized location handle.
// It returns ErrNotFound if the location does not exist.
// See flatFileStore.read() for further details.
func (ffdb *FlatFileDB) Read(storeName string, serializedLocation []byte) ([]byte, error) {
store, err := ffdb.store(storeName)
if err != nil {
return nil, err
}
location, err := deserializeLocation(serializedLocation)
if err != nil {
return nil, err
}
return store.read(location)
}
// CurrentLocation returns the serialized location handle to
// the current location within the flat-file store defined by
// the given storeName. It is mainly meant to be used to roll
// back flat-file stores in case of data inconsistency.
func (ffdb *FlatFileDB) CurrentLocation(storeName string) ([]byte, error) {
store, err := ffdb.store(storeName)
if err != nil {
return nil, err
}
currentLocation := store.currentLocation()
return serializeLocation(currentLocation), nil
}
// Rollback truncates the flat-file store defined by the given
// storeName to the location defined by the given serialized
// location handle.
func (ffdb *FlatFileDB) Rollback(storeName string, serializedLocation []byte) error {
store, err := ffdb.store(storeName)
if err != nil {
return err
}
location, err := deserializeLocation(serializedLocation)
if err != nil {
return err
}
return store.rollback(location)
}
func (ffdb *FlatFileDB) store(storeName string) (*flatFileStore, error) {
store, ok := ffdb.flatFileStores[storeName]
if !ok {
var err error
store, err = openFlatFileStore(ffdb.path, storeName)
if err != nil {
return nil, err
}
ffdb.flatFileStores[storeName] = store
}
return store, nil
}
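For orientation, here is a hedged usage sketch of the FlatFileDB API above, written in the style of the package's own tests. The test name and the store name "blocks" are illustrative, and the rollback step assumes CurrentLocation and Rollback behave as documented; the sketch is not part of this change.

package ff

import (
	"io/ioutil"
	"reflect"
	"testing"
)

func TestFlatFileDBUsageSketch(t *testing.T) {
	path, err := ioutil.TempDir("", "TestFlatFileDBUsageSketch")
	if err != nil {
		t.Fatalf("TempDir unexpectedly failed: %s", err)
	}
	ffdb := NewFlatFileDB(path)
	defer ffdb.Close()
	// Remember where the store ends before writing so it can be rolled
	// back later.
	locationBeforeWrite, err := ffdb.CurrentLocation("blocks")
	if err != nil {
		t.Fatalf("CurrentLocation unexpectedly failed: %s", err)
	}
	// Write returns a serialized location handle that is meant to be
	// stored and later passed to Read.
	data := []byte("some block data")
	location, err := ffdb.Write("blocks", data)
	if err != nil {
		t.Fatalf("Write unexpectedly failed: %s", err)
	}
	readData, err := ffdb.Read("blocks", location)
	if err != nil {
		t.Fatalf("Read unexpectedly failed: %s", err)
	}
	if !reflect.DeepEqual(readData, data) {
		t.Fatalf("read data does not match written data")
	}
	// Rollback truncates the store back to the remembered location.
	err = ffdb.Rollback("blocks", locationBeforeWrite)
	if err != nil {
		t.Fatalf("Rollback unexpectedly failed: %s", err)
	}
}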

View File

@@ -0,0 +1,44 @@
package ff
import "github.com/pkg/errors"
// flatFileLocationSerializedSize is the size in bytes of a serialized flat
// file location. See serializeLocation for further details.
const flatFileLocationSerializedSize = 12
// flatFileLocation identifies a particular flat file location.
type flatFileLocation struct {
fileNumber uint32
fileOffset uint32
dataLength uint32
}
// serializeLocation returns the serialization of the passed flat file
// location of certain data. This is meant to be stored and later used
// to retrieve said data.
// The serialized location format is:
//
// [0:4] File Number (4 bytes)
// [4:8] File offset (4 bytes)
// [8:12] Data length (4 bytes)
func serializeLocation(location *flatFileLocation) []byte {
var serializedLocation [flatFileLocationSerializedSize]byte
byteOrder.PutUint32(serializedLocation[0:4], location.fileNumber)
byteOrder.PutUint32(serializedLocation[4:8], location.fileOffset)
byteOrder.PutUint32(serializedLocation[8:12], location.dataLength)
return serializedLocation[:]
}
// deserializeLocation deserializes the passed serialized flat file location.
// See serializeLocation for further details.
func deserializeLocation(serializedLocation []byte) (*flatFileLocation, error) {
if len(serializedLocation) != flatFileLocationSerializedSize {
return nil, errors.Errorf("unexpected serializedLocation length: %d",
len(serializedLocation))
}
location := &flatFileLocation{
fileNumber: byteOrder.Uint32(serializedLocation[0:4]),
fileOffset: byteOrder.Uint32(serializedLocation[4:8]),
dataLength: byteOrder.Uint32(serializedLocation[8:12]),
}
return location, nil
}
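To make the 12-byte format documented above concrete, here is a brief round-trip sketch through serializeLocation and deserializeLocation, written as a test in package ff; the test name and field values are illustrative and not part of this change.

package ff

import (
	"reflect"
	"testing"
)

func TestFlatFileLocationSerializationSketch(t *testing.T) {
	location := &flatFileLocation{
		fileNumber: 1,
		fileOffset: 16,
		dataLength: 29,
	}
	serialized := serializeLocation(location)
	if len(serialized) != flatFileLocationSerializedSize {
		t.Fatalf("unexpected serialized length: want %d, got %d",
			flatFileLocationSerializedSize, len(serialized))
	}
	deserialized, err := deserializeLocation(serialized)
	if err != nil {
		t.Fatalf("deserializeLocation unexpectedly failed: %s", err)
	}
	if !reflect.DeepEqual(deserialized, location) {
		t.Fatalf("deserialized location does not match the original")
	}
	// A handle of the wrong length is rejected.
	_, err = deserializeLocation(serialized[:8])
	if err == nil {
		t.Fatalf("deserializeLocation unexpectedly succeeded on a " +
			"truncated handle")
	}
}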

Some files were not shown because too many files have changed in this diff.