
* [NOD-540] Implement reachability (#545)
* [NOD-540] Begin implementing reachability.
* [NOD-540] Finish implementing reachability.
* [NOD-540] Implement TestIsFutureBlock.
* [NOD-540] Implement TestInsertFutureBlock.
* [NOD-540] Add comments.
* [NOD-540] Add comment for interval in blockNode.
* [NOD-540] Updated comments over insertFutureBlock and isFutureBlock.
* [NOD-540] Implement interval splitting methods.
* [NOD-540] Begin implementing tree manipulation in blockNode.
* [NOD-540] Implement countSubtreesUp.
* [NOD-540] Add a comment explaining an impossible condition.
* [NOD-540] Implement applyIntervalDown.
* [NOD-540] Moved the reachability tree stuff into reachability.go.
* [NOD-540] Add some comments.
* [NOD-540] Add more comments, implement isInPast.
* [NOD-540] Fix comments.
* [NOD-540] Implement TestSplitFraction.
* [NOD-540] Implement TestSplitExact.
* [NOD-540] Implement TestSplit.
* [NOD-540] Add comments to structs.
* [NOD-540] Implement TestAddTreeChild.
* [NOD-540] Fix a comment.
* [NOD-540] Rename isInPast to isAncestorOf.
* [NOD-540] Rename futureBlocks to futureCoveringSet.
* [NOD-540] Rename isFutureBlock to isInFuture.
* [NOD-540] move reachabilityInterval to the top of reachability.go.
* [NOD-540] Change "s.t." to "such that" in a comment.
* [NOD-540] Fix indentation.
* [NOD-540] Fix a potential bug involving float inaccuracy.
* [NOD-540] Wrote a more descriptive error message.
* [NOD-540] Fix error message.
* [NOD-540] Fix the recursive countSubtreesUp.
* [NOD-540] Rename countSubtreesUp to countSubtrees and applyIntervalDown to propagateInterval.
* [NOD-540] Implement updating reachability for a valid new block.
* [NOD-540] Implement a disk storage for reachability data.
* [NOD-540] Fix not all tree nodes being written to the database.
* [NOD-540] Implement serialization for reachabilityData.
* [NOD-540] Implement some deserialization for reachabilityData.
* [NOD-540] Implement restoring the reachabilityStore on node restart.
* [NOD-540] Made interval and remainingInterval pointers.
* [NOD-540] Rename setTreeInterval to setInterval.
* [NOD-540] Rename reindexTreeIntervals to reindexIntervals and fixed the comment above it.
* [NOD-540] Expand the comment above reindexIntervals.
* [NOD-540] Fix comment above countSubtrees.
* [NOD-540] Fix comment above countSubtrees some more.
* [NOD-540] Fix comment above split.
* [NOD-540] Fix comment above isAncestorOf.
* [NOD-540] Fix comment above reachabilityTreeNode.
* [NOD-540] Fix weird condition in addTreeChild.
* [NOD-540] Rename addTreeChild to addChild.
* [NOD-540] Fix weird condition in splitFraction.
* [NOD-540] Reverse the lines in reachabilityTreeNode.String().
* [NOD-540] Renamed f to fraction and x to size.
* [NOD-540] Fix comment above bisect.
* [NOD-540] Implement rtn.isAncestorOf().
* [NOD-540] Use treeNode isAncestorOf instead of treeInterval isAncestorOf.
* [NOD-540] Use newReachabilityInterval instead of struct initialization.
* [NOD-540] Make reachabilityTreeNode.String() use strings.Join.
* [NOD-540] Use sync.RWMutex instead of locks.PriorityMutex.
* [NOD-540] Rename thisTreeNode to newTreeNode.
* [NOD-540] Rename setTreeNode to addTreeNode.
* [NOD-540] Extracted selectedParentAnticone to a separate function.
* [NOD-540] Rename node to this.
* [NOD-540] Move updateReachability and isAncestorOf from dag.go to reachability.go.
* [NOD-540] Add whitespace after multiline function signatures in reachability.go.
* [NOD-540] Make splitFraction return an error on empty interval.
* [NOD-540] Add a comment about rounding to splitFraction.
* [NOD-540] Replace sneaky tabs with spaces.
* [NOD-540] Rename split to splitExponential.
* [NOD-540] Extract exponentialFractions to a separate function.
* [NOD-540] Rename bisect to findIndex.
* [NOD-540] Add call to reachabilityStore.clearDirtyEntries at the end of saveChangesFromBlock.
* [NOD-540] Explain the dirty hack in reachabilityStore.init().
* [NOD-540] Split the function signature for deserializeReachabilityData to two lines.
* [NOD-540] Add a comment about float precision loss to exponentialFractions.
* [NOD-540] Corrected a comment about float precision loss to exponentialFractions.
* [NOD-540] Fixed a comment about float precision loss to exponentialFractions some more.
* [NOD-540] Added further comments above futureCoveringBlockSet.
* [NOD-540] Rename addTreeNode to setTreeNode.
* [NOD-540] Rename splitExponential to splitWithExponentialBias.
* [NOD-540] Fix object references in reachabilityData deserialization (#563)
* [NOD-540] Fix broken references in deserialization.
* [NOD-540] Fix broken references in futureCoveringSet deserialization. Also add comments.
* [NOD-540] Don't deserialize on the first pass in reachabilityStore.init().
* [NOD-540] Remove redundant assignment to loaded[hash].
* [NOD-540] Use NewHash instead of SetBytes. Rename data to destination.
* [NOD-540] Preallocate futureCoveringSet.
* [NOD-541] Implement GHOSTDAG (#560)
* [NOD-541] Implement GHOSTDAG
* [NOD-541] Replace the old PHANTOM variant with GHOSTDAG
* [NOD-541] Move dag.updateReachability to the top of dag.applyDAGChanges to update reachability before the virtual block is updated
* [NOD-541] Fix blueAnticoneSize
* [NOD-541] Initialize node.bluesAnticoneSizes
* [NOD-541] Fix pastUTXO and applyBlueBlocks blues order
* [NOD-541] Add serialization logic to node.bluesAnticoneSizes
* [NOD-541] Fix GHOSTDAG to not count the new block and the blue candidates anticone, add selected parent to blues, and save to node.bluesAnticoneSizes properly
* [NOD-541] Fix test names in inner strings
* [NOD-541] Writing TestGHOSTDAG
* [NOD-541] In blueAnticoneSize change node->current
* [NOD-541] name ghostdag return values
* [NOD-541] fix ghostdag to return slice
* [NOD-541] Split k-cluster violation rules
* [NOD-541] Add missing space
* [NOD-541] Add comment to ghostdag
* [NOD-541] In selectedParentAnticone rename past->selectedParentPast
* [NOD-541] Fix misreferences to TestChainUpdates
* [NOD-541] Fix ghostdag comment
* [NOD-541] Make PrepareBlockForTest in blockdag package
* [NOD-541] Make PrepareBlockForTest in blockdag package
* [NOD-541] Assign to selectedParentAnticone[i] instead of appending
* [NOD-541] Remove redundant forceTransactions arguments from PrepareBlockForTest
* [NOD-541] Add non-selected parents to anticoneHeap
* [NOD-541] add test for ghostdag
* [NOD-541] Add comments
* [NOD-541] Use adjusted time for initializing blockNode
* [NOD-541] Rename isAncestorOf -> isAncestorOfBlueCandidate
* [NOD-541] Remove params from PrepareBlockForTest
* [NOD-541] Fix TestChainHeight
* [NOD-541] Remove recursive lock
* [NOD-541] Fix TestTxIndexConnectBlock
* [NOD-541] Fix TestBlueBlockWindow
* [NOD-541] Put prepareAndProcessBlock in common_test.go
* [NOD-541] Fix TestConfirmations
* [NOD-541] Fix TestAcceptingBlock
* [NOD-541] Fix TestDifficulty
* [NOD-541] Fix TestVirtualBlock
* [NOD-541] Fix TestSelectedPath
* [NOD-541] Fix TestChainUpdates
* [NOD-541] Shorten TestDifficulty test time
* [NOD-541] Make PrepareBlockForTest use minimal valid block time
* [NOD-541] Remove TODO comment
* [NOD-541] Move blockdag related mining functions to mining.go
* [NOD-541] Use NextBlockCoinbaseTransaction instead of NextBlockCoinbaseTransactionNoLock in NextCoinbaseFromAddress
* [NOD-541] Remove useMinimalTime from BlockForMining
* [NOD-541] Make MedianAdjustedTime a *BlockDAG method
* [NOD-541] Fix ghostdag to use anticone slice instead of heap
* [NOD-541] Fix NewBlockTemplate locks
* [NOD-541] Fix ghostdag comments
* [NOD-541] Convert MedianAdjustedTime to NextBlockTime
* [NOD-541] Fix ghostdag comment
* [NOD-541] Fix TestGHOSTDAG comment
* [NOD-541] Add comment before sanity check
* [NOD-541] Explicitly initialize .blues in ghostdag
* [NOD-541] Rename *blockNode.lessThan to *blockNode.less
* [NOD-541] Remove redundant check if block != chainBlock
* [NOD-541] Fix comment
* [NOD-541] Fix comment
* [NOD-497] Add comment; General refactoring
* [NOD-497] General refactoring.
* [NOD-497] Use isAncestor of the tree rather than the node
* [NOD-497] Remove reachability mutex lock as it is redundant (dag lock is held so no need); General refactoring.
* [NOD-497] Update comment
* [NOD-497] Undo test blocktimestamp
* [NOD-497] Update comments; Use BlockNode.less for blockset;
* [NOD-497] Change processBlock to return boolean and not the delay duration (merge conflict)
* [NOD-497] Undo change for bluest to use less; Change blocknode less to use daghash.Less

Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
Co-authored-by: Dan Aharoni <dereeno@protonmail.com>
312 lines
8.2 KiB
Go
package blockdag

// The functions in this file are not considered safe for regular use, and should be used for test purposes only.

import (
	"compress/bzip2"
	"encoding/binary"
	"github.com/kaspanet/kaspad/util"
	"github.com/pkg/errors"
	"io"
	"os"
	"path/filepath"
	"strings"
	"sync"

	"github.com/kaspanet/kaspad/util/subnetworkid"

	"github.com/kaspanet/kaspad/database"
	_ "github.com/kaspanet/kaspad/database/ffldb" // blank import ffldb so that its init() function runs before tests
	"github.com/kaspanet/kaspad/txscript"
	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/kaspanet/kaspad/wire"
)

const (
	// testDbType is the database backend type to use for the tests.
	testDbType = "ffldb"

	// testDbRoot is the root directory used to create all test databases.
	testDbRoot = "testdbs"

	// blockDataNet is the expected network in the test block data.
	blockDataNet = wire.MainNet
)

// isSupportedDbType returns whether or not the passed database type is
// currently supported.
func isSupportedDbType(dbType string) bool {
	supportedDrivers := database.SupportedDrivers()
	for _, driver := range supportedDrivers {
		if dbType == driver {
			return true
		}
	}

	return false
}

// FileExists returns whether or not the named file or directory exists.
func FileExists(name string) bool {
	if _, err := os.Stat(name); err != nil {
		if os.IsNotExist(err) {
			return false
		}
	}
	return true
}

// DAGSetup is used to create a new db and DAG instance with the genesis
// block already inserted. In addition to the new DAG instance, it returns
// a teardown function the caller should invoke when done testing to clean up.
func DAGSetup(dbName string, config Config) (*BlockDAG, func(), error) {
	if !isSupportedDbType(testDbType) {
		return nil, nil, errors.Errorf("unsupported db type %s", testDbType)
	}

	var teardown func()

	// To make sure that the teardown function is not called before all
	// spawned goroutines have finished running, overwrite `spawn` to keep
	// count of the running goroutines.
	spawnWaitGroup := sync.WaitGroup{}
	realSpawn := spawn
	spawn = func(f func()) {
		spawnWaitGroup.Add(1)
		realSpawn(func() {
			f()
			spawnWaitGroup.Done()
		})
	}

	if config.DB == nil {
		// Create the root directory for test databases.
		if !FileExists(testDbRoot) {
			if err := os.MkdirAll(testDbRoot, 0700); err != nil {
				err := errors.Errorf("unable to create test db "+
					"root: %s", err)
				return nil, nil, err
			}
		}

		dbPath := filepath.Join(testDbRoot, dbName)
		_ = os.RemoveAll(dbPath)
		var err error
		config.DB, err = database.Create(testDbType, dbPath, blockDataNet)
		if err != nil {
			return nil, nil, errors.Errorf("error creating db: %s", err)
		}

		// Setup a teardown function for cleaning up. This function is
		// returned to the caller to be invoked when it is done testing.
		teardown = func() {
			spawnWaitGroup.Wait()
			spawn = realSpawn
			config.DB.Close()
			os.RemoveAll(dbPath)
			os.RemoveAll(testDbRoot)
		}
	} else {
		teardown = func() {
			spawnWaitGroup.Wait()
			spawn = realSpawn
			config.DB.Close()
		}
	}

	config.TimeSource = NewMedianTime()
	config.SigCache = txscript.NewSigCache(1000)

	// Create the DAG instance.
	dag, err := New(&config)
	if err != nil {
		teardown()
		err := errors.Errorf("failed to create dag instance: %s", err)
		return nil, nil, err
	}
	return dag, teardown, nil
}

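// Hypothetical usage sketch (not part of the original file): a test would
// typically create a DAG with DAGSetup and defer the returned teardown
// function. The params value below (dagconfig.SimnetParams) and the use of a
// *testing.T named t are assumptions made for illustration only:
//
//	dag, teardown, err := DAGSetup("TestExample", Config{
//		DAGParams: &dagconfig.SimnetParams,
//	})
//	if err != nil {
//		t.Fatalf("DAGSetup failed: %v", err)
//	}
//	defer teardown()
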
// OpTrueScript is a script returning TRUE
var OpTrueScript = []byte{txscript.OpTrue}

// txSubnetworkData holds the subnetwork-related data to set on a test transaction.
type txSubnetworkData struct {
	subnetworkID *subnetworkid.SubnetworkID
	Gas          uint64
	Payload      []byte
}

// createTxForTest creates a transaction with the given number of inputs and
// outputs, where every output pays outputValue to OpTrueScript. If
// subnetworkData is not nil, the transaction is created on that subnetwork.
func createTxForTest(numInputs uint32, numOutputs uint32, outputValue uint64, subnetworkData *txSubnetworkData) *wire.MsgTx {
	txIns := []*wire.TxIn{}
	txOuts := []*wire.TxOut{}

	for i := uint32(0); i < numInputs; i++ {
		txIns = append(txIns, &wire.TxIn{
			PreviousOutpoint: *wire.NewOutpoint(&daghash.TxID{}, i),
			SignatureScript:  []byte{},
			Sequence:         wire.MaxTxInSequenceNum,
		})
	}

	for i := uint32(0); i < numOutputs; i++ {
		txOuts = append(txOuts, &wire.TxOut{
			ScriptPubKey: OpTrueScript,
			Value:        outputValue,
		})
	}

	if subnetworkData != nil {
		return wire.NewSubnetworkMsgTx(wire.TxVersion, txIns, txOuts, subnetworkData.subnetworkID, subnetworkData.Gas, subnetworkData.Payload)
	}

	return wire.NewNativeMsgTx(wire.TxVersion, txIns, txOuts)
}

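// Hypothetical usage sketch (not part of the original file): build a native
// transaction with one placeholder input and two anyone-can-spend outputs of
// value 50, or a subnetwork transaction by passing a txSubnetworkData (the
// subnetwork ID below is an arbitrary illustrative value):
//
//	nativeTx := createTxForTest(1, 2, 50, nil)
//	subnetworkTx := createTxForTest(1, 1, 50, &txSubnetworkData{
//		subnetworkID: &subnetworkid.SubnetworkID{0x01},
//		Gas:          1,
//		Payload:      []byte{},
//	})
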
// VirtualForTest is an exported version of virtualBlock, so that it can be returned by exported test_util methods
type VirtualForTest *virtualBlock

// SetVirtualForTest replaces the dag's virtual block. This function is used for test purposes only
func SetVirtualForTest(dag *BlockDAG, virtual VirtualForTest) VirtualForTest {
	oldVirtual := dag.virtual
	dag.virtual = virtual
	return VirtualForTest(oldVirtual)
}

// GetVirtualFromParentsForTest generates a virtual block with the given parents.
func GetVirtualFromParentsForTest(dag *BlockDAG, parentHashes []*daghash.Hash) (VirtualForTest, error) {
	parents := newSet()
	for _, hash := range parentHashes {
		parent := dag.index.LookupNode(hash)
		if parent == nil {
			return nil, errors.Errorf("GetVirtualFromParentsForTest: couldn't find node for hash %s", hash)
		}
		parents.add(parent)
	}
	virtual := newVirtualBlock(dag, parents)

	pastUTXO, _, err := dag.pastUTXO(&virtual.blockNode)
	if err != nil {
		return nil, err
	}
	diffUTXO := pastUTXO.clone().(*DiffUTXOSet)
	err = diffUTXO.meldToBase()
	if err != nil {
		return nil, err
	}
	virtual.utxoSet = diffUTXO.base

	return VirtualForTest(virtual), nil
}

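// Hypothetical usage sketch (not part of the original file): a test that needs
// the DAG to act as if a particular set of parents were the current tips can
// temporarily swap in a virtual block built from those parents and restore the
// original one when done (parentHashes is assumed to be a []*daghash.Hash
// prepared by the test):
//
//	newVirtual, err := GetVirtualFromParentsForTest(dag, parentHashes)
//	if err != nil {
//		t.Fatalf("GetVirtualFromParentsForTest failed: %v", err)
//	}
//	oldVirtual := SetVirtualForTest(dag, newVirtual)
//	defer SetVirtualForTest(dag, oldVirtual)
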
// LoadBlocks reads files containing kaspa block data (bzip2-compressed if the
// filename ends in .bz2) from disk and returns them as an array of util.Block.
func LoadBlocks(filename string) (blocks []*util.Block, err error) {
	var network = wire.MainNet
	var dr io.Reader
	var fi io.ReadCloser

	fi, err = os.Open(filename)
	if err != nil {
		return
	}

	if strings.HasSuffix(filename, ".bz2") {
		dr = bzip2.NewReader(fi)
	} else {
		dr = fi
	}
	defer fi.Close()

	var block *util.Block

	err = nil
	for height := uint64(0); err == nil; height++ {
		var rintbuf uint32
		err = binary.Read(dr, binary.LittleEndian, &rintbuf)
		if err == io.EOF {
			// hit end of file at expected offset: no warning
			height--
			err = nil
			break
		}
		if err != nil {
			break
		}
		if rintbuf != uint32(network) {
			break
		}
		err = binary.Read(dr, binary.LittleEndian, &rintbuf)
		if err != nil {
			break
		}
		blocklen := rintbuf

		rbytes := make([]byte, blocklen)

		// read block
		_, err = io.ReadFull(dr, rbytes)
		if err != nil {
			return
		}

		block, err = util.NewBlockFromBytes(rbytes)
		if err != nil {
			return
		}
		blocks = append(blocks, block)
	}

	return
}

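// Hypothetical usage sketch (not part of the original file): read blocks from
// a bzip2-compressed test data file and feed them to the DAG under test (the
// file name is an illustrative placeholder):
//
//	blocks, err := LoadBlocks(filepath.Join("testdata", "blocks.bz2"))
//	if err != nil {
//		t.Fatalf("LoadBlocks failed: %v", err)
//	}
//	for _, block := range blocks {
//		// process each block with the DAG under test,
//		// e.g. via the DAG's block processing entry point
//	}
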
// opTrueAddress returns an address pointing to a P2SH anyone-can-spend script
func opTrueAddress(prefix util.Bech32Prefix) (util.Address, error) {
	return util.NewAddressScriptHash(OpTrueScript, prefix)
}

// PrepareBlockForTest generates a block with the proper merkle roots, coinbase transaction etc. This function is used for test purposes only
func PrepareBlockForTest(dag *BlockDAG, parentHashes []*daghash.Hash, transactions []*wire.MsgTx) (*wire.MsgBlock, error) {
	newVirtual, err := GetVirtualFromParentsForTest(dag, parentHashes)
	if err != nil {
		return nil, err
	}
	oldVirtual := SetVirtualForTest(dag, newVirtual)
	defer SetVirtualForTest(dag, oldVirtual)

	OpTrueAddr, err := opTrueAddress(dag.dagParams.Prefix)
	if err != nil {
		return nil, err
	}

	blockTransactions := make([]*util.Tx, len(transactions)+1)

	extraNonce := generateDeterministicExtraNonceForTest()
	coinbasePayloadExtraData, err := CoinbasePayloadExtraData(extraNonce, "")
	if err != nil {
		return nil, err
	}

	blockTransactions[0], err = dag.NextCoinbaseFromAddress(OpTrueAddr, coinbasePayloadExtraData)
	if err != nil {
		return nil, err
	}

	for i, tx := range transactions {
		blockTransactions[i+1] = util.NewTx(tx)
	}

	block, err := dag.BlockForMining(blockTransactions)
	if err != nil {
		return nil, err
	}
	block.Header.Timestamp = dag.NextBlockMinimumTime()
	block.Header.Bits = dag.NextRequiredDifficulty(block.Header.Timestamp)

	return block, nil
}

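// Hypothetical usage sketch (not part of the original file): build a block on
// top of the current tips and hand it to the DAG under test. TipHashes and the
// use of a flag that skips the proof-of-work check are assumptions made for
// illustration, since the prepared block is never actually mined:
//
//	block, err := PrepareBlockForTest(dag, dag.TipHashes(), []*wire.MsgTx{tx})
//	if err != nil {
//		t.Fatalf("PrepareBlockForTest failed: %v", err)
//	}
//	// process util.NewBlock(block) with a behavior flag that skips the
//	// proof-of-work check
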
// generateDeterministicExtraNonceForTest returns a unique deterministic extra nonce for coinbase data, in order to create unique coinbase transactions.
func generateDeterministicExtraNonceForTest() uint64 {
	extraNonceForTest++
	return extraNonceForTest
}

// resetExtraNonceForTest resets the deterministic extra nonce back to its initial value.
func resetExtraNonceForTest() {
	extraNonceForTest = 0
}

var extraNonceForTest = uint64(0)