Mirror of https://github.com/kaspanet/kaspad.git (synced 2025-09-15 05:50:10 +00:00)

* Calculate GHOSTDAG, reachability etc for each level
* Don't preallocate cache for dag stores except level 0 and reduce the number of connections in the integration test to 32
* Reduce the number of connections in the integration test to 16
* Increase page file
* BuildPruningPointProof
* BuildPruningPointProof
* Add PruningProofManager
* Implement ApplyPruningPointProof
* Add prefix and fix blockAtDepth and fill headersByLevel
* Some bug fixes
* Include all relevant blocks for each level in the proof
* Fix syncAndValidatePruningPointProof to return the right block hash
* Fix block window
* Fix isAncestorOfPruningPoint
* Ban for rule errors on pruning proof
* Find common ancestor for blockAtDepthMAtNextLevel
* Use pruning proof in TestValidateAndInsertImportedPruningPoint
* Stage status and finality point for proof blocks
* Uncomment golint
* Change test timeouts
* Calculate merge set for ApplyPruningPointProof
* Increase test timeout
* Add better caching for daa window store
* Return to default timeout
* Add ErrPruningProofMissesBlocksBelowPruningPoint
* Add errDAAWindowBlockNotFound
* Force connection loop next iteration on connection manager stop
* Revert to Test64IncomingConnections
* Remove BlockAtDepth from DAGTraversalManager
* numBullies->16
* Set page file size to 8gb
* Increase p2p max message size
* Test64IncomingConnections->Test16IncomingConnections
* Add comment for PruningProofM
* Add comment in `func (c *ConnectionManager) Stop()`
* Rename isAncestorOfPruningPoint->isAncestorOfSelectedTip
* Revert page file to 16gb
* Improve ExpectedHeaderPruningPoint perf
* Fix comment
* Revert "Improve ExpectedHeaderPruningPoint perf" (reverts commit bca1080e7140c78d510f51bbea858ae280c2f38e)
* Don't test windows
79 lines · 3.0 KiB · Go
package pow

import (
	"math/big"

	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
	"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
	"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
	"github.com/kaspanet/kaspad/domain/consensus/utils/serialization"
	"github.com/kaspanet/kaspad/util/difficulty"

	"github.com/pkg/errors"
)

// CheckProofOfWorkWithTarget checks if the block has a valid PoW according to the provided target.
// It does not check if the difficulty itself is valid or less than the maximum for the appropriate network.
func CheckProofOfWorkWithTarget(header externalapi.MutableBlockHeader, target *big.Int) bool {
	// The block's PoW value must be less than or equal to the claimed target.
	powNum := CalculateProofOfWorkValue(header)
	return powNum.Cmp(target) <= 0
}

// CheckProofOfWorkByBits checks if the block has a valid PoW according to its Bits field.
// It does not check if the difficulty itself is valid or less than the maximum for the appropriate network.
func CheckProofOfWorkByBits(header externalapi.MutableBlockHeader) bool {
	return CheckProofOfWorkWithTarget(header, difficulty.CompactToBig(header.Bits()))
}
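
Together these two functions form the full header PoW check: difficulty.CompactToBig expands the compact 32-bit Bits field into the full 256-bit target, and the header's PoW value is compared against it. Below is a minimal caller-side sketch, assuming kaspad's standard import paths; validateHeaderPoW is a hypothetical wrapper, not part of this package:

package validation

import (
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
	"github.com/pkg/errors"
)

// validateHeaderPoW turns the boolean check into an error, the way a
// block validator might consume it. Hypothetical helper for illustration.
func validateHeaderPoW(header externalapi.MutableBlockHeader) error {
	if !pow.CheckProofOfWorkByBits(header) {
		return errors.New("block header has invalid proof of work")
	}
	return nil
}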

// CalculateProofOfWorkValue hashes the given header and returns its big.Int value
func CalculateProofOfWorkValue(header externalapi.MutableBlockHeader) *big.Int {
	// Zero out the time and nonce so the pre-PoW hash is independent of them.
	timestamp, nonce := header.TimeInMilliseconds(), header.Nonce()
	header.SetTimeInMilliseconds(0)
	header.SetNonce(0)

	prePowHash := consensushashing.HeaderHash(header)

	// Restore the original time and nonce.
	header.SetTimeInMilliseconds(timestamp)
	header.SetNonce(nonce)

	// The PoW hash preimage is: PRE_POW_HASH || TIME || 32 zero byte padding || NONCE
	writer := hashes.NewPoWHashWriter()
	writer.InfallibleWrite(prePowHash.ByteSlice())
	err := serialization.WriteElement(writer, timestamp)
	if err != nil {
		panic(errors.Wrap(err, "this should never happen. Hash digest should never return an error"))
	}
	zeroes := [32]byte{}
	writer.InfallibleWrite(zeroes[:])
	err = serialization.WriteElement(writer, nonce)
	if err != nil {
		panic(errors.Wrap(err, "this should never happen. Hash digest should never return an error"))
	}
	return toBig(writer.Finalize())
}
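
Because the time and nonce are zeroed before the pre-PoW hash is taken, that hash is identical for every nonce attempt; only the short PRE_POW_HASH || TIME || padding || NONCE preimage changes between attempts, which is what lets optimized miners grind nonces cheaply. A rough sketch of such a loop built on the exported check, assuming the caller owns the header (illustrative only, not kaspad's actual miner):

package miner

import (
	"math/big"

	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
)

// trySolve grinds nonces until the header's PoW value meets the target or
// maxAttempts is exhausted. It mutates the header's nonce in place.
func trySolve(header externalapi.MutableBlockHeader, target *big.Int, maxAttempts uint64) bool {
	for nonce := uint64(0); nonce < maxAttempts; nonce++ {
		header.SetNonce(nonce)
		if pow.CheckProofOfWorkWithTarget(header, target) {
			return true
		}
	}
	return false
}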

// toBig converts an externalapi.DomainHash into a big.Int, treating the hash as little-endian.
func toBig(hash *externalapi.DomainHash) *big.Int {
	// We treat the hash as little-endian for PoW purposes, but the big package
	// wants the bytes in big-endian, so reverse them.
	buf := hash.ByteSlice()
	blen := len(buf)
	for i := 0; i < blen/2; i++ {
		buf[i], buf[blen-1-i] = buf[blen-1-i], buf[i]
	}

	return new(big.Int).SetBytes(buf)
}
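
To make the endianness concrete: the hash stores its least significant byte first, so a 32-byte buffer whose first byte is 0x01 (all others zero) represents the integer 1, and the in-place reversal makes big-endian SetBytes agree with that. A standalone sketch of the same transform, independent of the DomainHash type:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Little-endian: 0x01 in the first byte means the integer 1.
	buf := make([]byte, 32)
	buf[0] = 0x01

	// Reverse into big-endian, as toBig does, before handing to SetBytes.
	for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 {
		buf[i], buf[j] = buf[j], buf[i]
	}

	fmt.Println(new(big.Int).SetBytes(buf)) // prints 1
}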

// BlockLevel returns the block level of the given header.
func BlockLevel(header externalapi.BlockHeader) int {
	proofOfWorkValue := CalculateProofOfWorkValue(header.ToMutable())
	// Scan upward from the low bits: the level is one less than the position
	// of the lowest set bit above bit 0, capped at constants.MaxBlockLevel.
	for blockLevel := 0; ; blockLevel++ {
		if blockLevel == constants.MaxBlockLevel || proofOfWorkValue.Bit(blockLevel+1) != 0 {
			return blockLevel
		}
	}
}
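
The commit message above ("Calculate GHOSTDAG, reachability etc for each level", "Include all relevant blocks for each level in the proof") is where BlockLevel comes into play: the pruning proof maintains one sub-DAG per level, and a block of level n participates in every level up to n. A hedged sketch of that bucketing, under the assumption just stated (groupByLevel is illustrative, not a kaspad API):

package proof

import (
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
	"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
)

// groupByLevel buckets headers by block level. A block of level n is
// assumed to belong to every level <= n, so it is appended to each
// bucket up to its own level.
func groupByLevel(headers []externalapi.BlockHeader) [][]externalapi.BlockHeader {
	buckets := make([][]externalapi.BlockHeader, constants.MaxBlockLevel+1)
	for _, header := range headers {
		level := pow.BlockLevel(header)
		for i := 0; i <= level; i++ {
			buckets[i] = append(buckets[i], header)
		}
	}
	return buckets
}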