Compare commits

..

2 Commits

Author SHA1 Message Date
stasatdaglabs
a388112e86 Merge remote-tracking branch 'origin/v0.11.0-dev' into mainnet-dnsseeder 2021-11-07 10:21:19 +02:00
stasatdaglabs
49285d64f2 Add the daglabs-dev mainnet dnsseeder. 2021-11-07 10:02:43 +02:00
92 changed files with 808 additions and 1776 deletions

View File

@@ -1,7 +1,7 @@
name: Build and upload assets
on:
release:
types: [ published ]
types: [published]
jobs:
build:
@@ -36,7 +36,7 @@ jobs:
# `-tags netgo,osusergo` means use pure go replacements for "os/user" and "net"
# `-s -w` strips the binary to produce smaller size binaries
run: |
go build -v -ldflags="-s -w -extldflags=-static" -tags netgo,osusergo -o ./bin/ . ./cmd/...
go build -v -ldflags="-s -w -extldflags=-static" -tags netgo,osusergo -o ./bin/ ./...
archive="bin/kaspad-${{ github.event.release.tag_name }}-linux.zip"
asset_name="kaspad-${{ github.event.release.tag_name }}-linux.zip"
zip -r "${archive}" ./bin/*
@@ -47,7 +47,7 @@ jobs:
if: runner.os == 'Windows'
shell: bash
run: |
go build -v -ldflags="-s -w" -o bin/ . ./cmd/...
go build -v -ldflags="-s -w" -o bin/ ./...
archive="bin/kaspad-${{ github.event.release.tag_name }}-win64.zip"
asset_name="kaspad-${{ github.event.release.tag_name }}-win64.zip"
powershell "Compress-Archive bin/* \"${archive}\""
@@ -57,7 +57,7 @@ jobs:
- name: Build on MacOS
if: runner.os == 'macOS'
run: |
go build -v -ldflags="-s -w" -o ./bin/ . ./cmd/...
go build -v -ldflags="-s -w" -o ./bin/ ./...
archive="bin/kaspad-${{ github.event.release.tag_name }}-osx.zip"
asset_name="kaspad-${{ github.event.release.tag_name }}-osx.zip"
zip -r "${archive}" ./bin/*

View File

@@ -46,4 +46,4 @@ jobs:
run: |
git checkout "${{ env.run_on }}"
git status
go test -timeout 20m -race ./...
go test -race ./...

View File

@@ -20,10 +20,7 @@ import (
"github.com/kaspanet/kaspad/version"
)
const (
leveldbCacheSizeMiB = 256
defaultDataDirname = "datadir2"
)
const leveldbCacheSizeMiB = 256
var desiredLimits = &limits.DesiredLimits{
FileLimitWant: 2048,
@@ -162,7 +159,7 @@ func (app *kaspadApp) main(startedChan chan<- struct{}) error {
// dbPath returns the path to the block database given a database type.
func databasePath(cfg *config.Config) string {
return filepath.Join(cfg.AppDir, defaultDataDirname)
return filepath.Join(cfg.AppDir, "data")
}
func removeDatabase(cfg *config.Config) error {

View File

@@ -12,7 +12,7 @@ import (
const (
// ProtocolVersion is the latest protocol version this package supports.
ProtocolVersion uint32 = 3
ProtocolVersion uint32 = 1
// DefaultServices describes the default services that are supported by
// the server.

View File

@@ -8,7 +8,7 @@ import (
// DefaultTimeout is the default duration to wait for enqueuing/dequeuing
// to/from routes.
const DefaultTimeout = 120 * time.Second
const DefaultTimeout = 30 * time.Second
// ErrPeerWithSameIDExists signifies that a peer with the same ID already exist.
var ErrPeerWithSameIDExists = errors.New("ready peer with the same ID already exists")

View File

@@ -3,12 +3,8 @@ package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"runtime"
"sync/atomic"
)
// PruningPointAndItsAnticoneRequestsContext is the interface for the context needed for the HandlePruningPointAndItsAnticoneRequests flow.
@@ -16,80 +12,51 @@ type PruningPointAndItsAnticoneRequestsContext interface {
Domain() domain.Domain
}
var isBusy uint32
// HandlePruningPointAndItsAnticoneRequests listens to appmessage.MsgRequestPruningPointAndItsAnticone messages and sends
// the pruning point and its anticone to the requesting peer.
func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticoneRequestsContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
for {
err := func() error {
_, err := incomingRoute.Dequeue()
if err != nil {
return err
}
if !atomic.CompareAndSwapUint32(&isBusy, 0, 1) {
return protocolerrors.Errorf(false, "node is busy with other pruning point anticone requests")
}
defer atomic.StoreUint32(&isBusy, 0)
log.Debugf("Got request for pruning point and its anticone from %s", peer)
pruningPointHeaders, err := context.Domain().Consensus().PruningPointHeaders()
if err != nil {
return err
}
msgPruningPointHeaders := make([]*appmessage.MsgBlockHeader, len(pruningPointHeaders))
for i, header := range pruningPointHeaders {
msgPruningPointHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(header)
}
err = outgoingRoute.Enqueue(appmessage.NewMsgPruningPoints(msgPruningPointHeaders))
if err != nil {
return err
}
pointAndItsAnticone, err := context.Domain().Consensus().PruningPointAndItsAnticone()
if err != nil {
return err
}
for _, blockHash := range pointAndItsAnticone {
err := sendBlockWithTrustedData(context, outgoingRoute, blockHash)
if err != nil {
return err
}
}
err = outgoingRoute.Enqueue(appmessage.NewMsgDoneBlocksWithTrustedData())
if err != nil {
return err
}
log.Debugf("Sent pruning point and its anticone to %s", peer)
return nil
}()
_, err := incomingRoute.Dequeue()
if err != nil {
return err
}
log.Debugf("Got request for pruning point and its anticone from %s", peer)
pruningPointHeaders, err := context.Domain().Consensus().PruningPointHeaders()
if err != nil {
return err
}
msgPruningPointHeaders := make([]*appmessage.MsgBlockHeader, len(pruningPointHeaders))
for i, header := range pruningPointHeaders {
msgPruningPointHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(header)
}
err = outgoingRoute.Enqueue(appmessage.NewMsgPruningPoints(msgPruningPointHeaders))
if err != nil {
return err
}
blocks, err := context.Domain().Consensus().PruningPointAndItsAnticoneWithTrustedData()
if err != nil {
return err
}
for _, block := range blocks {
err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedData(block))
if err != nil {
return err
}
}
err = outgoingRoute.Enqueue(appmessage.NewMsgDoneBlocksWithTrustedData())
if err != nil {
return err
}
log.Debugf("Sent pruning point and its anticone to %s", peer)
}
}
func sendBlockWithTrustedData(context PruningPointAndItsAnticoneRequestsContext, outgoingRoute *router.Route, blockHash *externalapi.DomainHash) error {
blockWithTrustedData, err := context.Domain().Consensus().BlockWithTrustedData(blockHash)
if err != nil {
return err
}
err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedData(blockWithTrustedData))
if err != nil {
return err
}
runtime.GC()
return nil
}

View File

@@ -70,8 +70,7 @@ func (flow *handleRequestPruningPointUTXOSetFlow) waitForRequestPruningPointUTXO
}
msgRequestPruningPointUTXOSet, ok := message.(*appmessage.MsgRequestPruningPointUTXOSet)
if !ok {
// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages
return nil, protocolerrors.Errorf(false, "received unexpected message type. "+
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestPruningPointUTXOSet, message.Command())
}
return msgRequestPruningPointUTXOSet, nil
@@ -103,17 +102,14 @@ func (flow *handleRequestPruningPointUTXOSetFlow) sendPruningPointUTXOSet(
return err
}
finished := len(pruningPointUTXOs) < step
if finished && chunksSent%ibdBatchSize != 0 {
if len(pruningPointUTXOs) < step && chunksSent%ibdBatchSize == 0 {
log.Debugf("Finished sending UTXOs for pruning block %s",
msgRequestPruningPointUTXOSet.PruningPointHash)
return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
}
if len(pruningPointUTXOs) > 0 {
fromOutpoint = pruningPointUTXOs[len(pruningPointUTXOs)-1].Outpoint
}
fromOutpoint = pruningPointUTXOs[len(pruningPointUTXOs)-1].Outpoint
chunksSent++
// Wait for the peer to request more chunks every `ibdBatchSize` chunks
@@ -124,17 +120,9 @@ func (flow *handleRequestPruningPointUTXOSetFlow) sendPruningPointUTXOSet(
}
_, ok := message.(*appmessage.MsgRequestNextPruningPointUTXOSetChunk)
if !ok {
// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages
return protocolerrors.Errorf(false, "received unexpected message type. "+
return protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointUTXOSetChunk, message.Command())
}
if finished {
log.Debugf("Finished sending UTXOs for pruning block %s",
msgRequestPruningPointUTXOSet.PruningPointHash)
return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
}
}
}
}

View File

@@ -130,7 +130,7 @@ func (flow *handleRelayInvsFlow) downloadHeadersAndPruningUTXOSet(highHash *exte
return err
}
log.Infof("Headers downloaded from peer %s", flow.peer)
log.Debugf("Headers downloaded from peer %s", flow.peer)
highHashInfo, err := flow.Domain().StagingConsensus().GetBlockInfo(highHash)
if err != nil {

View File

@@ -15,7 +15,7 @@ import (
// maxProtocolVersion version is the maximum supported protocol
// version this kaspad node supports
const maxProtocolVersion = 3
const maxProtocolVersion = 1
// Peer holds data about a peer.
type Peer struct {

View File

@@ -102,7 +102,7 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
func (m *Manager) handleError(err error, netConnection *netadapter.NetConnection, outgoingRoute *routerpkg.Route) {
if protocolErr := (protocolerrors.ProtocolError{}); errors.As(err, &protocolErr) {
if m.context.Config().EnableBanning && protocolErr.ShouldBan {
if !m.context.Config().DisableBanning && protocolErr.ShouldBan {
log.Warnf("Banning %s (reason: %s)", netConnection, protocolErr.Cause)
err := m.context.ConnectionManager().Ban(netConnection)

View File

@@ -14,14 +14,9 @@ import (
func HandleSubmitBlock(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
submitBlockRequest := request.(*appmessage.SubmitBlockRequestMessage)
isSynced, err := context.ProtocolManager.ShouldMine()
if err != nil {
return nil, err
}
if !context.Config.AllowSubmitBlockWhenNotSynced && !isSynced {
if context.ProtocolManager.IsIBDRunning() {
return &appmessage.SubmitBlockResponseMessage{
Error: appmessage.RPCErrorf("Block not submitted - node is not synced"),
Error: appmessage.RPCErrorf("Block not submitted - IBD is running"),
RejectReason: appmessage.RejectReasonIsInIBD,
}, nil
}

View File

@@ -15,11 +15,13 @@ golint -set_exit_status ./...
staticcheck -checks SA4006,SA4008,SA4009,SA4010,SA5003,SA1004,SA1014,SA1021,SA1023,SA1024,SA1025,SA1026,SA1027,SA1028,SA2000,SA2001,SA2003,SA4000,SA4001,SA4003,SA4004,SA4011,SA4012,SA4013,SA4014,SA4015,SA4016,SA4017,SA4018,SA4019,SA4020,SA4021,SA4022,SA4023,SA5000,SA5002,SA5004,SA5005,SA5007,SA5008,SA5009,SA5010,SA5011,SA5012,SA6001,SA6002,SA9001,SA9002,SA9003,SA9004,SA9005,SA9006,ST1019 ./...
go vet -composites=false $FLAGS ./...
go build $FLAGS -o kaspad .
if [ -n "${NO_PARALLEL}" ]
then
go test -timeout 20m -parallel=1 $FLAGS ./...
go test -parallel=1 $FLAGS ./...
else
go test -timeout 20m $FLAGS ./...
go test $FLAGS ./...
fi

View File

@@ -1,15 +1,3 @@
Kaspad v0.11.2 - 2021-11-11
===========================
Bug fixes:
* Enlarge p2p max message size to 1gb
* Fix UTXO chunks logic
* Increase tests timeout to 20 minutes
Kaspad v0.11.1 - 2021-11-09
===========================
Non-breaking changes:
* Cache the miner state
Kaspad v0.10.2 - 2021-05-18
===========================
Non-breaking changes:

View File

@@ -13,6 +13,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/difficulty"
"github.com/pkg/errors"
)
@@ -142,20 +143,20 @@ func mineNextBlock(mineWhenNotSynced bool) *externalapi.DomainBlock {
// In the rare case where the nonce space is exhausted for a specific
// block, it'll keep looping the nonce until a new block template
// is discovered.
block, state := getBlockForMining(mineWhenNotSynced)
state.Nonce = nonce
block := getBlockForMining(mineWhenNotSynced)
targetDifficulty := difficulty.CompactToBig(block.Header.Bits())
headerForMining := block.Header.ToMutable()
headerForMining.SetNonce(nonce)
atomic.AddUint64(&hashesTried, 1)
if state.CheckProofOfWork() {
mutHeader := block.Header.ToMutable()
mutHeader.SetNonce(nonce)
block.Header = mutHeader.ToImmutable()
if pow.CheckProofOfWorkWithTarget(headerForMining, targetDifficulty) {
block.Header = headerForMining.ToImmutable()
log.Infof("Found block %s with parents %s", consensushashing.BlockHash(block), block.Header.DirectParents())
return block
}
}
}
func getBlockForMining(mineWhenNotSynced bool) (*externalapi.DomainBlock, *pow.State) {
func getBlockForMining(mineWhenNotSynced bool) *externalapi.DomainBlock {
tryCount := 0
const sleepTime = 500 * time.Millisecond
@@ -165,7 +166,7 @@ func getBlockForMining(mineWhenNotSynced bool) (*externalapi.DomainBlock, *pow.S
tryCount++
shouldLog := (tryCount-1)%10 == 0
template, state, isSynced := templatemanager.Get()
template, isSynced := templatemanager.Get()
if template == nil {
if shouldLog {
log.Info("Waiting for the initial template")
@@ -181,7 +182,7 @@ func getBlockForMining(mineWhenNotSynced bool) (*externalapi.DomainBlock, *pow.S
continue
}
return template, state
return template
}
}

View File

@@ -3,26 +3,23 @@ package templatemanager
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
"sync"
)
var currentTemplate *externalapi.DomainBlock
var currentState *pow.State
var isSynced bool
var lock = &sync.Mutex{}
// Get returns the template to work on
func Get() (*externalapi.DomainBlock, *pow.State, bool) {
func Get() (*externalapi.DomainBlock, bool) {
lock.Lock()
defer lock.Unlock()
// Shallow copy the block so when the user replaces the header it won't affect the template here.
if currentTemplate == nil {
return nil, nil, false
return nil, false
}
block := *currentTemplate
state := *currentState
return &block, &state, isSynced
return &block, isSynced
}
// Set sets the current template to work on
@@ -34,7 +31,6 @@ func Set(template *appmessage.GetBlockTemplateResponseMessage) error {
lock.Lock()
defer lock.Unlock()
currentTemplate = block
currentState = pow.NewState(block.Header.ToMutable())
isSynced = template.IsSynced
return nil
}

View File

@@ -56,7 +56,6 @@ func create(conf *createConfig) error {
}
file := keys.File{
Version: keys.LastVersion,
EncryptedMnemonics: encryptedMnemonics,
ExtendedPublicKeys: extendedPublicKeys,
MinimumSignatures: conf.MinimumSignatures,

View File

@@ -19,7 +19,7 @@ func Connect(address string) (pb.KaspawalletdClient, func(), error) {
conn, err := grpc.DialContext(ctx, address, grpc.WithInsecure(), grpc.WithBlock())
if err != nil {
if errors.Is(err, context.DeadlineExceeded) {
return nil, nil, errors.New("kaspawallet daemon is not running, start it with `kaspawallet start-daemon`")
return nil, nil, errors.New("kaspactl daemon is not running, start it with `kaspactl start-daemon`")
}
return nil, nil, err
}

View File

@@ -97,7 +97,7 @@ func encryptMnemonic(mnemonic string, password []byte) (*EncryptedMnemonic, erro
return nil, err
}
aead, err := getAEAD(defaultNumThreads, password, salt)
aead, err := getAEAD(password, salt)
if err != nil {
return nil, err
}

View File

@@ -10,7 +10,6 @@ import (
"os"
"path/filepath"
"runtime"
"strings"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/util"
@@ -23,9 +22,6 @@ var (
defaultAppDir = util.AppDir("kaspawallet", false)
)
// LastVersion is the most up to date file format version
const LastVersion = 1
func defaultKeysFile(netParams *dagconfig.Params) string {
return filepath.Join(defaultAppDir, netParams.Name, "keys.json")
}
@@ -36,8 +32,6 @@ type encryptedPrivateKeyJSON struct {
}
type keysFileJSON struct {
Version uint32 `json:"version"`
NumThreads uint8 `json:"numThreads,omitempty"` // This field is ignored for versions different from 0. See more details at the function `numThreads`.
EncryptedPrivateKeys []*encryptedPrivateKeyJSON `json:"encryptedMnemonics"`
ExtendedPublicKeys []string `json:"publicKeys"`
MinimumSignatures uint32 `json:"minimumSignatures"`
@@ -55,8 +49,6 @@ type EncryptedMnemonic struct {
// File holds all the data related to the wallet keys
type File struct {
Version uint32
NumThreads uint8 // This field is ignored for versions different than 0
EncryptedMnemonics []*EncryptedMnemonic
ExtendedPublicKeys []string
MinimumSignatures uint32
@@ -77,8 +69,6 @@ func (d *File) toJSON() *keysFileJSON {
}
return &keysFileJSON{
Version: d.Version,
NumThreads: d.NumThreads,
EncryptedPrivateKeys: encryptedPrivateKeysJSON,
ExtendedPublicKeys: d.ExtendedPublicKeys,
MinimumSignatures: d.MinimumSignatures,
@@ -90,8 +80,6 @@ func (d *File) toJSON() *keysFileJSON {
}
func (d *File) fromJSON(fileJSON *keysFileJSON) error {
d.Version = fileJSON.Version
d.NumThreads = fileJSON.NumThreads
d.MinimumSignatures = fileJSON.MinimumSignatures
d.ECDSA = fileJSON.ECDSA
d.ExtendedPublicKeys = fileJSON.ExtendedPublicKeys
@@ -193,20 +181,10 @@ func (d *File) DecryptMnemonics(cmdLinePassword string) ([]string, error) {
if len(password) == 0 {
password = getPassword("Password:")
}
var numThreads uint8
if len(d.EncryptedMnemonics) > 0 {
var err error
numThreads, err = d.numThreads(password)
if err != nil {
return nil, err
}
}
privateKeys := make([]string, len(d.EncryptedMnemonics))
for i, encryptedPrivateKey := range d.EncryptedMnemonics {
var err error
privateKeys[i], err = decryptMnemonic(numThreads, encryptedPrivateKey, password)
privateKeys[i], err = decryptMnemonic(encryptedPrivateKey, password)
if err != nil {
return nil, err
}
@@ -300,73 +278,13 @@ func (d *File) Save() error {
return nil
}
const defaultNumThreads = 8
func (d *File) numThreads(password []byte) (uint8, error) {
// There's a bug in v0 wallets where the number of threads
// was determined by the number of logical CPUs at the machine,
// which made the authentication non-deterministic across platforms.
// In order to solve it we introduce v1 where the number of threads
// is constant, and brute force the number of threads in v0. After we
// find the right amount via brute force we save the result to the file.
if d.Version != 0 {
return defaultNumThreads, nil
}
if d.NumThreads != 0 {
return d.NumThreads, nil
}
numThreads, err := d.detectNumThreads(password, d.EncryptedMnemonics[0].salt)
if err != nil {
return 0, err
}
d.NumThreads = numThreads
err = d.Save()
if err != nil {
return 0, err
}
return numThreads, nil
}
func (d *File) detectNumThreads(password, salt []byte) (uint8, error) {
numCPU := uint8(runtime.NumCPU())
_, err := getAEAD(numCPU, password, salt)
if err != nil {
if !strings.Contains(err.Error(), "message authentication failed") {
return 0, err
}
} else {
return numCPU, nil
}
for i := uint8(1); ; i++ {
if i == numCPU {
continue
}
_, err := getAEAD(i, password, salt)
if err != nil {
const maxTries = 32
if i > maxTries || !strings.Contains(err.Error(), "message authentication failed") {
return 0, err
}
} else {
return i, nil
}
}
}
func getAEAD(threads uint8, password, salt []byte) (cipher.AEAD, error) {
key := argon2.IDKey(password, salt, 1, 64*1024, threads, 32)
func getAEAD(password, salt []byte) (cipher.AEAD, error) {
key := argon2.IDKey(password, salt, 1, 64*1024, uint8(runtime.NumCPU()), 32)
return chacha20poly1305.NewX(key)
}
func decryptMnemonic(numThreads uint8, encryptedPrivateKey *EncryptedMnemonic, password []byte) (string, error) {
aead, err := getAEAD(numThreads, password, encryptedPrivateKey.salt)
func decryptMnemonic(encryptedPrivateKey *EncryptedMnemonic, password []byte) (string, error) {
aead, err := getAEAD(password, encryptedPrivateKey.salt)
if err != nil {
return "", err
}

View File

@@ -137,18 +137,11 @@ func (s *consensus) Init(skipAddingGenesis bool) error {
return nil
}
func (s *consensus) PruningPointAndItsAnticone() ([]*externalapi.DomainHash, error) {
func (s *consensus) PruningPointAndItsAnticoneWithTrustedData() ([]*externalapi.BlockWithTrustedData, error) {
s.lock.Lock()
defer s.lock.Unlock()
return s.pruningManager.PruningPointAndItsAnticone()
}
func (s *consensus) BlockWithTrustedData(blockHash *externalapi.DomainHash) (*externalapi.BlockWithTrustedData, error) {
s.lock.Lock()
defer s.lock.Unlock()
return s.pruningManager.BlockWithTrustedData(model.NewStagingArea(), blockHash)
return s.pruningManager.PruningPointAndItsAnticoneWithTrustedData()
}
// BuildBlock builds a block over the current state, with the transactions
@@ -723,10 +716,7 @@ func (s *consensus) PopulateMass(transaction *externalapi.DomainTransaction) {
func (s *consensus) ResolveVirtual() error {
// In order to prevent a situation that the consensus lock is held for too much time, we
// release the lock each time resolve 100 blocks.
for i := 0; ; i++ {
if i%10 == 0 {
log.Infof("Resolving virtual. This may take some time...")
}
for {
var isCompletelyResolved bool
var err error
func() {
@@ -740,7 +730,6 @@ func (s *consensus) ResolveVirtual() error {
}
if isCompletelyResolved {
log.Infof("Resolved virtual")
return nil
}
}

View File

@@ -6,9 +6,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/util/staging"
"github.com/pkg/errors"
)
var reachabilityDataBucketName = []byte("reachability-data")
@@ -52,8 +50,6 @@ func (rds *reachabilityDataStore) IsStaged(stagingArea *model.StagingArea) bool
return rds.stagingShard(stagingArea).isStaged()
}
var errNotFound = errors.Wrap(database.ErrNotFound, "reachability data not found")
// ReachabilityData returns the reachabilityData associated with the given blockHash
func (rds *reachabilityDataStore) ReachabilityData(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (model.ReachabilityData, error) {
stagingShard := rds.stagingShard(stagingArea)
@@ -63,16 +59,10 @@ func (rds *reachabilityDataStore) ReachabilityData(dbContext model.DBReader, sta
}
if reachabilityData, ok := rds.reachabilityDataCache.Get(blockHash); ok {
if reachabilityData == nil {
return nil, errNotFound
}
return reachabilityData.(model.ReachabilityData), nil
}
reachabilityDataBytes, err := dbContext.Get(rds.reachabilityDataBlockHashAsKey(blockHash))
if database.IsNotFoundError(err) {
rds.reachabilityDataCache.Add(blockHash, nil)
}
if err != nil {
return nil, err
}
@@ -86,15 +76,17 @@ func (rds *reachabilityDataStore) ReachabilityData(dbContext model.DBReader, sta
}
func (rds *reachabilityDataStore) HasReachabilityData(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (bool, error) {
_, err := rds.ReachabilityData(dbContext, stagingArea, blockHash)
if database.IsNotFoundError(err) {
return false, nil
}
if err != nil {
return false, err
stagingShard := rds.stagingShard(stagingArea)
if _, ok := stagingShard.reachabilityData[*blockHash]; ok {
return true, nil
}
return true, nil
if rds.reachabilityDataCache.Has(blockHash) {
return true, nil
}
return dbContext.Has(rds.reachabilityDataBlockHashAsKey(blockHash))
}
// ReachabilityReindexRoot returns the current reachability reindex root

View File

@@ -4,7 +4,6 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/datastructures/daawindowstore"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/processes/blockparentbuilder"
parentssanager "github.com/kaspanet/kaspad/domain/consensus/processes/parentsmanager"
"github.com/kaspanet/kaspad/domain/consensus/processes/pruningproofmanager"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"io/ioutil"
@@ -159,16 +158,12 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
dagTraversalManager := dagTraversalManagers[0]
// Processes
parentsManager := parentssanager.New(config.GenesisHash)
blockParentBuilder := blockparentbuilder.New(
dbManager,
blockHeaderStore,
dagTopologyManager,
parentsManager,
reachabilityDataStore,
pruningStore,
config.GenesisHash,
)
pastMedianTimeManager := f.pastMedianTimeConsructor(
config.TimestampDeviationTolerance,
@@ -321,7 +316,6 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
finalityManager,
blockParentBuilder,
pruningManager,
parentsManager,
pruningStore,
blockStore,
@@ -409,7 +403,6 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
ghostdagManagers,
reachabilityManagers,
dagTraversalManagers,
parentsManager,
ghostdagDataStores,
pruningStore,
@@ -588,7 +581,7 @@ func dagStores(config *Config,
ghostdagDataStores[i] = ghostdagdatastore.New(prefixBucket, ghostdagDataCacheSize, preallocateCaches)
} else {
blockRelationStores[i] = blockrelationstore.New(prefixBucket, 200, false)
reachabilityDataStores[i] = reachabilitydatastore.New(prefixBucket, 86400, false)
reachabilityDataStores[i] = reachabilitydatastore.New(prefixBucket, 200, false)
ghostdagDataStores[i] = ghostdagdatastore.New(prefixBucket, 200, false)
}
}

View File

@@ -16,8 +16,8 @@ import (
func TestFinality(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
// Set finalityInterval to 20 blocks, so that test runs quickly
consensusConfig.FinalityDuration = 20 * consensusConfig.TargetTimePerBlock
// Set finalityInterval to 50 blocks, so that test runs quickly
consensusConfig.FinalityDuration = 50 * consensusConfig.TargetTimePerBlock
factory := consensus.NewFactory()
consensus, teardown, err := factory.NewTestConsensus(consensusConfig, "TestFinality")
@@ -180,8 +180,7 @@ func TestFinality(t *testing.T) {
func TestBoundedMergeDepth(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
// Set finalityInterval to 50 blocks, so that test runs quickly
consensusConfig.K = 5
consensusConfig.FinalityDuration = 7 * consensusConfig.TargetTimePerBlock
consensusConfig.FinalityDuration = 50 * consensusConfig.TargetTimePerBlock
finalityInterval := int(consensusConfig.FinalityDepth())
if int(consensusConfig.K) >= finalityInterval {

View File

@@ -58,6 +58,7 @@ type BlockHeader interface {
type BaseBlockHeader interface {
Version() uint16
Parents() []BlockLevelParents
ParentsAtLevel(level int) BlockLevelParents
DirectParents() BlockLevelParents
HashMerkleRoot() *DomainHash
AcceptedIDMerkleRoot() *DomainHash
@@ -69,7 +70,6 @@ type BaseBlockHeader interface {
BlueScore() uint64
BlueWork() *big.Int
PruningPoint() *DomainHash
BlockLevel() int
Equal(other BaseBlockHeader) bool
}

View File

@@ -25,8 +25,7 @@ type Consensus interface {
GetVirtualUTXOs(expectedVirtualParents []*DomainHash, fromOutpoint *DomainOutpoint, limit int) ([]*OutpointAndUTXOEntryPair, error)
PruningPoint() (*DomainHash, error)
PruningPointHeaders() ([]BlockHeader, error)
PruningPointAndItsAnticone() ([]*DomainHash, error)
BlockWithTrustedData(blockHash *DomainHash) (*BlockWithTrustedData, error)
PruningPointAndItsAnticoneWithTrustedData() ([]*BlockWithTrustedData, error)
ClearImportedPruningPointData() error
AppendImportedPruningPointUTXOs(outpointAndUTXOEntryPairs []*OutpointAndUTXOEntryPair) error
ValidateAndInsertImportedPruningPoint(newPruningPoint *DomainHash) error

View File

@@ -5,7 +5,5 @@ import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// BlockParentBuilder exposes a method to build super-block parents for
// a given set of direct parents
type BlockParentBuilder interface {
BuildParents(stagingArea *StagingArea,
daaScore uint64,
directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error)
BuildParents(stagingArea *StagingArea, directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error)
}

View File

@@ -1,9 +0,0 @@
package model
import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// ParentsManager lets is a wrapper above header parents that replaces empty parents with genesis when needed.
type ParentsManager interface {
ParentsAtLevel(blockHeader externalapi.BlockHeader, level int) externalapi.BlockLevelParents
Parents(blockHeader externalapi.BlockHeader) []externalapi.BlockLevelParents
}

View File

@@ -12,7 +12,6 @@ type PruningManager interface {
AppendImportedPruningPointUTXOs(outpointAndUTXOEntryPairs []*externalapi.OutpointAndUTXOEntryPair) error
UpdatePruningPointIfRequired() error
PruneAllBlocksBelow(stagingArea *StagingArea, pruningPointHash *externalapi.DomainHash) error
PruningPointAndItsAnticone() ([]*externalapi.DomainHash, error)
PruningPointAndItsAnticoneWithTrustedData() ([]*externalapi.BlockWithTrustedData, error)
ExpectedHeaderPruningPoint(stagingArea *StagingArea, blockHash *externalapi.DomainHash) (*externalapi.DomainHash, error)
BlockWithTrustedData(stagingArea *StagingArea, blockHash *externalapi.DomainHash) (*externalapi.BlockWithTrustedData, error)
}

View File

@@ -183,16 +183,10 @@ func (bb *blockBuilder) newBlockCoinbaseTransaction(stagingArea *model.StagingAr
func (bb *blockBuilder) buildHeader(stagingArea *model.StagingArea, transactions []*externalapi.DomainTransaction,
newBlockPruningPoint *externalapi.DomainHash) (externalapi.BlockHeader, error) {
daaScore, err := bb.newBlockDAAScore(stagingArea)
parents, err := bb.newBlockParents(stagingArea)
if err != nil {
return nil, err
}
parents, err := bb.newBlockParents(stagingArea, daaScore)
if err != nil {
return nil, err
}
timeInMilliseconds, err := bb.newBlockTime(stagingArea)
if err != nil {
return nil, err
@@ -210,6 +204,10 @@ func (bb *blockBuilder) buildHeader(stagingArea *model.StagingArea, transactions
if err != nil {
return nil, err
}
daaScore, err := bb.newBlockDAAScore(stagingArea)
if err != nil {
return nil, err
}
blueWork, err := bb.newBlockBlueWork(stagingArea)
if err != nil {
return nil, err
@@ -235,12 +233,12 @@ func (bb *blockBuilder) buildHeader(stagingArea *model.StagingArea, transactions
), nil
}
func (bb *blockBuilder) newBlockParents(stagingArea *model.StagingArea, daaScore uint64) ([]externalapi.BlockLevelParents, error) {
func (bb *blockBuilder) newBlockParents(stagingArea *model.StagingArea) ([]externalapi.BlockLevelParents, error) {
virtualBlockRelations, err := bb.blockRelationStore.BlockRelation(bb.databaseContext, stagingArea, model.VirtualBlockHash)
if err != nil {
return nil, err
}
return bb.blockParentBuilder.BuildParents(stagingArea, daaScore, virtualBlockRelations.Parents)
return bb.blockParentBuilder.BuildParents(stagingArea, virtualBlockRelations.Parents)
}
func (bb *blockBuilder) newBlockTime(stagingArea *model.StagingArea) (int64, error) {

View File

@@ -83,7 +83,7 @@ func (bb *testBlockBuilder) buildUTXOInvalidHeader(stagingArea *model.StagingAre
return nil, err
}
parents, err := bb.blockParentBuilder.BuildParents(stagingArea, daaScore, parentHashes)
parents, err := bb.blockParentBuilder.BuildParents(stagingArea, parentHashes)
if err != nil {
return nil, err
}

View File

@@ -5,6 +5,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
"github.com/pkg/errors"
)
@@ -12,11 +13,8 @@ type blockParentBuilder struct {
databaseContext model.DBManager
blockHeaderStore model.BlockHeaderStore
dagTopologyManager model.DAGTopologyManager
parentsManager model.ParentsManager
reachabilityDataStore model.ReachabilityDataStore
pruningStore model.PruningStore
genesisHash *externalapi.DomainHash
}
// New creates a new instance of a BlockParentBuilder
@@ -24,27 +22,20 @@ func New(
databaseContext model.DBManager,
blockHeaderStore model.BlockHeaderStore,
dagTopologyManager model.DAGTopologyManager,
parentsManager model.ParentsManager,
reachabilityDataStore model.ReachabilityDataStore,
pruningStore model.PruningStore,
genesisHash *externalapi.DomainHash,
) model.BlockParentBuilder {
return &blockParentBuilder{
databaseContext: databaseContext,
blockHeaderStore: blockHeaderStore,
dagTopologyManager: dagTopologyManager,
parentsManager: parentsManager,
databaseContext: databaseContext,
blockHeaderStore: blockHeaderStore,
dagTopologyManager: dagTopologyManager,
reachabilityDataStore: reachabilityDataStore,
pruningStore: pruningStore,
genesisHash: genesisHash,
}
}
func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
daaScore uint64, directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error) {
directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error) {
// Late on we'll mutate direct parent hashes, so we first clone it.
directParentHashesCopy := make([]*externalapi.DomainHash, len(directParentHashes))
@@ -102,7 +93,7 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
// all the block levels they occupy
for _, directParentHeader := range directParentHeaders {
directParentHash := consensushashing.HeaderHash(directParentHeader)
blockLevel := directParentHeader.BlockLevel()
blockLevel := pow.BlockLevel(directParentHeader)
for i := 0; i <= blockLevel; i++ {
if _, exists := candidatesByLevelToReferenceBlocksMap[i]; !exists {
candidatesByLevelToReferenceBlocksMap[i] = make(map[externalapi.DomainHash][]*externalapi.DomainHash)
@@ -125,7 +116,7 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
}
for _, directParentHeader := range directParentHeaders {
for blockLevel, blockLevelParentsInHeader := range bpb.parentsManager.Parents(directParentHeader) {
for blockLevel, blockLevelParentsInHeader := range directParentHeader.Parents() {
isEmptyLevel := false
if _, exists := candidatesByLevelToReferenceBlocksMap[blockLevel]; !exists {
candidatesByLevelToReferenceBlocksMap[blockLevel] = make(map[externalapi.DomainHash][]*externalapi.DomainHash)
@@ -154,7 +145,7 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
} else {
for childHash, childHeader := range virtualGenesisChildrenHeaders {
childHash := childHash // Assign to a new pointer to avoid `range` pointer reuse
if bpb.parentsManager.ParentsAtLevel(childHeader, blockLevel).Contains(parent) {
if childHeader.ParentsAtLevel(blockLevel).Contains(parent) {
referenceBlocks = append(referenceBlocks, &childHash)
}
}
@@ -212,21 +203,14 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
}
}
parents := make([]externalapi.BlockLevelParents, 0, len(candidatesByLevelToReferenceBlocksMap))
parents := make([]externalapi.BlockLevelParents, len(candidatesByLevelToReferenceBlocksMap))
for blockLevel := 0; blockLevel < len(candidatesByLevelToReferenceBlocksMap); blockLevel++ {
if blockLevel > 0 {
if _, ok := candidatesByLevelToReferenceBlocksMap[blockLevel][*bpb.genesisHash]; ok && len(candidatesByLevelToReferenceBlocksMap[blockLevel]) == 1 {
break
}
}
levelBlocks := make(externalapi.BlockLevelParents, 0, len(candidatesByLevelToReferenceBlocksMap[blockLevel]))
for block := range candidatesByLevelToReferenceBlocksMap[blockLevel] {
block := block // Assign to a new pointer to avoid `range` pointer reuse
levelBlocks = append(levelBlocks, &block)
}
parents = append(parents, levelBlocks)
parents[blockLevel] = levelBlocks
}
return parents, nil
}

View File

@@ -1,20 +1,12 @@
package blockprocessor
import (
"bytes"
"compress/gzip"
"github.com/kaspanet/go-muhash"
// we need to embed the utxoset of mainnet genesis here
_ "embed"
"fmt"
"github.com/kaspanet/kaspad/domain/consensus/utils/multiset"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/util/difficulty"
"github.com/kaspanet/kaspad/util/staging"
"io"
"sync"
"github.com/kaspanet/kaspad/util/difficulty"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
@@ -24,9 +16,6 @@ import (
"github.com/pkg/errors"
)
//go:embed resources/utxos.gz
var utxoDumpFile []byte
func (bp *blockProcessor) setBlockStatusAfterBlockValidation(
stagingArea *model.StagingArea, block *externalapi.DomainBlock, isPruningPoint bool) error {
@@ -113,33 +102,7 @@ func (bp *blockProcessor) validateAndInsertBlock(stagingArea *model.StagingArea,
}
}
shouldAddHeaderSelectedTip := false
if !hasHeaderSelectedTip {
shouldAddHeaderSelectedTip = true
} else {
pruningPoint, err := bp.pruningStore.PruningPoint(bp.databaseContext, stagingArea)
if err != nil {
return nil, err
}
isInSelectedChainOfPruningPoint, err := bp.dagTopologyManager.IsInSelectedParentChainOf(stagingArea, pruningPoint, blockHash)
if err != nil {
return nil, err
}
// Don't set blocks in the anticone of the pruning point as header selected tip.
shouldAddHeaderSelectedTip = isInSelectedChainOfPruningPoint
}
if shouldAddHeaderSelectedTip {
// Don't set blocks in the anticone of the pruning point as header selected tip.
err = bp.headerTipsManager.AddHeaderTip(stagingArea, blockHash)
if err != nil {
return nil, err
}
}
err = bp.ifGenesisSetUtxoSet(block)
err = bp.headerTipsManager.AddHeaderTip(stagingArea, blockHash)
if err != nil {
return nil, err
}
@@ -231,93 +194,6 @@ func (bp *blockProcessor) validateAndInsertBlock(stagingArea *model.StagingArea,
}, nil
}
var mainnetGenesisUTXOSet externalapi.UTXODiff
var mainnetGenesisMultiSet model.Multiset
var mainnetGenesisOnce sync.Once
var mainnetGenesisErr error
func deserializeMainnetUTXOSet() (externalapi.UTXODiff, model.Multiset, error) {
mainnetGenesisOnce.Do(func() {
toAdd := make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry)
mainnetGenesisMultiSet = multiset.New()
file, err := gzip.NewReader(bytes.NewReader(utxoDumpFile))
if err != nil {
mainnetGenesisErr = err
return
}
for i := 0; ; i++ {
size := make([]byte, 1)
_, err = io.ReadFull(file, size)
if errors.Is(err, io.EOF) {
break
}
if err != nil {
mainnetGenesisErr = err
return
}
serializedUTXO := make([]byte, size[0])
_, err = io.ReadFull(file, serializedUTXO)
if err != nil {
mainnetGenesisErr = err
return
}
mainnetGenesisMultiSet.Add(serializedUTXO)
entry, outpoint, err := utxo.DeserializeUTXO(serializedUTXO)
if err != nil {
mainnetGenesisErr = err
return
}
toAdd[*outpoint] = entry
}
mainnetGenesisUTXOSet, mainnetGenesisErr = utxo.NewUTXODiffFromCollections(utxo.NewUTXOCollection(toAdd), utxo.NewUTXOCollection(make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry)))
})
return mainnetGenesisUTXOSet, mainnetGenesisMultiSet, mainnetGenesisErr
}
func (bp *blockProcessor) ifGenesisSetUtxoSet(block *externalapi.DomainBlock) error {
isGenesis := len(block.Header.DirectParents()) == 0
if !isGenesis {
return nil
}
blockHash := consensushashing.BlockHash(block)
if !block.Header.UTXOCommitment().Equal(externalapi.NewDomainHashFromByteArray(muhash.EmptyMuHashHash.AsArray())) {
log.Infof("Loading checkpoint UTXO set")
diff, utxoSetMultiset, err := deserializeMainnetUTXOSet()
if err != nil {
return err
}
log.Infof("Finished loading checkpoint UTXO set")
utxoSetHash := utxoSetMultiset.Hash()
if !utxoSetHash.Equal(block.Header.UTXOCommitment()) {
return errors.New("Invalid UTXO set dump")
}
area := model.NewStagingArea()
bp.consensusStateStore.StageVirtualUTXODiff(area, diff)
bp.utxoDiffStore.Stage(area, blockHash, diff, nil)
// commit the multiset of genesis
bp.multisetStore.Stage(area, blockHash, utxoSetMultiset)
err = staging.CommitAllChanges(bp.databaseContext, area)
if err != nil {
return err
}
} else {
// if it's genesis but has an empty muhash then commit an empty multiset and an empty diff
area := model.NewStagingArea()
bp.consensusStateStore.StageVirtualUTXODiff(area, utxo.NewUTXODiff())
bp.utxoDiffStore.Stage(area, blockHash, utxo.NewUTXODiff(), nil)
bp.multisetStore.Stage(area, blockHash, multiset.New())
err := staging.CommitAllChanges(bp.databaseContext, area)
if err != nil {
return err
}
}
return nil
}
func isHeaderOnlyBlock(block *externalapi.DomainBlock) bool {
return len(block.Transactions) == 0
}

View File

@@ -87,18 +87,13 @@ func TestValidateAndInsertImportedPruningPoint(t *testing.T) {
t.Fatalf("PruningPointHeaders: %+v", err)
}
pruningPointAndItsAnticone, err := tcSyncer.PruningPointAndItsAnticone()
pruningPointAndItsAnticoneWithTrustedData, err := tcSyncer.PruningPointAndItsAnticoneWithTrustedData()
if err != nil {
t.Fatalf("PruningPointAndItsAnticone: %+v", err)
t.Fatalf("PruningPointAndItsAnticoneWithTrustedData: %+v", err)
}
for _, blockHash := range pruningPointAndItsAnticone {
blockWithTrustedData, err := tcSyncer.BlockWithTrustedData(blockHash)
if err != nil {
return
}
_, err = synceeStaging.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
for _, blockWithTrustedData := range pruningPointAndItsAnticoneWithTrustedData {
_, err := synceeStaging.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
if err != nil {
t.Fatalf("ValidateAndInsertBlockWithTrustedData: %+v", err)
}
@@ -140,21 +135,10 @@ func TestValidateAndInsertImportedPruningPoint(t *testing.T) {
}
}
var fromOutpoint *externalapi.DomainOutpoint
var pruningPointUTXOs []*externalapi.OutpointAndUTXOEntryPair
const step = 100_000
for {
outpointAndUTXOEntryPairs, err := tcSyncer.GetPruningPointUTXOs(pruningPoint, fromOutpoint, step)
if err != nil {
t.Fatalf("GetPruningPointUTXOs: %+v", err)
}
fromOutpoint = outpointAndUTXOEntryPairs[len(outpointAndUTXOEntryPairs)-1].Outpoint
pruningPointUTXOs = append(pruningPointUTXOs, outpointAndUTXOEntryPairs...)
if len(outpointAndUTXOEntryPairs) < step {
break
}
pruningPointUTXOs, err := tcSyncer.GetPruningPointUTXOs(pruningPoint, nil, 1000)
if err != nil {
t.Fatalf("GetPruningPointUTXOs: %+v", err)
}
err = synceeStaging.AppendImportedPruningPointUTXOs(pruningPointUTXOs)
if err != nil {
t.Fatalf("AppendImportedPruningPointUTXOs: %+v", err)
@@ -518,7 +502,7 @@ func TestGetPruningPointUTXOs(t *testing.T) {
// Get pruning point UTXOs in a loop
var allOutpointAndUTXOEntryPairs []*externalapi.OutpointAndUTXOEntryPair
const step = 100_000
step := 100
var fromOutpoint *externalapi.DomainOutpoint
for {
outpointAndUTXOEntryPairs, err := testConsensus.GetPruningPointUTXOs(pruningPoint, fromOutpoint, step)
@@ -533,17 +517,11 @@ func TestGetPruningPointUTXOs(t *testing.T) {
}
}
const mainnetUTXOSize = 1232643
expected := len(outputs) + 1
if consensusConfig.Name == "kaspa-mainnet" {
expected += mainnetUTXOSize
}
// Make sure the length of the UTXOs is exactly spendingTransaction.Outputs + 1 coinbase
// output (includingBlock's coinbase)
if len(allOutpointAndUTXOEntryPairs) != expected {
if len(allOutpointAndUTXOEntryPairs) != len(outputs)+1 {
t.Fatalf("Returned an unexpected amount of UTXOs. "+
"Want: %d, got: %d", expected, len(allOutpointAndUTXOEntryPairs))
"Want: %d, got: %d", len(outputs)+2, len(allOutpointAndUTXOEntryPairs))
}
// Make sure all spendingTransaction.Outputs are in the returned UTXOs

View File

@@ -4,8 +4,6 @@ import (
"bytes"
"math"
"math/big"
"reflect"
"runtime"
"testing"
"github.com/kaspanet/kaspad/domain/consensus"
@@ -23,31 +21,6 @@ import (
"github.com/pkg/errors"
)
func TestBlockValidator_ValidateBodyInIsolation(t *testing.T) {
tests := []func(t *testing.T, tc testapi.TestConsensus, cfg *consensus.Config){
CheckBlockSanity,
CheckBlockHashMerkleRoot,
BlockMass,
CheckBlockDuplicateTransactions,
CheckBlockContainsOnlyOneCoinbase,
CheckBlockDoubleSpends,
CheckFirstBlockTransactionIsCoinbase,
}
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
tc, teardown, err := consensus.NewFactory().NewTestConsensus(consensusConfig, "TestChainedTransactions")
if err != nil {
t.Fatalf("Error setting up consensus: %+v", err)
}
defer teardown(false)
for _, test := range tests {
testName := runtime.FuncForPC(reflect.ValueOf(test).Pointer()).Name()
t.Run(testName, func(t *testing.T) {
test(t, tc, consensusConfig)
})
}
})
}
func TestChainedTransactions(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
consensusConfig.BlockCoinbaseMaturity = 0
@@ -116,39 +89,47 @@ func TestChainedTransactions(t *testing.T) {
})
}
// CheckBlockSanity tests the CheckBlockSanity function to ensure it works
// TestCheckBlockSanity tests the CheckBlockSanity function to ensure it works
// as expected.
func CheckBlockSanity(t *testing.T, tc testapi.TestConsensus, _ *consensus.Config) {
blockHash := consensushashing.BlockHash(&exampleValidBlock)
if len(exampleValidBlock.Transactions) < 3 {
t.Fatalf("Too few transactions in block, expect at least 3, got %v", len(exampleValidBlock.Transactions))
}
func TestCheckBlockSanity(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckBlockSanity")
if err != nil {
t.Fatalf("Error setting up consensus: %+v", err)
}
defer teardown(false)
blockHash := consensushashing.BlockHash(&exampleValidBlock)
if len(exampleValidBlock.Transactions) < 3 {
t.Fatalf("Too few transactions in block, expect at least 3, got %v", len(exampleValidBlock.Transactions))
}
stagingArea := model.NewStagingArea()
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, &exampleValidBlock)
tc.BlockStore().Stage(stagingArea, blockHash, &exampleValidBlock)
err := tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err != nil {
t.Fatalf("Failed validating block in isolation: %v", err)
}
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err != nil {
t.Fatalf("Failed validating block in isolation: %v", err)
}
// Test with block with wrong transactions sorting order
blockHash = consensushashing.BlockHash(&blockWithWrongTxOrder)
tc.BlockStore().Stage(stagingArea, blockHash, &blockWithWrongTxOrder)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if !errors.Is(err, ruleerrors.ErrTransactionsNotSorted) {
t.Errorf("CheckBlockSanity: Expected ErrTransactionsNotSorted error, instead got %v", err)
}
// Test with block with wrong transactions sorting order
blockHash = consensushashing.BlockHash(&blockWithWrongTxOrder)
tc.BlockStore().Stage(stagingArea, blockHash, &blockWithWrongTxOrder)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if !errors.Is(err, ruleerrors.ErrTransactionsNotSorted) {
t.Errorf("CheckBlockSanity: Expected ErrTransactionsNotSorted error, instead got %v", err)
}
// Test a block with invalid parents order
// We no longer require blocks to have ordered parents
blockHash = consensushashing.BlockHash(&unOrderedParentsBlock)
tc.BlockStore().Stage(stagingArea, blockHash, &unOrderedParentsBlock)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err != nil {
t.Errorf("CheckBlockSanity: Expected block to be be body in isolation valid, got error instead: %v", err)
}
// Test a block with invalid parents order
// We no longer require blocks to have ordered parents
blockHash = consensushashing.BlockHash(&unOrderedParentsBlock)
tc.BlockStore().Stage(stagingArea, blockHash, &unOrderedParentsBlock)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err != nil {
t.Errorf("CheckBlockSanity: Expected block to be be body in isolation valid, got error instead: %v", err)
}
})
}
var unOrderedParentsBlock = externalapi.DomainBlock{
@@ -1044,41 +1025,59 @@ var blockWithWrongTxOrder = externalapi.DomainBlock{
},
}
func CheckBlockHashMerkleRoot(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) {
block, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil)
if err != nil {
t.Fatalf("BuildBlockWithParents: %+v", err)
}
blockWithInvalidMerkleRoot := block.Clone()
blockWithInvalidMerkleRoot.Transactions[0].Version += 1
func TestCheckBlockHashMerkleRoot(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckBlockHashMerkleRoot")
if err != nil {
t.Fatalf("Error setting up tc: %+v", err)
}
defer teardown(false)
_, err = tc.ValidateAndInsertBlock(blockWithInvalidMerkleRoot, true)
if !errors.Is(err, ruleerrors.ErrBadMerkleRoot) {
t.Fatalf("Unexpected error: %+v", err)
}
block, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil)
if err != nil {
t.Fatalf("BuildBlockWithParents: %+v", err)
}
blockWithInvalidMerkleRoot := block.Clone()
blockWithInvalidMerkleRoot.Transactions[0].Version += 1
// Check that a block with invalid merkle root is not marked as invalid
// and can be re-added with the right transactions.
_, err = tc.ValidateAndInsertBlock(block, true)
if err != nil {
t.Fatalf("ValidateAndInsertBlock: %+v", err)
}
_, err = tc.ValidateAndInsertBlock(blockWithInvalidMerkleRoot, true)
if !errors.Is(err, ruleerrors.ErrBadMerkleRoot) {
t.Fatalf("Unexpected error: %+v", err)
}
// Check that a block with invalid merkle root is not marked as invalid
// and can be re-added with the right transactions.
_, err = tc.ValidateAndInsertBlock(block, true)
if err != nil {
t.Fatalf("ValidateAndInsertBlock: %+v", err)
}
})
}
func BlockMass(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) {
block, _, err := initBlockWithInvalidBlockMass(consensusConfig, tc)
if err != nil {
t.Fatalf("Error BuildBlockWithParents : %+v", err)
}
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
func TestBlockMass(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestBlockMass")
if err != nil {
t.Fatalf("Error setting up tc: %+v", err)
}
defer teardown(false)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrBlockMassTooHigh) {
t.Fatalf("ValidateBodyInIsolationTest: TestBlockMass:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrBlockMassTooHigh, err)
}
block, _, err := initBlockWithInvalidBlockMass(consensusConfig, tc)
if err != nil {
t.Fatalf("Error BuildBlockWithParents : %+v", err)
}
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrBlockMassTooHigh) {
t.Fatalf("ValidateBodyInIsolationTest: TestBlockMass:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrBlockMassTooHigh, err)
}
})
}
func initBlockWithInvalidBlockMass(consensusConfig *consensus.Config, tc testapi.TestConsensus) (*externalapi.DomainBlock, externalapi.UTXODiff, error) {
@@ -1114,20 +1113,30 @@ func initBlockWithInvalidBlockMass(consensusConfig *consensus.Config, tc testapi
return tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, &emptyCoinbase, []*externalapi.DomainTransaction{tx})
}
func CheckBlockDuplicateTransactions(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) {
block, _, err := initBlockWithDuplicateTransaction(consensusConfig, tc)
if err != nil {
t.Fatalf("Error BuildBlockWithParents : %+v", err)
}
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
func TestCheckBlockDuplicateTransactions(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrDuplicateTx) {
t.Fatalf("ValidateBodyInIsolationTest: TestCheckBlockDuplicateTransactions:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrDuplicateTx, err)
}
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckBlockDuplicateTransactions")
if err != nil {
t.Fatalf("Error setting up tc: %+v", err)
}
defer teardown(false)
block, _, err := initBlockWithDuplicateTransaction(consensusConfig, tc)
if err != nil {
t.Fatalf("Error BuildBlockWithParents : %+v", err)
}
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrDuplicateTx) {
t.Fatalf("ValidateBodyInIsolationTest: TestCheckBlockDuplicateTransactions:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrDuplicateTx, err)
}
})
}
func initBlockWithDuplicateTransaction(consensusConfig *consensus.Config, tc testapi.TestConsensus) (*externalapi.DomainBlock, externalapi.UTXODiff, error) {
@@ -1161,20 +1170,30 @@ func initBlockWithDuplicateTransaction(consensusConfig *consensus.Config, tc tes
return tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, &emptyCoinbase, []*externalapi.DomainTransaction{tx, tx})
}
func CheckBlockContainsOnlyOneCoinbase(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) {
block, _, err := initBlockWithMoreThanOneCoinbase(consensusConfig, tc)
if err != nil {
t.Fatalf("Error BuildBlockWithParents : %+v", err)
}
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
func TestCheckBlockContainsOnlyOneCoinbase(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrMultipleCoinbases) {
t.Fatalf("ValidateBodyInIsolationTest: TestCheckBlockContainsOnlyOneCoinbase:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrMultipleCoinbases, err)
}
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckBlockContainsOnlyOneCoinbase")
if err != nil {
t.Fatalf("Error setting up tc: %+v", err)
}
defer teardown(false)
block, _, err := initBlockWithMoreThanOneCoinbase(consensusConfig, tc)
if err != nil {
t.Fatalf("Error BuildBlockWithParents : %+v", err)
}
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrMultipleCoinbases) {
t.Fatalf("ValidateBodyInIsolationTest: TestCheckBlockContainsOnlyOneCoinbase:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrMultipleCoinbases, err)
}
})
}
func initBlockWithMoreThanOneCoinbase(consensusConfig *consensus.Config, tc testapi.TestConsensus) (*externalapi.DomainBlock, externalapi.UTXODiff, error) {
@@ -1208,20 +1227,30 @@ func initBlockWithMoreThanOneCoinbase(consensusConfig *consensus.Config, tc test
return tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, &emptyCoinbase, []*externalapi.DomainTransaction{tx})
}
func CheckBlockDoubleSpends(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) {
block, _, err := initBlockWithDoubleSpends(consensusConfig, tc)
if err != nil {
t.Fatalf("Error BuildBlockWithParents : %+v", err)
}
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
func TestCheckBlockDoubleSpends(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrDoubleSpendInSameBlock) {
t.Fatalf("ValidateBodyInIsolationTest: TestCheckBlockDoubleSpends:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrDoubleSpendInSameBlock, err)
}
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckBlockDoubleSpends")
if err != nil {
t.Fatalf("Error setting up tc: %+v", err)
}
defer teardown(false)
block, _, err := initBlockWithDoubleSpends(consensusConfig, tc)
if err != nil {
t.Fatalf("Error BuildBlockWithParents : %+v", err)
}
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrDoubleSpendInSameBlock) {
t.Fatalf("ValidateBodyInIsolationTest: TestCheckBlockDoubleSpends:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrDoubleSpendInSameBlock, err)
}
})
}
func initBlockWithDoubleSpends(consensusConfig *consensus.Config, tc testapi.TestConsensus) (*externalapi.DomainBlock, externalapi.UTXODiff, error) {
@@ -1274,18 +1303,27 @@ func initBlockWithDoubleSpends(consensusConfig *consensus.Config, tc testapi.Tes
&emptyCoinbase, []*externalapi.DomainTransaction{tx, txSameOutpoint})
}
func CheckFirstBlockTransactionIsCoinbase(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) {
func TestCheckFirstBlockTransactionIsCoinbase(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
block := initBlockWithFirstTransactionDifferentThanCoinbase(consensusConfig)
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckFirstBlockTransactionIsCoinbase")
if err != nil {
t.Fatalf("Error setting up tc: %+v", err)
}
defer teardown(false)
err := tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrFirstTxNotCoinbase) {
t.Fatalf("ValidateBodyInIsolationTest: TestCheckFirstBlockTransactionIsCoinbase:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrFirstTxNotCoinbase, err)
}
block := initBlockWithFirstTransactionDifferentThanCoinbase(consensusConfig)
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrFirstTxNotCoinbase) {
t.Fatalf("ValidateBodyInIsolationTest: TestCheckFirstBlockTransactionIsCoinbase:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrFirstTxNotCoinbase, err)
}
})
}
func initBlockWithFirstTransactionDifferentThanCoinbase(consensusConfig *consensus.Config) *externalapi.DomainBlock {

View File

@@ -6,6 +6,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/pkg/errors"
)
@@ -62,7 +63,7 @@ func (v *blockValidator) ValidateHeaderInContext(stagingArea *model.StagingArea,
return err
}
if !hasReachabilityData {
blockLevel := header.BlockLevel()
blockLevel := pow.BlockLevel(header)
for i := 0; i <= blockLevel; i++ {
err = v.reachabilityManagers[i].AddBlock(stagingArea, blockHash)
if err != nil {
@@ -194,7 +195,7 @@ func (v *blockValidator) checkMergeSizeLimit(stagingArea *model.StagingArea, has
}
func (v *blockValidator) checkIndirectParents(stagingArea *model.StagingArea, header externalapi.BlockHeader) error {
expectedParents, err := v.blockParentBuilder.BuildParents(stagingArea, header.DAAScore(), header.DirectParents())
expectedParents, err := v.blockParentBuilder.BuildParents(stagingArea, header.DirectParents())
if err != nil {
return err
}

View File

@@ -67,8 +67,8 @@ func TestValidateMedianTime(t *testing.T) {
blockTime := tip.Header.TimeInMilliseconds()
for i := 0; i < 10; i++ {
blockTime += 100
for i := 0; i < 100; i++ {
blockTime += 1000
_, tipHash = addBlock(blockTime, []*externalapi.DomainHash{tipHash}, nil)
}
@@ -163,17 +163,16 @@ func TestCheckParentsIncest(t *testing.T) {
func TestCheckMergeSizeLimit(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
consensusConfig.MergeSetSizeLimit = 5
consensusConfig.MergeSetSizeLimit = 2 * uint64(consensusConfig.K)
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckMergeSizeLimit")
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckParentsIncest")
if err != nil {
t.Fatalf("Error setting up consensus: %+v", err)
}
defer teardown(false)
chain1TipHash := consensusConfig.GenesisHash
// We add a chain larger by one than chain2 below, to make this one the selected chain
for i := uint64(0); i < consensusConfig.MergeSetSizeLimit+1; i++ {
for i := uint64(0); i < consensusConfig.MergeSetSizeLimit+2; i++ {
chain1TipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{chain1TipHash}, nil, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
@@ -181,9 +180,7 @@ func TestCheckMergeSizeLimit(t *testing.T) {
}
chain2TipHash := consensusConfig.GenesisHash
// We add a merge set of size exactly MergeSetSizeLimit (to violate the limit),
// since selected parent is also counted
for i := uint64(0); i < consensusConfig.MergeSetSizeLimit; i++ {
for i := uint64(0); i < consensusConfig.MergeSetSizeLimit+1; i++ {
chain2TipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{chain2TipHash}, nil, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
@@ -196,56 +193,3 @@ func TestCheckMergeSizeLimit(t *testing.T) {
}
})
}
func TestVirtualSelectionViolatingMergeSizeLimit(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
consensusConfig.MergeSetSizeLimit = 2 * uint64(consensusConfig.K)
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestVirtualSelectionViolatingMergeSizeLimit")
if err != nil {
t.Fatalf("Error setting up consensus: %+v", err)
}
defer teardown(false)
chain1TipHash := consensusConfig.GenesisHash
// We add a chain larger than chain2 below, to make this one the selected chain
for i := uint64(0); i < consensusConfig.MergeSetSizeLimit; i++ {
chain1TipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{chain1TipHash}, nil, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}
}
chain2TipHash := consensusConfig.GenesisHash
// We add a merge set of size exactly MergeSetSizeLimit-1 (to still not violate the limit)
for i := uint64(0); i < consensusConfig.MergeSetSizeLimit-1; i++ {
chain2TipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{chain2TipHash}, nil, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}
}
// We now add a single block over genesis which is expected to exceed the limit
_, _, err = tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}
stagingArea := model.NewStagingArea()
virtualSelectedParent, err := tc.GetVirtualSelectedParent()
if err != nil {
t.Fatalf("GetVirtualSelectedParent: %+v", err)
}
selectedParentAnticone, err := tc.DAGTraversalManager().AnticoneFromVirtualPOV(stagingArea, virtualSelectedParent)
if err != nil {
t.Fatalf("AnticoneFromVirtualPOV: %+v", err)
}
// Test if Virtual's mergeset is too large
// Note: the selected parent itself is also counted in the mergeset limit
if len(selectedParentAnticone)+1 > (int)(consensusConfig.MergeSetSizeLimit) {
t.Fatalf("Virtual's mergset size (%d) exeeds merge set limit (%d)",
len(selectedParentAnticone)+1, consensusConfig.MergeSetSizeLimit)
}
})
}

View File

@@ -1,9 +1,6 @@
package blockvalidator_test
import (
"github.com/kaspanet/kaspad/domain/consensus/model/testapi"
"reflect"
"runtime"
"testing"
"github.com/kaspanet/kaspad/domain/consensus"
@@ -16,74 +13,73 @@ import (
"github.com/pkg/errors"
)
func TestBlockValidator_ValidateHeaderInIsolation(t *testing.T) {
tests := []func(t *testing.T, tc testapi.TestConsensus, cfg *consensus.Config){
CheckParentsLimit,
CheckBlockVersion,
CheckBlockTimestampInIsolation,
}
func TestCheckParentsLimit(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
tc, teardown, err := consensus.NewFactory().NewTestConsensus(consensusConfig, "TestBlockValidator_ValidateHeaderInIsolation")
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckParentsLimit")
if err != nil {
t.Fatalf("Error setting up consensus: %+v", err)
}
defer teardown(false)
for _, test := range tests {
testName := runtime.FuncForPC(reflect.ValueOf(test).Pointer()).Name()
t.Run(testName, func(t *testing.T) {
test(t, tc, consensusConfig)
})
for i := externalapi.KType(0); i < consensusConfig.MaxBlockParents+1; i++ {
_, _, err = tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}
}
tips, err := tc.Tips()
if err != nil {
t.Fatalf("Tips: %+v", err)
}
_, _, err = tc.AddBlock(tips, nil, nil)
if !errors.Is(err, ruleerrors.ErrTooManyParents) {
t.Fatalf("Unexpected error: %+v", err)
}
})
}
func CheckParentsLimit(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) {
for i := externalapi.KType(0); i < consensusConfig.MaxBlockParents+1; i++ {
_, _, err := tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil)
func TestCheckBlockVersion(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckBlockVersion")
if err != nil {
t.Fatalf("AddBlock: %+v", err)
t.Fatalf("Error setting up consensus: %+v", err)
}
}
defer teardown(false)
tips, err := tc.Tips()
if err != nil {
t.Fatalf("Tips: %+v", err)
}
block, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil)
if err != nil {
t.Fatalf("BuildBlockWithParents: %+v", err)
}
_, _, err = tc.AddBlock(tips, nil, nil)
if !errors.Is(err, ruleerrors.ErrTooManyParents) {
t.Fatalf("Unexpected error: %+v", err)
}
block.Header = blockheader.NewImmutableBlockHeader(
constants.MaxBlockVersion+1,
block.Header.Parents(),
block.Header.HashMerkleRoot(),
block.Header.AcceptedIDMerkleRoot(),
block.Header.UTXOCommitment(),
block.Header.TimeInMilliseconds(),
block.Header.Bits(),
block.Header.Nonce(),
block.Header.DAAScore(),
block.Header.BlueScore(),
block.Header.BlueWork(),
block.Header.PruningPoint(),
)
_, err = tc.ValidateAndInsertBlock(block, true)
if !errors.Is(err, ruleerrors.ErrBlockVersionIsUnknown) {
t.Fatalf("Unexpected error: %+v", err)
}
})
}
func CheckBlockVersion(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) {
block, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil)
if err != nil {
t.Fatalf("BuildBlockWithParents: %+v", err)
}
block.Header = blockheader.NewImmutableBlockHeader(
constants.MaxBlockVersion+1,
block.Header.Parents(),
block.Header.HashMerkleRoot(),
block.Header.AcceptedIDMerkleRoot(),
block.Header.UTXOCommitment(),
block.Header.TimeInMilliseconds(),
block.Header.Bits(),
block.Header.Nonce(),
block.Header.DAAScore(),
block.Header.BlueScore(),
block.Header.BlueWork(),
block.Header.PruningPoint(),
)
_, err = tc.ValidateAndInsertBlock(block, true)
if !errors.Is(err, ruleerrors.ErrBlockVersionIsUnknown) {
t.Fatalf("Unexpected error: %+v", err)
}
}
func CheckBlockTimestampInIsolation(t *testing.T, tc testapi.TestConsensus, cfg *consensus.Config) {
func TestCheckBlockTimestampInIsolation(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()

View File

@@ -37,7 +37,6 @@ type blockValidator struct {
finalityManager model.FinalityManager
blockParentBuilder model.BlockParentBuilder
pruningManager model.PruningManager
parentsManager model.ParentsManager
blockStore model.BlockStore
ghostdagDataStores []model.GHOSTDAGDataStore
@@ -73,7 +72,6 @@ func New(powMax *big.Int,
finalityManager model.FinalityManager,
blockParentBuilder model.BlockParentBuilder,
pruningManager model.PruningManager,
parentsManager model.ParentsManager,
pruningStore model.PruningStore,
blockStore model.BlockStore,
@@ -110,7 +108,6 @@ func New(powMax *big.Int,
finalityManager: finalityManager,
blockParentBuilder: blockParentBuilder,
pruningManager: pruningManager,
parentsManager: parentsManager,
pruningStore: pruningStore,
blockStore: blockStore,

View File

@@ -8,6 +8,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/utils/virtual"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/difficulty"
"github.com/pkg/errors"
)
@@ -49,11 +50,9 @@ func (v *blockValidator) ValidatePruningPointViolationAndProofOfWorkAndDifficult
}
}
if !blockHash.Equal(v.genesisHash) {
err = v.checkProofOfWork(header)
if err != nil {
return err
}
err = v.checkProofOfWork(header)
if err != nil {
return err
}
err = v.validateDifficulty(stagingArea, blockHash, isBlockWithTrustedData)
@@ -69,9 +68,9 @@ func (v *blockValidator) setParents(stagingArea *model.StagingArea,
header externalapi.BlockHeader,
isBlockWithTrustedData bool) error {
for level := 0; level <= header.BlockLevel(); level++ {
for level := 0; level <= pow.BlockLevel(header); level++ {
var parents []*externalapi.DomainHash
for _, parent := range v.parentsManager.ParentsAtLevel(header, level) {
for _, parent := range header.ParentsAtLevel(level) {
_, err := v.ghostdagDataStores[level].Get(v.databaseContext, stagingArea, parent, false)
isNotFoundError := database.IsNotFoundError(err)
if !isNotFoundError && err != nil {
@@ -118,7 +117,7 @@ func (v *blockValidator) validateDifficulty(stagingArea *model.StagingArea,
return err
}
blockLevel := header.BlockLevel()
blockLevel := pow.BlockLevel(header)
for i := 1; i <= blockLevel; i++ {
err = v.ghostdagManagers[i].GHOSTDAG(stagingArea, blockHash)
if err != nil {
@@ -150,8 +149,7 @@ func (v *blockValidator) validateDifficulty(stagingArea *model.StagingArea,
// difficulty is not performed.
func (v *blockValidator) checkProofOfWork(header externalapi.BlockHeader) error {
// The target difficulty must be larger than zero.
state := pow.NewState(header.ToMutable())
target := &state.Target
target := difficulty.CompactToBig(header.Bits())
if target.Sign() <= 0 {
return errors.Wrapf(ruleerrors.ErrNegativeTarget, "block target difficulty of %064x is too low",
target)
@@ -165,7 +163,7 @@ func (v *blockValidator) checkProofOfWork(header externalapi.BlockHeader) error
// The block pow must be valid unless the flag to avoid proof of work checks is set.
if !v.skipPoW {
valid := state.CheckProofOfWork()
valid := pow.CheckProofOfWorkWithTarget(header.ToMutable(), target)
if !valid {
return errors.Wrap(ruleerrors.ErrInvalidPoW, "block has invalid proof of work")
}

View File

@@ -101,26 +101,23 @@ func TestPOW(t *testing.T) {
t.Fatal(err)
}
random := rand.New(rand.NewSource(0))
// Difficulty is too high on mainnet to actually mine.
if consensusConfig.Name != "kaspa-mainnet" {
mining.SolveBlock(validBlock, random)
_, err = tc.ValidateAndInsertBlock(validBlock, true)
if err != nil {
t.Fatal(err)
}
mining.SolveBlock(validBlock, random)
_, err = tc.ValidateAndInsertBlock(validBlock, true)
if err != nil {
t.Fatal(err)
}
})
}
// solveBlockWithWrongPOW increments the given block's nonce until it gets wrong POW (for test!).
func solveBlockWithWrongPOW(block *externalapi.DomainBlock) *externalapi.DomainBlock {
header := block.Header.ToMutable()
state := pow.NewState(header)
for i := uint64(0); i < math.MaxUint64; i++ {
state.Nonce = i
if !state.CheckProofOfWork() {
header.SetNonce(state.Nonce)
block.Header = header.ToImmutable()
targetDifficulty := difficulty.CompactToBig(block.Header.Bits())
headerForMining := block.Header.ToMutable()
initialNonce := uint64(0)
for i := initialNonce; i < math.MaxUint64; i++ {
headerForMining.SetNonce(i)
if !pow.CheckProofOfWorkWithTarget(headerForMining, targetDifficulty) {
block.Header = headerForMining.ToImmutable()
return block
}
}
@@ -299,7 +296,7 @@ func TestCheckPruningPointViolation(t *testing.T) {
func TestValidateDifficulty(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()
mocDifficulty := &mocDifficultyManager{genesisDaaScore: consensusConfig.GenesisBlock.Header.DAAScore()}
mocDifficulty := &mocDifficultyManager{}
factory.SetTestDifficultyManager(func(_ model.DBReader, _ model.GHOSTDAGManager, _ model.GHOSTDAGDataStore,
_ model.BlockHeaderStore, daaBlocksStore model.DAABlocksStore, _ model.DAGTopologyManager,
_ model.DAGTraversalManager, _ *big.Int, _ int, _ bool, _ time.Duration,
@@ -345,7 +342,6 @@ type mocDifficultyManager struct {
testDifficulty uint32
testGenesisBits uint32
daaBlocksStore model.DAABlocksStore
genesisDaaScore uint64
}
// RequiredDifficulty returns the difficulty required for the test
@@ -356,7 +352,7 @@ func (dm *mocDifficultyManager) RequiredDifficulty(*model.StagingArea, *external
// StageDAADataAndReturnRequiredDifficulty returns the difficulty required for the test
func (dm *mocDifficultyManager) StageDAADataAndReturnRequiredDifficulty(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, isBlockWithTrustedData bool) (uint32, error) {
// Populate daaBlocksStore with fake values
dm.daaBlocksStore.StageDAAScore(stagingArea, blockHash, dm.genesisDaaScore)
dm.daaBlocksStore.StageDAAScore(stagingArea, blockHash, 0)
dm.daaBlocksStore.StageBlockDAAAddedBlocks(stagingArea, blockHash, nil)
return dm.testDifficulty, nil

View File

@@ -86,5 +86,36 @@ func TestBlockRewardSwitch(t *testing.T) {
t.Fatalf("Subsidy has unexpected value. Want: %d, got: %d", consensusConfig.MinSubsidy, subsidy)
}
}
// Add another block. We expect it to be another pruning point
lastPruningPointHash, _, err := tc.AddBlock([]*externalapi.DomainHash{tipHash}, nil, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}
// Make sure that another pruning point had been added
pruningPointHeaders, err = tc.PruningPointHeaders()
if err != nil {
t.Fatalf("PruningPointHeaders: %+v", pruningPointHeaders)
}
expectedPruningPointHeaderAmount = expectedPruningPointHeaderAmount + 1
if uint64(len(pruningPointHeaders)) != expectedPruningPointHeaderAmount {
t.Fatalf("Unexpected amount of pruning point headers. "+
"Want: %d, got: %d", expectedPruningPointHeaderAmount, len(pruningPointHeaders))
}
// Make sure that the last pruning point has a post-switch subsidy
lastPruningPoint, err := tc.GetBlock(lastPruningPointHash)
if err != nil {
t.Fatalf("GetBlock: %+v", err)
}
lastPruningPointCoinbase := lastPruningPoint.Transactions[transactionhelper.CoinbaseTransactionIndex]
_, _, subsidy, err := tc.CoinbaseManager().ExtractCoinbaseDataBlueScoreAndSubsidy(lastPruningPointCoinbase)
if err != nil {
t.Fatalf("ExtractCoinbaseDataBlueScoreAndSubsidy: %+v", err)
}
if subsidy != consensusConfig.SubsidyGenesisReward {
t.Fatalf("Subsidy has unexpected value. Want: %d, got: %d", consensusConfig.SubsidyGenesisReward, subsidy)
}
})
}

View File

@@ -2,8 +2,6 @@ package coinbasemanager
import (
"encoding/binary"
"math/big"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
@@ -12,6 +10,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/utils/transactionhelper"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/pkg/errors"
"math/big"
)
type coinbaseManager struct {
@@ -191,7 +190,51 @@ func (c *coinbaseManager) CalcBlockSubsidy(stagingArea *model.StagingArea,
return c.subsidyGenesisReward, nil
}
return c.maxSubsidy, nil
isBlockRewardFixed, err := c.isBlockRewardFixed(stagingArea, blockPruningPoint)
if err != nil {
return 0, err
}
if isBlockRewardFixed {
return c.subsidyGenesisReward, nil
}
averagePastSubsidy, err := c.calculateAveragePastSubsidy(stagingArea, blockHash)
if err != nil {
return 0, err
}
mergeSetSubsidySum, err := c.calculateMergeSetSubsidySum(stagingArea, blockHash)
if err != nil {
return 0, err
}
subsidyRandomVariable, err := c.calculateSubsidyRandomVariable(stagingArea, blockHash)
if err != nil {
return 0, err
}
pastSubsidy := new(big.Rat).Mul(averagePastSubsidy, c.subsidyPastRewardMultiplier)
mergeSetSubsidy := new(big.Rat).Mul(mergeSetSubsidySum, c.subsidyMergeSetRewardMultiplier)
// In order to avoid unsupported negative exponents in powInt64, flip
// the numerator and the denominator manually
subsidyRandom := new(big.Rat)
if subsidyRandomVariable >= 0 {
subsidyRandom = subsidyRandom.SetInt64(1 << subsidyRandomVariable)
} else {
subsidyRandom = subsidyRandom.SetFrac64(1, 1<<(-subsidyRandomVariable))
}
blockSubsidyBigRat := new(big.Rat).Add(mergeSetSubsidy, new(big.Rat).Mul(pastSubsidy, subsidyRandom))
blockSubsidyBigInt := new(big.Int).Div(blockSubsidyBigRat.Num(), blockSubsidyBigRat.Denom())
blockSubsidyUint64 := blockSubsidyBigInt.Uint64()
clampedBlockSubsidy := blockSubsidyUint64
if clampedBlockSubsidy < c.minSubsidy {
clampedBlockSubsidy = c.minSubsidy
} else if clampedBlockSubsidy > c.maxSubsidy {
clampedBlockSubsidy = c.maxSubsidy
}
return clampedBlockSubsidy, nil
}
func (c *coinbaseManager) calculateAveragePastSubsidy(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*big.Rat, error) {

View File

@@ -75,7 +75,7 @@ func TestVirtualDiff(t *testing.T) {
blockB.Transactions[0].Outputs[0].Value,
blockB.Transactions[0].Outputs[0].ScriptPublicKey,
true,
consensusConfig.GenesisBlock.Header.DAAScore()+2, //Expected virtual DAA score
2, //Expected virtual DAA score
)) {
t.Fatalf("Unexpected entry %s", entry)
}

View File

@@ -2,6 +2,7 @@ package consensusstatemanager
import (
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/multiset"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/pkg/errors"
@@ -22,16 +23,8 @@ func (csm *consensusStateManager) CalculatePastUTXOAndAcceptanceData(stagingArea
if blockHash.Equal(csm.genesisHash) {
log.Debugf("Block %s is the genesis. By definition, "+
"it has a predefined UTXO diff, empty acceptance data, and a predefined multiset", blockHash)
multiset, err := csm.multisetStore.Get(csm.databaseContext, stagingArea, blockHash)
if err != nil {
return nil, nil, nil, err
}
utxoDiff, err := csm.utxoDiffStore.UTXODiff(csm.databaseContext, stagingArea, blockHash)
if err != nil {
return nil, nil, nil, err
}
return utxoDiff, externalapi.AcceptanceData{}, multiset, nil
"it has an empty UTXO diff, empty acceptance data, and a blank multiset", blockHash)
return utxo.NewUTXODiff(), externalapi.AcceptanceData{}, multiset.New(), nil
}
blockGHOSTDAGData, err := csm.ghostdagDataStore.Get(csm.databaseContext, stagingArea, blockHash, false)

View File

@@ -4,6 +4,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/multiset"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
)
@@ -18,8 +19,8 @@ func (csm *consensusStateManager) calculateMultiset(stagingArea *model.StagingAr
if blockHash.Equal(csm.genesisHash) {
log.Debugf("Selected parent is nil, which could only happen for the genesis. " +
"The genesis has a predefined multiset")
return csm.multisetStore.Get(csm.databaseContext, stagingArea, blockHash)
"The genesis, by definition, has an empty multiset")
return multiset.New(), nil
}
ms, err := csm.multisetStore.Get(csm.databaseContext, stagingArea, blockGHOSTDAGData.SelectedParent())

View File

@@ -59,8 +59,7 @@ func (csm *consensusStateManager) pickVirtualParents(stagingArea *model.StagingA
selectedVirtualParents := []*externalapi.DomainHash{virtualSelectedParent}
mergeSetSize := uint64(1) // starts counting from 1 because selectedParent is already in the mergeSet
// First condition implies that no point in searching since limit was already reached
for mergeSetSize < csm.mergeSetSizeLimit && len(candidates) > 0 && uint64(len(selectedVirtualParents)) < uint64(csm.maxBlockParents) {
for len(candidates) > 0 && uint64(len(selectedVirtualParents)) < uint64(csm.maxBlockParents) {
candidate := candidates[0]
candidates = candidates[1:]

View File

@@ -35,7 +35,7 @@ func (csm *consensusStateManager) ResolveVirtual(maxBlocksToResolve uint64) (boo
var selectedTip *externalapi.DomainHash
isCompletelyResolved := true
for _, tip := range tips {
log.Debugf("Resolving tip %s", tip)
log.Infof("Resolving tip %s", tip)
resolveStagingArea := model.NewStagingArea()
unverifiedBlocks, err := csm.getUnverifiedChainBlocks(resolveStagingArea, tip)
if err != nil {
@@ -46,7 +46,7 @@ func (csm *consensusStateManager) ResolveVirtual(maxBlocksToResolve uint64) (boo
hasMoreUnverifiedThanMax := maxBlocksToResolve != 0 && uint64(len(unverifiedBlocks)) > maxBlocksToResolve
if hasMoreUnverifiedThanMax {
resolveTip = unverifiedBlocks[uint64(len(unverifiedBlocks))-maxBlocksToResolve]
log.Debugf("Has more than %d blocks to resolve. Changing the resolve tip to %s", maxBlocksToResolve, resolveTip)
log.Infof("Has more than %d blocks to resolve. Changing the resolve tip to %s", maxBlocksToResolve, resolveTip)
}
blockStatus, reversalData, err := csm.resolveBlockStatus(resolveStagingArea, resolveTip, true)

View File

@@ -3,6 +3,8 @@ package consensusstatemanager
import (
"fmt"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
"github.com/kaspanet/kaspad/util/staging"
"github.com/kaspanet/kaspad/domain/consensus/model"
@@ -127,11 +129,7 @@ func (csm *consensusStateManager) selectedParentInfo(
if lastUnverifiedBlock.Equal(csm.genesisHash) {
log.Debugf("the most recent unverified block is the genesis block, "+
"which by definition has status: %s", externalapi.StatusUTXOValid)
utxoDiff, err := csm.utxoDiffStore.UTXODiff(csm.databaseContext, stagingArea, lastUnverifiedBlock)
if err != nil {
return nil, 0, nil, err
}
return lastUnverifiedBlock, externalapi.StatusUTXOValid, utxoDiff, nil
return lastUnverifiedBlock, externalapi.StatusUTXOValid, utxo.NewUTXODiff(), nil
}
lastUnverifiedBlockGHOSTDAGData, err := csm.ghostdagDataStore.Get(csm.databaseContext, stagingArea, lastUnverifiedBlock, false)
if err != nil {

View File

@@ -285,7 +285,7 @@ func TestTransactionAcceptance(t *testing.T) {
if err != nil {
t.Fatalf("Error getting blockF: %+v", err)
}
updatedDAAScoreVirtualBlock := consensusConfig.GenesisBlock.Header.DAAScore() + 26
updatedDAAScoreVirtualBlock := 26
//We expect the second transaction in the "blue block" (blueChildOfRedBlock) to be accepted because the merge set is ordered topologically
//and the red block is ordered topologically before the "blue block" so the input is known in the UTXOSet.
expectedAcceptanceData := externalapi.AcceptanceData{

View File

@@ -22,6 +22,78 @@ func TestBlockWindow(t *testing.T) {
expectedWindow []string
}{
dagconfig.MainnetParams.Name: {
{
parents: []string{"A"},
id: "B",
expectedWindow: []string{},
},
{
parents: []string{"B"},
id: "C",
expectedWindow: []string{"B"},
},
{
parents: []string{"B"},
id: "D",
expectedWindow: []string{"B"},
},
{
parents: []string{"C", "D"},
id: "E",
expectedWindow: []string{"D", "C", "B"},
},
{
parents: []string{"C", "D"},
id: "F",
expectedWindow: []string{"D", "C", "B"},
},
{
parents: []string{"A"},
id: "G",
expectedWindow: []string{},
},
{
parents: []string{"G"},
id: "H",
expectedWindow: []string{"G"},
},
{
parents: []string{"H", "F"},
id: "I",
expectedWindow: []string{"F", "D", "C", "H", "B", "G"},
},
{
parents: []string{"I"},
id: "J",
expectedWindow: []string{"I", "F", "D", "C", "H", "B", "G"},
},
{
parents: []string{"J"},
id: "K",
expectedWindow: []string{"J", "I", "F", "D", "C", "H", "B", "G"},
},
{
parents: []string{"K"},
id: "L",
expectedWindow: []string{"K", "J", "I", "F", "D", "C", "H", "B", "G"},
},
{
parents: []string{"L"},
id: "M",
expectedWindow: []string{"L", "K", "J", "I", "F", "D", "C", "H", "B", "G"},
},
{
parents: []string{"M"},
id: "N",
expectedWindow: []string{"M", "L", "K", "J", "I", "F", "D", "C", "H", "B"},
},
{
parents: []string{"N"},
id: "O",
expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "D", "C", "H"},
},
},
dagconfig.TestnetParams.Name: {
{
parents: []string{"A"},
id: "B",
@@ -67,7 +139,6 @@ func TestBlockWindow(t *testing.T) {
id: "J",
expectedWindow: []string{"I", "F", "D", "H", "C", "G", "B"},
},
//
{
parents: []string{"J"},
id: "K",
@@ -94,78 +165,6 @@ func TestBlockWindow(t *testing.T) {
expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "D", "H", "C"},
},
},
dagconfig.TestnetParams.Name: {
{
parents: []string{"A"},
id: "B",
expectedWindow: []string{},
},
{
parents: []string{"B"},
id: "C",
expectedWindow: []string{"B"},
},
{
parents: []string{"B"},
id: "D",
expectedWindow: []string{"B"},
},
{
parents: []string{"C", "D"},
id: "E",
expectedWindow: []string{"C", "D", "B"},
},
{
parents: []string{"C", "D"},
id: "F",
expectedWindow: []string{"C", "D", "B"},
},
{
parents: []string{"A"},
id: "G",
expectedWindow: []string{},
},
{
parents: []string{"G"},
id: "H",
expectedWindow: []string{"G"},
},
{
parents: []string{"H", "F"},
id: "I",
expectedWindow: []string{"F", "C", "D", "H", "B", "G"},
},
{
parents: []string{"I"},
id: "J",
expectedWindow: []string{"I", "F", "C", "D", "H", "B", "G"},
},
{
parents: []string{"J"},
id: "K",
expectedWindow: []string{"J", "I", "F", "C", "D", "H", "B", "G"},
},
{
parents: []string{"K"},
id: "L",
expectedWindow: []string{"K", "J", "I", "F", "C", "D", "H", "B", "G"},
},
{
parents: []string{"L"},
id: "M",
expectedWindow: []string{"L", "K", "J", "I", "F", "C", "D", "H", "B", "G"},
},
{
parents: []string{"M"},
id: "N",
expectedWindow: []string{"M", "L", "K", "J", "I", "F", "C", "D", "H", "B"},
},
{
parents: []string{"N"},
id: "O",
expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "C", "D", "H"},
},
},
dagconfig.DevnetParams.Name: {
{
parents: []string{"A"},
@@ -183,12 +182,12 @@ func TestBlockWindow(t *testing.T) {
expectedWindow: []string{"B"},
},
{
parents: []string{"C", "D"},
parents: []string{"D", "C"},
id: "E",
expectedWindow: []string{"D", "C", "B"},
},
{
parents: []string{"C", "D"},
parents: []string{"D", "C"},
id: "F",
expectedWindow: []string{"D", "C", "B"},
},
@@ -205,37 +204,37 @@ func TestBlockWindow(t *testing.T) {
{
parents: []string{"H", "F"},
id: "I",
expectedWindow: []string{"F", "D", "H", "C", "B", "G"},
expectedWindow: []string{"F", "D", "C", "H", "G", "B"},
},
{
parents: []string{"I"},
id: "J",
expectedWindow: []string{"I", "F", "D", "H", "C", "B", "G"},
expectedWindow: []string{"I", "F", "D", "C", "H", "G", "B"},
},
{
parents: []string{"J"},
id: "K",
expectedWindow: []string{"J", "I", "F", "D", "H", "C", "B", "G"},
expectedWindow: []string{"J", "I", "F", "D", "C", "H", "G", "B"},
},
{
parents: []string{"K"},
id: "L",
expectedWindow: []string{"K", "J", "I", "F", "D", "H", "C", "B", "G"},
expectedWindow: []string{"K", "J", "I", "F", "D", "C", "H", "G", "B"},
},
{
parents: []string{"L"},
id: "M",
expectedWindow: []string{"L", "K", "J", "I", "F", "D", "H", "C", "B", "G"},
expectedWindow: []string{"L", "K", "J", "I", "F", "D", "C", "H", "G", "B"},
},
{
parents: []string{"M"},
id: "N",
expectedWindow: []string{"M", "L", "K", "J", "I", "F", "D", "H", "C", "B"},
expectedWindow: []string{"M", "L", "K", "J", "I", "F", "D", "C", "H", "G"},
},
{
parents: []string{"N"},
id: "O",
expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "D", "H", "C"},
expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "D", "C", "H"},
},
},
dagconfig.SimnetParams.Name: {
@@ -257,12 +256,12 @@ func TestBlockWindow(t *testing.T) {
{
parents: []string{"D", "C"},
id: "E",
expectedWindow: []string{"D", "C", "B"},
expectedWindow: []string{"C", "D", "B"},
},
{
parents: []string{"D", "C"},
id: "F",
expectedWindow: []string{"D", "C", "B"},
expectedWindow: []string{"C", "D", "B"},
},
{
parents: []string{"A"},
@@ -277,37 +276,37 @@ func TestBlockWindow(t *testing.T) {
{
parents: []string{"H", "F"},
id: "I",
expectedWindow: []string{"F", "H", "D", "C", "B", "G"},
expectedWindow: []string{"F", "C", "D", "H", "G", "B"},
},
{
parents: []string{"I"},
id: "J",
expectedWindow: []string{"I", "F", "H", "D", "C", "B", "G"},
expectedWindow: []string{"I", "F", "C", "D", "H", "G", "B"},
},
{
parents: []string{"J"},
id: "K",
expectedWindow: []string{"J", "I", "F", "H", "D", "C", "B", "G"},
expectedWindow: []string{"J", "I", "F", "C", "D", "H", "G", "B"},
},
{
parents: []string{"K"},
id: "L",
expectedWindow: []string{"K", "J", "I", "F", "H", "D", "C", "B", "G"},
expectedWindow: []string{"K", "J", "I", "F", "C", "D", "H", "G", "B"},
},
{
parents: []string{"L"},
id: "M",
expectedWindow: []string{"L", "K", "J", "I", "F", "H", "D", "C", "B", "G"},
expectedWindow: []string{"L", "K", "J", "I", "F", "C", "D", "H", "G", "B"},
},
{
parents: []string{"M"},
id: "N",
expectedWindow: []string{"M", "L", "K", "J", "I", "F", "H", "D", "C", "B"},
expectedWindow: []string{"M", "L", "K", "J", "I", "F", "C", "D", "H", "G"},
},
{
parents: []string{"N"},
id: "O",
expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "H", "D", "C"},
expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "C", "D", "H"},
},
},
}

View File

@@ -105,14 +105,10 @@ func (dm *difficultyManager) requiredDifficultyFromTargetsWindow(targetsWindow b
return dm.genesisBits, nil
}
// in the past this was < 2 as the comment explains, we changed it to under the window size to
// make the hashrate(which is ~1.5GH/s) constant in the first 2641 blocks so that we won't have a lot of tips
// We need at least 2 blocks to get a timestamp interval
// We could instead clamp the timestamp difference to `targetTimePerBlock`,
// but then everything will cancel out and we'll get the target from the last block, which will be the same as genesis.
// We add 64 as a safety margin
if len(targetsWindow) < 2 || len(targetsWindow) < dm.difficultyAdjustmentWindowSize {
if len(targetsWindow) < 2 {
return dm.genesisBits, nil
}
windowMinTimestamp, windowMaxTimeStamp, windowsMinIndex, _ := targetsWindow.minMaxTimestamps()
@@ -161,11 +157,7 @@ func (dm *difficultyManager) calculateDaaScoreAndAddedBlocks(stagingArea *model.
isBlockWithTrustedData bool) (uint64, []*externalapi.DomainHash, error) {
if blockHash.Equal(dm.genesisHash) {
genesisHeader, err := dm.headerStore.BlockHeader(dm.databaseContext, stagingArea, dm.genesisHash)
if err != nil {
return 0, nil, err
}
return genesisHeader.DAAScore(), nil, nil
return 0, nil, nil
}
ghostdagData, err := dm.ghostdagStore.Get(dm.databaseContext, stagingArea, blockHash, false)

View File

@@ -31,7 +31,7 @@ func TestDifficulty(t *testing.T) {
}
consensusConfig.K = 1
consensusConfig.DifficultyAdjustmentWindowSize = 140
consensusConfig.DifficultyAdjustmentWindowSize = 265
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestDifficulty")
@@ -108,7 +108,7 @@ func TestDifficulty(t *testing.T) {
"window size, the difficulty should be the same as genesis'")
}
}
for i := 0; i < consensusConfig.DifficultyAdjustmentWindowSize+10; i++ {
for i := 0; i < consensusConfig.DifficultyAdjustmentWindowSize+100; i++ {
tip, tipHash = addBlock(0, tipHash)
if tip.Header.Bits() != consensusConfig.GenesisBlock.Header.Bits() {
t.Fatalf("As long as the block rate remains the same, the difficulty shouldn't change")
@@ -130,9 +130,9 @@ func TestDifficulty(t *testing.T) {
var expectedBits uint32
switch consensusConfig.Name {
case dagconfig.TestnetParams.Name, dagconfig.DevnetParams.Name:
expectedBits = uint32(0x1e7f1441)
expectedBits = uint32(0x1e7f83df)
case dagconfig.MainnetParams.Name:
expectedBits = uint32(0x1d02c50f)
expectedBits = uint32(0x207f83df)
}
if tip.Header.Bits() != expectedBits {
@@ -231,7 +231,7 @@ func TestDifficulty(t *testing.T) {
func TestDAAScore(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
consensusConfig.DifficultyAdjustmentWindowSize = 86
consensusConfig.DifficultyAdjustmentWindowSize = 265
stagingArea := model.NewStagingArea()
@@ -263,9 +263,9 @@ func TestDAAScore(t *testing.T) {
t.Fatalf("DAAScore: %+v", err)
}
blockBlueScore3ExpectedDAAScore := uint64(2) + consensusConfig.GenesisBlock.Header.DAAScore()
blockBlueScore3ExpectedDAAScore := uint64(2)
if blockBlueScore3DAAScore != blockBlueScore3ExpectedDAAScore {
t.Fatalf("DAA score is expected to be %d but got %d", blockBlueScore3ExpectedDAAScore, blockBlueScore3DAAScore)
t.Fatalf("DAA score is expected to be %d but got %d", blockBlueScore3ExpectedDAAScore, blockBlueScore3ExpectedDAAScore)
}
tipDAAScore := blockBlueScore3ExpectedDAAScore

View File

@@ -1,41 +0,0 @@
package parentssanager
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
)
type parentsManager struct {
genesisHash *externalapi.DomainHash
}
// New instantiates a new ParentsManager
func New(genesisHash *externalapi.DomainHash) model.ParentsManager {
return &parentsManager{
genesisHash: genesisHash,
}
}
func (pm *parentsManager) ParentsAtLevel(blockHeader externalapi.BlockHeader, level int) externalapi.BlockLevelParents {
var parentsAtLevel externalapi.BlockLevelParents
if len(blockHeader.Parents()) > level {
parentsAtLevel = blockHeader.Parents()[level]
}
if len(parentsAtLevel) == 0 && len(blockHeader.DirectParents()) > 0 {
return externalapi.BlockLevelParents{pm.genesisHash}
}
return parentsAtLevel
}
func (pm *parentsManager) Parents(blockHeader externalapi.BlockHeader) []externalapi.BlockLevelParents {
numParents := constants.MaxBlockLevel + 1
parents := make([]externalapi.BlockLevelParents, numParents)
for i := 0; i < numParents; i++ {
parents[i] = pm.ParentsAtLevel(blockHeader, i)
}
return parents
}

View File

@@ -2,6 +2,7 @@ package pruningmanager_test
import (
"encoding/json"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"os"
"path/filepath"
@@ -38,14 +39,12 @@ func TestPruning(t *testing.T) {
"dag-for-test-pruning.json": {
dagconfig.MainnetParams.Name: "503",
dagconfig.TestnetParams.Name: "502",
dagconfig.DevnetParams.Name: "502",
dagconfig.SimnetParams.Name: "502",
dagconfig.DevnetParams.Name: "503",
dagconfig.SimnetParams.Name: "503",
},
}
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
// Improve the performance of the test a little
consensusConfig.DisableDifficultyAdjustment = true
err := filepath.Walk("./testdata", func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
@@ -73,7 +72,6 @@ func TestPruning(t *testing.T) {
consensusConfig.DifficultyAdjustmentWindowSize = 400
factory := consensus.NewFactory()
factory.SetTestLevelDBCacheSize(128)
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestPruning")
if err != nil {
t.Fatalf("Error setting up consensus: %+v", err)
@@ -142,11 +140,12 @@ func TestPruning(t *testing.T) {
// We expect blocks that are within the difficulty adjustment window size of
// the pruning point and its anticone to not get pruned
unprunedBlockHashesBelowPruningPoint := make(map[externalapi.DomainHash]struct{})
pruningPointAndItsAnticone, err := tc.PruningPointAndItsAnticone()
pruningPointAndItsAnticone, err := tc.PruningPointAndItsAnticoneWithTrustedData()
if err != nil {
t.Fatalf("pruningPointAndItsAnticone: %+v", err)
}
for _, blockHash := range pruningPointAndItsAnticone {
for _, block := range pruningPointAndItsAnticone {
blockHash := consensushashing.BlockHash(block.Block)
unprunedBlockHashesBelowPruningPoint[*blockHash] = struct{}{}
blockWindow, err := tc.DAGTraversalManager().BlockWindow(stagingArea, blockHash, consensusConfig.DifficultyAdjustmentWindowSize)
if err != nil {

View File

@@ -675,19 +675,7 @@ func (pm *pruningManager) calculateDiffBetweenPreviousAndCurrentPruningPoints(st
onEnd := logger.LogAndMeasureExecutionTime(log, "pruningManager.calculateDiffBetweenPreviousAndCurrentPruningPoints")
defer onEnd()
if currentPruningHash.Equal(pm.genesisHash) {
iter, err := pm.consensusStateManager.RestorePastUTXOSetIterator(stagingArea, currentPruningHash)
if err != nil {
return nil, err
}
set := make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry)
for ok := iter.First(); ok; ok = iter.Next() {
outpoint, entry, err := iter.Get()
if err != nil {
return nil, err
}
set[*outpoint] = entry
}
return utxo.NewUTXODiffFromCollections(utxo.NewUTXOCollection(set), utxo.NewUTXOCollection(make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry)))
return utxo.NewUTXODiff(), nil
}
pruningPointIndex, err := pm.pruningStore.CurrentPruningPointIndex(pm.databaseContext, stagingArea)
@@ -919,8 +907,8 @@ func (pm *pruningManager) PruneAllBlocksBelow(stagingArea *model.StagingArea, pr
return nil
}
func (pm *pruningManager) PruningPointAndItsAnticone() ([]*externalapi.DomainHash, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "PruningPointAndItsAnticone")
func (pm *pruningManager) PruningPointAndItsAnticoneWithTrustedData() ([]*externalapi.BlockWithTrustedData, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "PruningPointAndItsAnticoneWithTrustedData")
defer onEnd()
stagingArea := model.NewStagingArea()
@@ -934,32 +922,34 @@ func (pm *pruningManager) PruningPointAndItsAnticone() ([]*externalapi.DomainHas
return nil, err
}
// Sorting the blocks in topological order
var sortErr error
sort.Slice(pruningPointAnticone, func(i, j int) bool {
headerI, err := pm.blockHeaderStore.BlockHeader(pm.databaseContext, stagingArea, pruningPointAnticone[i])
if err != nil {
sortErr = err
return false
}
blocks := make([]*externalapi.BlockWithTrustedData, 0, len(pruningPointAnticone)+1)
headerJ, err := pm.blockHeaderStore.BlockHeader(pm.databaseContext, stagingArea, pruningPointAnticone[j])
if err != nil {
sortErr = err
return false
}
return headerI.BlueWork().Cmp(headerJ.BlueWork()) < 0
})
if sortErr != nil {
return nil, sortErr
pruningPointWithTrustedData, err := pm.blockWithTrustedData(stagingArea, pruningPoint)
if err != nil {
return nil, err
}
for _, blockHash := range pruningPointAnticone {
blockWithTrustedData, err := pm.blockWithTrustedData(stagingArea, blockHash)
if err != nil {
return nil, err
}
blocks = append(blocks, blockWithTrustedData)
}
// Sorting the blocks in topological order
sort.Slice(blocks, func(i, j int) bool {
return blocks[i].Block.Header.BlueWork().Cmp(blocks[j].Block.Header.BlueWork()) < 0
})
// The pruning point should always come first
return append([]*externalapi.DomainHash{pruningPoint}, pruningPointAnticone...), nil
blocks = append([]*externalapi.BlockWithTrustedData{pruningPointWithTrustedData}, blocks...)
return blocks, nil
}
func (pm *pruningManager) BlockWithTrustedData(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*externalapi.BlockWithTrustedData, error) {
func (pm *pruningManager) blockWithTrustedData(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*externalapi.BlockWithTrustedData, error) {
block, err := pm.blocksStore.Block(pm.databaseContext, stagingArea, blockHash)
if err != nil {
return nil, err

View File

@@ -1,5 +0,0 @@
package pruningproofmanager
import "github.com/kaspanet/kaspad/infrastructure/logger"
var log = logger.RegisterSubSystem("PPMN")

View File

@@ -15,8 +15,8 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/pkg/errors"
"math/big"
)
@@ -28,7 +28,6 @@ type pruningProofManager struct {
ghostdagManagers []model.GHOSTDAGManager
reachabilityManagers []model.ReachabilityManager
dagTraversalManagers []model.DAGTraversalManager
parentsManager model.ParentsManager
ghostdagDataStores []model.GHOSTDAGDataStore
pruningStore model.PruningStore
@@ -40,9 +39,6 @@ type pruningProofManager struct {
genesisHash *externalapi.DomainHash
k externalapi.KType
pruningProofM uint64
cachedPruningPoint *externalapi.DomainHash
cachedProof *externalapi.PruningPointProof
}
// New instantiates a new PruningManager
@@ -53,7 +49,6 @@ func New(
ghostdagManagers []model.GHOSTDAGManager,
reachabilityManagers []model.ReachabilityManager,
dagTraversalManagers []model.DAGTraversalManager,
parentsManager model.ParentsManager,
ghostdagDataStores []model.GHOSTDAGDataStore,
pruningStore model.PruningStore,
@@ -73,7 +68,6 @@ func New(
ghostdagManagers: ghostdagManagers,
reachabilityManagers: reachabilityManagers,
dagTraversalManagers: dagTraversalManagers,
parentsManager: parentsManager,
ghostdagDataStores: ghostdagDataStores,
pruningStore: pruningStore,
@@ -89,33 +83,6 @@ func New(
}
func (ppm *pruningProofManager) BuildPruningPointProof(stagingArea *model.StagingArea) (*externalapi.PruningPointProof, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "BuildPruningPointProof")
defer onEnd()
pruningPoint, err := ppm.pruningStore.PruningPoint(ppm.databaseContext, stagingArea)
if err != nil {
return nil, err
}
if ppm.cachedPruningPoint != nil && ppm.cachedPruningPoint.Equal(pruningPoint) {
return ppm.cachedProof, nil
}
proof, err := ppm.buildPruningPointProof(stagingArea)
if err != nil {
return nil, err
}
ppm.cachedProof = proof
ppm.cachedPruningPoint = pruningPoint
return proof, nil
}
func (ppm *pruningProofManager) buildPruningPointProof(stagingArea *model.StagingArea) (*externalapi.PruningPointProof, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "buildPruningPointProof")
defer onEnd()
pruningPoint, err := ppm.pruningStore.PruningPoint(ppm.databaseContext, stagingArea)
if err != nil {
return nil, err
@@ -130,33 +97,17 @@ func (ppm *pruningProofManager) buildPruningPointProof(stagingArea *model.Stagin
return nil, err
}
maxLevel := len(ppm.parentsManager.Parents(pruningPointHeader)) - 1
maxLevel := len(pruningPointHeader.Parents()) - 1
headersByLevel := make(map[int][]externalapi.BlockHeader)
selectedTipByLevel := make([]*externalapi.DomainHash, maxLevel+1)
pruningPointLevel := pruningPointHeader.BlockLevel()
pruningPointLevel := pow.BlockLevel(pruningPointHeader)
for blockLevel := maxLevel; blockLevel >= 0; blockLevel-- {
var selectedTip *externalapi.DomainHash
if blockLevel <= pruningPointLevel {
selectedTip = pruningPoint
} else {
blockLevelParents := ppm.parentsManager.ParentsAtLevel(pruningPointHeader, blockLevel)
selectedTipCandidates := make([]*externalapi.DomainHash, 0, len(blockLevelParents))
// In a pruned node, some pruning point parents might be missing, but we're guaranteed that its
// selected parent is not missing.
for _, parent := range blockLevelParents {
_, err := ppm.ghostdagDataStores[blockLevel].Get(ppm.databaseContext, stagingArea, parent, false)
if database.IsNotFoundError(err) {
continue
}
if err != nil {
return nil, err
}
selectedTipCandidates = append(selectedTipCandidates, parent)
}
selectedTip, err = ppm.ghostdagManagers[blockLevel].ChooseSelectedParent(stagingArea, selectedTipCandidates...)
blockLevelParents := pruningPointHeader.ParentsAtLevel(blockLevel)
selectedTip, err = ppm.ghostdagManagers[blockLevel].ChooseSelectedParent(stagingArea, []*externalapi.DomainHash(blockLevelParents)...)
if err != nil {
return nil, err
}
@@ -297,9 +248,6 @@ func (ppm *pruningProofManager) blockAtDepth(stagingArea *model.StagingArea, gho
}
func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *externalapi.PruningPointProof) error {
onEnd := logger.LogAndMeasureExecutionTime(log, "ValidatePruningPointProof")
defer onEnd()
stagingArea := model.NewStagingArea()
if len(pruningPointProof.Headers) == 0 {
@@ -309,8 +257,8 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
level0Headers := pruningPointProof.Headers[0]
pruningPointHeader := level0Headers[len(level0Headers)-1]
pruningPoint := consensushashing.HeaderHash(pruningPointHeader)
pruningPointBlockLevel := pruningPointHeader.BlockLevel()
maxLevel := len(ppm.parentsManager.Parents(pruningPointHeader)) - 1
pruningPointBlockLevel := pow.BlockLevel(pruningPointHeader)
maxLevel := len(pruningPointHeader.Parents()) - 1
if maxLevel >= len(pruningPointProof.Headers) {
return errors.Wrapf(ruleerrors.ErrPruningProofEmpty, "proof has only %d levels while pruning point "+
"has parents from %d levels", len(pruningPointProof.Headers), maxLevel+1)
@@ -352,15 +300,15 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
var selectedTip *externalapi.DomainHash
for i, header := range headers {
blockHash := consensushashing.HeaderHash(header)
if header.BlockLevel() < blockLevel {
if pow.BlockLevel(header) < blockLevel {
return errors.Wrapf(ruleerrors.ErrPruningProofWrongBlockLevel, "block %s level is %d when it's "+
"expected to be at least %d", blockHash, header.BlockLevel(), blockLevel)
"expected to be at least %d", blockHash, pow.BlockLevel(header), blockLevel)
}
blockHeaderStore.Stage(stagingArea, blockHash, header)
var parents []*externalapi.DomainHash
for _, parent := range ppm.parentsManager.ParentsAtLevel(header, blockLevel) {
for _, parent := range header.ParentsAtLevel(blockLevel) {
_, err := ghostdagDataStores[blockLevel].Get(ppm.databaseContext, stagingArea, parent, false)
if database.IsNotFoundError(err) {
continue
@@ -429,7 +377,7 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
}
}
if !selectedTip.Equal(pruningPoint) && !ppm.parentsManager.ParentsAtLevel(pruningPointHeader, blockLevel).Contains(selectedTip) {
if !selectedTip.Equal(pruningPoint) && !pruningPointHeader.ParentsAtLevel(blockLevel).Contains(selectedTip) {
return errors.Wrapf(ruleerrors.ErrPruningProofMissesBlocksBelowPruningPoint, "the selected tip %s at "+
"level %d is not a parent of the pruning point", selectedTip, blockLevel)
}
@@ -447,7 +395,7 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
return errors.Wrapf(ruleerrors.ErrPruningProofSelectedTipIsNotThePruningPoint, "the pruning "+
"proof selected tip %s at level %d is not the pruning point", selectedTip, blockLevel)
}
} else if !ppm.parentsManager.ParentsAtLevel(pruningPointHeader, blockLevel).Contains(selectedTip) {
} else if !pruningPointHeader.ParentsAtLevel(blockLevel).Contains(selectedTip) {
return errors.Wrapf(ruleerrors.ErrPruningProofSelectedTipNotParentOfPruningPoint, "the pruning "+
"proof selected tip %s at level %d is not a parent of the of the pruning point on the same "+
"level", selectedTip, blockLevel)
@@ -606,22 +554,19 @@ func (ppm *pruningProofManager) dagProcesses(
}
func (ppm *pruningProofManager) ApplyPruningPointProof(stagingArea *model.StagingArea, pruningPointProof *externalapi.PruningPointProof) error {
onEnd := logger.LogAndMeasureExecutionTime(log, "ApplyPruningPointProof")
defer onEnd()
for blockLevel, headers := range pruningPointProof.Headers {
var selectedTip *externalapi.DomainHash
for i, header := range headers {
blockHash := consensushashing.HeaderHash(header)
if header.BlockLevel() < blockLevel {
if pow.BlockLevel(header) < blockLevel {
return errors.Wrapf(ruleerrors.ErrPruningProofWrongBlockLevel, "block %s level is %d when it's "+
"expected to be at least %d", blockHash, header.BlockLevel(), blockLevel)
"expected to be at least %d", blockHash, pow.BlockLevel(header), blockLevel)
}
ppm.blockHeaderStore.Stage(stagingArea, blockHash, header)
var parents []*externalapi.DomainHash
for _, parent := range ppm.parentsManager.ParentsAtLevel(header, blockLevel) {
for _, parent := range header.ParentsAtLevel(blockLevel) {
_, err := ppm.ghostdagDataStores[blockLevel].Get(ppm.databaseContext, stagingArea, parent, false)
if database.IsNotFoundError(err) {
continue

View File

@@ -49,7 +49,7 @@ func TestCheckLockTimeVerifyConditionedByDAAScore(t *testing.T) {
}
fees := uint64(1)
//Create a CLTV script:
targetDAAScore := consensusConfig.GenesisBlock.Header.DAAScore() + uint64(30)
targetDAAScore := uint64(30)
redeemScriptCLTV, err := createScriptCLTV(targetDAAScore)
if err != nil {
t.Fatalf("Failed to create a script using createScriptCLTV: %v", err)
@@ -156,7 +156,7 @@ func TestCheckLockTimeVerifyConditionedByDAAScoreWithWrongLockTime(t *testing.T)
}
fees := uint64(1)
//Create a CLTV script:
targetDAAScore := consensusConfig.GenesisBlock.Header.DAAScore() + uint64(30)
targetDAAScore := uint64(30)
redeemScriptCLTV, err := createScriptCLTV(targetDAAScore)
if err != nil {
t.Fatalf("Failed to create a script using createScriptCLTV: %v", err)

View File

@@ -2,7 +2,6 @@ package blockheader
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
"math/big"
)
@@ -19,9 +18,6 @@ type blockHeader struct {
blueScore uint64
blueWork *big.Int
pruningPoint *externalapi.DomainHash
isBlockLevelCached bool
blockLevel int
}
func (bh *blockHeader) BlueScore() uint64 {
@@ -45,12 +41,10 @@ func (bh *blockHeader) ToImmutable() externalapi.BlockHeader {
}
func (bh *blockHeader) SetNonce(nonce uint64) {
bh.isBlockLevelCached = false
bh.nonce = nonce
}
func (bh *blockHeader) SetTimeInMilliseconds(timeInMilliseconds int64) {
bh.isBlockLevelCached = false
bh.timeInMilliseconds = timeInMilliseconds
}
@@ -62,12 +56,16 @@ func (bh *blockHeader) Parents() []externalapi.BlockLevelParents {
return bh.parents
}
func (bh *blockHeader) DirectParents() externalapi.BlockLevelParents {
if len(bh.parents) == 0 {
func (bh *blockHeader) ParentsAtLevel(level int) externalapi.BlockLevelParents {
if len(bh.parents) <= level {
return externalapi.BlockLevelParents{}
}
return bh.parents[0]
return bh.parents[level]
}
func (bh *blockHeader) DirectParents() externalapi.BlockLevelParents {
return bh.ParentsAtLevel(0)
}
func (bh *blockHeader) HashMerkleRoot() *externalapi.DomainHash {
@@ -179,15 +177,6 @@ func (bh *blockHeader) ToMutable() externalapi.MutableBlockHeader {
return bh.clone()
}
func (bh *blockHeader) BlockLevel() int {
if !bh.isBlockLevelCached {
bh.blockLevel = pow.BlockLevel(bh)
bh.isBlockLevelCached = true
}
return bh.blockLevel
}
// NewImmutableBlockHeader returns a new immutable header
func NewImmutableBlockHeader(
version uint16,

View File

@@ -36,8 +36,6 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
8,
big.NewInt(9),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{10}),
false,
0,
},
expectedResult: false,
},
@@ -57,8 +55,6 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
headersToCompareTo: []headerToCompare{
{
@@ -79,8 +75,6 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: true,
},
@@ -98,8 +92,6 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -119,8 +111,6 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -138,8 +128,6 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -157,8 +145,6 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -176,8 +162,6 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -195,8 +179,6 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -214,8 +196,6 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -233,8 +213,6 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -252,8 +230,6 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -271,8 +247,6 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -290,8 +264,6 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
100,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -309,8 +281,6 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(100),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -328,8 +298,6 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{100}),
false,
0,
},
expectedResult: false,
},

View File

@@ -37,7 +37,5 @@ const (
LockTimeThreshold = 5e11 // Tue Nov 5 00:53:20 1985 UTC
// MaxBlockLevel is the maximum possible block level.
// This is technically 255, but we clamped it at 256 - block level of mainnet genesis
// This means that any block that has a level lower or equal to genesis will be level 0.
MaxBlockLevel = 225
MaxBlockLevel = 255
)

View File

@@ -4,7 +4,6 @@ import (
"crypto/sha256"
"github.com/pkg/errors"
"golang.org/x/crypto/blake2b"
"golang.org/x/crypto/sha3"
)
const (
@@ -14,7 +13,6 @@ const (
transcationSigningECDSADomain = "TransactionSigningHashECDSA"
blockDomain = "BlockHash"
proofOfWorkDomain = "ProofOfWorkHash"
heavyHashDomain = "HeavyHash"
merkleBranchDomain = "MerkleBranchHash"
)
@@ -67,15 +65,12 @@ func NewBlockHashWriter() HashWriter {
}
// NewPoWHashWriter Returns a new HashWriter used for the PoW function
func NewPoWHashWriter() ShakeHashWriter {
shake256 := sha3.NewCShake256(nil, []byte(proofOfWorkDomain))
return ShakeHashWriter{shake256}
}
// NewHeavyHashWriter Returns a new HashWriter used for the HeavyHash function
func NewHeavyHashWriter() ShakeHashWriter {
shake256 := sha3.NewCShake256(nil, []byte(heavyHashDomain))
return ShakeHashWriter{shake256}
func NewPoWHashWriter() HashWriter {
blake, err := blake2b.New256([]byte(proofOfWorkDomain))
if err != nil {
panic(errors.Wrapf(err, "this should never happen. %s is less than 64 bytes", proofOfWorkDomain))
}
return HashWriter{blake}
}
// NewMerkleBranchHashWriter Returns a new HashWriter used for a merkle tree branch

View File

@@ -3,7 +3,6 @@ package hashes
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/pkg/errors"
"golang.org/x/crypto/sha3"
"hash"
)
@@ -31,29 +30,3 @@ func (h HashWriter) Finalize() *externalapi.DomainHash {
copy(sum[:], h.Sum(sum[:0]))
return externalapi.NewDomainHashFromByteArray(&sum)
}
// ShakeHashWriter is exactly the same as HashWriter but for CShake256
type ShakeHashWriter struct {
sha3.ShakeHash
}
// InfallibleWrite is just like write but doesn't return anything
func (h *ShakeHashWriter) InfallibleWrite(p []byte) {
// This write can never return an error, this is part of the hash.Hash interface contract.
_, err := h.Write(p)
if err != nil {
panic(errors.Wrap(err, "this should never happen. sha3.ShakeHash interface promises to not return errors."))
}
}
// Finalize returns the resulting hash
func (h *ShakeHashWriter) Finalize() *externalapi.DomainHash {
var sum [externalapi.DomainHashSize]byte
// This should prevent `Sum` for allocating an output buffer, by using the DomainHash buffer. we still copy because we don't want to rely on that.
_, err := h.Read(sum[:])
if err != nil {
panic(errors.Wrap(err, "this should never happen. sha3.ShakeHash interface promises to not return errors."))
}
h.ShakeHash = nil // prevent double reading as it will return a different hash
return externalapi.NewDomainHashFromByteArray(&sum)
}

View File

@@ -1,50 +0,0 @@
package hashes
import (
"math/rand"
"testing"
)
// BenchmarkNewBlockHashWriterSmall measures block-hash throughput on a
// small (32-byte) deterministic pseudo-random input.
func BenchmarkNewBlockHashWriterSmall(b *testing.B) {
	rng := rand.New(rand.NewSource(0))
	var input [32]byte
	rng.Read(input[:])
	for iter := 0; iter < b.N; iter++ {
		writer := NewBlockHashWriter()
		writer.InfallibleWrite(input[:])
		writer.Finalize()
	}
}
// BenchmarkNewBlockHashWriterBig measures block-hash throughput on a
// larger (1 KiB) deterministic pseudo-random input.
func BenchmarkNewBlockHashWriterBig(b *testing.B) {
	rng := rand.New(rand.NewSource(0))
	var input [1024]byte
	rng.Read(input[:])
	for iter := 0; iter < b.N; iter++ {
		writer := NewBlockHashWriter()
		writer.InfallibleWrite(input[:])
		writer.Finalize()
	}
}
// BenchmarkNewHeavyHashWriterSmall measures HeavyHash-writer throughput on a
// small (32-byte) deterministic pseudo-random input.
func BenchmarkNewHeavyHashWriterSmall(b *testing.B) {
	rng := rand.New(rand.NewSource(0))
	var input [32]byte
	rng.Read(input[:])
	for iter := 0; iter < b.N; iter++ {
		writer := NewHeavyHashWriter()
		writer.InfallibleWrite(input[:])
		writer.Finalize()
	}
}
// BenchmarkNewHeavyHashWriterBig measures HeavyHash-writer throughput on a
// larger (1 KiB) deterministic pseudo-random input.
func BenchmarkNewHeavyHashWriterBig(b *testing.B) {
	rng := rand.New(rand.NewSource(0))
	var input [1024]byte
	rng.Read(input[:])
	for iter := 0; iter < b.N; iter++ {
		writer := NewHeavyHashWriter()
		writer.InfallibleWrite(input[:])
		writer.Finalize()
	}
}

View File

@@ -6,17 +6,18 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
"github.com/kaspanet/kaspad/util/difficulty"
"github.com/pkg/errors"
)
// SolveBlock increments the given block's nonce until it matches the difficulty requirements in its bits field
func SolveBlock(block *externalapi.DomainBlock, rd *rand.Rand) {
header := block.Header.ToMutable()
state := pow.NewState(header)
for state.Nonce = rd.Uint64(); state.Nonce < math.MaxUint64; state.Nonce++ {
if state.CheckProofOfWork() {
header.SetNonce(state.Nonce)
block.Header = header.ToImmutable()
targetDifficulty := difficulty.CompactToBig(block.Header.Bits())
headerForMining := block.Header.ToMutable()
for i := rd.Uint64(); i < math.MaxUint64; i++ {
headerForMining.SetNonce(i)
if pow.CheckProofOfWorkWithTarget(headerForMining, targetDifficulty) {
block.Header = headerForMining.ToImmutable()
return
}
}

View File

@@ -1,91 +0,0 @@
package pow
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
"math"
)
const eps float64 = 1e-9
type matrix [64][64]uint16
// generateMatrix deterministically derives a 64x64 matrix of 4-bit values
// from the given hash, re-sampling until the matrix has full rank (64).
func generateMatrix(hash *externalapi.DomainHash) *matrix {
	var mat matrix
	generator := newxoShiRo256PlusPlus(hash)
	for {
		for row := range mat {
			for col := 0; col < 64; col += 16 {
				word := generator.Uint64()
				// Unpack the 64-bit word into sixteen 4-bit nibbles,
				// least-significant nibble first.
				for nibble := 0; nibble < 16; nibble++ {
					mat[row][col+nibble] = uint16(word >> (4 * nibble) & 0x0F)
				}
			}
		}
		if mat.computeRank() == 64 {
			return &mat
		}
	}
}
// computeRank returns the rank of the matrix, computed by Gaussian
// elimination over float64 copies of the entries, treating values with
// absolute magnitude <= eps as zero.
func (mat *matrix) computeRank() int {
	// Work on a float64 copy so elimination does not mutate the matrix.
	var B [64][64]float64
	for i := range B {
		for j := range B[0] {
			B[i][j] = float64(mat[i][j])
		}
	}
	var rank int
	var rowSelected [64]bool
	// For each column i, find an unused row with a non-negligible pivot.
	for i := 0; i < 64; i++ {
		var j int
		for j = 0; j < 64; j++ {
			if !rowSelected[j] && math.Abs(B[j][i]) > eps {
				break
			}
		}
		// j == 64 means no pivot exists for this column; it contributes
		// nothing to the rank.
		if j != 64 {
			rank++
			rowSelected[j] = true
			// Normalize the pivot row (columns to the right of i only;
			// earlier columns are never read again).
			for p := i + 1; p < 64; p++ {
				B[j][p] /= B[j][i]
			}
			// Eliminate column i from every other row.
			for k := 0; k < 64; k++ {
				if k != j && math.Abs(B[k][i]) > eps {
					for p := i + 1; p < 64; p++ {
						B[k][p] -= B[j][p] * B[k][i]
					}
				}
			}
		}
	}
	return rank
}
// HeavyHash mixes the input hash through the matrix: the hash's 32 bytes are
// split into 64 nibbles, multiplied by the matrix, reduced back to nibbles,
// XORed into the original bytes, and hashed once more with the HeavyHash
// domain-separated hasher.
func (mat *matrix) HeavyHash(hash *externalapi.DomainHash) *externalapi.DomainHash {
	hashBytes := hash.ByteArray()
	var vector [64]uint16
	var product [64]uint16
	// Split each byte into its high and low nibble.
	for i := 0; i < 32; i++ {
		vector[2*i] = uint16(hashBytes[i] >> 4)
		vector[2*i+1] = uint16(hashBytes[i] & 0x0F)
	}
	// Matrix-vector multiplication, and convert to 4 bits.
	// Each sum fits in uint16 (64 * 15 * 15 < 65536); `>> 10` keeps the
	// top bits of the product as a 4-bit (at most 6-bit) value.
	for i := 0; i < 64; i++ {
		var sum uint16
		for j := 0; j < 64; j++ {
			sum += mat[i][j] * vector[j]
		}
		product[i] = sum >> 10
	}
	// Concatenate 4 LSBs back to 8 bit xor with sum1
	// Note: byte(product[2*i]<<4) truncates after the uint16 shift, keeping
	// only the low nibble of product[2*i] in the high half of the byte.
	var res [32]byte
	for i := range res {
		res[i] = hashBytes[i] ^ (byte(product[2*i]<<4) | byte(product[2*i+1]))
	}
	// Hash again
	writer := hashes.NewHeavyHashWriter()
	writer.InfallibleWrite(res[:])
	return writer.Finalize()
}

View File

@@ -1,233 +0,0 @@
package pow
import (
"bytes"
"encoding/hex"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
"math/rand"
"testing"
)
// BenchmarkMatrix_HeavyHash measures HeavyHash throughput, chaining each
// output hash into the next iteration's input.
func BenchmarkMatrix_HeavyHash(b *testing.B) {
	seed := []byte("BenchmarkMatrix_HeavyHash")
	writer := hashes.NewPoWHashWriter()
	writer.InfallibleWrite(seed)
	hash := writer.Finalize()
	mat := generateMatrix(hash)
	for iter := 0; iter < b.N; iter++ {
		hash = mat.HeavyHash(hash)
	}
}
// BenchmarkMatrix_Generate measures the cost of deriving a full-rank matrix
// from a fixed pseudo-random hash.
func BenchmarkMatrix_Generate(b *testing.B) {
	rng := rand.New(rand.NewSource(0))
	var hashBytes [32]byte
	rng.Read(hashBytes[:])
	hash := externalapi.NewDomainHashFromByteArray(&hashBytes)
	for iter := 0; iter < b.N; iter++ {
		generateMatrix(hash)
	}
}
// BenchmarkMatrix_Rank measures the cost of the Gaussian-elimination rank
// computation on a generated full-rank matrix.
func BenchmarkMatrix_Rank(b *testing.B) {
	rng := rand.New(rand.NewSource(0))
	var hashBytes [32]byte
	rng.Read(hashBytes[:])
	hash := externalapi.NewDomainHashFromByteArray(&hashBytes)
	mat := generateMatrix(hash)
	for iter := 0; iter < b.N; iter++ {
		mat.computeRank()
	}
}
// TestMatrix_Rank verifies computeRank against known cases: the zero matrix
// (rank 0), a generated matrix (always full rank, 64), and matrices with one
// and then two manufactured linear dependencies between rows (63, 62).
//
// Fixes: typos in the failure messages ("depenency" -> "dependency",
// "anoter" -> "another") and redundant double evaluation of computeRank()
// in each condition + message pair.
func TestMatrix_Rank(t *testing.T) {
	var mat matrix
	if rank := mat.computeRank(); rank != 0 {
		t.Fatalf("The zero matrix should have rank 0, instead got: %d", rank)
	}

	r := rand.New(rand.NewSource(0))
	h := [32]byte{}
	r.Read(h[:])
	hash := externalapi.NewDomainHashFromByteArray(&h)
	mat = *generateMatrix(hash)
	if rank := mat.computeRank(); rank != 64 {
		t.Fatalf("generateMatrix() should always return full rank matrix, instead got: %d", rank)
	}

	// Make row 0 a scalar multiple of row 1, dropping the rank by one.
	for i := range mat {
		mat[0][i] = mat[1][i] * 2
	}
	if rank := mat.computeRank(); rank != 63 {
		t.Fatalf("Made a linear dependency between 2 rows, rank should be 63, instead got: %d", rank)
	}

	// Add a second dependency between two other rows.
	for i := range mat {
		mat[33][i] = mat[32][i] * 3
	}
	if rank := mat.computeRank(); rank != 62 {
		t.Fatalf("Made another linear dependency between 2 rows, rank should be 62, instead got: %d", rank)
	}
}
// TestGenerateMatrix checks generateMatrix against a fixed test vector:
// a hash made of 32 repeated 0x2A (42) bytes must expand to exactly this
// 64x64 matrix of 4-bit values. Any change to the matrix-generation
// algorithm (and hence to heavy-hash consensus) will break this test.
func TestGenerateMatrix(t *testing.T) {
	hashBytes := [32]byte{42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42}
	hash := externalapi.NewDomainHashFromByteArray(&hashBytes)
	// The expected matrix is a frozen test vector; do not edit by hand.
	expectedMatrix := matrix{
		{4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 2, 10, 2, 10, 2, 10, 2, 10, 2, 10, 2, 10, 2, 10, 2, 10, 14, 1, 2, 2, 14, 10, 4, 12, 4, 12, 10, 10, 10, 10, 10, 10},
		{9, 11, 1, 11, 1, 11, 9, 11, 9, 11, 9, 3, 12, 13, 11, 5, 15, 15, 5, 0, 6, 8, 1, 8, 6, 11, 15, 5, 3, 6, 7, 3, 2, 15, 14, 3, 7, 11, 14, 7, 3, 6, 14, 12, 3, 9, 5, 1, 1, 0, 8, 4, 10, 15, 9, 10, 6, 13, 1, 1, 7, 4, 4, 6},
		{2, 6, 0, 8, 11, 15, 4, 0, 5, 2, 7, 13, 15, 3, 11, 12, 6, 2, 1, 8, 13, 4, 11, 4, 10, 14, 13, 2, 6, 15, 10, 6, 6, 5, 6, 9, 3, 3, 3, 1, 9, 12, 12, 15, 6, 0, 1, 5, 7, 13, 14, 1, 10, 10, 5, 14, 4, 0, 12, 13, 2, 15, 8, 4},
		{8, 6, 5, 1, 0, 6, 4, 8, 13, 0, 8, 12, 7, 2, 4, 3, 10, 5, 9, 3, 12, 13, 2, 4, 13, 14, 7, 7, 9, 12, 10, 8, 11, 6, 14, 3, 12, 8, 8, 0, 2, 10, 0, 9, 1, 9, 7, 8, 5, 2, 9, 13, 15, 6, 13, 10, 1, 9, 1, 10, 6, 2, 10, 9},
		{4, 2, 6, 14, 4, 2, 5, 7, 15, 6, 0, 4, 11, 9, 12, 0, 3, 2, 0, 4, 10, 5, 12, 3, 3, 4, 10, 1, 0, 13, 3, 12, 15, 0, 7, 10, 2, 2, 15, 0, 2, 15, 8, 2, 15, 12, 10, 6, 6, 2, 13, 3, 8, 14, 3, 13, 10, 5, 4, 5, 1, 6, 5, 10},
		{0, 3, 13, 12, 11, 4, 11, 13, 1, 12, 4, 11, 15, 14, 13, 4, 7, 1, 3, 0, 10, 3, 8, 8, 1, 2, 5, 14, 4, 5, 14, 1, 1, 3, 3, 1, 5, 15, 7, 5, 11, 8, 8, 12, 10, 5, 7, 9, 2, 10, 13, 11, 4, 2, 12, 15, 10, 6, 6, 0, 6, 6, 3, 12},
		{9, 12, 3, 3, 5, 8, 12, 13, 7, 4, 5, 11, 4, 0, 7, 2, 2, 15, 12, 14, 12, 5, 4, 2, 8, 8, 8, 13, 6, 1, 1, 5, 0, 15, 12, 13, 8, 5, 0, 4, 13, 1, 6, 1, 12, 14, 1, 0, 13, 12, 10, 10, 1, 4, 13, 13, 8, 4, 15, 13, 6, 6, 14, 10},
		{14, 15, 8, 0, 7, 2, 5, 10, 5, 3, 12, 0, 11, 3, 4, 2, 8, 11, 6, 14, 14, 3, 3, 12, 3, 7, 6, 2, 6, 12, 15, 1, 1, 13, 0, 6, 9, 9, 7, 7, 13, 4, 4, 2, 15, 5, 2, 15, 13, 13, 10, 6, 9, 15, 2, 9, 6, 10, 6, 14, 14, 3, 5, 11},
		{6, 4, 7, 8, 11, 0, 13, 11, 0, 7, 0, 0, 13, 6, 3, 11, 15, 14, 10, 2, 7, 8, 13, 14, 8, 15, 10, 8, 14, 6, 10, 14, 3, 11, 5, 11, 13, 5, 3, 12, 3, 0, 2, 0, 6, 14, 4, 12, 4, 4, 8, 15, 7, 8, 12, 11, 3, 9, 5, 13, 10, 14, 13, 4},
		{10, 0, 0, 15, 1, 4, 13, 3, 15, 10, 2, 5, 11, 2, 9, 14, 7, 3, 2, 8, 6, 15, 0, 12, 1, 4, 1, 9, 3, 0, 15, 8, 9, 13, 0, 7, 9, 10, 6, 14, 3, 7, 9, 7, 4, 0, 11, 8, 4, 6, 5, 8, 8, 0, 5, 14, 7, 12, 12, 2, 5, 6, 5, 6},
		{12, 0, 0, 14, 8, 3, 0, 3, 13, 10, 5, 13, 5, 7, 2, 4, 13, 11, 3, 1, 11, 2, 14, 5, 10, 5, 5, 9, 12, 15, 12, 8, 1, 0, 11, 13, 8, 1, 1, 11, 10, 0, 11, 15, 13, 9, 12, 14, 5, 4, 5, 14, 2, 7, 2, 1, 4, 12, 11, 11, 9, 12, 11, 15},
		{3, 15, 9, 8, 13, 12, 15, 7, 8, 7, 14, 6, 10, 3, 0, 5, 2, 2, 6, 6, 3, 2, 5, 12, 11, 2, 10, 11, 13, 3, 9, 7, 7, 6, 8, 15, 14, 14, 11, 11, 9, 7, 1, 3, 8, 5, 11, 11, 1, 2, 15, 8, 13, 8, 11, 4, 1, 5, 3, 12, 5, 3, 7, 7},
		{13, 13, 2, 14, 4, 3, 15, 2, 0, 15, 1, 5, 4, 1, 5, 1, 4, 14, 5, 1, 11, 13, 15, 1, 3, 3, 5, 13, 14, 1, 0, 4, 6, 1, 15, 7, 7, 0, 15, 8, 15, 3, 14, 7, 7, 8, 12, 10, 2, 14, 9, 2, 11, 11, 7, 10, 4, 3, 12, 13, 4, 13, 0, 14},
		{12, 14, 15, 15, 2, 0, 0, 13, 4, 6, 4, 2, 14, 11, 5, 6, 14, 8, 14, 7, 13, 15, 6, 15, 7, 9, 1, 0, 11, 9, 9, 0, 2, 12, 8, 8, 14, 11, 7, 5, 3, 0, 11, 12, 9, 2, 8, 9, 0, 0, 9, 8, 9, 8, 2, 14, 12, 2, 0, 14, 13, 8, 4, 10},
		{7, 10, 1, 15, 12, 14, 7, 4, 7, 13, 4, 8, 13, 12, 1, 7, 10, 6, 5, 14, 14, 3, 14, 4, 11, 14, 6, 12, 15, 12, 15, 12, 4, 5, 9, 8, 7, 7, 3, 0, 5, 7, 3, 8, 4, 4, 7, 5, 6, 12, 13, 0, 12, 10, 2, 5, 14, 9, 6, 4, 13, 13, 14, 5},
		{14, 5, 8, 3, 4, 15, 13, 14, 14, 10, 7, 14, 15, 2, 11, 14, 13, 13, 12, 10, 6, 9, 5, 5, 6, 13, 15, 13, 7, 0, 15, 11, 4, 12, 15, 7, 7, 4, 3, 11, 8, 14, 5, 10, 2, 4, 4, 12, 3, 6, 1, 9, 15, 1, 1, 13, 7, 5, 0, 14, 15, 7, 8, 6},
		{1, 2, 10, 5, 2, 13, 1, 11, 15, 10, 4, 9, 9, 12, 14, 13, 3, 5, 0, 3, 7, 11, 10, 3, 12, 5, 10, 2, 13, 7, 1, 7, 13, 8, 2, 8, 3, 14, 10, 3, 5, 12, 0, 9, 3, 9, 11, 2, 10, 9, 0, 6, 4, 0, 1, 14, 11, 0, 8, 6, 1, 15, 3, 10},
		{13, 9, 0, 5, 8, 7, 12, 15, 10, 10, 5, 1, 1, 7, 6, 1, 14, 5, 15, 2, 3, 5, 3, 5, 7, 3, 7, 7, 1, 4, 3, 14, 5, 0, 12, 0, 12, 10, 10, 6, 12, 6, 3, 5, 5, 11, 10, 1, 11, 3, 13, 3, 9, 11, 1, 7, 14, 14, 0, 8, 15, 5, 2, 7},
		{8, 5, 11, 6, 15, 0, 1, 13, 1, 6, 7, 15, 4, 3, 14, 12, 9, 3, 11, 6, 4, 12, 1, 11, 6, 12, 5, 11, 1, 12, 2, 3, 1, 2, 11, 12, 0, 5, 11, 5, 3, 13, 11, 3, 11, 14, 10, 8, 3, 9, 4, 8, 13, 11, 9, 11, 2, 4, 12, 3, 0, 14, 7, 11},
		{10, 11, 4, 10, 7, 8, 3, 14, 15, 8, 15, 6, 9, 8, 5, 6, 12, 1, 15, 6, 5, 5, 14, 13, 2, 12, 14, 6, 5, 5, 14, 9, 1, 10, 11, 14, 8, 6, 14, 11, 1, 15, 6, 11, 11, 8, 1, 2, 8, 5, 4, 15, 6, 8, 0, 8, 0, 11, 0, 1, 0, 7, 8, 15},
		{0, 15, 5, 0, 11, 4, 4, 2, 0, 4, 8, 12, 2, 2, 0, 8, 1, 2, 6, 5, 6, 12, 3, 1, 12, 1, 6, 10, 2, 5, 0, 2, 0, 11, 8, 6, 13, 4, 14, 4, 15, 5, 8, 11, 9, 6, 2, 6, 9, 1, 4, 2, 14, 10, 4, 4, 1, 1, 11, 8, 6, 11, 11, 9},
		{7, 3, 6, 5, 9, 1, 11, 0, 15, 13, 13, 13, 4, 14, 14, 12, 3, 7, 9, 3, 1, 6, 5, 9, 7, 6, 2, 11, 10, 4, 11, 14, 10, 13, 11, 8, 11, 8, 1, 15, 5, 0, 10, 5, 6, 0, 5, 15, 11, 6, 6, 4, 10, 11, 8, 12, 0, 10, 11, 11, 11, 1, 13, 6},
		{7, 15, 0, 0, 11, 5, 7, 13, 3, 7, 3, 2, 5, 12, 6, 11, 14, 4, 9, 8, 9, 9, 13, 0, 15, 2, 13, 2, 15, 6, 15, 1, 1, 7, 4, 0, 10, 1, 8, 14, 0, 10, 12, 4, 5, 13, 9, 0, 7, 12, 13, 11, 11, 8, 8, 15, 2, 15, 4, 4, 9, 3, 10, 7},
		{0, 9, 3, 5, 14, 6, 7, 14, 7, 2, 13, 7, 3, 15, 9, 15, 2, 8, 0, 4, 6, 0, 15, 6, 2, 1, 14, 8, 5, 8, 2, 4, 2, 11, 9, 2, 15, 13, 11, 12, 8, 15, 3, 13, 2, 2, 10, 13, 1, 8, 7, 15, 13, 6, 7, 7, 4, 3, 14, 7, 0, 9, 15, 11},
		{8, 13, 7, 7, 8, 8, 7, 8, 1, 4, 10, 1, 12, 4, 14, 11, 7, 12, 15, 0, 10, 15, 9, 2, 14, 2, 14, 2, 4, 5, 13, 3, 2, 10, 0, 15, 7, 6, 8, 11, 7, 6, 10, 10, 4, 7, 10, 6, 6, 14, 10, 4, 14, 6, 12, 2, 8, 1, 9, 13, 3, 4, 3, 14},
		{10, 10, 6, 3, 8, 5, 10, 7, 11, 10, 9, 4, 8, 14, 9, 10, 0, 9, 8, 14, 11, 15, 8, 13, 13, 7, 13, 13, 13, 9, 12, 11, 6, 3, 9, 6, 0, 0, 6, 6, 11, 6, 4, 8, 1, 5, 1, 7, 9, 6, 13, 4, 3, 8, 8, 11, 9, 10, 6, 11, 12, 13, 14, 14},
		{14, 10, 0, 15, 14, 4, 3, 0, 12, 4, 0, 14, 11, 9, 0, 6, 4, 6, 0, 9, 8, 14, 4, 4, 6, 8, 2, 8, 10, 3, 8, 0, 1, 1, 15, 4, 2, 4, 13, 9, 9, 4, 0, 5, 5, 1, 2, 5, 11, 6, 2, 1, 7, 8, 10, 10, 1, 5, 8, 6, 7, 0, 4, 14},
		{0, 15, 10, 11, 13, 12, 7, 7, 4, 0, 9, 5, 2, 8, 0, 10, 6, 6, 7, 5, 6, 7, 9, 0, 1, 4, 8, 14, 10, 3, 5, 5, 11, 5, 1, 10, 6, 10, 0, 14, 1, 15, 11, 12, 8, 2, 7, 8, 4, 0, 3, 11, 9, 15, 3, 5, 15, 15, 14, 15, 3, 4, 5, 14},
		{5, 12, 12, 8, 0, 0, 14, 1, 4, 15, 3, 2, 2, 6, 1, 10, 7, 10, 14, 5, 14, 0, 8, 5, 9, 0, 12, 8, 9, 10, 3, 12, 3, 2, 0, 0, 12, 12, 7, 13, 2, 6, 4, 7, 10, 10, 14, 1, 11, 6, 10, 3, 12, 2, 1, 10, 7, 13, 10, 12, 14, 11, 14, 8},
		{9, 5, 3, 12, 4, 3, 10, 14, 7, 5, 11, 12, 2, 13, 9, 8, 5, 2, 6, 2, 4, 9, 10, 10, 4, 3, 4, 0, 11, 1, 10, 9, 4, 10, 4, 5, 8, 11, 1, 7, 13, 7, 6, 6, 3, 12, 0, 0, 15, 6, 12, 12, 13, 7, 14, 14, 11, 15, 7, 14, 12, 6, 15, 2},
		{15, 2, 0, 12, 15, 14, 8, 14, 7, 14, 0, 3, 3, 11, 12, 2, 3, 14, 13, 5, 12, 9, 6, 11, 7, 4, 5, 1, 7, 12, 0, 11, 1, 5, 6, 6, 8, 6, 12, 2, 12, 3, 10, 3, 4, 10, 3, 3, 3, 10, 10, 14, 3, 13, 15, 0, 7, 6, 15, 6, 13, 7, 4, 11},
		{11, 15, 5, 14, 0, 1, 1, 14, 2, 3, 15, 14, 4, 3, 11, 1, 6, 6, 0, 12, 3, 5, 15, 6, 3, 11, 13, 11, 7, 7, 8, 11, 5, 9, 10, 10, 9, 14, 7, 1, 7, 2, 8, 6, 6, 5, 1, 9, 6, 5, 8, 14, 2, 14, 2, 9, 3, 3, 4, 15, 13, 5, 2, 7},
		{7, 8, 13, 9, 15, 8, 11, 7, 1, 9, 15, 12, 6, 9, 3, 1, 10, 10, 11, 0, 0, 8, 14, 5, 11, 12, 14, 4, 3, 9, 12, 9, 14, 0, 0, 9, 12, 4, 1, 13, 3, 6, 3, 4, 13, 10, 2, 9, 3, 7, 7, 10, 7, 10, 10, 3, 5, 15, 8, 9, 11, 7, 1, 14},
		{5, 5, 9, 1, 15, 3, 3, 11, 6, 11, 13, 13, 4, 12, 7, 12, 4, 8, 14, 13, 7, 12, 13, 8, 10, 2, 1, 12, 11, 7, 0, 8, 10, 9, 15, 1, 3, 9, 10, 0, 9, 1, 14, 1, 1, 9, 2, 2, 8, 9, 5, 6, 3, 2, 15, 9, 15, 6, 3, 11, 14, 4, 0, 4},
		{9, 2, 10, 2, 0, 9, 6, 13, 13, 0, 13, 14, 3, 12, 1, 15, 9, 3, 12, 2, 5, 15, 6, 6, 15, 11, 7, 11, 0, 4, 0, 11, 10, 12, 7, 9, 3, 0, 2, 2, 13, 13, 9, 6, 9, 2, 6, 4, 3, 6, 5, 10, 10, 9, 7, 2, 4, 9, 13, 11, 2, 13, 6, 8},
		{13, 15, 9, 8, 6, 2, 3, 2, 2, 12, 5, 3, 8, 6, 11, 6, 15, 7, 10, 3, 15, 8, 7, 5, 3, 8, 4, 2, 11, 1, 0, 4, 1, 1, 6, 1, 13, 6, 5, 1, 2, 6, 7, 10, 4, 3, 10, 6, 2, 0, 7, 13, 15, 1, 13, 0, 12, 10, 15, 6, 2, 4, 14, 3},
		{5, 11, 14, 4, 0, 7, 12, 4, 4, 14, 12, 3, 4, 10, 7, 14, 6, 4, 14, 7, 0, 12, 5, 9, 15, 6, 15, 6, 3, 12, 0, 10, 11, 7, 1, 14, 13, 5, 1, 14, 5, 15, 12, 1, 9, 13, 9, 13, 14, 5, 10, 11, 12, 10, 15, 11, 9, 13, 2, 14, 9, 12, 2, 11},
		{2, 12, 5, 7, 1, 5, 2, 11, 8, 4, 15, 6, 9, 14, 5, 1, 15, 4, 3, 1, 11, 4, 2, 1, 4, 5, 4, 4, 7, 3, 3, 12, 4, 3, 2, 15, 13, 1, 14, 15, 1, 4, 6, 11, 13, 15, 6, 12, 12, 13, 6, 8, 10, 0, 10, 12, 1, 10, 3, 2, 9, 8, 2, 8},
		{10, 12, 12, 6, 8, 5, 4, 4, 5, 3, 6, 7, 15, 5, 10, 3, 8, 15, 14, 5, 6, 2, 14, 4, 1, 7, 1, 3, 12, 3, 12, 4, 10, 15, 6, 6, 0, 6, 6, 8, 6, 9, 5, 7, 5, 1, 9, 2, 4, 9, 0, 8, 1, 1, 14, 3, 7, 14, 8, 9, 0, 4, 11, 7},
		{13, 11, 14, 7, 0, 4, 0, 10, 12, 11, 10, 8, 6, 12, 13, 15, 9, 2, 14, 9, 3, 0, 12, 14, 11, 15, 4, 7, 15, 14, 4, 8, 15, 12, 9, 14, 7, 7, 9, 13, 14, 14, 4, 9, 13, 8, 1, 13, 6, 3, 12, 7, 0, 15, 6, 15, 7, 2, 3, 0, 9, 5, 13, 0},
		{3, 8, 12, 11, 5, 9, 9, 14, 8, 14, 14, 5, 9, 9, 12, 10, 3, 12, 13, 0, 0, 0, 6, 7, 12, 4, 2, 3, 8, 8, 9, 15, 11, 1, 12, 13, 10, 15, 11, 1, 2, 13, 10, 1, 7, 2, 7, 11, 8, 15, 7, 6, 4, 6, 5, 11, 11, 15, 2, 1, 11, 1, 1, 8},
		{10, 7, 7, 1, 4, 13, 9, 10, 2, 2, 3, 7, 12, 8, 5, 5, 5, 5, 3, 1, 5, 6, 8, 2, 8, 11, 5, 0, 4, 12, 12, 6, 7, 9, 14, 10, 11, 8, 0, 9, 11, 4, 14, 7, 7, 8, 2, 15, 12, 7, 4, 4, 13, 2, 0, 3, 14, 0, 1, 5, 2, 15, 7, 11},
		{3, 8, 10, 4, 1, 7, 3, 13, 5, 14, 0, 9, 3, 1, 0, 11, 2, 15, 4, 9, 6, 5, 14, 0, 2, 8, 1, 14, 7, 6, 1, 5, 5, 7, 2, 0, 5, 3, 4, 15, 13, 10, 9, 13, 13, 12, 5, 11, 11, 14, 13, 10, 8, 14, 0, 8, 1, 7, 2, 10, 12, 12, 1, 11},
		{11, 14, 4, 13, 3, 11, 10, 6, 15, 2, 5, 10, 14, 4, 13, 3, 12, 7, 12, 10, 4, 0, 0, 1, 14, 6, 1, 2, 2, 12, 9, 2, 3, 11, 1, 4, 10, 4, 4, 7, 7, 12, 4, 3, 12, 11, 9, 3, 15, 13, 6, 13, 7, 11, 5, 12, 5, 13, 15, 12, 0, 13, 12, 9},
		{8, 7, 2, 2, 5, 3, 10, 15, 10, 8, 1, 0, 4, 5, 7, 6, 15, 13, 2, 14, 6, 2, 9, 5, 9, 0, 5, 12, 8, 6, 4, 12, 6, 8, 14, 15, 7, 15, 11, 2, 2, 12, 7, 9, 7, 11, 15, 7, 0, 4, 5, 13, 7, 2, 5, 9, 0, 5, 7, 6, 7, 12, 4, 1},
		{11, 4, 2, 13, 6, 10, 9, 4, 12, 9, 9, 6, 4, 2, 14, 14, 9, 5, 5, 15, 15, 9, 8, 11, 4, 2, 8, 11, 14, 3, 8, 10, 14, 9, 6, 6, 4, 7, 11, 2, 3, 7, 5, 1, 14, 2, 9, 4, 0, 1, 10, 7, 6, 7, 1, 3, 13, 7, 3, 2, 12, 3, 6, 6},
		{11, 1, 14, 3, 14, 3, 9, 9, 0, 11, 14, 6, 14, 7, 14, 8, 4, 2, 5, 6, 13, 3, 4, 10, 8, 8, 10, 11, 5, 1, 15, 15, 7, 0, 4, 14, 15, 13, 14, 13, 3, 2, 1, 6, 0, 6, 6, 4, 15, 6, 0, 12, 5, 11, 1, 7, 3, 3, 13, 12, 12, 6, 3, 2},
		{7, 2, 10, 14, 14, 13, 4, 14, 10, 6, 0, 2, 7, 7, 2, 5, 14, 1, 5, 14, 15, 1, 2, 9, 2, 13, 1, 3, 6, 1, 3, 13, 10, 6, 11, 13, 1, 7, 13, 15, 2, 11, 9, 6, 13, 7, 9, 2, 3, 13, 10, 10, 6, 2, 5, 9, 1, 3, 0, 3, 1, 5, 3, 12},
		{11, 14, 4, 2, 10, 11, 15, 5, 9, 7, 8, 11, 10, 9, 5, 7, 14, 3, 12, 2, 7, 15, 12, 15, 4, 15, 12, 9, 2, 6, 6, 6, 8, 5, 0, 7, 14, 15, 14, 14, 3, 12, 7, 12, 2, 4, 1, 7, 1, 3, 4, 7, 1, 9, 11, 15, 15, 3, 7, 1, 10, 9, 14, 14},
		{4, 13, 11, 1, 9, 6, 5, 1, 11, 6, 6, 8, 3, 9, 8, 15, 13, 12, 3, 13, 5, 9, 10, 5, 12, 1, 15, 14, 12, 1, 10, 11, 5, 7, 3, 12, 9, 12, 0, 2, 2, 3, 14, 4, 2, 13, 1, 15, 11, 8, 3, 13, 0, 10, 5, 4, 6, 0, 14, 8, 1, 0, 6, 15},
		{15, 2, 0, 5, 2, 14, 9, 0, 10, 5, 12, 8, 5, 6, 0, 1, 9, 4, 4, 1, 4, 6, 14, 5, 3, 0, 2, 2, 14, 9, 7, 0, 2, 15, 12, 0, 10, 12, 9, 12, 15, 1, 9, 4, 15, 3, 0, 13, 0, 6, 5, 0, 2, 6, 11, 9, 13, 15, 6, 3, 5, 4, 0, 8},
		{4, 14, 8, 14, 13, 4, 4, 10, 6, 12, 15, 11, 7, 2, 15, 6, 9, 9, 1, 11, 13, 2, 7, 10, 4, 4, 5, 12, 14, 15, 8, 5, 6, 1, 11, 15, 4, 11, 5, 2, 5, 7, 3, 4, 5, 7, 3, 8, 10, 13, 7, 5, 6, 5, 10, 1, 12, 13, 3, 6, 2, 8, 7, 15},
		{3, 15, 4, 9, 14, 12, 6, 1, 7, 0, 7, 15, 10, 6, 5, 5, 15, 5, 9, 4, 7, 6, 14, 2, 1, 4, 10, 3, 12, 1, 7, 1, 0, 10, 2, 11, 14, 13, 7, 10, 5, 11, 5, 11, 15, 5, 0, 3, 15, 1, 2, 14, 13, 13, 10, 9, 15, 12, 10, 5, 2, 10, 0, 6},
		{4, 6, 5, 13, 11, 10, 15, 4, 2, 15, 13, 6, 7, 7, 4, 0, 4, 6, 7, 4, 9, 1, 6, 7, 6, 1, 4, 2, 0, 11, 6, 3, 14, 5, 9, 2, 2, 10, 1, 2, 13, 14, 4, 11, 4, 7, 12, 9, 8, 2, 2, 9, 5, 7, 9, 12, 8, 15, 0, 9, 12, 11, 1, 12},
		{11, 12, 11, 9, 8, 15, 4, 12, 13, 10, 6, 6, 6, 12, 3, 0, 6, 15, 15, 10, 6, 12, 5, 7, 10, 2, 7, 1, 6, 12, 9, 11, 11, 14, 1, 12, 15, 0, 6, 2, 12, 15, 4, 15, 14, 8, 3, 4, 15, 4, 13, 3, 14, 1, 3, 7, 6, 13, 9, 1, 0, 12, 4, 14},
		{12, 11, 13, 10, 10, 10, 3, 7, 12, 3, 13, 9, 6, 0, 12, 10, 4, 11, 5, 4, 11, 5, 7, 14, 6, 10, 12, 12, 13, 15, 12, 1, 13, 15, 15, 7, 1, 2, 8, 6, 1, 12, 12, 0, 4, 3, 3, 3, 7, 8, 9, 10, 7, 7, 0, 0, 11, 13, 15, 4, 9, 5, 10, 9},
		{6, 12, 3, 0, 9, 11, 6, 4, 9, 9, 1, 5, 9, 14, 3, 7, 15, 3, 5, 0, 5, 11, 7, 6, 13, 5, 10, 2, 12, 10, 2, 6, 0, 1, 1, 13, 9, 3, 11, 7, 8, 2, 10, 9, 13, 6, 6, 4, 12, 0, 3, 10, 9, 4, 15, 11, 14, 1, 9, 3, 0, 14, 6, 1},
		{11, 14, 10, 10, 11, 6, 4, 7, 10, 0, 7, 9, 3, 2, 13, 13, 9, 9, 2, 3, 3, 14, 10, 4, 14, 1, 10, 7, 14, 4, 9, 15, 3, 11, 5, 10, 7, 8, 3, 0, 1, 2, 2, 3, 12, 9, 6, 2, 11, 15, 3, 9, 3, 6, 8, 0, 4, 5, 7, 3, 0, 14, 7, 9},
		{4, 11, 13, 12, 6, 2, 3, 15, 15, 3, 5, 1, 0, 5, 10, 2, 5, 3, 7, 10, 15, 0, 5, 3, 2, 10, 12, 10, 8, 3, 9, 15, 5, 3, 7, 13, 5, 7, 13, 12, 5, 10, 2, 9, 10, 1, 9, 4, 14, 1, 10, 13, 1, 2, 2, 12, 5, 3, 14, 7, 7, 8, 13, 13},
		{10, 12, 11, 10, 0, 15, 4, 3, 0, 8, 3, 0, 15, 0, 3, 10, 10, 9, 15, 3, 13, 3, 8, 3, 8, 2, 14, 7, 1, 6, 13, 8, 2, 2, 12, 3, 3, 0, 10, 12, 0, 1, 1, 7, 5, 0, 13, 10, 7, 13, 9, 9, 13, 7, 0, 1, 0, 2, 14, 2, 13, 0, 8, 3},
		{11, 3, 11, 10, 12, 15, 11, 6, 14, 8, 8, 5, 7, 11, 3, 1, 13, 7, 13, 4, 15, 7, 2, 3, 8, 7, 3, 8, 9, 15, 10, 15, 9, 0, 5, 4, 1, 7, 13, 8, 2, 7, 1, 10, 1, 12, 12, 1, 7, 12, 13, 5, 14, 10, 9, 15, 12, 2, 10, 3, 10, 3, 9, 12},
		{9, 8, 11, 0, 5, 6, 1, 5, 9, 1, 0, 12, 12, 0, 12, 11, 2, 8, 4, 0, 1, 7, 7, 5, 1, 14, 1, 9, 13, 7, 2, 12, 8, 9, 12, 13, 1, 11, 5, 3, 12, 14, 15, 4, 9, 8, 12, 7, 11, 1, 3, 9, 11, 5, 7, 14, 4, 6, 12, 3, 4, 12, 7, 9},
		{10, 12, 2, 14, 14, 1, 11, 8, 3, 7, 13, 7, 2, 1, 14, 13, 7, 6, 15, 8, 15, 12, 13, 10, 11, 15, 4, 2, 6, 13, 12, 3, 2, 10, 15, 14, 10, 11, 8, 14, 9, 3, 12, 9, 15, 2, 14, 14, 5, 13, 7, 6, 2, 1, 1, 4, 1, 0, 13, 10, 1, 0, 2, 9},
		{10, 5, 11, 14, 12, 1, 12, 7, 12, 8, 10, 5, 6, 10, 0, 7, 5, 6, 11, 11, 13, 12, 0, 13, 0, 6, 11, 0, 14, 4, 2, 1, 12, 7, 1, 10, 7, 15, 5, 3, 14, 15, 1, 3, 1, 2, 10, 4, 11, 8, 2, 11, 2, 5, 5, 4, 15, 5, 10, 3, 1, 7, 2, 14},
	}
	mat := generateMatrix(hash)
	// matrix is a value (array) type, so direct comparison is valid.
	if *mat != expectedMatrix {
		t.Fatal("The generated matrix doesn't match the test vector")
	}
}
// TestMatrix_HeavyHash verifies HeavyHash against a fixed test vector:
// hashing the 4-byte input through testMatrix must yield the expected digest.
func TestMatrix_HeavyHash(t *testing.T) {
	expected, err := hex.DecodeString("87689f379943eaf9b7475ca95325687772bfcc68fc7899caeb4409ec4590c325")
	if err != nil {
		t.Fatal(err)
	}
	writer := hashes.NewPoWHashWriter()
	writer.InfallibleWrite([]byte{0xC1, 0xEC, 0xFD, 0xFC})
	hashed := testMatrix.HeavyHash(writer.Finalize())
	if !bytes.Equal(expected, hashed.ByteSlice()) {
		t.Fatalf("expected: %x == %s", expected, hashed)
	}
}
// testMatrix is a fixed 64x64 matrix of 4-bit values used as a frozen
// test vector by TestMatrix_HeavyHash. Do not edit by hand.
var testMatrix = matrix{
	{13, 2, 14, 13, 2, 15, 14, 3, 10, 4, 1, 8, 4, 3, 8, 15, 15, 15, 15, 15, 2, 11, 15, 15, 15, 1, 7, 12, 12, 4, 2, 0, 6, 1, 14, 10, 12, 14, 15, 8, 10, 12, 0, 5, 13, 3, 14, 10, 10, 6, 12, 11, 11, 7, 6, 6, 10, 2, 2, 4, 11, 12, 0, 5},
	{4, 13, 0, 2, 1, 15, 13, 13, 11, 2, 5, 12, 15, 7, 0, 10, 7, 2, 6, 3, 12, 0, 12, 0, 2, 6, 7, 7, 7, 7, 10, 12, 11, 14, 12, 12, 4, 11, 10, 0, 10, 11, 2, 10, 1, 7, 7, 12, 15, 9, 5, 14, 9, 12, 3, 0, 12, 13, 4, 13, 8, 15, 11, 6},
	{14, 6, 15, 9, 8, 2, 2, 12, 2, 3, 4, 12, 13, 15, 4, 5, 13, 4, 3, 0, 14, 3, 5, 14, 3, 13, 4, 15, 9, 12, 7, 15, 5, 1, 13, 12, 9, 9, 8, 11, 14, 11, 4, 10, 12, 6, 12, 8, 6, 3, 9, 8, 1, 6, 0, 5, 8, 9, 12, 5, 14, 15, 2, 2},
	{9, 6, 7, 6, 0, 11, 5, 6, 2, 14, 12, 6, 4, 13, 8, 9, 2, 1, 9, 7, 4, 5, 10, 8, 11, 11, 11, 15, 7, 11, 1, 14, 3, 8, 14, 8, 2, 8, 13, 7, 8, 8, 15, 7, 1, 13, 7, 9, 1, 7, 15, 15, 0, 0, 12, 15, 13, 5, 13, 10, 1, 5, 6, 13},
	{4, 0, 12, 10, 6, 11, 14, 2, 2, 15, 4, 1, 2, 4, 2, 12, 13, 1, 9, 10, 8, 0, 2, 10, 13, 8, 9, 7, 5, 3, 8, 2, 6, 6, 1, 12, 3, 0, 1, 4, 2, 8, 3, 13, 6, 15, 0, 13, 14, 4, 15, 0, 7, 3, 7, 8, 5, 14, 14, 5, 5, 0, 1, 2},
	{12, 14, 6, 3, 3, 4, 6, 7, 1, 3, 2, 7, 15, 15, 15, 10, 9, 12, 0, 6, 3, 8, 5, 0, 13, 5, 0, 6, 0, 14, 2, 12, 10, 4, 11, 2, 10, 7, 7, 6, 8, 11, 4, 4, 11, 9, 3, 12, 10, 5, 2, 6, 5, 5, 10, 13, 12, 10, 1, 6, 14, 7, 12, 4},
	{7, 14, 6, 7, 7, 12, 4, 1, 8, 6, 8, 13, 13, 5, 12, 14, 10, 8, 6, 2, 12, 3, 8, 15, 5, 15, 15, 3, 14, 0, 8, 6, 9, 12, 9, 7, 3, 8, 4, 0, 7, 14, 3, 3, 13, 14, 3, 7, 3, 2, 2, 3, 3, 12, 6, 7, 4, 1, 14, 10, 6, 10, 2, 9},
	{14, 11, 15, 5, 7, 10, 1, 11, 4, 2, 6, 2, 9, 7, 4, 0, 9, 12, 11, 2, 3, 13, 1, 5, 4, 10, 5, 6, 6, 12, 8, 1, 1, 15, 4, 2, 12, 12, 0, 4, 14, 3, 11, 1, 7, 5, 9, 4, 3, 15, 7, 3, 15, 9, 8, 3, 8, 3, 3, 6, 7, 6, 9, 2},
	{10, 4, 6, 10, 5, 2, 15, 12, 0, 14, 14, 15, 14, 0, 12, 9, 1, 12, 4, 5, 5, 2, 10, 4, 2, 13, 11, 3, 1, 8, 10, 0, 7, 0, 12, 4, 11, 1, 14, 6, 14, 5, 5, 11, 11, 1, 3, 8, 0, 6, 11, 11, 8, 4, 7, 6, 14, 4, 9, 14, 9, 7, 13, 9},
	{12, 7, 9, 8, 2, 3, 3, 5, 14, 8, 0, 9, 7, 4, 2, 15, 15, 3, 11, 11, 8, 5, 7, 5, 0, 15, 10, 8, 0, 13, 1, 14, 8, 10, 1, 4, 13, 1, 13, 3, 11, 11, 2, 3, 10, 6, 8, 14, 15, 2, 10, 10, 12, 7, 7, 6, 6, 3, 13, 8, 1, 14, 2, 1},
	{2, 11, 6, 9, 13, 3, 12, 6, 0, 4, 6, 13, 8, 14, 6, 9, 10, 2, 10, 8, 4, 13, 6, 5, 0, 13, 15, 4, 2, 2, 1, 7, 5, 3, 3, 13, 7, 3, 5, 9, 15, 14, 14, 6, 0, 15, 11, 2, 4, 15, 6, 9, 8, 9, 15, 2, 6, 9, 15, 8, 4, 4, 11, 1},
	{10, 11, 8, 3, 11, 13, 10, 2, 2, 5, 2, 14, 15, 10, 2, 11, 0, 1, 8, 2, 14, 1, 10, 0, 3, 7, 5, 10, 7, 8, 15, 7, 2, 5, 13, 4, 10, 3, 6, 2, 3, 9, 6, 11, 7, 14, 1, 11, 9, 3, 3, 7, 6, 0, 9, 11, 4, 10, 4, 1, 9, 7, 4, 15},
	{13, 8, 15, 14, 11, 12, 5, 3, 9, 14, 1, 5, 14, 13, 14, 5, 13, 5, 4, 10, 9, 9, 0, 0, 6, 12, 5, 7, 2, 7, 2, 6, 6, 6, 1, 12, 9, 15, 7, 11, 11, 10, 11, 1, 10, 10, 0, 8, 1, 4, 5, 5, 8, 10, 10, 15, 6, 8, 13, 11, 11, 3, 15, 5},
	{8, 11, 5, 10, 1, 10, 9, 1, 12, 7, 6, 11, 1, 1, 4, 1, 2, 8, 4, 4, 7, 7, 8, 2, 7, 1, 14, 1, 8, 15, 15, 12, 10, 4, 15, 11, 3, 6, 10, 7, 4, 0, 10, 9, 11, 7, 1, 14, 4, 14, 3, 14, 10, 4, 13, 12, 5, 3, 12, 7, 10, 8, 0, 3},
	{9, 11, 6, 15, 14, 10, 0, 4, 7, 7, 6, 0, 7, 7, 12, 15, 5, 4, 12, 3, 7, 3, 0, 12, 2, 7, 11, 6, 7, 3, 2, 8, 5, 11, 9, 4, 3, 8, 11, 12, 3, 5, 14, 12, 4, 13, 12, 0, 3, 14, 4, 9, 1, 1, 9, 14, 10, 14, 8, 15, 6, 14, 10, 15},
	{10, 14, 10, 0, 10, 12, 15, 0, 3, 9, 11, 10, 3, 5, 1, 1, 9, 1, 7, 15, 7, 8, 10, 10, 12, 11, 5, 1, 10, 3, 6, 6, 13, 0, 13, 1, 4, 5, 9, 4, 9, 15, 8, 4, 13, 13, 4, 5, 5, 11, 1, 13, 15, 3, 10, 15, 7, 11, 10, 15, 8, 12, 10, 3},
	{8, 5, 11, 3, 8, 13, 15, 15, 3, 12, 1, 13, 1, 7, 1, 5, 6, 13, 7, 8, 5, 1, 12, 3, 10, 7, 12, 6, 14, 12, 15, 5, 3, 12, 2, 15, 11, 13, 1, 13, 8, 5, 8, 0, 13, 15, 7, 13, 6, 13, 10, 1, 11, 0, 8, 9, 5, 11, 2, 9, 9, 10, 4, 15},
	{0, 4, 12, 14, 3, 1, 7, 5, 11, 13, 5, 3, 11, 12, 6, 8, 10, 15, 11, 8, 7, 10, 0, 2, 5, 15, 6, 10, 4, 2, 3, 1, 13, 7, 6, 12, 14, 7, 6, 14, 12, 10, 6, 14, 12, 0, 12, 11, 6, 9, 3, 1, 12, 15, 15, 3, 5, 5, 10, 11, 7, 15, 13, 3},
	{12, 14, 2, 14, 13, 6, 15, 7, 8, 8, 14, 13, 9, 2, 2, 10, 3, 15, 6, 10, 11, 7, 13, 0, 12, 1, 5, 8, 8, 12, 1, 11, 1, 3, 2, 4, 10, 7, 7, 7, 3, 10, 7, 2, 2, 3, 0, 1, 13, 5, 8, 2, 14, 0, 11, 13, 9, 3, 13, 2, 14, 2, 15, 4},
	{0, 0, 13, 6, 9, 12, 15, 7, 8, 0, 7, 4, 12, 15, 3, 2, 7, 1, 14, 4, 9, 3, 13, 12, 11, 12, 9, 9, 3, 7, 10, 9, 1, 9, 10, 2, 10, 14, 11, 0, 14, 4, 15, 12, 12, 9, 9, 8, 14, 1, 9, 14, 0, 6, 1, 0, 13, 9, 7, 6, 13, 2, 3, 9},
	{8, 0, 10, 13, 0, 7, 9, 7, 5, 1, 0, 3, 7, 10, 3, 15, 1, 15, 3, 11, 2, 6, 3, 10, 0, 10, 10, 3, 4, 15, 8, 6, 11, 11, 7, 5, 8, 5, 7, 15, 1, 11, 7, 13, 13, 6, 13, 13, 4, 2, 3, 15, 9, 5, 10, 6, 6, 6, 3, 11, 15, 13, 1, 15},
	{1, 1, 2, 10, 2, 2, 9, 5, 9, 2, 0, 1, 14, 2, 11, 6, 11, 6, 1, 0, 13, 7, 14, 1, 15, 14, 13, 7, 12, 11, 8, 11, 2, 11, 6, 10, 2, 3, 0, 0, 15, 0, 4, 6, 4, 12, 5, 5, 7, 14, 10, 6, 0, 3, 13, 0, 8, 1, 13, 10, 5, 1, 7, 5},
	{0, 5, 2, 12, 10, 2, 5, 1, 14, 0, 1, 4, 15, 11, 8, 7, 11, 14, 15, 6, 4, 1, 6, 6, 7, 13, 12, 5, 13, 2, 1, 6, 2, 13, 5, 15, 0, 8, 8, 6, 5, 5, 2, 0, 3, 13, 14, 2, 10, 5, 7, 6, 14, 5, 1, 4, 11, 2, 11, 1, 8, 15, 2, 4},
	{9, 9, 4, 5, 2, 5, 3, 12, 14, 5, 1, 3, 3, 0, 0, 6, 7, 14, 0, 15, 14, 11, 3, 10, 1, 9, 4, 14, 7, 14, 1, 0, 15, 11, 5, 9, 4, 0, 0, 10, 4, 4, 0, 7, 8, 15, 12, 8, 10, 8, 1, 2, 1, 11, 12, 14, 14, 14, 8, 10, 1, 5, 13, 10},
	{5, 10, 4, 4, 11, 10, 0, 6, 0, 12, 10, 5, 9, 11, 8, 10, 11, 3, 11, 14, 12, 9, 4, 6, 11, 12, 8, 7, 6, 14, 0, 6, 12, 4, 5, 3, 9, 0, 11, 6, 1, 3, 2, 12, 8, 9, 7, 12, 14, 7, 12, 6, 11, 13, 0, 2, 1, 3, 1, 8, 12, 2, 15, 15},
	{10, 11, 2, 3, 11, 10, 1, 7, 1, 10, 10, 14, 5, 13, 10, 3, 11, 15, 9, 14, 11, 11, 3, 15, 11, 6, 15, 13, 13, 1, 1, 10, 5, 1, 5, 11, 10, 3, 9, 12, 12, 1, 5, 6, 3, 3, 1, 1, 12, 8, 3, 15, 6, 2, 8, 14, 3, 4, 10, 9, 7, 13, 2, 6},
	{12, 0, 1, 0, 4, 3, 3, 6, 8, 3, 1, 13, 6, 12, 1, 1, 1, 4, 12, 4, 4, 9, 9, 14, 15, 3, 6, 4, 11, 1, 12, 5, 6, 0, 10, 9, 1, 8, 14, 5, 2, 8, 4, 15, 12, 13, 7, 14, 12, 2, 6, 9, 4, 13, 0, 15, 10, 10, 6, 12, 7, 12, 9, 10},
	{0, 8, 5, 11, 12, 12, 11, 7, 2, 9, 2, 15, 1, 1, 0, 0, 6, 5, 10, 1, 11, 12, 8, 7, 1, 7, 10, 4, 2, 8, 2, 5, 1, 1, 2, 9, 2, 0, 3, 7, 5, 1, 5, 5, 3, 1, 4, 3, 14, 8, 11, 7, 8, 0, 2, 13, 3, 15, 1, 13, 14, 15, 11, 13},
	{8, 13, 5, 14, 2, 9, 9, 13, 15, 8, 2, 14, 4, 2, 6, 0, 1, 13, 10, 13, 6, 12, 15, 11, 6, 11, 9, 9, 2, 9, 6, 14, 2, 9, 12, 1, 13, 9, 5, 11, 10, 4, 4, 5, 8, 9, 13, 10, 9, 0, 5, 15, 4, 12, 7, 10, 6, 5, 5, 15, 8, 8, 11, 14},
	{6, 9, 6, 7, 1, 15, 0, 1, 4, 15, 5, 3, 10, 9, 15, 9, 14, 12, 7, 6, 3, 0, 12, 8, 12, 2, 11, 8, 11, 8, 1, 10, 10, 7, 7, 5, 3, 5, 1, 2, 13, 11, 2, 5, 2, 10, 10, 1, 14, 14, 8, 1, 11, 1, 2, 6, 15, 10, 8, 7, 10, 7, 0, 3},
	{12, 6, 11, 1, 1, 7, 8, 1, 5, 5, 8, 4, 6, 5, 6, 4, 2, 8, 4, 1, 0, 0, 14, 2, 10, 14, 14, 11, 2, 9, 14, 15, 12, 14, 9, 3, 7, 14, 4, 7, 12, 9, 3, 5, 1, 0, 12, 9, 10, 5, 11, 12, 10, 10, 6, 14, 6, 13, 13, 5, 5, 10, 13, 10},
	{12, 6, 13, 0, 8, 0, 10, 6, 15, 15, 7, 3, 0, 10, 13, 14, 10, 13, 5, 13, 15, 14, 3, 4, 10, 10, 9, 6, 6, 15, 2, 7, 0, 10, 6, 14, 2, 9, 11, 7, 5, 5, 13, 14, 11, 15, 9, 4, 2, 0, 15, 5, 4, 14, 14, 1, 3, 4, 5, 8, 1, 1, 10, 12},
	{2, 5, 0, 4, 11, 5, 5, 6, 10, 4, 6, 7, 10, 3, 0, 14, 14, 0, 12, 15, 11, 12, 13, 7, 6, 3, 9, 1, 9, 8, 8, 8, 4, 10, 3, 1, 7, 10, 3, 2, 12, 6, 15, 14, 0, 6, 8, 10, 1, 9, 12, 12, 15, 7, 1, 11, 15, 13, 0, 4, 10, 0, 12, 11},
	{8, 12, 14, 15, 14, 15, 10, 0, 2, 14, 3, 1, 2, 6, 0, 2, 1, 7, 9, 0, 15, 13, 5, 14, 6, 8, 15, 4, 15, 6, 10, 6, 15, 3, 12, 8, 5, 4, 10, 5, 3, 0, 4, 13, 10, 9, 8, 4, 6, 3, 9, 6, 12, 11, 9, 13, 8, 10, 9, 9, 8, 12, 1, 2},
	{11, 10, 15, 15, 5, 14, 15, 7, 5, 9, 14, 14, 7, 11, 6, 6, 3, 8, 2, 3, 4, 14, 11, 1, 12, 15, 11, 6, 0, 0, 13, 7, 14, 3, 12, 14, 0, 15, 6, 1, 11, 2, 11, 8, 3, 13, 4, 12, 10, 13, 7, 14, 9, 13, 3, 10, 2, 14, 13, 4, 12, 13, 14, 10},
	{1, 11, 2, 12, 1, 10, 7, 12, 3, 3, 14, 9, 1, 10, 0, 11, 8, 10, 12, 12, 4, 12, 2, 11, 5, 0, 3, 15, 8, 2, 14, 3, 10, 2, 1, 13, 6, 14, 0, 0, 8, 11, 6, 13, 15, 10, 12, 7, 7, 11, 14, 9, 2, 7, 6, 8, 14, 9, 14, 10, 11, 9, 9, 12},
	{5, 10, 14, 2, 1, 4, 11, 5, 10, 2, 13, 9, 6, 12, 11, 5, 13, 4, 5, 14, 8, 7, 15, 9, 8, 4, 5, 2, 9, 11, 5, 3, 12, 2, 6, 1, 7, 4, 11, 4, 15, 0, 5, 2, 13, 11, 11, 2, 15, 10, 0, 12, 5, 8, 10, 1, 4, 11, 3, 13, 11, 7, 9, 14},
	{9, 8, 10, 5, 0, 2, 5, 8, 7, 3, 3, 6, 11, 1, 13, 15, 4, 4, 11, 6, 2, 6, 13, 11, 2, 6, 9, 4, 5, 13, 12, 2, 8, 7, 7, 12, 14, 15, 5, 12, 7, 0, 15, 15, 0, 5, 15, 0, 3, 9, 10, 15, 9, 11, 10, 10, 5, 3, 9, 3, 12, 13, 0, 13},
	{1, 11, 15, 0, 10, 5, 3, 5, 6, 7, 1, 11, 4, 11, 4, 2, 5, 12, 2, 5, 5, 6, 1, 5, 14, 9, 1, 5, 14, 12, 6, 10, 0, 8, 5, 11, 11, 11, 12, 10, 8, 10, 10, 1, 14, 1, 0, 8, 4, 7, 0, 11, 3, 1, 11, 12, 11, 8, 14, 15, 9, 3, 1, 14},
	{14, 11, 12, 12, 4, 6, 8, 14, 15, 1, 11, 2, 13, 3, 6, 2, 7, 1, 8, 1, 4, 9, 11, 15, 8, 1, 10, 13, 4, 13, 2, 7, 7, 10, 5, 2, 12, 12, 12, 3, 10, 8, 2, 11, 0, 3, 8, 9, 4, 2, 15, 7, 15, 6, 4, 6, 12, 7, 14, 9, 9, 8, 14, 12},
	{15, 4, 8, 12, 11, 11, 9, 5, 0, 0, 7, 6, 10, 5, 8, 2, 5, 6, 14, 11, 13, 0, 13, 15, 5, 4, 9, 15, 13, 12, 14, 15, 10, 2, 3, 6, 10, 14, 1, 8, 6, 7, 10, 1, 14, 9, 12, 13, 7, 2, 12, 10, 6, 11, 15, 1, 15, 11, 13, 0, 6, 13, 7, 15},
	{3, 3, 12, 5, 14, 9, 14, 14, 8, 0, 9, 1, 2, 2, 14, 11, 7, 1, 3, 1, 14, 15, 12, 8, 14, 2, 4, 13, 10, 5, 10, 8, 1, 7, 6, 5, 4, 2, 11, 5, 4, 13, 14, 6, 13, 15, 6, 6, 7, 12, 11, 5, 13, 10, 9, 13, 9, 14, 5, 6, 7, 14, 11, 7},
	{14, 12, 11, 5, 0, 5, 10, 5, 7, 1, 7, 11, 1, 0, 13, 6, 5, 14, 3, 0, 5, 14, 6, 7, 8, 5, 8, 6, 6, 3, 6, 1, 8, 3, 10, 7, 15, 6, 11, 6, 6, 7, 13, 2, 2, 0, 0, 11, 1, 15, 2, 14, 5, 1, 4, 8, 0, 1, 8, 0, 1, 1, 2, 2},
	{10, 13, 13, 3, 15, 14, 9, 12, 15, 15, 8, 5, 8, 10, 5, 9, 6, 6, 7, 15, 1, 0, 14, 9, 1, 11, 6, 11, 13, 4, 6, 14, 9, 12, 13, 8, 14, 6, 14, 2, 3, 15, 4, 4, 14, 4, 9, 12, 8, 0, 9, 11, 13, 10, 8, 14, 3, 5, 7, 11, 6, 7, 15, 2},
	{9, 9, 11, 6, 11, 0, 5, 4, 8, 10, 8, 11, 2, 12, 8, 7, 11, 13, 6, 1, 13, 13, 11, 4, 5, 7, 7, 9, 6, 4, 12, 0, 11, 8, 6, 12, 11, 4, 15, 11, 12, 8, 11, 11, 1, 3, 6, 14, 9, 6, 7, 5, 0, 10, 3, 15, 13, 7, 0, 1, 13, 15, 1, 14},
	{10, 6, 8, 7, 3, 6, 9, 15, 1, 3, 10, 14, 9, 0, 0, 10, 0, 15, 2, 0, 0, 0, 6, 0, 13, 9, 9, 1, 8, 6, 13, 2, 1, 9, 14, 9, 1, 4, 8, 4, 2, 0, 8, 5, 0, 11, 12, 15, 13, 1, 14, 14, 15, 7, 8, 4, 4, 12, 1, 12, 8, 3, 9, 5},
	{12, 11, 1, 4, 10, 14, 8, 12, 2, 4, 15, 2, 9, 7, 7, 11, 15, 12, 10, 11, 7, 4, 13, 0, 8, 6, 8, 8, 10, 5, 5, 13, 3, 7, 9, 13, 13, 14, 6, 8, 1, 5, 7, 12, 4, 4, 6, 9, 13, 1, 6, 1, 6, 14, 5, 8, 2, 10, 4, 10, 1, 9, 6, 15},
	{4, 13, 4, 9, 6, 11, 1, 8, 7, 11, 11, 1, 3, 10, 12, 11, 1, 10, 6, 10, 0, 7, 3, 0, 0, 6, 3, 9, 2, 1, 4, 8, 2, 10, 2, 15, 9, 15, 14, 14, 15, 14, 3, 2, 7, 6, 6, 10, 8, 8, 4, 11, 1, 13, 6, 0, 2, 10, 0, 11, 15, 14, 6, 9},
	{15, 0, 12, 13, 0, 9, 10, 4, 11, 5, 10, 0, 8, 7, 3, 2, 12, 6, 3, 8, 5, 15, 14, 2, 13, 13, 6, 11, 5, 6, 9, 10, 14, 5, 14, 4, 9, 7, 5, 11, 13, 2, 7, 1, 14, 9, 0, 7, 8, 12, 11, 15, 2, 1, 5, 11, 3, 7, 5, 1, 6, 3, 8, 6},
	{0, 3, 8, 1, 4, 6, 3, 1, 3, 8, 2, 0, 15, 15, 14, 15, 13, 10, 11, 9, 2, 11, 5, 12, 3, 3, 0, 1, 5, 3, 11, 6, 10, 11, 8, 5, 7, 15, 4, 12, 8, 8, 12, 12, 12, 1, 9, 4, 11, 6, 10, 11, 1, 12, 8, 12, 5, 6, 1, 14, 2, 10, 3, 0},
	{10, 13, 6, 9, 11, 1, 4, 10, 0, 13, 8, 7, 4, 12, 15, 5, 14, 12, 6, 9, 0, 0, 10, 5, 13, 10, 15, 3, 0, 8, 7, 0, 9, 8, 10, 6, 11, 8, 10, 13, 11, 7, 5, 5, 9, 13, 1, 15, 0, 5, 15, 5, 4, 7, 9, 9, 15, 8, 2, 6, 3, 8, 5, 8},
	{14, 0, 6, 2, 4, 12, 2, 13, 6, 10, 5, 2, 2, 1, 6, 11, 1, 6, 9, 13, 0, 13, 9, 3, 12, 4, 3, 8, 7, 0, 9, 12, 0, 1, 7, 10, 10, 7, 3, 9, 13, 5, 15, 4, 13, 0, 8, 5, 4, 14, 11, 3, 3, 13, 15, 9, 9, 12, 9, 5, 2, 0, 1, 14},
	{4, 14, 13, 0, 14, 15, 11, 10, 11, 1, 3, 3, 9, 1, 12, 8, 6, 5, 15, 11, 1, 7, 5, 3, 8, 13, 0, 13, 11, 5, 8, 1, 8, 6, 13, 4, 13, 7, 12, 6, 5, 5, 7, 0, 12, 1, 1, 8, 1, 6, 4, 2, 8, 8, 15, 11, 11, 11, 4, 4, 4, 7, 13, 12},
	{14, 15, 10, 0, 4, 3, 1, 9, 13, 7, 9, 9, 15, 5, 0, 3, 9, 6, 4, 7, 13, 11, 3, 2, 7, 1, 6, 8, 13, 7, 10, 4, 3, 9, 5, 9, 2, 6, 10, 7, 9, 13, 2, 14, 2, 14, 7, 2, 14, 2, 8, 8, 0, 9, 0, 9, 12, 6, 7, 7, 6, 8, 12, 13},
	{5, 15, 8, 12, 11, 3, 13, 4, 5, 14, 10, 4, 15, 15, 1, 10, 9, 14, 6, 6, 4, 12, 4, 9, 12, 2, 15, 13, 2, 5, 12, 2, 3, 2, 15, 11, 12, 2, 6, 2, 11, 6, 7, 9, 12, 10, 5, 1, 1, 5, 9, 6, 14, 11, 3, 11, 6, 10, 11, 11, 0, 12, 15, 1},
	{12, 6, 8, 10, 2, 5, 7, 9, 8, 14, 15, 15, 13, 10, 15, 3, 10, 10, 6, 10, 14, 10, 7, 5, 3, 7, 6, 12, 11, 12, 8, 9, 12, 9, 15, 15, 15, 7, 8, 3, 15, 14, 1, 12, 0, 0, 4, 0, 9, 10, 8, 7, 14, 10, 8, 14, 6, 2, 8, 1, 11, 10, 0, 1},
	{12, 1, 2, 12, 7, 10, 4, 11, 5, 14, 10, 2, 2, 9, 4, 13, 3, 14, 3, 15, 5, 0, 14, 7, 7, 15, 6, 5, 2, 8, 15, 9, 6, 6, 13, 10, 9, 8, 6, 3, 14, 7, 12, 9, 7, 8, 13, 12, 14, 13, 6, 0, 5, 1, 9, 12, 14, 0, 11, 11, 6, 3, 11, 7},
	{15, 4, 8, 12, 8, 11, 4, 15, 1, 6, 2, 13, 1, 7, 7, 12, 0, 8, 14, 14, 10, 14, 0, 12, 0, 3, 3, 11, 7, 4, 2, 13, 0, 0, 11, 2, 5, 8, 12, 11, 6, 5, 6, 0, 0, 4, 0, 0, 1, 9, 9, 11, 3, 2, 13, 4, 13, 9, 15, 4, 7, 8, 3, 2},
	{3, 13, 8, 8, 12, 10, 5, 4, 7, 13, 10, 13, 14, 3, 2, 12, 11, 0, 9, 5, 6, 4, 14, 4, 6, 9, 2, 5, 10, 3, 9, 10, 5, 0, 12, 5, 15, 5, 15, 15, 2, 12, 3, 11, 0, 15, 9, 14, 1, 5, 6, 6, 14, 5, 8, 0, 5, 9, 3, 7, 7, 12, 15, 1},
	{1, 11, 7, 4, 13, 3, 0, 8, 11, 9, 15, 1, 4, 12, 2, 12, 10, 4, 14, 3, 9, 14, 14, 2, 3, 11, 12, 4, 5, 10, 6, 15, 2, 13, 13, 9, 9, 1, 11, 12, 12, 14, 1, 5, 15, 1, 7, 14, 12, 10, 11, 13, 13, 5, 2, 4, 7, 7, 9, 4, 14, 15, 13, 10},
	{14, 15, 9, 14, 9, 5, 13, 2, 0, 0, 14, 8, 6, 2, 0, 7, 11, 10, 2, 13, 2, 14, 9, 6, 4, 11, 5, 14, 6, 1, 6, 14, 6, 3, 9, 5, 2, 9, 3, 11, 1, 14, 5, 4, 12, 5, 3, 5, 11, 3, 11, 6, 13, 7, 13, 7, 4, 9, 4, 13, 8, 3, 5, 11},
	{13, 12, 12, 13, 8, 2, 4, 2, 10, 6, 3, 5, 7, 7, 6, 13, 8, 6, 15, 4, 12, 7, 15, 4, 3, 9, 8, 15, 0, 3, 12, 1, 9, 8, 13, 10, 15, 4, 14, 1, 6, 15, 0, 4, 8, 9, 3, 1, 3, 15, 5, 5, 1, 11, 11, 10, 11, 10, 8, 8, 5, 4, 13, 0},
	{8, 4, 15, 9, 14, 9, 5, 8, 8, 10, 5, 15, 9, 8, 12, 5, 11, 10, 2, 12, 13, 1, 0, 2, 6, 13, 11, 9, 12, 0, 5, 0, 11, 5, 14, 12, 3, 4, 2, 10, 3, 12, 5, 15, 4, 8, 14, 1, 0, 13, 9, 5, 2, 4, 13, 8, 2, 5, 8, 9, 15, 3, 5, 5},
	{0, 3, 3, 4, 6, 5, 5, 1, 3, 2, 14, 5, 10, 7, 15, 11, 7, 13, 15, 4, 0, 12, 9, 15, 12, 0, 3, 1, 14, 1, 12, 9, 13, 8, 9, 15, 12, 3, 5, 11, 3, 11, 4, 1, 9, 4, 13, 7, 4, 10, 6, 14, 13, 0, 9, 11, 15, 15, 3, 3, 13, 15, 10, 15},
}

View File

@@ -12,75 +12,47 @@ import (
"math/big"
)
// State is an intermediate data structure with pre-computed values to speed up mining.
type State struct {
	// mat is the heavy-hash matrix generated from prePowHash (see NewState).
	mat matrix
	// Timestamp is the header timestamp (milliseconds) captured when the state was created.
	Timestamp int64
	// Nonce is the current nonce; advance it with IncrementNonce.
	Nonce uint64
	// Target is the difficulty target decoded from the header's Bits field.
	Target big.Int
	// prePowHash is the header hash computed with time and nonce zeroed out.
	prePowHash externalapi.DomainHash
}
// NewState creates a new state with pre-computed values to speed up mining
// It takes the target from the Bits field
func NewState(header externalapi.MutableBlockHeader) *State {
	target := difficulty.CompactToBig(header.Bits())
	// Zero out the time and nonce.
	timestamp, nonce := header.TimeInMilliseconds(), header.Nonce()
	header.SetTimeInMilliseconds(0)
	header.SetNonce(0)
	// Hash the header with time/nonce zeroed so the pre-PoW hash is
	// independent of the fields that change during mining, then restore
	// the original values on the (mutable) header.
	prePowHash := consensushashing.HeaderHash(header)
	header.SetTimeInMilliseconds(timestamp)
	header.SetNonce(nonce)

	return &State{
		Target:     *target,
		prePowHash: *prePowHash,
		// The heavy-hash matrix is derived deterministically from prePowHash,
		// so it is computed once here rather than per nonce attempt.
		mat:       *generateMatrix(prePowHash),
		Timestamp: timestamp,
		Nonce:     nonce,
	}
}
// CalculateProofOfWorkValue hashes the internal header and returns its big.Int value
func (state *State) CalculateProofOfWorkValue() *big.Int {
	// The PoW pre-image is serialized in this exact order:
	// PRE_POW_HASH || TIME || 32 zero byte padding || NONCE
	writer := hashes.NewPoWHashWriter()
	writer.InfallibleWrite(state.prePowHash.ByteSlice())
	err := serialization.WriteElement(writer, state.Timestamp)
	if err != nil {
		// WriteElement into a hash digest cannot fail, so any error is a programmer bug.
		panic(errors.Wrap(err, "this should never happen. Hash digest should never return an error"))
	}
	zeroes := [32]byte{}
	writer.InfallibleWrite(zeroes[:])
	err = serialization.WriteElement(writer, state.Nonce)
	if err != nil {
		panic(errors.Wrap(err, "this should never happen. Hash digest should never return an error"))
	}
	powHash := writer.Finalize()
	// Run the pre-computed heavy-hash matrix over the PoW hash and interpret
	// the result as a big integer for comparison against the target.
	heavyHash := state.mat.HeavyHash(powHash)
	return toBig(heavyHash)
}
// IncrementNonce increments the nonce in State by 1.
func (state *State) IncrementNonce() {
	state.Nonce++
}
// CheckProofOfWork checks if the block has a valid PoW according to the provided target
// CheckProofOfWorkWithTarget checks if the block has a valid PoW according to the provided target
// it does not check if the difficulty itself is valid or less than the maximum for the appropriate network
func (state *State) CheckProofOfWork() bool {
func CheckProofOfWorkWithTarget(header externalapi.MutableBlockHeader, target *big.Int) bool {
// The block pow must be less than the claimed target
powNum := state.CalculateProofOfWorkValue()
powNum := CalculateProofOfWorkValue(header)
// The block hash must be less or equal than the claimed target.
return powNum.Cmp(&state.Target) <= 0
return powNum.Cmp(target) <= 0
}
// CheckProofOfWorkByBits checks if the block has a valid PoW according to its Bits field
// it does not check if the difficulty itself is valid or less than the maximum for the appropriate network
func CheckProofOfWorkByBits(header externalapi.MutableBlockHeader) bool {
return NewState(header).CheckProofOfWork()
return CheckProofOfWorkWithTarget(header, difficulty.CompactToBig(header.Bits()))
}
// CalculateProofOfWorkValue hashes the given header and returns its big.Int value
func CalculateProofOfWorkValue(header externalapi.MutableBlockHeader) *big.Int {
	// Temporarily zero out the time and nonce so the pre-PoW hash does not
	// depend on them, then restore the original values afterwards.
	timestamp := header.TimeInMilliseconds()
	nonce := header.Nonce()
	header.SetTimeInMilliseconds(0)
	header.SetNonce(0)
	prePowHash := consensushashing.HeaderHash(header)
	header.SetTimeInMilliseconds(timestamp)
	header.SetNonce(nonce)

	// The PoW hash preimage is: PRE_POW_HASH || TIME || 32 zero byte padding || NONCE
	powWriter := hashes.NewPoWHashWriter()
	powWriter.InfallibleWrite(prePowHash.ByteSlice())
	if err := serialization.WriteElement(powWriter, timestamp); err != nil {
		panic(errors.Wrap(err, "this should never happen. Hash digest should never return an error"))
	}
	var padding [32]byte
	powWriter.InfallibleWrite(padding[:])
	if err := serialization.WriteElement(powWriter, nonce); err != nil {
		panic(errors.Wrap(err, "this should never happen. Hash digest should never return an error"))
	}
	return toBig(powWriter.Finalize())
}
// ToBig converts a externalapi.DomainHash into a big.Int treated as a little endian string.
@@ -97,17 +69,10 @@ func toBig(hash *externalapi.DomainHash) *big.Int {
// BlockLevel returns the block level of the given header.
func BlockLevel(header externalapi.BlockHeader) int {
// Genesis is defined to be the root of all blocks at all levels, so we define it to be the maximal
// block level.
if len(header.DirectParents()) == 0 {
return constants.MaxBlockLevel
proofOfWorkValue := CalculateProofOfWorkValue(header.ToMutable())
for blockLevel := 0; ; blockLevel++ {
if blockLevel == constants.MaxBlockLevel || proofOfWorkValue.Bit(blockLevel+1) != 0 {
return blockLevel
}
}
proofOfWorkValue := NewState(header.ToMutable()).CalculateProofOfWorkValue()
level := constants.MaxBlockLevel - proofOfWorkValue.BitLen()
// If the block has a level lower than genesis make it zero.
if level < 0 {
level = 0
}
return level
}

View File

@@ -1,37 +0,0 @@
package pow
import (
"encoding/binary"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"math/bits"
)
// xoShiRo256PlusPlus holds the 256-bit internal state of a xoshiro256++
// pseudorandom number generator, split into four 64-bit words. The state is
// seeded from a 32-byte hash (see newxoShiRo256PlusPlus) and advanced by
// Uint64.
type xoShiRo256PlusPlus struct {
	s0 uint64
	s1 uint64
	s2 uint64
	s3 uint64
}
// newxoShiRo256PlusPlus builds a xoshiro256++ generator whose four 64-bit
// state words are read little-endian from the 32 bytes of the given hash.
func newxoShiRo256PlusPlus(hash *externalapi.DomainHash) *xoShiRo256PlusPlus {
	seedBytes := hash.ByteArray()
	generator := &xoShiRo256PlusPlus{}
	generator.s0 = binary.LittleEndian.Uint64(seedBytes[0:8])
	generator.s1 = binary.LittleEndian.Uint64(seedBytes[8:16])
	generator.s2 = binary.LittleEndian.Uint64(seedBytes[16:24])
	generator.s3 = binary.LittleEndian.Uint64(seedBytes[24:32])
	return generator
}
// Uint64 returns the next pseudorandom number in the xoshiro256++ sequence
// and advances the generator's state.
func (x *xoShiRo256PlusPlus) Uint64() uint64 {
	// xoshiro256++ output function: rotl(s0 + s3, 23) + s0.
	result := bits.RotateLeft64(x.s0+x.s3, 23) + x.s0

	// State transition — the order of these updates matters.
	shifted := x.s1 << 17
	x.s2 ^= x.s0
	x.s3 ^= x.s1
	x.s1 ^= x.s2
	x.s0 ^= x.s3
	x.s2 ^= shifted
	x.s3 = bits.RotateLeft64(x.s3, 45)
	return result
}

View File

@@ -1,17 +0,0 @@
package pow
import "testing"
// Test vectors are from here: https://github.com/rust-random/rngs/blob/17aa826cc38d3e8408c9489ac859fa9397acd479/rand_xoshiro/src/xoshiro256plusplus.rs#L121
// TestXoShiRo256PlusPlus_Uint64 checks the generator's first ten outputs
// against the reference implementation's expected sequence.
func TestXoShiRo256PlusPlus_Uint64(t *testing.T) {
	state := xoShiRo256PlusPlus{1, 2, 3, 4}
	expected := []uint64{41943041, 58720359, 3588806011781223, 3591011842654386,
		9228616714210784205, 9973669472204895162, 14011001112246962877,
		12406186145184390807, 15849039046786891736, 10450023813501588000}

	for i := 0; i < len(expected); i++ {
		got := state.Uint64()
		if got != expected[i] {
			t.Errorf("expected: %d, found: %d", expected[i], got)
		}
	}
}

View File

@@ -22,7 +22,7 @@ import (
//
const (
defaultMaxCoinbasePayloadLength = 204
defaultMaxCoinbasePayloadLength = 150
// defaultMaxBlockMass is a bound on the mass of a block, larger values increase the bound d
// on the round trip time of a block, which affects the other parameters as described below
defaultMaxBlockMass = 500_000
@@ -49,7 +49,7 @@ const (
defaultMergeSetSizeLimit = defaultGHOSTDAGK * 10
defaultSubsidyGenesisReward = 1 * constants.SompiPerKaspa
defaultMinSubsidy = 1 * constants.SompiPerKaspa
defaultMaxSubsidy = 500 * constants.SompiPerKaspa
defaultMaxSubsidy = 1000 * constants.SompiPerKaspa
defaultBaseSubsidy = 50 * constants.SompiPerKaspa
defaultFixedSubsidySwitchPruningPointInterval uint64 = 7
defaultCoinbasePayloadScriptPublicKeyMaxLength = 150

View File

@@ -19,31 +19,8 @@ var genesisTxPayload = []byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Blue score
0x00, 0xE1, 0xF5, 0x05, 0x00, 0x00, 0x00, 0x00, // Subsidy
0x00, 0x00, //script version
0x01, // Varint
0x00, // OP-FALSE
0xd7, 0x95, 0xd7, 0x9e, 0xd7, 0x94, 0x20, 0xd7, // ומה די עליך ועל אחיך ייטב בשאר כספא ודהבה למעבד כרעות אלהכם תעבדון
0x93, 0xd7, 0x99, 0x20, 0xd7, 0xa2, 0xd7, 0x9c,
0xd7, 0x99, 0xd7, 0x9a, 0x20, 0xd7, 0x95, 0xd7,
0xa2, 0xd7, 0x9c, 0x20, 0xd7, 0x90, 0xd7, 0x97,
0xd7, 0x99, 0xd7, 0x9a, 0x20, 0xd7, 0x99, 0xd7,
0x99, 0xd7, 0x98, 0xd7, 0x91, 0x20, 0xd7, 0x91,
0xd7, 0xa9, 0xd7, 0x90, 0xd7, 0xa8, 0x20, 0xd7,
0x9b, 0xd7, 0xa1, 0xd7, 0xa4, 0xd7, 0x90, 0x20,
0xd7, 0x95, 0xd7, 0x93, 0xd7, 0x94, 0xd7, 0x91,
0xd7, 0x94, 0x20, 0xd7, 0x9c, 0xd7, 0x9e, 0xd7,
0xa2, 0xd7, 0x91, 0xd7, 0x93, 0x20, 0xd7, 0x9b,
0xd7, 0xa8, 0xd7, 0xa2, 0xd7, 0x95, 0xd7, 0xaa,
0x20, 0xd7, 0x90, 0xd7, 0x9c, 0xd7, 0x94, 0xd7,
0x9b, 0xd7, 0x9d, 0x20, 0xd7, 0xaa, 0xd7, 0xa2,
0xd7, 0x91, 0xd7, 0x93, 0xd7, 0x95, 0xd7, 0x9f,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Bitcoin block hash 0000000000000000000b1f8e1c17b0133d439174e52efbb0c41c3583a8aa66b0
0x00, 0x0b, 0x1f, 0x8e, 0x1c, 0x17, 0xb0, 0x13,
0x3d, 0x43, 0x91, 0x74, 0xe5, 0x2e, 0xfb, 0xb0,
0xc4, 0x1c, 0x35, 0x83, 0xa8, 0xaa, 0x66, 0xb0,
0x0f, 0xca, 0x37, 0xca, 0x66, 0x7c, 0x2d, 0x55, // Checkpoint block hash 0fca37ca667c2d550a6c4416dad9717e50927128c424fa4edbebc436ab13aeef
0x0a, 0x6c, 0x44, 0x16, 0xda, 0xd9, 0x71, 0x7e,
0x50, 0x92, 0x71, 0x28, 0xc4, 0x24, 0xfa, 0x4e,
0xdb, 0xeb, 0xc4, 0x36, 0xab, 0x13, 0xae, 0xef,
0x01, // Varint
0x00, // OP-FALSE
}
// genesisCoinbaseTx is the coinbase transaction for the genesis blocks for
@@ -54,13 +31,19 @@ var genesisCoinbaseTx = transactionhelper.NewSubnetworkTransaction(0, []*externa
// genesisHash is the hash of the first block in the block DAG for the main
// network (genesis block).
var genesisHash = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
0x58, 0xc2, 0xd4, 0x19, 0x9e, 0x21, 0xf9, 0x10, 0xd1, 0x57, 0x1d, 0x11, 0x49, 0x69, 0xce, 0xce, 0xf4, 0x8f, 0x9, 0xf9, 0x34, 0xd4, 0x2c, 0xcb, 0x6a, 0x28, 0x1a, 0x15, 0x86, 0x8f, 0x29, 0x99,
0x75, 0xd8, 0x51, 0xcc, 0x91, 0xba, 0x55, 0x32,
0xfd, 0xf9, 0x3e, 0xf2, 0xa5, 0x28, 0x92, 0x83,
0x35, 0xe0, 0x61, 0xa2, 0xe4, 0x77, 0x76, 0x5e,
0xd7, 0x10, 0x5d, 0x8e, 0x78, 0xc7, 0xb8, 0x0d,
})
// genesisMerkleRoot is the hash of the first transaction in the genesis block
// for the main network.
var genesisMerkleRoot = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
0x8e, 0xc8, 0x98, 0x56, 0x8c, 0x68, 0x1, 0xd1, 0x3d, 0xf4, 0xee, 0x6e, 0x2a, 0x1b, 0x54, 0xb7, 0xe6, 0x23, 0x6f, 0x67, 0x1f, 0x20, 0x95, 0x4f, 0x5, 0x30, 0x64, 0x10, 0x51, 0x8e, 0xeb, 0x32,
0xea, 0xb2, 0xe3, 0xfa, 0xd6, 0x72, 0x95, 0x38,
0x04, 0xe0, 0x43, 0x1a, 0xf2, 0x2d, 0xc5, 0xeb,
0x27, 0xd8, 0x0a, 0xdd, 0x4c, 0xc5, 0xd9, 0x80,
0x90, 0xc7, 0x6c, 0x43, 0xce, 0x81, 0x23, 0x68,
})
// genesisBlock defines the genesis block of the block DAG which serves as the
@@ -71,13 +54,11 @@ var genesisBlock = externalapi.DomainBlock{
[]externalapi.BlockLevelParents{},
genesisMerkleRoot,
&externalapi.DomainHash{},
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
0x71, 0x0f, 0x27, 0xdf, 0x42, 0x3e, 0x63, 0xaa, 0x6c, 0xdb, 0x72, 0xb8, 0x9e, 0xa5, 0xa0, 0x6c, 0xff, 0xa3, 0x99, 0xd6, 0x6f, 0x16, 0x77, 0x04, 0x45, 0x5b, 0x5a, 0xf5, 0x9d, 0xef, 0x8e, 0x20,
}),
1637609671037,
486722099,
0x3392c,
1312860, // Checkpoint DAA score
externalapi.NewDomainHashFromByteArray(muhash.EmptyMuHashHash.AsArray()),
0x17c2df949fe,
0x207fffff,
0x0,
0,
0,
big.NewInt(0),
&externalapi.DomainHash{},
@@ -105,10 +86,10 @@ var devnetGenesisCoinbaseTx = transactionhelper.NewSubnetworkTransaction(0,
// devGenesisHash is the hash of the first block in the block DAG for the development
// network (genesis block).
var devnetGenesisHash = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
0x34, 0x3b, 0x53, 0xb7, 0xad, 0x82, 0x8a, 0x33,
0x4b, 0x03, 0xb1, 0xbb, 0x13, 0x15, 0xb0, 0x75,
0xf3, 0xf0, 0x40, 0xfb, 0x64, 0x0c, 0xca, 0x19,
0xc9, 0x61, 0xe7, 0x69, 0xdb, 0x98, 0x98, 0x3e,
0xf1, 0xa4, 0x65, 0xfe, 0xd2, 0x08, 0x54, 0xc1,
0x72, 0x60, 0xdc, 0x8b, 0x29, 0x67, 0x90, 0x1f,
0xa9, 0x48, 0x47, 0x4e, 0x63, 0xa1, 0xfc, 0x09,
0x6d, 0x10, 0x7b, 0x08, 0x23, 0x37, 0xfd, 0x59,
})
// devnetGenesisMerkleRoot is the hash of the first transaction in the genesis block
@@ -131,7 +112,7 @@ var devnetGenesisBlock = externalapi.DomainBlock{
externalapi.NewDomainHashFromByteArray(muhash.EmptyMuHashHash.AsArray()),
0x11e9db49828,
0x1e7fffff,
0x48e5e,
0x268db,
0,
0,
big.NewInt(0),
@@ -159,10 +140,10 @@ var simnetGenesisCoinbaseTx = transactionhelper.NewSubnetworkTransaction(0,
// simnetGenesisHash is the hash of the first block in the block DAG for
// the simnet (genesis block).
var simnetGenesisHash = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
0x41, 0x1f, 0x8c, 0xd2, 0x6f, 0x3d, 0x41, 0xae,
0xa3, 0x9e, 0x78, 0x57, 0x39, 0x27, 0xda, 0x24,
0xd2, 0x39, 0x95, 0x70, 0x5b, 0x57, 0x9f, 0x30,
0x95, 0x9b, 0x91, 0x27, 0xe9, 0x6b, 0x79, 0xe3,
0x9b, 0x66, 0xc8, 0x1a, 0xd5, 0xb8, 0xc2, 0xf7,
0x45, 0x45, 0x47, 0xd6, 0x83, 0x83, 0xf8, 0x27,
0x45, 0x46, 0xcf, 0x98, 0x1c, 0x74, 0x46, 0xcf,
0x25, 0x5e, 0xd3, 0xe7, 0xe4, 0x68, 0x47, 0x6f,
})
// simnetGenesisMerkleRoot is the hash of the first transaction in the genesis block
@@ -183,9 +164,9 @@ var simnetGenesisBlock = externalapi.DomainBlock{
simnetGenesisMerkleRoot,
&externalapi.DomainHash{},
externalapi.NewDomainHashFromByteArray(muhash.EmptyMuHashHash.AsArray()),
0x17c5f62fbb6,
0x17c2df94abb,
0x207fffff,
0x2,
0x0,
0,
0,
big.NewInt(0),
@@ -213,10 +194,10 @@ var testnetGenesisCoinbaseTx = transactionhelper.NewSubnetworkTransaction(0,
// testnetGenesisHash is the hash of the first block in the block DAG for the test
// network (genesis block).
var testnetGenesisHash = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
0xf8, 0x96, 0xa3, 0x03, 0x48, 0x73, 0xbe, 0x17,
0x39, 0xfc, 0x43, 0x59, 0x23, 0x68, 0x99, 0xfd,
0x3d, 0x65, 0xd2, 0xbc, 0x94, 0xf9, 0x78, 0x0d,
0xf0, 0xd0, 0xda, 0x3e, 0xb1, 0xcc, 0x43, 0x70,
0x47, 0x4f, 0xfd, 0xd1, 0xf8, 0x3d, 0x1d, 0x00,
0xcb, 0x07, 0x17, 0x91, 0x56, 0x60, 0xd4, 0xa7,
0x27, 0x5e, 0x43, 0xc2, 0x74, 0xe6, 0x76, 0x6e,
0x34, 0xe4, 0x13, 0xbb, 0x24, 0x02, 0x30, 0x66,
})
// testnetGenesisMerkleRoot is the hash of the first transaction in the genesis block
@@ -237,9 +218,9 @@ var testnetGenesisBlock = externalapi.DomainBlock{
testnetGenesisMerkleRoot,
&externalapi.DomainHash{},
externalapi.NewDomainHashFromByteArray(muhash.EmptyMuHashHash.AsArray()),
0x17c5f62fbb6,
0x17c2df94abb,
0x1e7fffff,
0x14582,
0x35f16,
0,
0,
big.NewInt(0),

View File

@@ -211,15 +211,7 @@ var MainnetParams = Params{
Net: appmessage.Mainnet,
RPCPort: "16110",
DefaultPort: "16111",
DNSSeeds: []string{
"mainnet-dnsseed.daglabs-dev.com",
// This DNS seeder is run by Denis Mashkevich
"mainnet-dnsseed-1.kaspanet.org",
// This DNS seeder is run by Denis Mashkevich
"mainnet-dnsseed-2.kaspanet.org",
// This DNS seeder is run by Elichai Turkel
"kaspa.turkel.in",
},
DNSSeeds: []string{"mainnet-dnsseed.daglabs-dev.com"},
// DAG parameters
GenesisBlock: &genesisBlock,

View File

@@ -40,7 +40,7 @@ func TestValidateAndInsertTransaction(t *testing.T) {
miningManager := miningFactory.NewMiningManager(consensusReference, &consensusConfig.Params, mempool.DefaultConfig(&consensusConfig.Params))
transactionsToInsert := make([]*externalapi.DomainTransaction, 10)
for i := range transactionsToInsert {
transactionsToInsert[i] = createTransactionWithUTXOEntry(t, i, 0)
transactionsToInsert[i] = createTransactionWithUTXOEntry(t, i)
_, err = miningManager.ValidateAndInsertTransaction(transactionsToInsert[i], false, true)
if err != nil {
t.Fatalf("ValidateAndInsertTransaction: %v", err)
@@ -89,7 +89,7 @@ func TestImmatureSpend(t *testing.T) {
tcAsConsensusPointer := &tcAsConsensus
consensusReference := consensusreference.NewConsensusReference(&tcAsConsensusPointer)
miningManager := miningFactory.NewMiningManager(consensusReference, &consensusConfig.Params, mempool.DefaultConfig(&consensusConfig.Params))
tx := createTransactionWithUTXOEntry(t, 0, consensusConfig.GenesisBlock.Header.DAAScore())
tx := createTransactionWithUTXOEntry(t, 0)
_, err = miningManager.ValidateAndInsertTransaction(tx, false, false)
txRuleError := &mempool.TxRuleError{}
if !errors.As(err, txRuleError) || txRuleError.RejectCode != mempool.RejectImmatureSpend {
@@ -119,7 +119,7 @@ func TestInsertDoubleTransactionsToMempool(t *testing.T) {
tcAsConsensusPointer := &tcAsConsensus
consensusReference := consensusreference.NewConsensusReference(&tcAsConsensusPointer)
miningManager := miningFactory.NewMiningManager(consensusReference, &consensusConfig.Params, mempool.DefaultConfig(&consensusConfig.Params))
transaction := createTransactionWithUTXOEntry(t, 0, 0)
transaction := createTransactionWithUTXOEntry(t, 0)
_, err = miningManager.ValidateAndInsertTransaction(transaction, false, true)
if err != nil {
t.Fatalf("ValidateAndInsertTransaction: %v", err)
@@ -186,7 +186,7 @@ func TestHandleNewBlockTransactions(t *testing.T) {
miningManager := miningFactory.NewMiningManager(consensusReference, &consensusConfig.Params, mempool.DefaultConfig(&consensusConfig.Params))
transactionsToInsert := make([]*externalapi.DomainTransaction, 10)
for i := range transactionsToInsert {
transaction := createTransactionWithUTXOEntry(t, i, 0)
transaction := createTransactionWithUTXOEntry(t, i)
transactionsToInsert[i] = transaction
_, err = miningManager.ValidateAndInsertTransaction(transaction, false, true)
if err != nil {
@@ -253,12 +253,12 @@ func TestDoubleSpendWithBlock(t *testing.T) {
tcAsConsensusPointer := &tcAsConsensus
consensusReference := consensusreference.NewConsensusReference(&tcAsConsensusPointer)
miningManager := miningFactory.NewMiningManager(consensusReference, &consensusConfig.Params, mempool.DefaultConfig(&consensusConfig.Params))
transactionInTheMempool := createTransactionWithUTXOEntry(t, 0, 0)
transactionInTheMempool := createTransactionWithUTXOEntry(t, 0)
_, err = miningManager.ValidateAndInsertTransaction(transactionInTheMempool, false, true)
if err != nil {
t.Fatalf("ValidateAndInsertTransaction: %v", err)
}
doubleSpendTransactionInTheBlock := createTransactionWithUTXOEntry(t, 0, 0)
doubleSpendTransactionInTheBlock := createTransactionWithUTXOEntry(t, 0)
doubleSpendTransactionInTheBlock.Inputs[0].PreviousOutpoint = transactionInTheMempool.Inputs[0].PreviousOutpoint
blockTransactions := []*externalapi.DomainTransaction{nil, doubleSpendTransactionInTheBlock}
_, err = miningManager.HandleNewBlockTransactions(blockTransactions)
@@ -571,7 +571,7 @@ func TestRevalidateHighPriorityTransactions(t *testing.T) {
})
}
func createTransactionWithUTXOEntry(t *testing.T, i int, daaScore uint64) *externalapi.DomainTransaction {
func createTransactionWithUTXOEntry(t *testing.T, i int) *externalapi.DomainTransaction {
prevOutTxID := externalapi.DomainTransactionID{}
prevOutPoint := externalapi.DomainOutpoint{TransactionID: prevOutTxID, Index: uint32(i)}
scriptPublicKey, redeemScript := testutils.OpTrueScript()
@@ -587,7 +587,7 @@ func createTransactionWithUTXOEntry(t *testing.T, i int, daaScore uint64) *exter
100000000, // 1 KAS
scriptPublicKey,
true,
daaScore),
uint64(0)),
}
txOut := externalapi.DomainTransactionOutput{
Value: 10000,

View File

@@ -29,6 +29,7 @@ import (
const (
defaultConfigFilename = "kaspad.conf"
defaultDataDirname = "data"
defaultLogLevel = "info"
defaultLogDirname = "logs"
defaultLogFilename = "kaspad.log"
@@ -85,7 +86,7 @@ type Flags struct {
Listeners []string `long:"listen" description:"Add an interface/port to listen for connections (default all interfaces port: 16111, testnet: 16211)"`
TargetOutboundPeers int `long:"outpeers" description:"Target number of outbound peers"`
MaxInboundPeers int `long:"maxinpeers" description:"Max number of inbound peers"`
EnableBanning bool `long:"enablebanning" description:"Enable banning of misbehaving peers"`
DisableBanning bool `long:"nobanning" description:"Disable banning of misbehaving peers"`
BanDuration time.Duration `long:"banduration" description:"How long to ban misbehaving peers. Valid time units are {s, m, h}. Minimum 1 second"`
BanThreshold uint32 `long:"banthreshold" description:"Maximum allowed ban score before disconnecting and banning misbehaving peers."`
Whitelists []string `long:"whitelist" description:"Add an IP network or IP that will not be banned. (eg. 192.168.1.0/24 or ::1)"`
@@ -120,7 +121,6 @@ type Flags struct {
MaxUTXOCacheSize uint64 `long:"maxutxocachesize" description:"Max size of loaded UTXO into ram from the disk in bytes"`
UTXOIndex bool `long:"utxoindex" description:"Enable the UTXO index"`
IsArchivalNode bool `long:"archival" description:"Run as an archival node: don't delete old block data when moving the pruning point (Warning: heavy disk usage)'"`
AllowSubmitBlockWhenNotSynced bool `long:"allow-submit-block-when-not-synced" hidden:"true" description:"Allow the node to accept blocks from RPC while not synced (this flag is mainly used for testing)"`
EnableSanityCheckPruningUTXOSet bool `long:"enable-sanity-check-pruning-utxo" hidden:"true" description:"When moving the pruning point - check that the utxo set matches the utxo commitment"`
NetworkFlags
ServiceOptions *ServiceOptions

View File

@@ -48,7 +48,6 @@ type overrideDAGParamsConfig struct {
EnableNonNativeSubnetworks *bool `json:"enableNonNativeSubnetworks"`
DisableDifficultyAdjustment *bool `json:"disableDifficultyAdjustment"`
SkipProofOfWork *bool `json:"skipProofOfWork"`
HardForkOmitGenesisFromParentsDAAScore *uint64 `json:"hardForkOmitGenesisFromParentsDaaScore"`
}
// ResolveNetwork parses the network command line argument and sets NetParams accordingly.

View File

@@ -66,7 +66,7 @@ func (c *ConnectionManager) checkRequestedConnections(connSet connectionSet) {
log.Debugf("Connecting to connection request %s", connReq.address)
err := c.initiateConnection(connReq.address)
if err != nil {
log.Infof("Couldn't connect to requested connection %s: %s", address, err)
log.Infof("Couldn't connect to %s: %s", address, err)
// if connection request is one try - remove from pending and ignore failure
if !connReq.isPermanent {
delete(c.pendingRequested, address)

View File

@@ -41,7 +41,7 @@ func (c *ConnectionManager) checkOutgoingConnections(connSet connectionSet) {
err := c.initiateConnection(addressString)
if err != nil {
log.Debugf("Couldn't connect to %s: %s", addressString, err)
log.Infof("Couldn't connect to %s: %s", addressString, err)
c.addressManager.MarkConnectionFailure(netAddress)
continue
}

View File

@@ -18,7 +18,7 @@ type p2pServer struct {
gRPCServer
}
const p2pMaxMessageSize = 1024 * 1024 * 1024 // 1GB
const p2pMaxMessageSize = 100 * 1024 * 1024 // 100MB
// p2pMaxInboundConnections is the max amount of inbound connections for the P2P server.
// Note that inbound connections are not limited by the gRPC server. (A value of 0 means
@@ -45,7 +45,7 @@ func (p *p2pServer) MessageStream(stream protowire.P2P_MessageStreamServer) erro
func (p *p2pServer) Connect(address string) (server.Connection, error) {
log.Debugf("%s Dialing to %s", p.name, address)
const dialTimeout = 1 * time.Second
const dialTimeout = 30 * time.Second
ctx, cancel := context.WithTimeout(context.Background(), dialTimeout)
defer cancel()

View File

@@ -2,7 +2,6 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
//go:build !windows && !plan9
// +build !windows,!plan9
package limits

View File

@@ -2,7 +2,6 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package signal

View File

@@ -7,7 +7,9 @@ import (
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/infrastructure/network/rpcclient"
"github.com/kaspanet/kaspad/stability-tests/common"
"github.com/kaspanet/kaspad/util/difficulty"
"math"
"math/big"
"math/rand"
"os"
"testing"
@@ -160,15 +162,17 @@ func measureMachineHashNanoseconds(t *testing.T) int64 {
defer t.Logf("Finished measuring machine hash rate")
genesisBlock := dagconfig.DevnetParams.GenesisBlock
state := pow.NewState(genesisBlock.Header.ToMutable())
targetDifficulty := difficulty.CompactToBig(genesisBlock.Header.Bits())
headerForMining := genesisBlock.Header.ToMutable()
machineHashesPerSecondMeasurementDuration := 10 * time.Second
hashes := int64(0)
state.Nonce = rand.Uint64()
nonce := rand.Uint64()
loopForDuration(machineHashesPerSecondMeasurementDuration, func(isFinished *bool) {
state.CheckProofOfWork()
headerForMining.SetNonce(nonce)
pow.CheckProofOfWorkWithTarget(headerForMining, targetDifficulty)
hashes++
state.IncrementNonce()
nonce++
})
return machineHashesPerSecondMeasurementDuration.Nanoseconds() / hashes
@@ -198,18 +202,17 @@ func runDAATest(t *testing.T, testName string, runDuration time.Duration,
startTime := time.Now()
loopForDuration(runDuration, func(isFinished *bool) {
templateBlock := fetchBlockForMining(t, rpcClient)
targetDifficulty := difficulty.CompactToBig(templateBlock.Header.Bits())
headerForMining := templateBlock.Header.ToMutable()
minerState := pow.NewState(headerForMining)
// Try hashes until we find a valid block
miningStartTime := time.Now()
minerState.Nonce = rand.Uint64()
nonce := rand.Uint64()
for {
hashStartTime := time.Now()
if minerState.CheckProofOfWork() {
headerForMining.SetNonce(minerState.Nonce)
templateBlock.Header = headerForMining.ToImmutable()
blockFound := tryNonceForMiningAndIncrementNonce(headerForMining, &nonce, targetDifficulty, templateBlock)
if blockFound {
break
}
@@ -224,7 +227,6 @@ func runDAATest(t *testing.T, testName string, runDuration time.Duration,
if *isFinished {
return
}
minerState.IncrementNonce()
}
// Collect stats about block rate
@@ -263,6 +265,19 @@ func fetchBlockForMining(t *testing.T, rpcClient *rpcclient.RPCClient) *external
return templateBlock
}
// tryNonceForMiningAndIncrementNonce stamps *nonce into the header and tests
// it against targetDifficulty. On success the solved header is written back
// into templateBlock and true is returned; otherwise *nonce is advanced by
// one and false is returned.
func tryNonceForMiningAndIncrementNonce(headerForMining externalapi.MutableBlockHeader, nonce *uint64,
	targetDifficulty *big.Int, templateBlock *externalapi.DomainBlock) bool {

	headerForMining.SetNonce(*nonce)
	if !pow.CheckProofOfWorkWithTarget(headerForMining, targetDifficulty) {
		*nonce++
		return false
	}
	templateBlock.Header = headerForMining.ToImmutable()
	return true
}
func waitUntilTargetHashDurationHadElapsed(startTime time.Time, hashStartTime time.Time,
targetHashNanosecondsFunction func(totalElapsedDuration time.Duration) int64) {

View File

@@ -4,9 +4,11 @@ import (
"fmt"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain/consensus/utils/mining"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
"github.com/kaspanet/kaspad/util"
"math/rand"
"github.com/kaspanet/kaspad/util/difficulty"
"math"
"os"
"os/exec"
"strings"
@@ -76,8 +78,8 @@ func realMain() error {
genesisTimestamp := activeConfig().NetParams().GenesisBlock.Header.TimeInMilliseconds()
mutableHeader.SetTimeInMilliseconds(genesisTimestamp + 1000)
block.Header = mutableHeader.ToImmutable()
mining.SolveBlock(block, rand.New(rand.NewSource(time.Now().UnixNano())))
_, err = rpcClient.SubmitBlock(block)
solvedBlock := solveBlock(block)
_, err = rpcClient.SubmitBlock(solvedBlock)
if err != nil {
return err
}
@@ -115,7 +117,6 @@ func startNode() (teardown func(), err error) {
"--logdir", dataDir,
"--rpclisten", rpcAddress,
"--loglevel", "debug",
"--allow-submit-block-when-not-synced",
)
if err != nil {
return nil, err
@@ -182,8 +183,8 @@ func mineBlock(rpcClient *rpc.Client, miningAddress util.Address) error {
if err != nil {
return err
}
mining.SolveBlock(block, rand.New(rand.NewSource(time.Now().UnixNano())))
_, err = rpcClient.SubmitBlock(block)
solvedBlock := solveBlock(block)
_, err = rpcClient.SubmitBlock(solvedBlock)
if err != nil {
return err
}
@@ -199,10 +200,9 @@ func mineTips(numOfTips int, miningAddress util.Address, rpcClient *rpc.Client)
if err != nil {
return err
}
rd := rand.New(rand.NewSource(time.Now().UnixNano()))
for i := 0; i < numOfTips; i++ {
mining.SolveBlock(block, rd)
_, err = rpcClient.SubmitBlock(block)
solvedBlock := solveBlock(block)
_, err = rpcClient.SubmitBlock(solvedBlock)
if err != nil {
return err
}
@@ -296,6 +296,23 @@ func getCurrentTipsLength(rpcClient *rpc.Client) (int, error) {
return len(dagInfo.TipHashes), nil
}
// latestNonce makes the nonces handed out by solveBlock unique across calls.
var latestNonce uint64 = 0

// solveBlock grinds nonces (starting from latestNonce) until the block's
// proof-of-work meets the target encoded in its Bits field, then returns the
// block with the solved header spliced in. Panics if the nonce space is
// exhausted without finding a solution.
func solveBlock(block *externalapi.DomainBlock) *externalapi.DomainBlock {
	targetDifficulty := difficulty.CompactToBig(block.Header.Bits())
	headerForMining := block.Header.ToMutable()
	for nonce := latestNonce; nonce < uint64(math.MaxUint64); nonce++ {
		headerForMining.SetNonce(nonce)
		if !pow.CheckProofOfWorkWithTarget(headerForMining, targetDifficulty) {
			continue
		}
		block.Header = headerForMining.ToImmutable()
		latestNonce = nonce + 1
		return block
	}
	panic("Failed to solve block!")
}
func killWithSigterm(cmd *exec.Cmd, commandName string) {
err := cmd.Process.Signal(syscall.SIGTERM)
if err != nil {

View File

@@ -1 +1 @@
{"skipProofOfWork":true, "mergeSetSizeLimit": 30, "finalityDuration": 30000, "hardForkOmitGenesisFromParentsDaaScore": 2505}
{"skipProofOfWork":true, "mergeSetSizeLimit": 30, "finalityDuration": 30000}

View File

@@ -38,7 +38,6 @@ func startNode(name string, rpcAddress, listen, connect, profilePort, dataDir st
"--listen", listen,
"--profile", profilePort,
"--loglevel", "debug",
"--allow-submit-block-when-not-synced",
}
if connect != "" {
args = append(args, "--connect", connect)

View File

@@ -44,7 +44,6 @@ func startNodes() (teardown func(), err error) {
"--rpclisten", syncerRPCAddress,
"--listen", syncerListen,
"--loglevel", "debug",
"--allow-submit-block-when-not-synced",
)
if err != nil {
return nil, err

View File

@@ -36,7 +36,6 @@ func setConfig(t *testing.T, harness *appHarness) {
harness.config.Listeners = []string{harness.p2pAddress}
harness.config.RPCListeners = []string{harness.rpcAddress}
harness.config.UTXOIndex = harness.utxoIndex
harness.config.AllowSubmitBlockWhenNotSynced = true
if harness.overrideDAGParams != nil {
harness.config.ActiveNetParams = harness.overrideDAGParams

View File

@@ -3,8 +3,6 @@ package integration
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/mining"
"math/rand"
"reflect"
"sync"
"testing"
@@ -119,7 +117,7 @@ func TestIBDWithPruning(t *testing.T) {
}
// This should trigger resolving the syncee virtual
syncerTip := mineNextBlockWithMockTimestamps(t, syncer, rand.New(rand.NewSource(time.Now().UnixNano())))
syncerTip := mineNextBlockWithMockTimestamps(t, syncer)
time.Sleep(time.Second)
synceeSelectedTip, err := syncee.rpcClient.GetSelectedTipHash()
if err != nil {
@@ -186,13 +184,12 @@ func TestIBDWithPruning(t *testing.T) {
// iteration to find the highest shared chain
// block.
const synceeOnlyBlocks = 2
rd := rand.New(rand.NewSource(time.Now().UnixNano()))
for i := 0; i < synceeOnlyBlocks; i++ {
mineNextBlockWithMockTimestamps(t, syncee1, rd)
mineNextBlockWithMockTimestamps(t, syncee1)
}
for i := 0; i < numBlocks-1; i++ {
mineNextBlockWithMockTimestamps(t, syncer, rd)
mineNextBlockWithMockTimestamps(t, syncer)
}
testSync(syncer, syncee1)
@@ -206,7 +203,7 @@ var currentMockTimestamp int64 = 0
// mineNextBlockWithMockTimestamps mines blocks with large timestamp differences
// between every two blocks. This is done to avoid the timestamp threshold validation
// of ibd-with-headers-proof
func mineNextBlockWithMockTimestamps(t *testing.T, harness *appHarness, rd *rand.Rand) *externalapi.DomainBlock {
func mineNextBlockWithMockTimestamps(t *testing.T, harness *appHarness) *externalapi.DomainBlock {
blockTemplate, err := harness.rpcClient.GetBlockTemplate(harness.miningAddress)
if err != nil {
t.Fatalf("Error getting block template: %+v", err)
@@ -226,7 +223,7 @@ func mineNextBlockWithMockTimestamps(t *testing.T, harness *appHarness, rd *rand
mutableHeader.SetTimeInMilliseconds(currentMockTimestamp)
block.Header = mutableHeader.ToImmutable()
mining.SolveBlock(block, rd)
solveBlock(block)
_, err = harness.rpcClient.SubmitBlock(block)
if err != nil {

View File

@@ -3,13 +3,28 @@ package integration
import (
"math/rand"
"testing"
"time"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/mining"
"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
"github.com/kaspanet/kaspad/util/difficulty"
)
// solveBlock mines the given block in-place: starting from a random nonce it
// tries successive values (wrapping around the full uint64 range) until the
// header's proof-of-work meets the target encoded in the Bits field, then
// returns the solved block. Panics in the practically impossible case that
// no nonce works.
func solveBlock(block *externalapi.DomainBlock) *externalapi.DomainBlock {
	targetDifficulty := difficulty.CompactToBig(block.Header.Bits())
	headerForMining := block.Header.ToMutable()
	startNonce := rand.Uint64()
	for nonce := startNonce; nonce != startNonce-1; nonce++ {
		headerForMining.SetNonce(nonce)
		if pow.CheckProofOfWorkWithTarget(headerForMining, targetDifficulty) {
			block.Header = headerForMining.ToImmutable()
			return block
		}
	}
	panic("Failed to solve block! This should never happen")
}
func mineNextBlock(t *testing.T, harness *appHarness) *externalapi.DomainBlock {
blockTemplate, err := harness.rpcClient.GetBlockTemplate(harness.miningAddress)
if err != nil {
@@ -21,8 +36,7 @@ func mineNextBlock(t *testing.T, harness *appHarness) *externalapi.DomainBlock {
t.Fatalf("Error converting block: %s", err)
}
rd := rand.New(rand.NewSource(time.Now().UnixNano()))
mining.SolveBlock(block, rd)
solveBlock(block)
_, err = harness.rpcClient.SubmitBlock(block)
if err != nil {

View File

@@ -14,7 +14,7 @@ import (
func TestGetHashrateString(t *testing.T) {
var results = map[string]string{
dagconfig.MainnetParams.Name: "1.53 GH/s",
dagconfig.MainnetParams.Name: "2 H/s",
dagconfig.TestnetParams.Name: "131.07 KH/s",
dagconfig.DevnetParams.Name: "131.07 KH/s",
dagconfig.SimnetParams.Name: "2.00 KH/s",

View File

@@ -11,7 +11,7 @@ const validCharacters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrs
const (
appMajor uint = 0
appMinor uint = 11
appPatch uint = 6
appPatch uint = 0
)
// appBuild is defined as a variable so it can be overridden during the build